Mirrored from https://github.com/tuna/tunasync.git, synced 2025-12-07 23:16:47 +00:00

16 commits

Author SHA1 Message Commit date
bigeagle
93194cde2e Merge pull request #60 from tuna/dev
Dev
2016-12-19 01:10:38 +08:00
bigeagle
aa4c31a32b feat(tunasynctl): implemented 'set-size' command to update a mirror size 2016-12-18 23:30:41 +08:00
bigeagle
4c6a407c17 feat(manager): implemented restful API for updating mirror size 2016-12-18 23:06:08 +08:00
bigeagle
939abaef9b feat(worker): TUNASYNC_LOG_DIR environment variable 2016-12-18 20:41:26 +08:00
bigeagle
d5a438462f feat(worker): map current uid and gid to docker 2016-12-18 14:28:48 +08:00
bigeagle
d4e07a7b29 fix(worker): keep the same working dir inside and outside of docker 2016-12-18 14:28:32 +08:00
bigeagle
9ac3193d50 Merge pull request #58 from tuna/dev
Dev
2016-12-12 23:46:22 +08:00
bigeagle
9ffb101cc7 chore(tunasync): bump version to 0.2-dev 2016-12-12 23:23:06 +08:00
bigeagle
fd277388d5 fix(worker): fixed multi-manager configuration
the worker must be registered on the manager

The `extra_status_managers` option is replaced by `api_base_list`, which overrides the `api_base` option
2016-12-12 23:17:50 +08:00
bigeagle
c5cba66786 Merge pull request #57 from tuna/dev
fix(cmd): make tunasynctl work with both HTTP and HTTPS
2016-12-11 02:45:19 +08:00
bigeagle
97e9725774 fix(cmd): make tunasynctl work with both HTTP and HTTPS 2016-12-11 02:13:19 +08:00
bigeagle
54740388b3 Merge pull request #56 from tuna/dev
Dev
2016-12-10 04:18:46 +08:00
bigeagle
7601e5793f fix(worker): improved cgroup creation 2016-12-10 04:14:39 +08:00
bigeagle
9645fd44ec ci(travis): Enabled docker on travis 2016-12-10 03:48:03 +08:00
bigeagle
ebd462be36 feat(worker): Implemented docker executor, close #55
if docker is enabled in the config file and `docker_image` is set in the mirror config, the command is executed via `docker run ...`
2016-12-10 02:44:45 +08:00
bigeagle
21c832c8fb fix(worker): disabled memory limit
rsync memory is no longer limited
2016-12-09 23:07:05 +08:00
19 files changed, with 515 insertions and 76 deletions

View file

@@ -1,3 +1,5 @@
+sudo: required
+
 language: go
 go:
 - 1.6
@@ -11,8 +13,14 @@ before_install:
 os:
 - linux
+services:
+- docker
 before_script:
-- sudo cgcreate -t travis -a travis -g memory:tunasync
+- lssubsys -am
+- sudo cgcreate -a $USER -t $USER -g cpu:tunasync
+- sudo cgcreate -a $USER -t $USER -g memory:tunasync
+- docker pull alpine
 script:
 - ./.testandcover.bash

View file

@@ -134,7 +134,7 @@ func main() {
 	app.Name = "tunasync"
 	app.Usage = "tunasync mirror job management tool"
 	app.EnableBashCompletion = true
-	app.Version = "0.1"
+	app.Version = tunasync.Version
 	app.Commands = []cli.Command{
 		{
 			Name: "manager",

View file

@@ -99,8 +99,11 @@ func initialize(c *cli.Context) error {
 	}
 	// parse base url of the manager server
-	baseURL = fmt.Sprintf("https://%s:%d",
-		cfg.ManagerAddr, cfg.ManagerPort)
+	if cfg.CACert != "" {
+		baseURL = fmt.Sprintf("https://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
+	} else {
+		baseURL = fmt.Sprintf("http://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
+	}
 	logger.Infof("Use manager address: %s", baseURL)
@@ -183,6 +186,57 @@ func listJobs(c *cli.Context) error {
 	return nil
 }
 
+func updateMirrorSize(c *cli.Context) error {
+	args := c.Args()
+	if len(args) != 2 {
+		return cli.NewExitError("Usage: tunasynctl -w <worker-id> <mirror> <size>", 1)
+	}
+	workerID := c.String("worker")
+	mirrorID := args.Get(0)
+	mirrorSize := args.Get(1)
+
+	msg := struct {
+		Name string `json:"name"`
+		Size string `json:"size"`
+	}{
+		Name: mirrorID,
+		Size: mirrorSize,
+	}
+
+	url := fmt.Sprintf(
+		"%s/workers/%s/jobs/%s/size", baseURL, workerID, mirrorID,
+	)
+	resp, err := tunasync.PostJSON(url, msg, client)
+	if err != nil {
+		return cli.NewExitError(
+			fmt.Sprintf("Failed to send request to manager: %s",
+				err.Error()),
+			1)
+	}
+	defer resp.Body.Close()
+	body, _ := ioutil.ReadAll(resp.Body)
+
+	if resp.StatusCode != http.StatusOK {
+		return cli.NewExitError(
+			fmt.Sprintf("Manager failed to update mirror size: %s", body), 1,
+		)
+	}
+
+	var status tunasync.MirrorStatus
+	json.Unmarshal(body, &status)
+	if status.Size != mirrorSize {
+		return cli.NewExitError(
+			fmt.Sprintf(
+				"Mirror size error, expecting %s, manager returned %s",
+				mirrorSize, status.Size,
+			), 1,
+		)
+	}
+
+	logger.Infof("Successfully updated mirror size to %s", mirrorSize)
+	return nil
+}
+
 func flushDisabledJobs(c *cli.Context) error {
 	req, err := http.NewRequest("DELETE", baseURL+flushDisabledPath, nil)
 	if err != nil {
@@ -322,7 +376,7 @@ func main() {
 	app := cli.NewApp()
 	app.EnableBashCompletion = true
-	app.Version = "0.1"
+	app.Version = tunasync.Version
 	app.Name = "tunasynctl"
 	app.Usage = "control client for tunasync manager"
@@ -382,6 +436,18 @@ func main() {
 			Flags:  commonFlags,
 			Action: initializeWrapper(listWorkers),
 		},
+		{
+			Name:  "set-size",
+			Usage: "Set mirror size",
+			Flags: append(
+				commonFlags,
+				cli.StringFlag{
+					Name:  "worker, w",
+					Usage: "specify worker-id of the mirror job",
+				},
+			),
+			Action: initializeWrapper(updateMirrorSize),
+		},
 		{
 			Name:  "start",
 			Usage: "Start a job",

View file

@@ -5,7 +5,7 @@ import (
 	"time"
 )
 
-// A StatusUpdateMsg represents a msg when
+// A MirrorStatus represents a msg when
 // a worker has done syncing
 type MirrorStatus struct {
 	Name string `json:"name"`

internal/version.go (new file, 3 lines added)
View file

@@ -0,0 +1,3 @@
+package internal
+
+const Version string = "0.2-dev"

View file

@@ -125,7 +125,7 @@ func (b *boltAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus
 		bucket := tx.Bucket([]byte(_statusBucketKey))
 		v := bucket.Get([]byte(id))
 		if v == nil {
-			return fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
+			return fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
 		}
 		err := json.Unmarshal(v, &m)
 		return err

View file

@@ -1,6 +1,7 @@
 package manager
 
 import (
+	"errors"
 	"fmt"
 	"net/http"
 	"time"
@@ -87,6 +88,7 @@ func GetTUNASyncManager(cfg *Config) *Manager {
 		workerValidateGroup.GET(":id/jobs", s.listJobsOfWorker)
 		// post job status
 		workerValidateGroup.POST(":id/jobs/:job", s.updateJobOfWorker)
+		workerValidateGroup.POST(":id/jobs/:job/size", s.updateMirrorSize)
 	}
 
 	// for tunasynctl to post commands
@@ -225,6 +227,12 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
 	var status MirrorStatus
 	c.BindJSON(&status)
 	mirrorName := status.Name
+	if len(mirrorName) == 0 {
+		s.returnErrJSON(
+			c, http.StatusBadRequest,
+			errors.New("Mirror Name should not be empty"),
+		)
+	}
 
 	curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
@@ -235,20 +243,19 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
 		status.LastUpdate = curStatus.LastUpdate
 	}
 
+	// Only message with meaningful size updates the mirror size
+	if len(curStatus.Size) > 0 && curStatus.Size != "unknown" {
+		if len(status.Size) == 0 || status.Size == "unknown" {
+			status.Size = curStatus.Size
+		}
+	}
+
 	// for logging
 	switch status.Status {
-	case Success:
-		logger.Noticef("Job [%s] @<%s> success", status.Name, status.Worker)
-	case Failed:
-		logger.Warningf("Job [%s] @<%s> failed", status.Name, status.Worker)
 	case Syncing:
 		logger.Noticef("Job [%s] @<%s> starts syncing", status.Name, status.Worker)
-	case Disabled:
-		logger.Noticef("Job [%s] @<%s> disabled", status.Name, status.Worker)
-	case Paused:
-		logger.Noticef("Job [%s] @<%s> paused", status.Name, status.Worker)
 	default:
-		logger.Infof("Job [%s] @<%s> status: %s", status.Name, status.Worker, status.Status)
+		logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status)
 	}
 
 	newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
@@ -263,6 +270,45 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
 	c.JSON(http.StatusOK, newStatus)
 }
 
+func (s *Manager) updateMirrorSize(c *gin.Context) {
+	workerID := c.Param("id")
+	type SizeMsg struct {
+		Name string `json:"name"`
+		Size string `json:"size"`
+	}
+	var msg SizeMsg
+	c.BindJSON(&msg)
+
+	mirrorName := msg.Name
+	status, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
+	if err != nil {
+		logger.Errorf(
+			"Failed to get status of mirror %s @<%s>: %s",
+			mirrorName, workerID, err.Error(),
+		)
+		s.returnErrJSON(c, http.StatusInternalServerError, err)
+		return
+	}
+
+	// Only message with meaningful size updates the mirror size
+	if len(msg.Size) > 0 || msg.Size != "unknown" {
+		status.Size = msg.Size
+	}
+
+	logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size)
+
+	newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
+	if err != nil {
+		err := fmt.Errorf("failed to update job %s of worker %s: %s",
+			mirrorName, workerID, err.Error(),
+		)
+		c.Error(err)
+		s.returnErrJSON(c, http.StatusInternalServerError, err)
+		return
+	}
+	c.JSON(http.StatusOK, newStatus)
+}
+
 func (s *Manager) handleClientCmd(c *gin.Context) {
 	var clientCmd ClientCmd
 	c.BindJSON(&clientCmd)

View file

@@ -99,7 +99,7 @@ func TestHTTPServer(t *testing.T) {
 			IsMaster: true,
 			Status:   Success,
 			Upstream: "mirrors.tuna.tsinghua.edu.cn",
-			Size:     "3GB",
+			Size:     "unknown",
 		}
 		resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
 		defer resp.Body.Close()
@@ -139,6 +139,47 @@ func TestHTTPServer(t *testing.T) {
 			So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 1*time.Second)
 		})
 
+		Convey("Update size of a valid mirror", func(ctx C) {
+			msg := struct {
+				Name string `json:"name"`
+				Size string `json:"size"`
+			}{status.Name, "5GB"}
+
+			url := fmt.Sprintf("%s/workers/%s/jobs/%s/size", baseURL, status.Worker, status.Name)
+			resp, err := PostJSON(url, msg, nil)
+			So(err, ShouldBeNil)
+			So(resp.StatusCode, ShouldEqual, http.StatusOK)
+
+			Convey("Get new size of a mirror", func(ctx C) {
+				var ms []MirrorStatus
+				resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
+
+				So(err, ShouldBeNil)
+				So(resp.StatusCode, ShouldEqual, http.StatusOK)
+				// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
+				m := ms[0]
+				So(m.Name, ShouldEqual, status.Name)
+				So(m.Worker, ShouldEqual, status.Worker)
+				So(m.Status, ShouldEqual, status.Status)
+				So(m.Upstream, ShouldEqual, status.Upstream)
+				So(m.Size, ShouldEqual, "5GB")
+				So(m.IsMaster, ShouldEqual, status.IsMaster)
+				So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
+			})
+		})
+
+		Convey("Update size of an invalid mirror", func(ctx C) {
+			msg := struct {
+				Name string `json:"name"`
+				Size string `json:"size"`
+			}{"Invalid mirror", "5GB"}
+
+			url := fmt.Sprintf("%s/workers/%s/jobs/%s/size", baseURL, status.Worker, status.Name)
+			resp, err := PostJSON(url, msg, nil)
+			So(err, ShouldBeNil)
+			So(resp.StatusCode, ShouldEqual, http.StatusInternalServerError)
+		})
+
 	})
 
 	Convey("update mirror status of an inexisted worker", func(ctx C) {

View file

@@ -24,7 +24,9 @@ type baseProvider struct {
 	cgroup *cgroupHook
 	zfs    *zfsHook
-	hooks  []jobHook
+	docker *dockerHook
+
+	hooks []jobHook
 }
 
 func (p *baseProvider) Name() string {
@@ -87,6 +89,8 @@ func (p *baseProvider) AddHook(hook jobHook) {
 		p.cgroup = v
 	case *zfsHook:
 		p.zfs = v
+	case *dockerHook:
+		p.docker = v
 	}
 	p.hooks = append(p.hooks, hook)
 }
@@ -103,6 +107,10 @@ func (p *baseProvider) ZFS() *zfsHook {
 	return p.zfs
 }
 
+func (p *baseProvider) Docker() *dockerHook {
+	return p.docker
+}
+
 func (p *baseProvider) prepareLogFile() error {
 	if p.LogFile() == "/dev/null" {
 		p.cmd.SetLogFile(nil)

View file

@@ -15,35 +15,31 @@ import (
 	"github.com/codeskyblue/go-sh"
 )
 
-var cgSubsystem = "cpu"
-
 type cgroupHook struct {
 	emptyHook
 	provider  mirrorProvider
 	basePath  string
 	baseGroup string
 	created   bool
+	subsystem string
+	memLimit  string
 }
 
-func initCgroup(basePath string) {
-	if _, err := os.Stat(filepath.Join(basePath, "memory")); err == nil {
-		cgSubsystem = "memory"
-		return
-	}
-	logger.Warning("Memory subsystem of cgroup not enabled, fallback to cpu")
-}
-
-func newCgroupHook(p mirrorProvider, basePath, baseGroup string) *cgroupHook {
+func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit string) *cgroupHook {
 	if basePath == "" {
 		basePath = "/sys/fs/cgroup"
 	}
 	if baseGroup == "" {
 		baseGroup = "tunasync"
 	}
+	if subsystem == "" {
+		subsystem = "cpu"
+	}
 	return &cgroupHook{
 		provider:  p,
 		basePath:  basePath,
 		baseGroup: baseGroup,
+		subsystem: subsystem,
 	}
 }
@@ -52,13 +48,15 @@ func (c *cgroupHook) preExec() error {
 	if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil {
 		return err
 	}
-	if cgSubsystem != "memory" {
+	if c.subsystem != "memory" {
 		return nil
 	}
-	if c.provider.Type() == provRsync || c.provider.Type() == provTwoStageRsync {
+	if c.memLimit != "" {
 		gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name())
 		return sh.Command(
-			"cgset", "-r", "memory.limit_in_bytes=512M", gname,
+			"cgset", "-r",
+			fmt.Sprintf("memory.limit_in_bytes=%s", c.memLimit),
+			gname,
 		).Run()
 	}
 	return nil
@@ -76,7 +74,7 @@ func (c *cgroupHook) postExec() error {
 func (c *cgroupHook) Cgroup() string {
 	name := c.provider.Name()
-	return fmt.Sprintf("%s:%s/%s", cgSubsystem, c.baseGroup, name)
+	return fmt.Sprintf("%s:%s/%s", c.subsystem, c.baseGroup, name)
 }
 
 func (c *cgroupHook) killAll() error {
@@ -87,7 +85,7 @@ func (c *cgroupHook) killAll() error {
 	readTaskList := func() ([]int, error) {
 		taskList := []int{}
-		taskFile, err := os.Open(filepath.Join(c.basePath, cgSubsystem, c.baseGroup, name, "tasks"))
+		taskFile, err := os.Open(filepath.Join(c.basePath, c.subsystem, c.baseGroup, name, "tasks"))
 		if err != nil {
 			return taskList, err
 		}
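
To make the new behaviour concrete, here is a small self-contained sketch (not part of the diff) of the cgset invocation the hook now issues: a memory limit is written only when the mirror's memory_limit option is set, so rsync jobs are no longer capped at a hard-coded 512M. The group and mirror names below are placeholders.

package main

import "fmt"

// cgsetArgs mimics the opt-in limit logic of the cgroup hook's preExec:
// with no memory_limit configured, no memory.limit_in_bytes is written.
func cgsetArgs(baseGroup, mirrorName, memLimit string) []string {
	if memLimit == "" {
		return nil
	}
	gname := fmt.Sprintf("%s/%s", baseGroup, mirrorName)
	return []string{"cgset", "-r", "memory.limit_in_bytes=" + memLimit, gname}
}

func main() {
	fmt.Println(cgsetArgs("tunasync", "example-mirror", ""))     // no limit configured: prints []
	fmt.Println(cgsetArgs("tunasync", "example-mirror", "512M")) // [cgset -r memory.limit_in_bytes=512M tunasync/example-mirror]
}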

View file

@@ -72,11 +72,14 @@ sleep 30
 		provider, err := newCmdProvider(c)
 		So(err, ShouldBeNil)
 
-		initCgroup("/sys/fs/cgroup")
-		cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
+		cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "")
 		provider.AddHook(cg)
 
 		err = cg.preExec()
+		if err != nil {
+			logger.Errorf("Failed to create cgroup")
+			return
+		}
 		So(err, ShouldBeNil)
 
 		go func() {
@@ -129,12 +132,15 @@ sleep 30
 		provider, err := newRsyncProvider(c)
 		So(err, ShouldBeNil)
 
-		initCgroup("/sys/fs/cgroup")
-		cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
+		cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "512M")
 		provider.AddHook(cg)
 
-		cg.preExec()
-		if cgSubsystem == "memory" {
+		err = cg.preExec()
+		if err != nil {
+			logger.Errorf("Failed to create cgroup")
+			return
+		}
+		if cg.subsystem == "memory" {
 			memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes"))
 			So(err, ShouldBeNil)
 			So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))

View file

@@ -64,6 +64,7 @@ func (p *cmdProvider) Start() error {
 		"TUNASYNC_MIRROR_NAME":  p.Name(),
 		"TUNASYNC_WORKING_DIR":  p.WorkingDir(),
 		"TUNASYNC_UPSTREAM_URL": p.upstreamURL,
+		"TUNASYNC_LOG_DIR":      p.LogDir(),
 		"TUNASYNC_LOG_FILE":     p.LogFile(),
 	}
 	for k, v := range p.env {

View file

@@ -38,6 +38,7 @@ type Config struct {
 	Server  serverConfig  `toml:"server"`
 	Cgroup  cgroupConfig  `toml:"cgroup"`
 	ZFS     zfsConfig     `toml:"zfs"`
+	Docker  dockerConfig  `toml:"docker"`
 	Include includeConfig `toml:"include"`
 	Mirrors []mirrorConfig `toml:"mirrors"`
 }
@@ -54,12 +55,20 @@ type globalConfig struct {
 }
 
 type managerConfig struct {
 	APIBase string `toml:"api_base"`
-	CACert          string   `toml:"ca_cert"`
-	ExtraStatusAPIs []string `toml:"extra_status_managers"`
+	// this option overrides the APIBase
+	APIList []string `toml:"api_base_list"`
+	CACert  string   `toml:"ca_cert"`
 	// Token string `toml:"token"`
 }
 
+func (mc managerConfig) APIBaseList() []string {
+	if len(mc.APIList) > 0 {
+		return mc.APIList
+	}
+	return []string{mc.APIBase}
+}
+
 type serverConfig struct {
 	Hostname string `toml:"hostname"`
 	Addr     string `toml:"listen_addr"`
@@ -69,9 +78,16 @@ type serverConfig struct {
 }
 
 type cgroupConfig struct {
 	Enable   bool   `toml:"enable"`
 	BasePath string `toml:"base_path"`
 	Group    string `toml:"group"`
+	Subsystem string `toml:"subsystem"`
+}
+
+type dockerConfig struct {
+	Enable  bool     `toml:"enable"`
+	Volumes []string `toml:"volumes"`
+	Options []string `toml:"options"`
 }
 
 type zfsConfig struct {
@@ -111,6 +127,12 @@ type mirrorConfig struct {
 	Username      string `toml:"username"`
 	Password      string `toml:"password"`
 	Stage1Profile string `toml:"stage1_profile"`
+
+	MemoryLimit string `toml:"memory_limit"`
+
+	DockerImage   string   `toml:"docker_image"`
+	DockerVolumes []string `toml:"docker_volumes"`
+	DockerOptions []string `toml:"docker_options"`
 }
 
 // LoadConfig loads configuration
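
A hedged example of what a worker configuration using the new options could look like, decoded with the same toml tags as the structs in the diff above. The TOML loader is assumed to be github.com/BurntSushi/toml, the struct definitions below are trimmed copies made for this illustration, and every address, path, image and mirror name is invented.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Minimal copies of the fields exercised here; the full structs are shown in the diff above.
type managerConfig struct {
	APIBase string   `toml:"api_base"`
	APIList []string `toml:"api_base_list"` // overrides api_base when non-empty
}

func (mc managerConfig) APIBaseList() []string {
	if len(mc.APIList) > 0 {
		return mc.APIList
	}
	return []string{mc.APIBase}
}

type dockerConfig struct {
	Enable  bool     `toml:"enable"`
	Volumes []string `toml:"volumes"`
}

type mirrorConfig struct {
	Name          string   `toml:"name"`
	MemoryLimit   string   `toml:"memory_limit"`
	DockerImage   string   `toml:"docker_image"`
	DockerVolumes []string `toml:"docker_volumes"`
}

func main() {
	const workerTOML = `
[manager]
api_base = "https://manager-a.example.com:12345"
api_base_list = [
    "https://manager-a.example.com:12345",
    "https://manager-b.example.com:12345",
]

[docker]
enable = true
volumes = ["/etc/localtime:/etc/localtime:ro"]

[[mirrors]]
name = "example"
memory_limit = "512M"
docker_image = "alpine:latest"
docker_volumes = ["/srv/keys:/srv/keys:ro"]
`
	var cfg struct {
		Manager managerConfig  `toml:"manager"`
		Docker  dockerConfig   `toml:"docker"`
		Mirrors []mirrorConfig `toml:"mirrors"`
	}
	if _, err := toml.Decode(workerTOML, &cfg); err != nil {
		panic(err)
	}
	// The worker registers on, and reports to, every manager in api_base_list.
	fmt.Println(cfg.Manager.APIBaseList())
	// A mirror with docker_image set runs inside docker; memory_limit feeds the cgroup hook otherwise.
	fmt.Println(cfg.Mirrors[0].DockerImage, cfg.Mirrors[0].MemoryLimit)
}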

worker/docker.go (new file, 95 lines added)
View file

@@ -0,0 +1,95 @@
+package worker
+
+import (
+	"fmt"
+	"os"
+)
+
+type dockerHook struct {
+	emptyHook
+	provider mirrorProvider
+	image    string
+	volumes  []string
+	options  []string
+}
+
+func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
+	volumes := []string{}
+	volumes = append(volumes, gCfg.Volumes...)
+	volumes = append(volumes, mCfg.DockerVolumes...)
+
+	options := []string{}
+	options = append(options, gCfg.Options...)
+	options = append(options, mCfg.DockerOptions...)
+
+	return &dockerHook{
+		provider: p,
+		image:    mCfg.DockerImage,
+		volumes:  volumes,
+		options:  options,
+	}
+}
+
+func (d *dockerHook) preExec() error {
+	p := d.provider
+	logDir := p.LogDir()
+	logFile := p.LogFile()
+	workingDir := p.WorkingDir()
+
+	if _, err := os.Stat(workingDir); os.IsNotExist(err) {
+		logger.Debugf("Making dir %s", workingDir)
+		if err = os.MkdirAll(workingDir, 0755); err != nil {
+			return fmt.Errorf("Error making dir %s: %s", workingDir, err.Error())
+		}
+	}
+
+	// Override workingDir
+	ctx := p.EnterContext()
+	ctx.Set(
+		"volumes", []string{
+			fmt.Sprintf("%s:%s", logDir, logDir),
+			fmt.Sprintf("%s:%s", logFile, logFile),
+			fmt.Sprintf("%s:%s", workingDir, workingDir),
+		},
+	)
+	return nil
+}
+
+func (d *dockerHook) postExec() error {
+	// sh.Command(
+	// 	"docker", "rm", "-f", d.Name(),
+	// ).Run()
+	d.provider.ExitContext()
+	return nil
+}
+
+// Volumes returns the configured volumes and
+// runtime-needed volumes, including mirror dirs
+// and log files
+func (d *dockerHook) Volumes() []string {
+	vols := make([]string, len(d.volumes))
+	copy(vols, d.volumes)
+
+	p := d.provider
+	ctx := p.Context()
+	if ivs, ok := ctx.Get("volumes"); ok {
+		vs := ivs.([]string)
+		vols = append(vols, vs...)
+	}
+	return vols
+}
+
+func (d *dockerHook) LogFile() string {
+	p := d.provider
+	ctx := p.Context()
+	if iv, ok := ctx.Get(_LogFileKey + ":docker"); ok {
+		v := iv.(string)
+		return v
+	}
+	return p.LogFile()
+}
+
+func (d *dockerHook) Name() string {
+	p := d.provider
+	return "tunasync-job-" + p.Name()
+}

worker/docker_test.go (new file, 97 lines added)
View file

@@ -0,0 +1,97 @@
+package worker
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/codeskyblue/go-sh"
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func getDockerByName(name string) (string, error) {
+	// docker ps -f 'name=$name' --format '{{.Names}}'
+	out, err := sh.Command(
+		"docker", "ps",
+		"--filter", "name="+name,
+		"--format", "{{.Names}}",
+	).Output()
+	return string(out), err
+}
+
+func TestDocker(t *testing.T) {
+	Convey("Docker Should Work", t, func(ctx C) {
+		tmpDir, err := ioutil.TempDir("", "tunasync")
+		defer os.RemoveAll(tmpDir)
+		So(err, ShouldBeNil)
+		cmdScript := filepath.Join(tmpDir, "cmd.sh")
+		tmpFile := filepath.Join(tmpDir, "log_file")
+		expectedOutput := "HELLO_WORLD"
+
+		c := cmdConfig{
+			name:        "tuna-docker",
+			upstreamURL: "http://mirrors.tuna.moe/",
+			command:     "/bin/cmd.sh",
+			workingDir:  tmpDir,
+			logDir:      tmpDir,
+			logFile:     tmpFile,
+			interval:    600 * time.Second,
+			env: map[string]string{
+				"TEST_CONTENT": expectedOutput,
+			},
+		}
+
+		cmdScriptContent := `#!/bin/sh
+echo ${TEST_CONTENT}
+sleep 10
+`
+		err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
+		So(err, ShouldBeNil)
+
+		provider, err := newCmdProvider(c)
+		So(err, ShouldBeNil)
+
+		d := &dockerHook{
+			provider: provider,
+			image:    "alpine",
+			volumes: []string{
+				fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
+			},
+		}
+		provider.AddHook(d)
+		So(provider.Docker(), ShouldNotBeNil)
+
+		err = d.preExec()
+		So(err, ShouldBeNil)
+
+		go func() {
+			err = provider.Run()
+			ctx.So(err, ShouldNotBeNil)
+		}()
+
+		time.Sleep(1 * time.Second)
+		// assert container running
+		names, err := getDockerByName(d.Name())
+		So(err, ShouldBeNil)
+		So(names, ShouldEqual, d.Name()+"\n")
+
+		err = provider.Terminate()
+		So(err, ShouldBeNil)
+
+		// container should be terminated and removed
+		names, err = getDockerByName(d.Name())
+		So(err, ShouldBeNil)
+		So(names, ShouldEqual, "")
+
+		// check log content
+		loggedContent, err := ioutil.ReadFile(provider.LogFile())
+		So(err, ShouldBeNil)
+		So(string(loggedContent), ShouldEqual, expectedOutput+"\n")
+
+		d.postExec()
+	})
+}

View file

@@ -71,6 +71,7 @@ func (h *execPostHook) Do() error {
 		"TUNASYNC_MIRROR_NAME":     p.Name(),
 		"TUNASYNC_WORKING_DIR":     p.WorkingDir(),
 		"TUNASYNC_UPSTREAM_URL":    p.Upstream(),
+		"TUNASYNC_LOG_DIR":         p.LogDir(),
 		"TUNASYNC_LOG_FILE":        p.LogFile(),
 		"TUNASYNC_JOB_EXIT_STATUS": exitStatus,
 	}

View file

@@ -38,6 +38,8 @@ type mirrorProvider interface {
 	Cgroup() *cgroupHook
 	// ZFS
 	ZFS() *zfsHook
+	// Docker
+	Docker() *dockerHook
 
 	AddHook(hook jobHook)
 	Hooks() []jobHook
@@ -169,10 +171,17 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
 		provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
 	}
 
-	// Add Cgroup Hook
-	if cfg.Cgroup.Enable {
+	// Add Docker Hook
+	if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
+		provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))
+	} else if cfg.Cgroup.Enable {
+		// Add Cgroup Hook
 		provider.AddHook(
-			newCgroupHook(provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group),
+			newCgroupHook(
+				provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group,
+				cfg.Cgroup.Subsystem, mirror.MemoryLimit,
+			),
 		)
 	}

View file

@@ -2,6 +2,7 @@ package worker
 
 import (
 	"errors"
+	"fmt"
 	"os"
 	"os/exec"
 	"strings"
@@ -9,6 +10,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/codeskyblue/go-sh"
 	"golang.org/x/sys/unix"
 )
@@ -31,11 +33,44 @@ type cmdJob struct {
 func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, env map[string]string) *cmdJob {
 	var cmd *exec.Cmd
 
-	if provider.Cgroup() != nil {
+	if d := provider.Docker(); d != nil {
+		c := "docker"
+		args := []string{
+			"run", "--rm",
+			"-a", "STDOUT", "-a", "STDERR",
+			"--name", d.Name(),
+			"-w", workingDir,
+		}
+		// specify user
+		args = append(
+			args, "-u",
+			fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()),
+		)
+		// add volumes
+		for _, vol := range d.Volumes() {
+			logger.Debugf("volume: %s", vol)
+			args = append(args, "-v", vol)
+		}
+		// set env
+		for k, v := range env {
+			kv := fmt.Sprintf("%s=%s", k, v)
+			args = append(args, "-e", kv)
+		}
+		// apply options
+		args = append(args, d.options...)
+		// apply image and command
+		args = append(args, d.image)
+		// apply command
+		args = append(args, cmdAndArgs...)
+		cmd = exec.Command(c, args...)
+	} else if provider.Cgroup() != nil {
 		c := "cgexec"
 		args := []string{"-g", provider.Cgroup().Cgroup()}
 		args = append(args, cmdAndArgs...)
 		cmd = exec.Command(c, args...)
 	} else {
 		if len(cmdAndArgs) == 1 {
 			cmd = exec.Command(cmdAndArgs[0])
@@ -48,25 +83,28 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
 		}
 	}
 
-	logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
-	if _, err := os.Stat(workingDir); os.IsNotExist(err) {
-		logger.Debugf("Making dir %s", workingDir)
-		if err = os.MkdirAll(workingDir, 0755); err != nil {
-			logger.Errorf("Error making dir %s", workingDir)
-		}
-	}
-	cmd.Dir = workingDir
-	cmd.Env = newEnviron(env, true)
+	if provider.Docker() == nil {
+		logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
+		if _, err := os.Stat(workingDir); os.IsNotExist(err) {
+			logger.Debugf("Making dir %s", workingDir)
+			if err = os.MkdirAll(workingDir, 0755); err != nil {
+				logger.Errorf("Error making dir %s: %s", workingDir, err.Error())
+			}
+		}
+		cmd.Dir = workingDir
+		cmd.Env = newEnviron(env, true)
+	}
 
 	return &cmdJob{
 		cmd:        cmd,
 		workingDir: workingDir,
 		env:        env,
+		provider:   provider,
 	}
 }
 
 func (c *cmdJob) Start() error {
+	// logger.Debugf("Command start: %v", c.cmd.Args)
 	c.finished = make(chan empty, 1)
 	return c.cmd.Start()
 }
@@ -95,6 +133,14 @@ func (c *cmdJob) Terminate() error {
 	if c.cmd == nil || c.cmd.Process == nil {
 		return errProcessNotStarted
 	}
 
+	if d := c.provider.Docker(); d != nil {
+		sh.Command(
+			"docker", "stop", "-t", "2", d.Name(),
+		).Run()
+		return nil
+	}
+
 	err := unix.Kill(c.cmd.Process.Pid, syscall.SIGTERM)
 	if err != nil {
 		return err
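
To make the wrapping concrete, here is a small self-contained sketch that assembles the same kind of docker run argument list as the code above and prints it. The container name, image, working directory, volume and environment values are placeholders, and per-mirror docker_options are omitted from this sketch.

package main

import (
	"fmt"
	"os"
)

// dockerRunArgs follows the argument order used above: keep stdout/stderr attached,
// name the container after the job, keep the same working directory inside and
// outside the container, run as the current uid/gid, then bind volumes and env.
func dockerRunArgs(name, image, workingDir string, volumes []string, env map[string]string, cmdAndArgs []string) []string {
	args := []string{
		"run", "--rm",
		"-a", "STDOUT", "-a", "STDERR",
		"--name", name,
		"-w", workingDir,
		"-u", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()),
	}
	for _, vol := range volumes {
		args = append(args, "-v", vol)
	}
	for k, v := range env {
		args = append(args, "-e", fmt.Sprintf("%s=%s", k, v))
	}
	args = append(args, image)
	return append(args, cmdAndArgs...)
}

func main() {
	argv := dockerRunArgs(
		"tunasync-job-example", "alpine:latest", "/srv/mirror/example",
		[]string{"/srv/mirror/example:/srv/mirror/example"},
		map[string]string{"TUNASYNC_MIRROR_NAME": "example"},
		[]string{"/bin/sync.sh"},
	)
	fmt.Println(append([]string{"docker"}, argv...))
}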

View file

@@ -55,9 +55,6 @@ func GetTUNASyncWorker(cfg *Config) *Worker {
 		w.httpClient = httpClient
 	}
 
-	if cfg.Cgroup.Enable {
-		initCgroup(cfg.Cgroup.BasePath)
-	}
 	w.initJobs()
 	w.makeHTTPServer()
 	tunasyncWorker = w
@@ -389,18 +386,17 @@ func (w *Worker) URL() string {
 }
 
 func (w *Worker) registorWorker() {
-	url := fmt.Sprintf(
-		"%s/workers",
-		w.cfg.Manager.APIBase,
-	)
-
 	msg := WorkerStatus{
 		ID:  w.Name(),
 		URL: w.URL(),
 	}
 
-	if _, err := PostJSON(url, msg, w.httpClient); err != nil {
-		logger.Errorf("Failed to register worker")
+	for _, root := range w.cfg.Manager.APIBaseList() {
+		url := fmt.Sprintf("%s/workers", root)
+		logger.Debugf("register on manager url: %s", url)
+		if _, err := PostJSON(url, msg, w.httpClient); err != nil {
+			logger.Errorf("Failed to register worker")
+		}
 	}
 }
@@ -416,12 +412,11 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
 		ErrorMsg: jobMsg.msg,
 	}
 
-	apiBases := []string{w.cfg.Manager.APIBase}
-	apiBases = append(apiBases, w.cfg.Manager.ExtraStatusAPIs...)
-	for _, root := range apiBases {
+	for _, root := range w.cfg.Manager.APIBaseList() {
 		url := fmt.Sprintf(
 			"%s/workers/%s/jobs/%s", root, w.Name(), jobMsg.name,
 		)
+		logger.Debugf("reporting on manager url: %s", url)
 		if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
 			logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
 		}
@@ -430,12 +425,9 @@
 func (w *Worker) fetchJobStatus() []MirrorStatus {
 	var mirrorList []MirrorStatus
+	apiBase := w.cfg.Manager.APIBaseList()[0]
 
-	url := fmt.Sprintf(
-		"%s/workers/%s/jobs",
-		w.cfg.Manager.APIBase,
-		w.Name(),
-	)
+	url := fmt.Sprintf("%s/workers/%s/jobs", apiBase, w.Name())
 
 	if _, err := GetJSON(url, &mirrorList, w.httpClient); err != nil {
 		logger.Errorf("Failed to fetch job status: %s", err.Error())