Mirrored from https://github.com/tuna/tunasync.git
Synced 2025-12-08 15:36:47 +00:00

Compare commits

14 commits
| Author | SHA1 | Commit date |
|---|---|---|
|  | a065a11b38 |  |
|  | b4fe4db82a |  |
|  | 839363aaaa |  |
|  | 08aee8eb42 |  |
|  | 501f77ee41 |  |
|  | 9e91fd706e |  |
|  | 94cf0b4bdb |  |
|  | 8fd2059013 |  |
|  | 6b56c4254c |  |
|  | 3872c41607 |  |
|  | 30259da0f0 |  |
|  | 4854d9b981 |  |
|  | 06fce98c00 |  |
|  | 8408236646 |  |
README.md (16 lines changed)

@@ -40,14 +40,14 @@ Pre-built binary for Linux x86_64 is available at [Github releases](https://gith
 # Job Run Process
 
-  PreSyncing         Syncing                             Success
-+-----------+     +-----------+    +-------------+     +--------------+
-|  pre-job  +--+->|  job run  +--->|  post-exec  +-+-->| post-success |
-+-----------+  ^  +-----------+    +-------------+ |   +--------------+
-               |                                   |
-               |      +-----------------+          | Failed
-               +------+    post-fail    |<---------+
-                      +-----------------+
+  PreSyncing                      Syncing                              Success
++-----------+     +----------+    +-----------+    +-------------+     +--------------+
+|  pre-job  +--+->| pre-exec +--->|  job run  +--->|  post-exec  +-+-->| post-success |
++-----------+  ^  +----------+    +-----------+    +-------------+ |   +--------------+
+               |                                                   |
+               |                +-----------------+                | Failed
+               +----------------+    post-fail    |<---------------+
+                                +-----------------+
 ```
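Each stage in the diagram maps to a method on the worker's job-hook interface; the new `pre-exec` box gives hooks a step immediately before the sync command starts. A rough sketch of that interface, inferred from the hooks touched in this changeset (the actual declaration in the worker package may differ in detail):

```go
// jobHook methods fire at the stages shown in the diagram above.
// An error from any stage aborts the run and routes it to post-fail.
type jobHook interface {
    preJob() error      // before the job starts (PreSyncing)
    preExec() error     // just before the sync command is executed
    postExec() error    // just after the sync command exits
    postSuccess() error // only after a successful sync
    postFail() error    // only after a failed sync
}
```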
@@ -2,7 +2,7 @@
 First, make sure the config file for tunasynctl is in place: `~/.config/tunasync/ctl.conf`
 
-```
+```toml
 manager_addr = "127.0.0.1"
 manager_port = 12345
 ca_cert = ""
@@ -10,7 +10,7 @@ ca_cert = ""
 
 Then:
 
-```
+```shell
 $ tunasynctl disable -w <worker_id> <mirror_name>
 $ tunasynctl flush
 ```
@@ -18,8 +18,9 @@ $ tunasynctl flush
 
 ## Hot-reload `worker.conf`
 
-`$ tunasynctl reload -w <worker_id>`
+```shell
+$ tunasynctl reload -w <worker_id>
+```
 
 e.g. to remove the `elvish` mirror from `test_worker`:
 
@@ -29,7 +30,7 @@ e.g. to remove the `elvish` mirror from `test_worker`:
 
 3. Then run:
 
-```
+```shell
 $ tunasynctl reload -w test_worker
 $ tunasynctl disable -w test_worker elvish
 $ tunasynctl flush
@@ -40,15 +41,53 @@ $ tunasynctl flush
 
 ## Remove a worker
 
-`$ tunasynctl rm-worker -w <worker_id>`
+```shell
+$ tunasynctl rm-worker -w <worker_id>
+```
 
-e.g. `$ tunasynctl rm-worker -w test_worker`
+e.g.
+
+```shell
+$ tunasynctl rm-worker -w test_worker
+```
 
 ## Update the size of a mirror
 
-`$ tunasynctl set-size -w <worker_id> <mirror_name> <size>`
+```shell
+$ tunasynctl set-size -w <worker_id> <mirror_name> <size>
+```
 
 The trailing <size> argument is either set by the operator or generated by some scheduled script.
 
 Since `du -s` is rather time-consuming, the mirror size can be read directly from the rsync log file instead.
+
+## Btrfs filesystem snapshots
+
+If the mirror files are stored on a partition formatted as Btrfs, the snapshot feature provided by Btrfs can be enabled. For each mirror, tunasync updates its snapshot after every successful sync.
+
+Add the following configuration to `worker.conf` to enable Btrfs snapshots:
+
+```toml
+[btrfs_snapshot]
+enable = true
+snapshot_path = "/path/to/snapshot/directory"
+```
+
+Here `snapshot_path` is the directory holding the snapshots. If it is served as the published version, the sync process becomes atomic from the mirror users' point of view, so users never receive files that are still in an intermediate (not fully synced) state.
+
+A snapshot path can also be set for an individual mirror inside `[[mirrors]]`, e.g.:
+
+```toml
+[[mirrors]]
+name = "elvish"
+provider = "rsync"
+upstream = "rsync://rsync.elvish.io/elvish/"
+interval = 1440
+snapshot_path = "/data/publish/elvish"
+```
+
+**Tip:**
+
+If the user running tunasync does not have root privileges, make sure that user has write and execute permissions on both the mirror working directory and the snapshot directory, and mount the relevant Btrfs partition with the [`user_subvol_rm_allowed` option](https://btrfs.wiki.kernel.org/index.php/Manpage/btrfs(5)#MOUNT_OPTIONS).
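The `set-size` tip above can be automated. Below is a hypothetical cron helper that pulls the size out of an rsync log instead of running `du -s`; the log path, worker name, and mirror name are illustrative, and the pattern assumes rsync ran with `--stats` so the log contains a `Total file size: N bytes` line:

```shell
#!/bin/sh
# Extract the last "Total file size" figure from the rsync log
# and push it to the manager. All names below are examples.
log=/var/log/tunasync/elvish/latest
size=$(awk -F': ' '/Total file size/ { gsub(/[^0-9]/, "", $2); print $2 }' "$log" | tail -n 1)
tunasynctl set-size -w test_worker elvish "$size"
```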
@@ -109,7 +109,7 @@ func TestHTTPServer(t *testing.T) {
             So(res[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
         })
 
         Convey("flush disabled jobs", func(ctx C) {
             req, err := http.NewRequest("DELETE", baseURL+"/jobs/disabled", nil)
             So(err, ShouldBeNil)
             clt := &http.Client{}
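The test above exercises the manager endpoint behind the flush operation. For a manual check against a manager listening at the address from `ctl.conf` (host and port here are the example values from the documentation, not fixed defaults):

```shell
# Flush all disabled jobs via the manager's HTTP API
curl -X DELETE http://127.0.0.1:12345/jobs/disabled
```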
worker/btrfs_snapshot_hook.go (new file, 90 lines)

@@ -0,0 +1,90 @@
+package worker
+
+import (
+    "fmt"
+    "os"
+    "path/filepath"
+
+    "github.com/dennwc/btrfs"
+)
+
+type btrfsSnapshotHook struct {
+    provider           mirrorProvider
+    mirrorSnapshotPath string
+}
+
+// the user who runs the jobs (typically `tunasync`) should be granted the permission to run btrfs commands
+// TODO: check if the filesystem is Btrfs
+func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
+    mirrorSnapshotPath := mirror.SnapshotPath
+    if mirrorSnapshotPath == "" {
+        mirrorSnapshotPath = filepath.Join(snapshotPath, provider.Name())
+    }
+    return &btrfsSnapshotHook{
+        provider:           provider,
+        mirrorSnapshotPath: mirrorSnapshotPath,
+    }
+}
+
+// check if path `snapshotPath/providerName` exists
+// Case 1: Not exists => create a new subvolume
+// Case 2: Exists as a subvolume => nothing to do
+// Case 3: Exists as a directory => error detected
+func (h *btrfsSnapshotHook) preJob() error {
+    path := h.provider.WorkingDir()
+    if _, err := os.Stat(path); os.IsNotExist(err) {
+        // create subvolume
+        err := btrfs.CreateSubVolume(path)
+        if err != nil {
+            logger.Errorf("failed to create Btrfs subvolume %s: %s", path, err.Error())
+            return err
+        }
+        logger.Noticef("created new Btrfs subvolume %s", path)
+    } else {
+        if is, err := btrfs.IsSubVolume(path); err != nil {
+            return err
+        } else if !is {
+            return fmt.Errorf("path %s exists but isn't a Btrfs subvolume", path)
+        }
+    }
+    return nil
+}
+
+func (h *btrfsSnapshotHook) preExec() error {
+    return nil
+}
+
+func (h *btrfsSnapshotHook) postExec() error {
+    return nil
+}
+
+// delete old snapshot if exists, then create a new snapshot
+func (h *btrfsSnapshotHook) postSuccess() error {
+    if _, err := os.Stat(h.mirrorSnapshotPath); !os.IsNotExist(err) {
+        isSubVol, err := btrfs.IsSubVolume(h.mirrorSnapshotPath)
+        if err != nil {
+            return err
+        } else if !isSubVol {
+            return fmt.Errorf("path %s exists and isn't a Btrfs snapshot", h.mirrorSnapshotPath)
+        }
+        // is old snapshot => delete it
+        if err := btrfs.DeleteSubVolume(h.mirrorSnapshotPath); err != nil {
+            logger.Errorf("failed to delete old Btrfs snapshot %s", h.mirrorSnapshotPath)
+            return err
+        }
+        logger.Noticef("deleted old snapshot %s", h.mirrorSnapshotPath)
+    }
+    // create a new writable snapshot
+    // (the snapshot is writable so that it can be deleted easily)
+    if err := btrfs.SnapshotSubVolume(h.provider.WorkingDir(), h.mirrorSnapshotPath, false); err != nil {
+        logger.Errorf("failed to create new Btrfs snapshot %s", h.mirrorSnapshotPath)
+        return err
+    }
+    logger.Noticef("created new Btrfs snapshot %s", h.mirrorSnapshotPath)
+    return nil
+}
+
+// keep the old snapshot => nothing to do
+func (h *btrfsSnapshotHook) postFail() error {
+    return nil
+}
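For readers unfamiliar with the `github.com/dennwc/btrfs` calls used above, the hook's effect is roughly equivalent to the following `btrfs` CLI operations (the paths are illustrative; the hook itself goes through the library, not the CLI):

```shell
# preJob, first run: the working directory becomes a subvolume
btrfs subvolume create /data/mirrors/elvish

# postSuccess: drop the previous snapshot, then publish a new
# writable snapshot of the freshly synced working directory
btrfs subvolume delete /data/publish/elvish
btrfs subvolume snapshot /data/mirrors/elvish /data/publish/elvish
```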
@@ -17,7 +17,6 @@ import (
 type cgroupHook struct {
     emptyHook
-    provider  mirrorProvider
     basePath  string
     baseGroup string
     created   bool
@@ -36,7 +35,9 @@ func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit st
         subsystem = "cpu"
     }
     return &cgroupHook{
-        provider:  p,
+        emptyHook: emptyHook{
+            provider: p,
+        },
         basePath:  basePath,
         baseGroup: baseGroup,
         subsystem: subsystem,
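This hunk is the first of a mechanical refactor that repeats in the docker, exec-post, log-limiter, and ZFS hooks below: the per-hook `provider` field moves into the embedded `emptyHook`, so every hook inherits the field plus no-op defaults for the stages it does not override. A minimal sketch of the pattern, with the method set taken from the interface sketch earlier (the real `emptyHook` lives in the worker package):

```go
// emptyHook carries the provider and satisfies the whole hook
// interface with no-ops, so concrete hooks override only the
// stages they actually need.
type emptyHook struct {
    provider mirrorProvider
}

func (h *emptyHook) preJob() error      { return nil }
func (h *emptyHook) preExec() error     { return nil }
func (h *emptyHook) postExec() error    { return nil }
func (h *emptyHook) postSuccess() error { return nil }
func (h *emptyHook) postFail() error    { return nil }
```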
@@ -33,14 +33,15 @@ func (p *providerEnum) UnmarshalText(text []byte) error {
 
 // Config represents worker config options
 type Config struct {
     Global  globalConfig  `toml:"global"`
     Manager managerConfig `toml:"manager"`
     Server  serverConfig  `toml:"server"`
     Cgroup  cgroupConfig  `toml:"cgroup"`
     ZFS     zfsConfig     `toml:"zfs"`
-    Docker  dockerConfig  `toml:"docker"`
-    Include includeConfig `toml:"include"`
-    Mirrors []mirrorConfig `toml:"mirrors"`
+    BtrfsSnapshot btrfsSnapshotConfig `toml:"btrfs_snapshot"`
+    Docker        dockerConfig        `toml:"docker"`
+    Include       includeConfig       `toml:"include"`
+    Mirrors       []mirrorConfig      `toml:"mirrors"`
 }
 
 type globalConfig struct {
@@ -96,6 +97,11 @@ type zfsConfig struct {
     Zpool string `toml:"zpool"`
 }
 
+type btrfsSnapshotConfig struct {
+    Enable       bool   `toml:"enable"`
+    SnapshotPath string `toml:"snapshot_path"`
+}
+
 type includeConfig struct {
     IncludeMirrors string `toml:"include_mirrors"`
 }
@@ -136,6 +142,8 @@ type mirrorConfig struct {
     DockerImage   string   `toml:"docker_image"`
     DockerVolumes []string `toml:"docker_volumes"`
     DockerOptions []string `toml:"docker_options"`
+
+    SnapshotPath string `toml:"snapshot_path"`
 }
 
 // LoadConfig loads configuration
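The new fields map one-to-one onto the TOML keys documented earlier. A self-contained sketch of how such a config decodes, assuming the BurntSushi `toml` package (whether `LoadConfig` uses exactly this package is an assumption here):

```go
package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

type btrfsSnapshotConfig struct {
    Enable       bool   `toml:"enable"`
    SnapshotPath string `toml:"snapshot_path"`
}

type mirrorConfig struct {
    Name         string `toml:"name"`
    SnapshotPath string `toml:"snapshot_path"` // per-mirror override
}

type config struct {
    BtrfsSnapshot btrfsSnapshotConfig `toml:"btrfs_snapshot"`
    Mirrors       []mirrorConfig      `toml:"mirrors"`
}

func main() {
    raw := `
[btrfs_snapshot]
enable = true
snapshot_path = "/data/publish"

[[mirrors]]
name = "elvish"
snapshot_path = "/data/publish/elvish"
`
    var cfg config
    if _, err := toml.Decode(raw, &cfg); err != nil {
        panic(err)
    }
    // A per-mirror path wins over snapshot_path/<name> when set.
    fmt.Println(cfg.BtrfsSnapshot.SnapshotPath, cfg.Mirrors[0].SnapshotPath)
}
```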
@@ -7,10 +7,9 @@ import (
 
 type dockerHook struct {
     emptyHook
-    provider mirrorProvider
     image    string
     volumes  []string
     options  []string
 }
 
 func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
@@ -23,10 +22,12 @@ func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dock
     options = append(options, mCfg.DockerOptions...)
 
     return &dockerHook{
-        provider: p,
-        image:    mCfg.DockerImage,
-        volumes:  volumes,
-        options:  options,
+        emptyHook: emptyHook{
+            provider: p,
+        },
+        image:   mCfg.DockerImage,
+        volumes: volumes,
+        options: options,
     }
 }
@@ -55,8 +55,10 @@ sleep 10
         So(err, ShouldBeNil)
 
         d := &dockerHook{
-            provider: provider,
-            image:    "alpine",
+            emptyHook: emptyHook{
+                provider: provider,
+            },
+            image: "alpine",
             volumes: []string{
                 fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
             },
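For orientation, the hook configured in this test ends up running the mirror command inside a container roughly like the following (an illustration of what the fields mean, not the literal command line the hook builds):

```shell
# image "alpine", one volume mapping cmdScript -> /bin/cmd.sh
docker run --rm \
    -v /path/to/cmd.sh:/bin/cmd.sh \
    alpine /bin/cmd.sh
```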
@@ -18,7 +18,6 @@ const (
 
 type execPostHook struct {
     emptyHook
-    provider mirrorProvider
 
     // exec on success or on failure
     execOn uint8
@@ -37,9 +36,11 @@ func newExecPostHook(provider mirrorProvider, execOn uint8, command string) (*ex
     }
 
     return &execPostHook{
-        provider: provider,
-        execOn:   execOn,
-        command:  cmd,
+        emptyHook: emptyHook{
+            provider: provider,
+        },
+        execOn:  execOn,
+        command: cmd,
     }, nil
 }
@@ -113,7 +113,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
             managerChan <- jobMessage{
                 tunasync.Failed, m.Name(),
                 fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
-                false,
+                true,
             }
             return err
         }
@@ -14,12 +14,13 @@ import (
 
 type logLimiter struct {
     emptyHook
-    provider mirrorProvider
 }
 
 func newLogLimiter(provider mirrorProvider) *logLimiter {
     return &logLimiter{
-        provider: provider,
+        emptyHook: emptyHook{
+            provider: provider,
+        },
     }
 }
@@ -119,10 +119,10 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
             env: mirror.Env,
         }
         p, err := newCmdProvider(pc)
-        p.isMaster = isMaster
         if err != nil {
             panic(err)
         }
+        p.isMaster = isMaster
         provider = p
     case provRsync:
         rc := rsyncConfig{
@@ -141,10 +141,10 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
             retry: mirror.Retry,
         }
         p, err := newRsyncProvider(rc)
-        p.isMaster = isMaster
         if err != nil {
             panic(err)
         }
+        p.isMaster = isMaster
         provider = p
     case provTwoStageRsync:
         rc := twoStageRsyncConfig{
@@ -163,10 +163,10 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
             retry: mirror.Retry,
         }
         p, err := newTwoStageRsyncProvider(rc)
-        p.isMaster = isMaster
         if err != nil {
             panic(err)
         }
+        p.isMaster = isMaster
         provider = p
     default:
         panic(errors.New("Invalid mirror provider"))
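All three cases above fix the same latent bug: `p.isMaster` was assigned before the constructor's error was checked, so a failing constructor that returned a nil provider would crash with a nil-pointer dereference instead of reaching the intended `panic(err)`. A minimal reproduction with hypothetical names:

```go
package main

import "errors"

type provider struct{ isMaster bool }

// newProvider mimics newCmdProvider: on failure it returns (nil, err).
func newProvider(ok bool) (*provider, error) {
    if !ok {
        return nil, errors.New("bad provider config")
    }
    return &provider{}, nil
}

func main() {
    p, err := newProvider(false)
    // p.isMaster = true  // BUG: dereferences nil p, hiding the real error
    if err != nil {
        panic(err) // informative failure, as intended
    }
    p.isMaster = true // safe: only reached when err == nil
}
```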
@@ -180,6 +180,11 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
         provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
     }
 
+    // Add Btrfs Snapshot Hook
+    if cfg.BtrfsSnapshot.Enable {
+        provider.AddHook(newBtrfsSnapshotHook(provider, cfg.BtrfsSnapshot.SnapshotPath, mirror))
+    }
+
     // Add Docker Hook
     if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
         provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))
@@ -133,7 +133,7 @@ func TestWorker(t *testing.T) {
             } else if sch, ok := data.(MirrorSchedules); ok {
                 So(len(sch.Schedules), ShouldEqual, 0)
             }
-        case <-time.After(1 * time.Second):
+        case <-time.After(2 * time.Second):
             So(registered, ShouldBeTrue)
             return
         }
@@ -178,7 +178,7 @@ func TestWorker(t *testing.T) {
                 So(status.Status, ShouldNotEqual, Failed)
                 lastStatus = status.Status
             }
-        case <-time.After(1 * time.Second):
+        case <-time.After(2 * time.Second):
             So(url, ShouldNotEqual, "")
             So(jobRunning, ShouldBeFalse)
             So(lastStatus, ShouldEqual, Success)
@@ -239,7 +239,7 @@ func TestWorker(t *testing.T) {
             }
             lastStatus[status.Name] = status.Status
         }
-    case <-time.After(1 * time.Second):
+    case <-time.After(2 * time.Second):
         So(len(lastStatus), ShouldEqual, 3)
         So(len(nextSch), ShouldEqual, 3)
         return
@@ -11,14 +11,15 @@ import (
 
 type zfsHook struct {
     emptyHook
-    provider mirrorProvider
     zpool    string
 }
 
 func newZfsHook(provider mirrorProvider, zpool string) *zfsHook {
     return &zfsHook{
-        provider: provider,
-        zpool:    zpool,
+        emptyHook: emptyHook{
+            provider: provider,
+        },
+        zpool: zpool,
     }
 }
 
@@ -40,12 +41,12 @@ func (z *zfsHook) printHelpMessage() {
 func (z *zfsHook) preJob() error {
     workingDir := z.provider.WorkingDir()
     if _, err := os.Stat(workingDir); os.IsNotExist(err) {
-        logger.Errorf("Directory %s doesn't exist", workingDir);
+        logger.Errorf("Directory %s doesn't exist", workingDir)
         z.printHelpMessage()
         return err
     }
     if err := sh.Command("mountpoint", "-q", workingDir).Run(); err != nil {
-        logger.Errorf("%s is not a mount point", workingDir);
+        logger.Errorf("%s is not a mount point", workingDir)
         z.printHelpMessage()
         return err
     }