Mirrored from https://github.com/tuna/tunasync.git. Synced 2025-12-06 22:46:47 +00:00

162 commits

Author  SHA1  Message  Commit date
zyx
0392ef28c7 bump version to 0.6.3 2020-05-25 19:21:27 +08:00
zyx
b2a22a9bbc update editor config 2020-05-25 19:16:53 +08:00
zyx
31862210ba implement the timeout 2020-05-25 19:15:05 +08:00
zyx
e47ba2097e add a timeout field to providers 2020-05-25 18:24:05 +08:00
zyx
e8c7ff3d7f config items of timeout 2020-05-25 18:08:31 +08:00
Yuxiang Zhang
7e7b469f1e Update workers.conf 2020-05-23 15:28:32 +08:00
Yuxiang Zhang
eac66c7554 add config examples of the worker (#118) 2020-05-23 15:23:15 +08:00
z4yx
38b0156fae [bug fix] provider is not terminated if premature stop command received 2020-05-09 18:42:54 +08:00
z4yx
c8e7d29f34 bump version to 0.6.2 2020-04-08 20:12:41 +08:00
Yuxiang Zhang
d40638d738 Merge pull request #116 from BITNP/laststarted
Add MirrorStatus.LastStarted property
2020-04-06 23:01:58 +08:00
Phy
471d865042 Add LastStarted test case 2020-04-05 01:07:46 -04:00
Phy
c1641b6714 Add MirrorStatus.LastStarted property
- status.Status is in PreSyncing, and
- curStatus.Status is not in PreSyncing
2020-04-05 00:12:10 -04:00
z4yx
b8edc1f714 bump version to 0.6 2020-03-29 12:48:29 +08:00
z4yx
001703a059 CI runs slower, give it more time 2020-03-29 12:01:39 +08:00
z4yx
2bbd4afda8 remove logger.Error() 2020-03-29 11:54:39 +08:00
z4yx
e8e6ab6ed6 Merge branch 'wip-newlog' 2020-03-29 11:47:53 +08:00
Yuxiang Zhang
3fed3f1cf3 Merge pull request #114 from tuna/nest_mirror
Support nested mirror config
2020-03-29 11:44:40 +08:00
z4yx
1491b6c42b format the code 2020-03-29 09:06:19 +08:00
Miao Wang
7a9895350b Support nested mirror config 2020-03-29 00:24:58 +08:00
z4yx
95d6acb026 tunasynctl: print command results with plain text instead of logging messages 2020-03-28 17:07:53 +08:00
z4yx
b132192448 Add a debugging log level to tunasynctl 2020-03-28 16:33:56 +08:00
z4yx
91209cab60 translate rsync exit code to error message (solve #20). May help #109 and #110 2020-03-28 16:26:40 +08:00
z4yx
1fb9f85862 closing log files where they were opened 2020-03-28 16:26:40 +08:00
Yuxiang Zhang
d10387e40b Merge pull request #112 from BITNP/cli-logging
Use proper logging for some debug output
2020-03-23 22:21:08 +08:00
Phy
5c01e3fa22 Use fmt.Println for cli JSON output 2020-03-23 10:19:55 -04:00
Phy
a44891d3e8 Set proper logging level on tunasynctl-cmd 2020-03-23 01:21:16 -04:00
Phy
4d461bd172 Use logger to print some debug messages than fmt.print 2020-03-23 01:20:49 -04:00
zyx
c5ed682a49 Bump version to 0.5.1 2020-03-20 10:39:34 +08:00
zyx
2c33380ce0 fix util_test 2020-03-20 10:35:54 +08:00
zyx
70cb22096f Merge branch 'master' of github.ip4.run:tuna/tunasync 2020-03-20 10:30:53 +08:00
zyx
b1f2679fbf [cmd provider] add support of match size in logs 2020-03-20 10:30:44 +08:00
Yuxiang Zhang
92a255fd3c Update tunasync.yml 2020-03-16 22:43:41 +08:00
zyx
aee1a705b7 remove "--contimeout=120" from default rsync options 2020-03-16 22:23:47 +08:00
zyx
c99916cc2a Bump version to 0.4.3 2020-03-16 22:03:40 +08:00
zyx
9eb72c5db0 fix misuse of variables 2020-03-16 21:59:34 +08:00
z4yx
b490c22984 add test of rsyncEnv 2020-03-16 21:16:23 +08:00
z4yx
ae5ff25d20 in case rsyncEnv is nil 2020-03-16 21:11:15 +08:00
z4yx
365f49e6d3 add support of env config for rsync provider 2020-03-16 20:59:08 +08:00
z4yx
fddb793ca1 v0.4.2 2020-03-14 11:30:44 +08:00
z4yx
c41d7a4038 Bring docker test back
commit 4540ba24c72cb2d24e2e04870025dfa233dedf30
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 11:16:13 2020 +0800

    wait longer

commit c8f07b81a7fe5fdef9224e8bc187500c4d67f049
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:55:49 2020 +0800

    try to Terminate

commit 10d2d4b9d0756cf8f60fe27e1e41ae29b5ea6cbe
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:50:26 2020 +0800

    forward the error

commit 38c96ee44d31088b9e6de67ebb745358fac8d49a
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:31:39 2020 +0800

    now enable the assertion

commit 3b3c46a065a035d906d4cc5022d42e30b1f52a08
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:26:40 2020 +0800

    rm un-related info

commit dd7ef7e3d0a0765c1fc48296d70966b3b4d581dd
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:12:01 2020 +0800

    print err of provider.Run

commit 49a7b57dbf52d410c0dfe796be9c2f6213884931
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:55:48 2020 +0800

    wait until it exits

commit a3e8f699072e3252b3300c667f1425a966aedb39
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:54:19 2020 +0800

    targeting alpine:3.8

commit f30b8565049bb373a1a91a34ad07c8c3df8e1036
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:47:27 2020 +0800

    see what happens

commit 8c21229a8be8e2ac0737bbc4bb88ba54e9fb7a20
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:30:50 2020 +0800

    remove one assertion

commit 123368e6ef07aa63c489bb49bdf370d3abdd17bb
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:32:45 2020 +0800

    docker test somehow works now

commit 94fa294a9bbedb569e6dd9cc7e4f27e73ed97443
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:27:12 2020 +0800

    should use -d

commit b35bae2a9cb5e006c513da95377ab9487fc4341a
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:22:25 2020 +0800

    docker run not working??

commit 9aea0036f434d333087f0cff3ce5165a53554e5f
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:12:39 2020 +0800

    test if docker works

commit f92578b159587a8bbda296bbf9261fb4c5e2f186
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 17:42:00 2020 +0800

    debugging docker_test

commit b649e32f76549711af597ce3a642309a41a08bf9
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 17:27:55 2020 +0800

    Revert "remove docker_test.go"

    This reverts commit a517a4bb64.
2020-03-14 11:23:19 +08:00
z4yx
8b0ef2bb53 fix the test 2020-03-14 11:11:10 +08:00
z4yx
b25be80670 extra options should only be applied to the second stage 2020-03-14 11:01:34 +08:00
z4yx
07cb51076e Bump version to 0.4.1 2020-03-13 17:53:28 +08:00
z4yx
3a2888dd5d Bye Travis! 2020-03-13 17:24:57 +08:00
z4yx
ada881850a maybe we don't have to install docker.io 2020-03-13 17:17:02 +08:00
z4yx
6f51188021 create release on tags 2020-03-13 17:14:49 +08:00
z4yx
a517a4bb64 remove docker_test.go 2020-03-13 17:08:39 +08:00
z4yx
b816803eaf Revert "disable docker_test"
This reverts commit 6d17d6b4ca.
2020-03-13 17:08:19 +08:00
z4yx
6d17d6b4ca disable docker_test 2020-03-13 17:04:36 +08:00
z4yx
51e7f1d573 add TestRsyncProviderWithOverriddenOptions 2020-03-13 17:02:38 +08:00
z4yx
c99095267e [docker test] Wait for docker running 2020-03-13 16:57:07 +08:00
z4yx
5c140035ec [worker teset] Wait for http server starting 2020-03-13 16:41:30 +08:00
z4yx
6ef9ccdfe6 unmask docker 2020-03-13 16:00:55 +08:00
z4yx
8df5e41d5b systemctl start docker 2020-03-13 15:57:36 +08:00
z4yx
a38a88cf41 run apt update 2020-03-13 15:49:39 +08:00
z4yx
f603aebec9 add test steps 2020-03-13 15:46:56 +08:00
z4yx
80ad3247a0 fix build 2020-03-13 15:16:39 +08:00
z4yx
02468e21c0 add an option "rsync_override" 2020-03-13 15:12:52 +08:00
z4yx
d48815b817 update the condition 2020-03-13 15:06:26 +08:00
z4yx
07cd7b5f1f update go.mod 2020-03-13 15:04:06 +08:00
z4yx
3f45e8b02b Merge commit 'ad28e8aacc124ffb751cc77a5e3c3a3ad8d0a97c' into wip-ga 2020-03-13 15:03:39 +08:00
Yuxiang Zhang
ed1f20b1e6 Hello Github Actions 2020-03-13 14:59:56 +08:00
z4yx
ad28e8aacc using Go Modules 2020-03-13 13:55:57 +08:00
Yuxiang Zhang
230d63e871 Merge pull request #108 from tuna/wip-fail-on-match (close #87)
[mirror config] job fails on pattern match in log
2020-03-09 21:54:34 +08:00
z4yx
908f098c72 [mirror config] job fails on pattern match in log 2020-03-09 21:48:06 +08:00
Yuxiang Zhang
22cfdfc9c2 [mirror config] extra rsync options (#107)
* support "rsync_options" array in config

* add test for new options

* fix tests
2020-03-09 20:48:09 +08:00
z4yx
36010dc33e Merge branch 'master' of github.com:tuna/tunasync 2019-11-30 00:08:59 +08:00
z4yx
bc416a6088 add tests for post-hooks 2019-11-30 00:08:46 +08:00
z4yx
a065a11b38 change timeout in tests 2019-11-07 12:29:57 +08:00
z4yx
b4fe4db82a Merge remote-tracking branch 'origin/dev' 2019-11-04 23:11:34 +08:00
z4yx
839363aaaa reschedule the job if any hook fails 2019-11-04 22:52:03 +08:00
Yuxiang Zhang
08aee8eb42 Merge pull request #98 from ziqin/feature/btrfs-snapshot
Reimplement Btrfs snapshots hook
2019-08-31 10:57:44 +08:00
Jeeken Wang
501f77ee41 Merge branch 'master' into feature/btrfs-snapshot 2019-08-15 01:26:28 +08:00
z4yx
9e91fd706e Merge branch 'dev' 2019-08-13 23:10:43 +08:00
z4yx
94cf0b4bdb fix possible null dereferencing, reported by #96 2019-08-13 23:07:01 +08:00
WANG Ziqin
8fd2059013 add doc for setup btrfs snapshots 2019-08-02 13:31:33 +08:00
WANG Ziqin
6b56c4254c feat(btrfs_snapshot_hook): reimplemented Btrfs snapshots
TODO: test coverage
2019-08-02 13:31:33 +08:00
Yuxiang Zhang
3872c41607 Merge pull request #97 from ziqin/master
Refine: remove outer `provider`s which shadow the embedded `provider`s provided by `emptyHook`
2019-08-02 09:27:04 +08:00
WANG Ziqin
30259da0f0 fix nil pointer dereference: check err first 2019-08-02 02:15:22 +08:00
WANG Ziqin
4854d9b981 Fix test: initialize dockerHook with embedded provider 2019-07-31 17:29:28 +08:00
WANG Ziqin
06fce98c00 Eliminate duplicate mirrorProvider in Hooks 2019-07-31 16:11:56 +08:00
Jeeken Wang
8408236646 Update "Job Run Process" diagram according to runJobWrapper 2019-07-31 12:26:09 +08:00
z4yx
540eea8aeb set golang version to 1.11 2019-07-05 16:54:29 +08:00
z4yx
a6fc97889d [bug fix] stalled scheduler if post-sync hook runs for a time which is longer than the sync interval 2019-07-05 16:29:00 +08:00
Yuxiang Zhang
5f7d974469 Merge pull request #93 from vgxbj/patch-1
Fix ascii chart for `Job Run Process`
2019-05-30 10:16:22 +08:00
Guō Xīng
3b52f93e7e Fix ascii chart for Job Run Process 2019-05-29 14:32:50 +08:00
zyx
1025189542 fix possible null dereferencing in server_test 2019-04-13 11:13:17 +08:00
zyx
9f91d90fc5 check Retry configuration in providers 2019-04-13 11:01:56 +08:00
zyx
1aa4ae9cc1 Merge remote-tracking branch 'kinosang/master' into wip-test-pr 2019-04-13 02:07:41 +08:00
zyx
d0deeb19a9 extract mirror size from rsync provider automatically 2019-04-13 01:27:35 +08:00
zyx
a283328dc4 increase test converage of worker 2019-04-12 09:43:57 +08:00
zyx
1890bbed3c add tests for last commit 2019-04-11 12:36:43 +08:00
zyx
ddc9efd155 report next scheduled sync time 2019-04-11 12:36:18 +08:00
zyx
7eb119b892 singleton of worker is not used, so remove it 2019-04-11 10:07:42 +08:00
zyx
96f11f57ed throw an error if executing reload command without worker id 2019-04-09 22:30:08 +08:00
Yuxiang Zhang
3e6e6f9b14 Update tips.md 2019-04-07 21:48:57 +08:00
Yuxiang Zhang
b06cadfe06 Update tips.md 2019-04-07 21:48:00 +08:00
Yuxiang Zhang
9c34372ae4 add link to tips.md 2019-04-07 21:35:40 +08:00
Yuxiang Zhang
ebbfff40f6 Merge pull request #91 from SCU-MingYuan/master
Added some control tips
2019-04-07 21:33:33 +08:00
GaryH4
5eeade22fc Update tips.md 2019-04-07 19:55:13 +08:00
GaryH4
4b3741308b Update tips.md 2019-04-06 23:48:33 +08:00
GaryH4
7d495c1956 Update tips.md 2019-04-06 23:40:43 +08:00
GaryH4
0bf8400077 Added some tips 2019-04-06 23:30:04 +08:00
Yuxiang Zhang
c611759394 Update get_started.md 2019-04-06 11:21:22 +08:00
Yuxiang Zhang
279aa32b68 Update get_started.md 2019-04-06 11:09:24 +08:00
Yuxiang Zhang
025544449a remove section of certificate generation 2019-04-06 10:56:38 +08:00
zyx
90d419ca66 add tests for last commit 2019-03-31 12:16:45 +08:00
zyx
96cb975412 Let user create ZFS dataset manually due to security considerations 2019-03-31 12:09:42 +08:00
王邈
ff3e690497 Revert "change owner of folder to current user after creating zfs dataset (close #89)"
This reverts commit a58e6d37ae and
re-opens #89.

Signed-off-by: 王邈 <shankerwangmiao@gmail.com>
2019-03-26 00:30:06 +08:00
zyx
a58e6d37ae change owner of folder to current user after creating zfs dataset (close #89) 2019-03-25 23:40:04 +08:00
zhang
7a4a8ad486 Merge branch 'master' of github.com:tuna/tunasync 2018-10-25 22:52:21 +08:00
zhang
e1c0c25efa add example of worker config 2018-10-25 22:52:02 +08:00
z4yx
9ac374527a regenerate travis deploy key 2018-10-25 17:27:32 +08:00
z4yx
f03626d4e1 update Get Started document 2018-10-25 17:23:02 +08:00
z4yx
23bf4890cc bump version to v0.3.2 2018-10-25 17:07:04 +08:00
z4yx
2f6a61aee5 increse test coverage 2018-10-25 17:02:05 +08:00
z4yx
b6043142e1 test if it works with golang 1.8 2018-10-25 16:16:04 +08:00
zhang
6241576b12 bug fix: tunasynctl failed to parse datetime when you list jobs of specific worker 2018-06-13 10:28:48 +08:00
bigeagle
ef78563b8c Merge pull request #74 from houbaron/patch-1
Update README.md
2018-05-31 21:25:55 +08:00
bigeagle
ca106f1360 Merge pull request #82 from tuna/dev
New feature: remove a worker with tunasynctl
2018-05-31 21:22:46 +08:00
Miao Wang
628266ac5a Merge pull request #81 from tuna/wip-override-concurrent-limit
New feature: run "tunasynctl start" with "-f" to override the limit of concurrent jobs
2018-05-31 14:22:03 +08:00
Yuxiang Zhang
7e601d9fff New feature: remove a worker with tunasynctl
Fix #78
2018-05-31 12:32:22 +08:00
z4yx
c750aa1871 new feature: run "tunasynctl start" with "-f" to override concurrent job limit 2018-05-30 18:59:24 +08:00
Yuxiang Zhang
6cbe91b4f1 new command: jobForceStart 2018-05-30 16:07:07 +08:00
Yuxiang Zhang
89a792986d increase test coverage rate of job & provider 2018-05-30 14:00:10 +08:00
Yuxiang Zhang
0fdb07d061 bug fix: log over-written in twoStageRsyncProvider
solve more DATA RACE problem
2018-05-30 12:28:09 +08:00
Yuxiang Zhang
c5bb172f99 increase test coverage rate of job.go 2018-05-30 11:45:05 +08:00
Yuxiang Zhang
79e6167028 fix race condition on logFile of baseProvider 2018-05-30 01:46:16 +08:00
Miao Wang
285ffb2f2f Merge pull request #80 from tuna/dev
Fix the "list" command of tunasynctl
2018-05-29 21:42:57 +08:00
Yuxiang Zhang
95bb4bbd5e report the last ended time (updated whether successful or not) of jobs 2018-05-29 21:21:03 +08:00
Yuxiang Zhang
6bca9d2cd5 fix TestHTTPServer in manager package 2018-05-29 19:07:01 +08:00
Yuxiang Zhang
4fe7d03e54 Move the WebMirrorStatus to internal package. Fix the list command of tunasynctl 2018-05-29 18:48:33 +08:00
Baron Hou
1fe9499728 Update README.md 2017-09-29 18:14:11 +08:00
bigeagle
a475b044c6 feat(worker): add 'use_ipv4' option for rsync provider 2017-09-08 00:15:48 +08:00
bigeagle
a50a360a91 Revert "feat(worker): add '-4' option to rsync when 'use_ipv6' is false"
This reverts commit d536aca2ac.
2017-09-08 00:12:40 +08:00
bigeagle
d536aca2ac feat(worker): add '-4' option to rsync when 'use_ipv6' is false 2017-09-06 23:22:55 +08:00
bigeagle
28545d61e7 Merge pull request #68 from l2dy/master
Update README.md
2017-05-29 11:03:27 -05:00
Zero King
a87fb0f8b4 Update README.md 2017-05-29 15:42:10 +00:00
Jason Lau
095e7c6320 Merge pull request #65 from felixonmars/patch-1
Fix a typo: Fisrt -> First
2017-03-30 15:31:46 +08:00
Felix Yan
7b441312f4 Fix a typo: Fisrt -> First 2017-03-30 13:27:40 +08:00
7IN0SAN9
563860d424 fix #63 2017-03-27 13:09:56 +08:00
bigeagle
93194cde2e Merge pull request #60 from tuna/dev
Dev
2016-12-19 01:10:38 +08:00
bigeagle
aa4c31a32b feat(tunasynctl): implemented 'set-size' command to update a mirror size 2016-12-18 23:30:41 +08:00
bigeagle
4c6a407c17 feat(manager): implemented restful API for updating mirror size 2016-12-18 23:06:08 +08:00
bigeagle
939abaef9b feat(worker): TUNASYNC_LOG_DIR environment variable 2016-12-18 20:41:26 +08:00
bigeagle
d5a438462f feat(worker): map current uid and gid to docker 2016-12-18 14:28:48 +08:00
bigeagle
d4e07a7b29 fix(worker): keep the same working dir inside and outside of docker 2016-12-18 14:28:32 +08:00
bigeagle
9ac3193d50 Merge pull request #58 from tuna/dev
Dev
2016-12-12 23:46:22 +08:00
bigeagle
9ffb101cc7 chore(tunasync): bump version to 0.2-dev 2016-12-12 23:23:06 +08:00
bigeagle
fd277388d5 fix(worker): fixed multi-manager configuration
the worker must be registerred on the manager

`extra_status_manager` option is replaced by `api_base_list`, which overrides the `api_base` option
2016-12-12 23:17:50 +08:00
bigeagle
c5cba66786 Merge pull request #57 from tuna/dev
fix(cmd): make tunasynctl work with both HTTP and HTTPS
2016-12-11 02:45:19 +08:00
bigeagle
97e9725774 fix(cmd): make tunasynctl work with both HTTP and HTTPS 2016-12-11 02:13:19 +08:00
bigeagle
54740388b3 Merge pull request #56 from tuna/dev
Dev
2016-12-10 04:18:46 +08:00
bigeagle
7601e5793f fix(worker): improved cgroup creation 2016-12-10 04:14:39 +08:00
bigeagle
9645fd44ec ci(travis): Enabled docker on travis 2016-12-10 03:48:03 +08:00
bigeagle
ebd462be36 feat(worker): Implemented docker executor, close #55
if docker is enabled in configure file and `docker_image` is set on mirror config, the command would

be executed via `docker run ...`
2016-12-10 02:44:45 +08:00
bigeagle
21c832c8fb fix(worker): disabled memory limit
rsync memory is nolonger limited
2016-12-09 23:07:05 +08:00
bigeagle
81a15e7dd1 Merge pull request #54 from tuna/dev
Dev
2016-12-07 00:11:34 +08:00
bigeagle
3f31e83c14 feat(manager): let illegal status records be flushed with disabled jobs 2016-12-07 00:08:16 +08:00
bigeagle
a0b8ef08ab feat(worker): implemented extra_status_manager option to enable a worker reporting status to multi 2016-12-06 23:59:15 +08:00
bigeagle
86153c59e3 feat(worker): ZFS support: isolate mirrors in zfs datasets 2016-12-05 00:44:55 +08:00
bigeagle
96f9db8bb8 fix(worker): extended rsync memory limit to 512MB 2016-12-04 22:56:48 +08:00
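
The v0.6.3 entries at the top of this log ("config items of timeout", "add a timeout field to providers", "implement the timeout") add a per-provider sync timeout. The provider code itself is not shown in the diff below, so the following is only a hedged, minimal Go sketch of the usual technique for bounding a sync command's run time with a context; it is not tunasync's actual implementation:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runWithTimeout is a hypothetical illustration: run a sync command,
// but kill it once the configured timeout elapses.
func runWithTimeout(timeout time.Duration, name string, args ...string) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, name, args...)
	if err := cmd.Run(); err != nil {
		if ctx.Err() == context.DeadlineExceeded {
			return fmt.Errorf("sync timed out after %s", timeout)
		}
		return err
	}
	return nil
}

func main() {
	// "sleep 5" stands in for a long-running sync job.
	fmt.Println(runWithTimeout(2*time.Second, "sleep", "5"))
}
```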
54 changed files with 4277 additions and 605 deletions

.github/workflows/release.yml (vendored, new file, +54 lines)

@@ -0,0 +1,54 @@
name: release

on:
  push:
    # Sequence of patterns matched against refs/tags
    tags:
      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
    - name: Set up Go 1.13
      uses: actions/setup-go@v1
      with:
        go-version: 1.13
      id: go

    - name: Check out code into the Go module directory
      uses: actions/checkout@v2

    - name: Get dependencies
      run: |
        go get -v -t -d ./cmd/tunasync
        go get -v -t -d ./cmd/tunasynctl

    - name: Build
      run: |
        make tunasync
        make tunasynctl
        tar -jcf build/tunasync-linux-bin.tar.bz2 -C build tunasync tunasynctl

    - name: Create Release
      id: create_release
      uses: actions/create-release@v1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        tag_name: ${{ github.ref }}
        release_name: Release ${{ github.ref }}
        draft: false
        prerelease: false

    - name: Upload Release Asset
      id: upload-release-asset
      uses: actions/upload-release-asset@v1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
        asset_path: ./build/tunasync-linux-bin.tar.bz2
        asset_name: tunasync-linux-bin.tar.bz2
        asset_content_type: application/x-bzip2

.github/workflows/tunasync.yml (vendored, new file, +73 lines)

@@ -0,0 +1,73 @@
name: tunasync

on: [push]

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
    - name: Set up Go 1.13
      uses: actions/setup-go@v1
      with:
        go-version: 1.13
      id: go

    - name: Check out code into the Go module directory
      uses: actions/checkout@v2

    - name: Get dependencies
      run: |
        go get -v -t -d ./cmd/tunasync
        go get -v -t -d ./cmd/tunasynctl

    - name: Build
      run: |
        make tunasync
        make tunasynctl

    - name: Keep artifacts
      uses: actions/upload-artifact@v1
      with:
        name: tunasync-bin
        path: build/

  test:
    name: Test
    runs-on: ubuntu-latest
    steps:
    - name: Setup test dependencies
      run: |
        sudo apt-get update
        sudo apt-get install -y cgroup-bin
        docker pull alpine:3.8
        lssubsys -am
        sudo cgcreate -a $USER -t $USER -g cpu:tunasync
        sudo cgcreate -a $USER -t $USER -g memory:tunasync

    - name: Set up Go 1.13
      uses: actions/setup-go@v1
      with:
        go-version: 1.13
      id: go

    - name: Check out code into the Go module directory
      uses: actions/checkout@v2

    - name: Run Unit tests.
      run: make test

    - name: Convert coverage to lcov
      uses: jandelgado/gcov2lcov-action@v1.0.0
      with:
        infile: profile.cov
        outfile: coverage.lcov

    - name: Coveralls
      uses: coverallsapp/github-action@v1.0.1
      with:
        github-token: ${{ secrets.github_token }}
        path-to-lcov: coverage.lcov

.testandcover.bash (deleted, -30 lines)

@@ -1,30 +0,0 @@
#!/bin/bash

function die() {
  echo $*
  exit 1
}

export GOPATH=`pwd`:$GOPATH

make travis

# Initialize profile.cov
echo "mode: count" > profile.cov

# Initialize error tracking
ERROR=""

# Test each package and append coverage profile info to profile.cov
for pkg in `cat .testpackages.txt`
do
    go test -v -covermode=count -coverprofile=profile_tmp.cov $pkg || ERROR="Error testing $pkg"

    [ -f profile_tmp.cov ] && {
        tail -n +2 profile_tmp.cov >> profile.cov || die "Unable to append coverage for $pkg"
    }
done

if [ ! -z "$ERROR" ]
then
    die "Encountered error, last error was: $ERROR"
fi

.travis.yml (deleted, -35 lines)

@@ -1,35 +0,0 @@
language: go

go:
  - 1.6

before_install:
  - sudo apt-get install cgroup-bin
  - go get github.com/smartystreets/goconvey
  - go get golang.org/x/tools/cmd/cover
  - go get -v github.com/mattn/goveralls

os:
  - linux

before_script:
  - sudo cgcreate -t travis -a travis -g memory:tunasync

script:
  - ./.testandcover.bash

after_success:
  - goveralls -coverprofile=profile.cov -service=travis-ci

before_deploy: "echo 'ready to deploy?'"

deploy:
  provider: releases
  file:
    - "build/tunasync-linux-bin.tar.gz"
  api_key:
    secure: "F9kaVaR1mxEh2+EL9Nm8GZmbVY98pXCJA0LGDNrq1C2vU61AUNOeX6yI1mMklHNZPLBqoFDvGN1M5HnJ+xWCFH+KnJgLD2GVIAcAxFNpcNWQe8XKE5heklNsIQNQfuh/rJKM6YzeDB9G5RN4Y76iL4WIAXhNnMm48W6jLnWhf70="
  skip_cleanup: true
  overwrite: true
  on:
    tags: true
    all_branches: true

.vscode/settings.json (vendored, new file, +13 lines)

@@ -0,0 +1,13 @@
{
    "cSpell.words": [
        "Btrfs",
        "Debugf",
        "Infof",
        "Noticef",
        "Warningf",
        "cgroup",
        "mergo",
        "tmpl",
        "zpool"
    ]
}

Makefile

@@ -2,8 +2,6 @@ LDFLAGS="-X main.buildstamp=`date -u '+%s'` -X main.githash=`git rev-parse HEAD`
 all: get tunasync tunasynctl
-travis: get tunasync tunasynctl travis-package
 
 get:
 	go get ./cmd/tunasync
 	go get ./cmd/tunasynctl
@@ -17,5 +15,5 @@ tunasync: build
 tunasynctl: build
 	go build -o build/tunasynctl -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasynctl
 
-travis-package: tunasync tunasynctl
-	tar zcf build/tunasync-linux-bin.tar.gz -C build tunasync tunasynctl
+test:
+	go test -v -covermode=count -coverprofile=profile.cov ./...
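
The LDFLAGS at the top of this Makefile inject build metadata through the Go linker's `-X` flag. Below is a minimal, self-contained sketch of the receiving side, assuming two package-level string variables named as the `-X` targets (`main.buildstamp`, `main.githash`); the real cmd packages are not shown in this diff:

```go
package main

import "fmt"

// These defaults are overwritten at link time by the Makefile's LDFLAGS:
//   -X main.buildstamp=`date -u '+%s'` -X main.githash=`git rev-parse HEAD`
var (
	buildstamp = "unknown"
	githash    = "unknown"
)

func main() {
	fmt.Printf("built at %s from commit %s\n", buildstamp, githash)
}
```

Building with `go build -ldflags "-X main.buildstamp=... -X main.githash=..."` fills both strings without touching the source.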

README.md

@@ -1,8 +1,8 @@
 tunasync
 ========
-[![Build Status](https://travis-ci.org/tuna/tunasync.svg?branch=dev)](https://travis-ci.org/tuna/tunasync)
-[![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=dev)](https://coveralls.io/github/tuna/tunasync?branch=dev)
+![Build Status](https://github.com/tuna/tunasync/workflows/tunasync/badge.svg)
+[![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=master)](https://coveralls.io/github/tuna/tunasync?branch=master)
 [![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/)
 ![GPLv3](https://img.shields.io/badge/license-GPLv3-blue.svg)
@@ -19,7 +19,7 @@ Pre-built binary for Linux x86_64 is available at [Github releases](https://gith
 ```
 # Architecture
-- Manager: Centural instance on status and job management
+- Manager: Central instance for status and job management
 - Worker: Runs mirror jobs
 
 +------------+ +---+ +---+
@@ -40,84 +40,23 @@ Pre-built binary for Linux x86_64 is available at [Github releases](https://gith
 # Job Run Process
 
-PreSyncing               Syncing                            Success
-+-----------+     +-----------+     +-------------+     +--------------+
-| pre-job   +--+->|  job run  +---->|  post-exec  +-+-->| post-success |
-+-----------+  ^  +-----------+     +-------------+ |   +--------------+
-               |                                    |
-               |      +-----------------+    Failed |
-               +------+    post-fail    |<----------+
-                      +-----------------+
+PreSyncing                Syncing                                        Success
++-----------+     +----------+     +-----------+     +-------------+     +--------------+
+| pre-job   +--+->| pre-exec +---->|  job run  +---->|  post-exec  +-+-->| post-success |
++-----------+  ^  +----------+     +-----------+     +-------------+ |   +--------------+
+               |                                                     |
+               |                +-----------------+           Failed |
+               +----------------+    post-fail    |<-----------------+
+                                +-----------------+
 ```
-
-## Generate Self-Signed Certificate
-
-Fisrt, create root CA
-
-```
-openssl genrsa -out rootCA.key 2048
-openssl req -x509 -new -nodes -key rootCA.key -days 365 -out rootCA.crt
-```
-
-Create host key
-
-```
-openssl genrsa -out host.key 2048
-```
-
-Now create CSR, before that, write a `req.cnf`
-
-```
-[req]
-distinguished_name = req_distinguished_name
-req_extensions = v3_req
-
-[req_distinguished_name]
-countryName = Country Name (2 letter code)
-countryName_default = CN
-stateOrProvinceName = State or Province Name (full name)
-stateOrProvinceName_default = BJ
-localityName = Locality Name (eg, city)
-localityName_default = Beijing
-organizationalUnitName = Organizational Unit Name (eg, section)
-organizationalUnitName_default = TUNA
-commonName = Common Name (server FQDN or domain name)
-commonName_default = <server_FQDN>
-commonName_max = 64
-
-[v3_req]
-# Extensions to add to a certificate request
-basicConstraints = CA:FALSE
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
-subjectAltName = @alt_names
-
-[alt_names]
-DNS.1 = <server_FQDN_1>
-DNS.2 = <server_FQDN_2>
-```
-
-Substitute `<server_FQDN>` with your server's FQDN, then run
-
-```
-openssl req -new -key host.key -out host.csr -config req.cnf
-```
-
-Finally generate and sign host cert with root CA
-
-```
-openssl x509 -req -in host.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out host.crt -days 365 -extensions v3_req -extfile req.cnf
-```
-
 ## Building
 
-Setup GOPATH like [this](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
-Then:
+Go version: 1.13
 
 ```
-go get -d github.com/tuna/tunasync/cmd/tunasync
-cd $GOPATH/src/github.com/tuna/tunasync
-make
+make all
 ```
-
-If you have multiple `GOPATH`s, replace the `$GOPATH` with your first one.
+Binaries in the `build/`.
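
The updated diagram inserts a pre-exec stage between pre-job and the job run. As a reading aid only, here is a minimal, hypothetical Go sketch of that run order; the worker's real runJobWrapper is not part of this diff:

```go
package main

import "fmt"

// hook is a hypothetical stand-in for tunasync's job hooks.
type hook struct {
	name string
	run  func() error
}

// runJob walks the diagram: pre-job -> pre-exec -> job run -> post-exec,
// then post-success, or post-fail as soon as any stage fails.
func runJob(stages []hook, postSuccess, postFail hook) {
	for _, h := range stages {
		if err := h.run(); err != nil {
			fmt.Printf("%s failed: %v; running %s\n", h.name, err, postFail.name)
			postFail.run()
			return
		}
	}
	postSuccess.run()
}

func main() {
	ok := func() error { return nil }
	runJob(
		[]hook{{"pre-job", ok}, {"pre-exec", ok}, {"job run", ok}, {"post-exec", ok}},
		hook{"post-success", func() error { fmt.Println("post-success"); return nil }},
		hook{"post-fail", ok},
	)
}
```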

cmd/tunasync/tunasync.go

@@ -11,7 +11,7 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/pkg/profile" "github.com/pkg/profile"
"gopkg.in/op/go-logging.v1" "gopkg.in/op/go-logging.v1"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli"
tunasync "github.com/tuna/tunasync/internal" tunasync "github.com/tuna/tunasync/internal"
"github.com/tuna/tunasync/manager" "github.com/tuna/tunasync/manager"
@@ -60,7 +60,7 @@ func startWorker(c *cli.Context) error {
os.Exit(1) os.Exit(1)
} }
w := worker.GetTUNASyncWorker(cfg) w := worker.NewTUNASyncWorker(cfg)
if w == nil { if w == nil {
logger.Errorf("Error intializing TUNA sync worker.") logger.Errorf("Error intializing TUNA sync worker.")
os.Exit(1) os.Exit(1)
@@ -134,7 +134,7 @@ func main() {
app.Name = "tunasync" app.Name = "tunasync"
app.Usage = "tunasync mirror job management tool" app.Usage = "tunasync mirror job management tool"
app.EnableBashCompletion = true app.EnableBashCompletion = true
app.Version = "0.1" app.Version = tunasync.Version
app.Commands = []cli.Command{ app.Commands = []cli.Command{
{ {
Name: "manager", Name: "manager",

cmd/tunasynctl/tunasynctl.go

@@ -11,8 +11,8 @@ import (
"time" "time"
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/urfave/cli"
"gopkg.in/op/go-logging.v1" "gopkg.in/op/go-logging.v1"
"gopkg.in/urfave/cli.v1"
tunasync "github.com/tuna/tunasync/internal" tunasync "github.com/tuna/tunasync/internal"
) )
@@ -32,7 +32,7 @@ const (
userCfgFile = "$HOME/.config/tunasync/ctl.conf" // user-specific conf userCfgFile = "$HOME/.config/tunasync/ctl.conf" // user-specific conf
) )
var logger = logging.MustGetLogger("tunasynctl-cmd") var logger = logging.MustGetLogger("tunasynctl")
var baseURL string var baseURL string
var client *http.Client var client *http.Client
@@ -41,7 +41,7 @@ func initializeWrapper(handler cli.ActionFunc) cli.ActionFunc {
return func(c *cli.Context) error { return func(c *cli.Context) error {
err := initialize(c) err := initialize(c)
if err != nil { if err != nil {
return cli.NewExitError("", 1) return cli.NewExitError(err.Error(), 1)
} }
return handler(c) return handler(c)
} }
@@ -55,8 +55,9 @@ type config struct {
func loadConfig(cfgFile string, cfg *config) error { func loadConfig(cfgFile string, cfg *config) error {
if cfgFile != "" { if cfgFile != "" {
logger.Infof("Loading config: %s", cfgFile)
if _, err := toml.DecodeFile(cfgFile, cfg); err != nil { if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
logger.Errorf(err.Error()) // logger.Errorf(err.Error())
return err return err
} }
} }
@@ -66,7 +67,7 @@ func loadConfig(cfgFile string, cfg *config) error {
func initialize(c *cli.Context) error { func initialize(c *cli.Context) error {
// init logger // init logger
tunasync.InitLogger(c.Bool("verbose"), c.Bool("verbose"), false) tunasync.InitLogger(c.Bool("verbose"), c.Bool("debug"), false)
cfg := new(config) cfg := new(config)
@@ -76,14 +77,23 @@ func initialize(c *cli.Context) error {
// find config file and load config // find config file and load config
if _, err := os.Stat(systemCfgFile); err == nil { if _, err := os.Stat(systemCfgFile); err == nil {
loadConfig(systemCfgFile, cfg) err = loadConfig(systemCfgFile, cfg)
if err != nil {
return err
}
} }
fmt.Println(os.ExpandEnv(userCfgFile)) logger.Debug("user config file: %s", os.ExpandEnv(userCfgFile))
if _, err := os.Stat(os.ExpandEnv(userCfgFile)); err == nil { if _, err := os.Stat(os.ExpandEnv(userCfgFile)); err == nil {
loadConfig(os.ExpandEnv(userCfgFile), cfg) err = loadConfig(os.ExpandEnv(userCfgFile), cfg)
if err != nil {
return err
}
} }
if c.String("config") != "" { if c.String("config") != "" {
loadConfig(c.String("config"), cfg) err := loadConfig(c.String("config"), cfg)
if err != nil {
return err
}
} }
// override config using the command-line arguments // override config using the command-line arguments
@@ -99,8 +109,11 @@ func initialize(c *cli.Context) error {
} }
// parse base url of the manager server // parse base url of the manager server
baseURL = fmt.Sprintf("https://%s:%d", if cfg.CACert != "" {
cfg.ManagerAddr, cfg.ManagerPort) baseURL = fmt.Sprintf("https://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
} else {
baseURL = fmt.Sprintf("http://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
}
logger.Infof("Use manager address: %s", baseURL) logger.Infof("Use manager address: %s", baseURL)
@@ -109,7 +122,7 @@ func initialize(c *cli.Context) error {
client, err = tunasync.CreateHTTPClient(cfg.CACert) client, err = tunasync.CreateHTTPClient(cfg.CACert)
if err != nil { if err != nil {
err = fmt.Errorf("Error initializing HTTP client: %s", err.Error()) err = fmt.Errorf("Error initializing HTTP client: %s", err.Error())
logger.Error(err.Error()) // logger.Error(err.Error())
return err return err
} }
@@ -132,14 +145,14 @@ func listWorkers(c *cli.Context) error {
err.Error()), err.Error()),
1) 1)
} }
fmt.Print(string(b)) fmt.Println(string(b))
return nil return nil
} }
func listJobs(c *cli.Context) error { func listJobs(c *cli.Context) error {
// FIXME: there should be an API on manager server side that return MirrorStatus list to tunasynctl var genericJobs interface{}
var jobs []tunasync.MirrorStatus
if c.Bool("all") { if c.Bool("all") {
var jobs []tunasync.WebMirrorStatus
_, err := tunasync.GetJSON(baseURL+listJobsPath, &jobs, client) _, err := tunasync.GetJSON(baseURL+listJobsPath, &jobs, client)
if err != nil { if err != nil {
return cli.NewExitError( return cli.NewExitError(
@@ -147,8 +160,10 @@ func listJobs(c *cli.Context) error {
"of all jobs from manager server: %s", err.Error()), "of all jobs from manager server: %s", err.Error()),
1) 1)
} }
genericJobs = jobs
} else { } else {
var jobs []tunasync.MirrorStatus
args := c.Args() args := c.Args()
if len(args) == 0 { if len(args) == 0 {
return cli.NewExitError( return cli.NewExitError(
@@ -162,24 +177,129 @@ func listJobs(c *cli.Context) error {
_, err := tunasync.GetJSON(fmt.Sprintf("%s/workers/%s/jobs", _, err := tunasync.GetJSON(fmt.Sprintf("%s/workers/%s/jobs",
baseURL, workerID), &workerJobs, client) baseURL, workerID), &workerJobs, client)
if err != nil { if err != nil {
logger.Errorf("Filed to correctly get jobs"+ logger.Infof("Failed to correctly get jobs"+
" for worker %s: %s", workerID, err.Error()) " for worker %s: %s", workerID, err.Error())
} }
ans <- workerJobs ans <- workerJobs
}(workerID) }(workerID)
} }
for range args { for range args {
jobs = append(jobs, <-ans...) job := <-ans
if job == nil {
return cli.NewExitError(
fmt.Sprintf("Failed to correctly get information "+
"of jobs from at least one manager"),
1)
}
jobs = append(jobs, job...)
} }
genericJobs = jobs
} }
b, err := json.MarshalIndent(jobs, "", " ") b, err := json.MarshalIndent(genericJobs, "", " ")
if err != nil { if err != nil {
return cli.NewExitError( return cli.NewExitError(
fmt.Sprintf("Error printing out informations: %s", err.Error()), fmt.Sprintf("Error printing out information: %s", err.Error()),
1) 1)
} }
fmt.Printf(string(b)) fmt.Println(string(b))
return nil
}
func updateMirrorSize(c *cli.Context) error {
args := c.Args()
if len(args) != 2 {
return cli.NewExitError("Usage: tunasynctl -w <worker-id> <mirror> <size>", 1)
}
workerID := c.String("worker")
mirrorID := args.Get(0)
mirrorSize := args.Get(1)
msg := struct {
Name string `json:"name"`
Size string `json:"size"`
}{
Name: mirrorID,
Size: mirrorSize,
}
url := fmt.Sprintf(
"%s/workers/%s/jobs/%s/size", baseURL, workerID, mirrorID,
)
resp, err := tunasync.PostJSON(url, msg, client)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to send request to manager: %s",
err.Error()),
1)
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
return cli.NewExitError(
fmt.Sprintf("Manager failed to update mirror size: %s", body), 1,
)
}
var status tunasync.MirrorStatus
json.Unmarshal(body, &status)
if status.Size != mirrorSize {
return cli.NewExitError(
fmt.Sprintf(
"Mirror size error, expecting %s, manager returned %s",
mirrorSize, status.Size,
), 1,
)
}
fmt.Printf("Successfully updated mirror size to %s\n", mirrorSize)
return nil
}
func removeWorker(c *cli.Context) error {
args := c.Args()
if len(args) != 0 {
return cli.NewExitError("Usage: tunasynctl -w <worker-id>", 1)
}
workerID := c.String("worker")
if len(workerID) == 0 {
return cli.NewExitError("Please specify the <worker-id>", 1)
}
url := fmt.Sprintf("%s/workers/%s", baseURL, workerID)
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
logger.Panicf("Invalid HTTP Request: %s", err.Error())
}
resp, err := client.Do(req)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to send request to manager: %s", err.Error()), 1)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
1)
}
return cli.NewExitError(fmt.Sprintf("Failed to correctly send"+
" command: HTTP status code is not 200: %s", body),
1)
}
res := map[string]string{}
err = json.NewDecoder(resp.Body).Decode(&res)
if res["message"] == "deleted" {
fmt.Println("Successfully removed the worker")
} else {
return cli.NewExitError("Failed to remove the worker", 1)
}
return nil return nil
} }
@@ -211,7 +331,7 @@ func flushDisabledJobs(c *cli.Context) error {
1) 1)
} }
logger.Info("Successfully flushed disabled jobs") fmt.Println("Successfully flushed disabled jobs")
return nil return nil
} }
@@ -232,11 +352,16 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
"argument WORKER", 1) "argument WORKER", 1)
} }
options := map[string]bool{}
if c.Bool("force") {
options["force"] = true
}
cmd := tunasync.ClientCmd{ cmd := tunasync.ClientCmd{
Cmd: cmd, Cmd: cmd,
MirrorID: mirrorID, MirrorID: mirrorID,
WorkerID: c.String("worker"), WorkerID: c.String("worker"),
Args: argsList, Args: argsList,
Options: options,
} }
resp, err := tunasync.PostJSON(baseURL+cmdPath, cmd, client) resp, err := tunasync.PostJSON(baseURL+cmdPath, cmd, client)
if err != nil { if err != nil {
@@ -259,7 +384,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
" command: HTTP status code is not 200: %s", body), " command: HTTP status code is not 200: %s", body),
1) 1)
} }
logger.Info("Succesfully send command") fmt.Println("Successfully send the command")
return nil return nil
} }
@@ -267,6 +392,11 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc { func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
return func(c *cli.Context) error { return func(c *cli.Context) error {
if c.String("worker") == "" {
return cli.NewExitError("Please specify the worker with -w <worker-id>", 1)
}
cmd := tunasync.ClientCmd{ cmd := tunasync.ClientCmd{
Cmd: cmd, Cmd: cmd,
WorkerID: c.String("worker"), WorkerID: c.String("worker"),
@@ -292,7 +422,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
" command: HTTP status code is not 200: %s", body), " command: HTTP status code is not 200: %s", body),
1) 1)
} }
logger.Info("Succesfully send command") fmt.Println("Successfully send the command")
return nil return nil
} }
@@ -322,7 +452,7 @@ func main() {
app := cli.NewApp() app := cli.NewApp()
app.EnableBashCompletion = true app.EnableBashCompletion = true
app.Version = "0.1" app.Version = tunasync.Version
app.Name = "tunasynctl" app.Name = "tunasynctl"
app.Usage = "control client for tunasync manager" app.Usage = "control client for tunasync manager"
@@ -349,6 +479,10 @@ func main() {
Name: "verbose, v", Name: "verbose, v",
Usage: "Enable verbosely logging", Usage: "Enable verbosely logging",
}, },
cli.BoolFlag{
Name: "debug",
Usage: "Enable debugging logging",
},
} }
cmdFlags := []cli.Flag{ cmdFlags := []cli.Flag{
cli.StringFlag{ cli.StringFlag{
@@ -357,6 +491,11 @@ func main() {
}, },
} }
forceStartFlag := cli.BoolFlag{
Name: "force, f",
Usage: "Override the concurrent limit",
}
app.Commands = []cli.Command{ app.Commands = []cli.Command{
{ {
Name: "list", Name: "list",
@@ -382,10 +521,34 @@ func main() {
Flags: commonFlags, Flags: commonFlags,
Action: initializeWrapper(listWorkers), Action: initializeWrapper(listWorkers),
}, },
{
Name: "rm-worker",
Usage: "Remove a worker",
Flags: append(
commonFlags,
cli.StringFlag{
Name: "worker, w",
Usage: "worker-id of the worker to be removed",
},
),
Action: initializeWrapper(removeWorker),
},
{
Name: "set-size",
Usage: "Set mirror size",
Flags: append(
commonFlags,
cli.StringFlag{
Name: "worker, w",
Usage: "specify worker-id of the mirror job",
},
),
Action: initializeWrapper(updateMirrorSize),
},
{ {
Name: "start", Name: "start",
Usage: "Start a job", Usage: "Start a job",
Flags: append(commonFlags, cmdFlags...), Flags: append(append(commonFlags, cmdFlags...), forceStartFlag),
Action: initializeWrapper(cmdJob(tunasync.CmdStart)), Action: initializeWrapper(cmdJob(tunasync.CmdStart)),
}, },
{ {
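
The `initialize` hunk above stops hard-coding `https://`: the scheme now follows whether a CA certificate is configured. Below is a standalone Go rendering of just that selection logic; the `config` fields mirror ctl.conf's `manager_addr`, `manager_port`, and `ca_cert`, and the rest is a minimal sketch, not the full tunasynctl code:

```go
package main

import "fmt"

type config struct {
	ManagerAddr string
	ManagerPort int
	CACert      string
}

// baseURL assumes TLS only when a CA certificate is configured,
// matching the behavior introduced in the diff above.
func baseURL(cfg config) string {
	if cfg.CACert != "" {
		return fmt.Sprintf("https://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
	}
	return fmt.Sprintf("http://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
}

func main() {
	fmt.Println(baseURL(config{ManagerAddr: "127.0.0.1", ManagerPort: 12345}))
	// prints: http://127.0.0.1:12345
}
```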

docs/zh_CN/get_started.md

@@ -42,7 +42,7 @@ interval = 1
 [manager]
 api_base = "http://localhost:12345"
-token = "some_token"
+token = ""
 ca_cert = ""
 
 [cgroup]
@@ -90,6 +90,30 @@ $ tunasync worker --config ~/tunasync_demo/worker.conf
 In this example, the mirror data is stored under `/tmp/tunasync/`.
 
+### Control
+
+Check the sync status:
+
+```
+$ tunasynctl list -p 12345 --all
+```
+
+tunasynctl also reads config files, from `/etc/tunasync/ctl.conf` and `~/.config/tunasync/ctl.conf`; values from the latter override those from the former.
+
+The config file looks like:
+
+```
+manager_addr = "127.0.0.1"
+manager_port = 12345
+ca_cert = ""
+```
+
+### Security
+
+The worker and the manager communicate over http(s). If both run on the same host, there is no need for https: leave the manager's `ssl_key` and `ssl_cert` unset, leave the worker's `ca_cert` empty, and let `api_base` start with `http://`.
+
+If encrypted communication is required, the manager must set `ssl_key` and `ssl_cert`, the worker must set `ca_cert`, and `api_base` should start with `https://`.
+
 ## Going further
 
 See:
@@ -100,3 +124,7 @@ $ tunasync worker --help
 ```
 Also take a look at the log directory.
+
+Some examples of worker config files: [workers.conf](workers.conf)
+
+Operations you may need: [tips.md](tips.md)
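
The ctl.conf shown above is TOML, and the tunasynctl diff earlier decodes it with `toml.DecodeFile` from BurntSushi/toml. Here is a minimal sketch of that loading step; the struct tags are assumptions matching the keys shown, not the real tunasynctl struct:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// config mirrors the ctl.conf keys documented above.
type config struct {
	ManagerAddr string `toml:"manager_addr"`
	ManagerPort int    `toml:"manager_port"`
	CACert      string `toml:"ca_cert"`
}

func main() {
	var cfg config
	if _, err := toml.DecodeFile("/etc/tunasync/ctl.conf", &cfg); err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Printf("manager at %s:%d\n", cfg.ManagerAddr, cfg.ManagerPort)
}
```

Decoding the system file first and the user file into the same struct afterwards reproduces the documented override order.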

docs/zh_CN/tips.md (new file, +93 lines)

@@ -0,0 +1,93 @@
## Remove a mirror from a worker

First make sure tunasynctl has its config file in place at `~/.config/tunasync/ctl.conf`:

```toml
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
```

Then:

```shell
$ tunasynctl disable -w <worker_id> <mirror_name>
$ tunasynctl flush
```

## Hot-reload `worker.conf`

```shell
$ tunasynctl reload -w <worker_id>
```

e.g. to remove the `elvish` mirror of `test_worker`:

1. Delete the directory storing the mirror data
2. Delete the corresponding `mirror` section in `worker.conf`
3. Then run:

```shell
$ tunasynctl reload -w test_worker
$ tunasynctl disable -w test_worker elvish
$ tunasynctl flush
```

4. (Optional) Finally remove the log files from the log directory

## Remove a worker

```shell
$ tunasynctl rm-worker -w <worker_id>
```

e.g.

```shell
$ tunasynctl rm-worker -w test_worker
```

## Update the size of a mirror

```shell
$ tunasynctl set-size -w <worker_id> <mirror_name> <size>
```

The trailing <size> argument is set by the operator, or produced by some periodic script.

Since `du -s` is rather slow, the mirror size can be read directly from the rsync log file instead.

## Btrfs filesystem snapshots

If the mirror data lives on a partition formatted with Btrfs, the snapshot feature provided by Btrfs can be enabled. For each mirror, tunasync then updates its snapshot after every successful sync.

Add the following to `worker.conf` to enable Btrfs snapshots:

```toml
[btrfs_snapshot]
enable = true
snapshot_path = "/path/to/snapshot/directory"
```

Here `snapshot_path` is the directory where the snapshots are kept. If the snapshot is what gets published, the sync process becomes atomic from the mirror users' point of view, so users never see files in an intermediate (not yet fully synced) state.

A snapshot path can also be set for an individual mirror inside its `[[mirrors]]` section, e.g.:

```toml
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elvish.io/elvish/"
interval = 1440
snapshot_path = "/data/publish/elvish"
```

**Note:**

If the user running tunasync has no root privileges, make sure that user has write and execute permission on both the mirror directory and the snapshot directory, and mount the corresponding Btrfs partition with the [`user_subvol_rm_allowed` option](https://btrfs.wiki.kernel.org/index.php/Manpage/btrfs(5)#MOUNT_OPTIONS).
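
The set-size tip above suggests reading the mirror size from the rsync log rather than running `du -s`; the workers.conf below does this with `size_pattern` regexes such as `Total size is ([0-9\.]+[KMGTP]?)` (cf. the commit "extract mirror size from rsync provider automatically"). A minimal Go sketch of applying that pattern; the log line is hypothetical:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same capture pattern several mirrors in workers.conf use.
	sizePattern := regexp.MustCompile(`Total size is ([0-9\.]+[KMGTP]?)`)

	// Hypothetical tail of an rsync log.
	log := "sent 1.2K bytes  received 3.4M bytes\nTotal size is 1.95T  speedup is 51.2"

	if m := sizePattern.FindStringSubmatch(log); m != nil {
		fmt.Println("mirror size:", m[1]) // mirror size: 1.95T
	}
}
```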

docs/zh_CN/workers.conf (new file, +817 lines)

@@ -0,0 +1,817 @@
# /home/scripts in this example points to https://github.com/tuna/tunasync-scripts/
[global]
name = "mirror_worker"
log_dir = "/srv/tunasync/log/tunasync/{{.Name}}"
mirror_dir = "/srv/tunasync"
concurrent = 10
interval = 1
[manager]
api_base = "http://localhost:12345"
token = "some_token"
ca_cert = ""
[cgroup]
enable = false
base_path = "/sys/fs/cgroup"
group = "tunasync"
[server]
hostname = "localhost"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = ""
ssl_key = ""
[[mirrors]]
name = "adobe-fonts"
interval = 1440
provider = "command"
upstream = "https://github.com/adobe-fonts"
command = "/home/scripts/adobe-fonts.sh"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "AdoptOpenJDK"
interval = 5760
provider = "command"
command = "/home/scripts/adoptopenjdk.py"
upstream = "https://adoptopenjdk.jfrog.io/adoptopenjdk"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "alpine"
provider = "rsync"
upstream = "rsync://rsync.alpinelinux.org/alpine/"
memory_limit = "256M"
[[mirrors]]
name = "anaconda"
provider = "command"
upstream = "https://repo.continuum.io/"
command = "/home/scripts/anaconda.py --delete"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "apache"
provider = "rsync"
upstream = "rsync://rsync.apache.org/apache-dist/"
use_ipv4 = true
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "armbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/apt/"
memory_limit = "256M"
[[mirrors]]
name = "armbian-releases"
provider = "rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/dl/"
memory_limit = "256M"
[[mirrors]]
name = "bananian"
provider = "command"
upstream = "https://dl.bananian.org/"
command = "/home/scripts/lftp.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "bioconductor"
provider = "rsync"
upstream = "master.bioconductor.org:./"
rsync_options = [ "--rsh=ssh -i /root/id_rsa -o PasswordAuthentication=no -l sync" ]
exclude_file = "/etc/excludes/bioconductor.txt"
memory_limit = "256M"
[[mirrors]]
name = "blender"
provider = "rsync"
upstream = "rsync://mirrors.dotsrc.org/blender/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/blender.txt"
interval = 1440
memory_limit = "256M"
[[mirrors]]
name = "chakra"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/packages/"
memory_limit = "256M"
[[mirrors]]
name = "chakra-releases"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/releases/"
memory_limit = "256M"
[[mirrors]]
name = "chef"
interval = 1440
provider = "command"
upstream = "https://packages.chef.io/repos"
command = "/home/scripts/chef.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "clickhouse"
interval = 2880
provider = "rsync"
upstream = "rsync://repo.yandex.ru/yandexrepo/clickhouse/"
exclude_file = "/etc/excludes/clickhouse.txt"
memory_limit = "256M"
[[mirrors]]
name = "clojars"
provider = "command"
upstream = "s3://clojars-repo-production/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.dualstack.us-east-2.amazonaws.com"
#TUNASYNC_S3_ENDPOINT = "https://s3.us-east-2.amazonaws.com"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "CPAN"
provider = "rsync"
upstream = "rsync://cpan-rsync.perl.org/CPAN/"
memory_limit = "256M"
[[mirrors]]
name = "CRAN"
provider = "rsync"
upstream = "rsync://cran.r-project.org/CRAN/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "CTAN"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/CTAN/"
memory_limit = "256M"
[[mirrors]]
name = "dart-pub"
provider = "command"
upstream = "https://pub.dev/api"
command = "/home/scripts/pub.sh"
interval = 30
docker_image = "tunathu/pub-mirror:latest"
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/dart-pub"
[[mirrors]]
name = "debian"
provider = "command"
upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
command = "/home/scripts/debian.sh sync:archive:debian"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/ftpsync"
docker_volumes = [
"/etc/misc/ftpsync-debian.conf:/ftpsync/etc/ftpsync-debian.conf:ro",
"/log/ftpsync:/home/log/tunasync/ftpsync",
]
[mirrors.env]
FTPSYNC_LOG_DIR = "/home/log/tunasync/ftpsync"
[[mirrors]]
name = "docker-ce"
provider = "command"
upstream = "https://download.docker.com/"
command = "timeout 3h /home/scripts/docker-ce.py --workers 10 --fast-skip"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ELK"
interval = 1440
provider = "command"
upstream = "https://packages.elastic.co"
command = "/home/scripts/ELK.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
WGET_OPTIONS = "-6"
[[mirrors]]
name = "elasticstack"
interval = 1440
provider = "command"
upstream = "https://artifacts.elastic.co/"
command = "/home/scripts/elastic.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "erlang-solutions"
interval = 1440
provider = "command"
upstream = "https://packages.erlang-solutions.com"
command = "/home/scripts/erlang.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "flutter"
interval = 1440
provider = "command"
upstream = "https://storage.googleapis.com/flutter_infra/"
command = "/home/scripts/flutter.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "github-release"
provider = "command"
upstream = "https://api.github.com/repos/"
command = "/home/scripts/github-release.py --workers 5"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
GITHUB_TOKEN = "xxxxx"
[[mirrors]]
name = "gitlab-ce"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ce/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-ee"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ee/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-runner"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/runner/gitlab-runner"
command = "/home/scripts/gitlab-runner.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "grafana"
interval = 1440
provider = "command"
upstream = "https://packages.grafana.com/oss"
command = "/home/scripts/grafana.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "hackage"
provider = "command"
command = "/home/scripts/hackage.sh"
upstream = "https://hackage.haskell.org/"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew-bottles"
provider = "command"
upstream = "https://homebrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment variables
[mirrors.env]
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "influxdata"
interval = 1440
provider = "command"
upstream = "https://repos.influxdata.com"
command = "/home/scripts/influxdata.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "kali"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ftp.nluug.nl/kali/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "kali-images"
provider = "rsync"
upstream = "rsync://ftp.nluug.nl/kali-images/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "KaOS"
provider = "rsync"
upstream = "rsync://kaosx.tk/kaos/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kernel"
provider = "rsync"
upstream = "rsync://rsync.kernel.org/pub/linux/kernel/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kicad"
provider = "command"
upstream = "s3://kicad-downloads/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.cern.ch"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "kodi"
provider = "rsync"
upstream = "rsync://mirror.yandex.ru/mirrors/xbmc/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
use_ipv6 = true
[[mirrors]]
name = "kubernetes"
interval = 2880
provider = "command"
upstream = "http://packages.cloud.google.com"
command = "/home/scripts/kubernetes.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "linuxbrew-bottles"
provider = "command"
upstream = "https://linuxbrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment variables
[mirrors.env]
RUN_LINUXBREW = "true"
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "linuxmint"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://mirrors.kernel.org/linuxmint-packages/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "lxc-images"
provider = "command"
upstream = "https://us.images.linuxcontainers.org/"
command = "/home/scripts/lxc-images.sh"
docker_image = "tunathu/tunasync-scripts:latest"
interval = 720
[[mirrors]]
name = "lyx"
provider = "command"
upstream = "ftp://ftp.lyx.org/pub/lyx/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "mongodb"
interval = 1440
provider = "command"
upstream = "https://repo.mongodb.org"
command = "/home/scripts/mongodb.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "msys2"
provider = "command"
upstream = "http://repo.msys2.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "mysql"
interval = 30
provider = "command"
upstream = "https://repo.mysql.com"
command = "/home/scripts/mysql.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
USE_IPV6 = "1"
[[mirrors]]
name = "nix"
interval = 1440
provider = "command"
upstream = "s3://nix-releases/nix/"
command = "/home/scripts/nix.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
MIRROR_BASE_URL = 'https://mirrors.tuna.tsinghua.edu.cn/nix/'
[[mirrors]]
name = "nix-channels"
interval = 300
provider = "command"
upstream = "https://nixos.org/channels"
command = "timeout 20h /home/scripts/nix-channels.py"
docker_image = "tunathu/nix-channels:latest"
docker_options = [
"--cpus", "20",
]
[[mirrors]]
name = "nodesource"
provider = "command"
upstream = "https://deb.nodesource.com/"
command = "/home/scripts/nodesource.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "openresty"
provider = "command"
upstream = "https://openresty.org/package/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "packagist"
provider = "command"
upstream = "http://packagist.org/"
command = "/home/scripts/packagist.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "proxmox"
interval = 1440
provider = "command"
upstream = "http://download.proxmox.com"
command = "/home/scripts/proxmox.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "pypi"
provider = "command"
upstream = "https://pypi.python.org/"
command = "/home/scripts/pypi.sh"
docker_image = "tunathu/tunasync-scripts:latest"
interval = 5
[[mirrors]]
name = "qt"
provider = "rsync"
upstream = "rsync://master.qt-project.org/qt-all/"
exclude_file = "/etc/excludes/qt.txt"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "raspberrypi"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://apt-repo.raspberrypi.org/archive/debian/"
memory_limit = "256M"
[[mirrors]]
name = "raspbian-images"
interval = 5760
provider = "command"
upstream = "https://downloads.raspberrypi.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x ^icons/$ -c --only-missing -v --no-perms"
[[mirrors]]
name = "raspbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://archive.raspbian.org/archive/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "redhat"
provider = "rsync"
upstream = "rsync://ftp.redhat.com/redhat/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
exclude_file = "/etc/excludes/redhat.txt"
interval = 1440
[mirrors.env]
RSYNC_PROXY="127.0.0.1:8123"
[[mirrors]]
name = "remi"
interval = 1440
provider = "command"
upstream = "rsync://rpms.remirepo.net"
command = "/home/scripts/remi.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "repo-ck"
provider = "command"
upstream = "http://repo-ck.com"
command = "/home/scripts/repo-ck.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ros"
provider = "rsync"
upstream = "rsync://mirror.umd.edu/packages.ros.org/ros/"
memory_limit = "256M"
[[mirrors]]
name = "ros2"
interval = 1440
provider = "command"
upstream = "http://packages.ros.org/ros2"
command = "/home/scripts/ros2.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "rubygems"
provider = "command"
upstream = "https://rubygems.org"
command = "/home/scripts/rubygems.sh"
docker_image = "tunathu/rubygems-mirror"
interval = 60
# set environment variables
[mirrors.env]
INIT = "0"
[[mirrors]]
name = "rudder"
interval = 2880
provider = "command"
upstream = "https://repository.rudder.io"
command = "/home/scripts/rudder.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "rustup"
provider = "command"
upstream = "https://rustup.rs/"
command = "/home/scripts/rustup.sh"
interval = 1440
docker_image = "tunathu/rustup-mirror:latest"
docker_volumes = [
]
docker_options = [
]
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/rustup"
[[mirrors]]
name = "saltstack"
interval = 1440 # interval required by http://repo.saltstack.com/#mirror
provider = "command"
upstream = "s3://s3/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.repo.saltstack.com"
TUNASYNC_AWS_OPTIONS = "--delete --exact-timestamps"
[[mirrors]]
name = "solus"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/solus/"
rsync_options = [ "--exclude", "/shannon", "--exclude", "/unstable" ]
memory_limit = "256M"
[[mirrors]]
name = "stackage"
provider = "command"
command = "/home/scripts/stackage.py"
upstream = "https://www.stackage.org/"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
GIT_COMMITTER_NAME = "TUNA mirrors"
GIT_COMMITTER_EMAIL = "mirrors@tuna.tsinghua.edu.cn"
[[mirrors]]
name = "steamos"
interval = 1440
provider = "command"
upstream = "http://repo.steampowered.com"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer --exclude icons/ "
[[mirrors]]
name = "termux"
interval = 1440
provider = "command"
upstream = "https://dl.bintray.com/termux/termux-packages-24/"
command = "/home/scripts/termux.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ubuntu"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://archive.ubuntu.com/ubuntu/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "ubuntu-ports"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ports.ubuntu.com/ubuntu-ports/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/ubuntu-ports-exclude.txt"
memory_limit = "256M"
[[mirrors]]
name = "virtualbox"
interval = 1440
provider = "command"
upstream = "http://download.virtualbox.org/virtualbox"
command = "/home/scripts/virtualbox.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "winehq"
provider = "command"
upstream = "ftp://ftp.winehq.org/pub/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x wine-builds.old/ -x /\\..+"
[[mirrors]]
name = "zabbix"
provider = "rsync"
upstream = "rsync://repo.zabbix.com/mirror/"
rsync_options = [ "--delete-excluded", "--chmod=o+r,Do+x,Fa-x" ]
memory_limit = "256M"
[[mirrors]]
name = "AOSP"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://android.googlesource.com/mirror/manifest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "lineageOS"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://github.com/LineageOS/mirror"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "chromiumos"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/cros.sh"
upstream = "https://chromium.googlesource.com"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
USE_BITMAP_INDEX = "1"
CONCURRENT_JOBS = "20"
[[mirrors]]
name = "crates.io-index.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "https://github.com/rust-lang/crates.io-index.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "flutter-sdk.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/flutter/flutter.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gcc.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://gcc.gnu.org/git/gcc.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gentoo-portage.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/gentoo-mirror/gentoo.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "git-repo"
provider = "command"
command = "/home/tunasync-scripts/git-repo.sh"
upstream = "https://gerrit.googlesource.com/git-repo"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew"
provider = "command"
command = "/home/tunasync-scripts/homebrew.sh"
upstream = "https://github.com/Homebrew"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "CocoaPods"
provider = "command"
command = "/home/tunasync-scripts/cocoapods.sh"
upstream = "https://github.com/CocoaPods"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "pybombs"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/pybombs.sh"
upstream = "https://github.com/scateu/pybombs-mirror/"
docker_image = "tunathu/tunasync-scripts:latest"
docker_volumes = ["/home/pybombs-mirror:/opt/pybombs-mirror"]
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[mirrors.env]
PYBOMBS_MIRROR_SCRIPT_PATH = "/opt/pybombs-mirror"
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/pybombs"
[[mirrors]]
name = "llvm"
provider = "command"
command = "/home/tunasync-scripts/llvm.sh"
upstream = "https://git.llvm.org/git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
# vim: ft=toml
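
Many of the command-provider mirrors above set a size_pattern so that tunasync can report a mirror's size scraped from the sync log. A minimal standalone sketch of that matching logic, modeled on ExtractSizeFromLog in internal/util.go further down (the first capture group of the last match wins); the log content here is hypothetical:

package main

import (
    "fmt"
    "regexp"
)

// extractSize applies a size_pattern the way tunasync does: collect all
// matches in the log and return the first capture group of the last one.
func extractSize(log, pattern string) string {
    matches := regexp.MustCompile(pattern).FindAllStringSubmatch(log, -1)
    if len(matches) == 0 {
        return ""
    }
    return matches[len(matches)-1][1]
}

func main() {
    log := "syncing...\nsize-sum: 1.21T\nsyncing again...\nsize-sum: 1.22T\n" // hypothetical log tail
    fmt.Println(extractSize(log, `size-sum: ([0-9\.]+[KMGTP])`))             // prints "1.22T"
}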

go.mod (new file, 21 lines)

@@ -0,0 +1,21 @@
module github.com/tuna/tunasync
go 1.13
require (
github.com/BurntSushi/toml v0.3.1
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239
github.com/boltdb/bolt v1.3.1
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035
github.com/gin-gonic/gin v1.5.0
github.com/imdario/mergo v0.3.9
github.com/mattn/goveralls v0.0.5 // indirect
github.com/pkg/profile v1.4.0
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46
github.com/smartystreets/goconvey v1.6.4
github.com/urfave/cli v1.22.3
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 // indirect
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
)

go.sum (new file, 104 lines)

@@ -0,0 +1,104 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27 h1:HHUr4P/aKh4quafGxDT9LDasjGdlGkzLbfmmrlng3kA=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035 h1:4e+UEZaKPx0ZEiCMPUHMV51RGwbb1VJGCYqRFn/qmWM=
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA=
github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg=
github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.5.0 h1:fi+bqFAx/oLK54somfCtEZs9HeH1LHVoEPUgARpTqyc=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/goveralls v0.0.5 h1:spfq8AyZ0cCk57Za6/juJ5btQxeE1FaEGMdfcI+XO48=
github.com/mattn/goveralls v0.0.5/go.mod h1:Xg2LHi51faXLyKXwsndxiW6uxEEQT9+3sjGzzwU4xy0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli v1.22.3 h1:FpNT6zq26xNpHZy08emi755QwzLPs6Pukqjlc7RfOMU=
github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200113040837-eac381796e91/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 h1:6TB4+MaZlkcSsJDu+BS5yxSEuZIYhjWz+jhbSLEZylI=
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvRQyEAKbw1xc=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@@ -24,9 +24,12 @@ func InitLogger(verbose, debug, withSystemd bool) {
     if debug {
         logging.SetLevel(logging.DEBUG, "tunasync")
+        logging.SetLevel(logging.DEBUG, "tunasynctl")
     } else if verbose {
         logging.SetLevel(logging.INFO, "tunasync")
+        logging.SetLevel(logging.INFO, "tunasynctl")
     } else {
         logging.SetLevel(logging.NOTICE, "tunasync")
+        logging.SetLevel(logging.NOTICE, "tunasynctl")
     }
 }


@@ -5,17 +5,20 @@ import (
     "time"
 )

-// A StatusUpdateMsg represents a msg when
+// A MirrorStatus represents a msg when
 // a worker has done syncing
 type MirrorStatus struct {
     Name       string     `json:"name"`
     Worker     string     `json:"worker"`
     IsMaster   bool       `json:"is_master"`
     Status     SyncStatus `json:"status"`
     LastUpdate time.Time  `json:"last_update"`
-    Upstream   string     `json:"upstream"`
-    Size       string     `json:"size"`
-    ErrorMsg   string     `json:"error_msg"`
+    LastStarted time.Time `json:"last_started"`
+    LastEnded   time.Time `json:"last_ended"`
+    Scheduled   time.Time `json:"next_schedule"`
+    Upstream    string    `json:"upstream"`
+    Size        string    `json:"size"`
+    ErrorMsg    string    `json:"error_msg"`
 }

 // A WorkerStatus is the information struct that describe
@@ -27,6 +30,15 @@ type WorkerStatus struct {
     LastOnline time.Time `json:"last_online"` // last seen
 }

+type MirrorSchedules struct {
+    Schedules []MirrorSchedule `json:"schedules"`
+}
+
+type MirrorSchedule struct {
+    MirrorName   string    `json:"name"`
+    NextSchedule time.Time `json:"next_schedule"`
+}
+
 // A CmdVerb is an action to a job or worker
 type CmdVerb uint8

@@ -67,9 +79,10 @@ func (c CmdVerb) String() string {
 // A WorkerCmd is the command message send from the
 // manager to a worker
 type WorkerCmd struct {
     Cmd      CmdVerb  `json:"cmd"`
     MirrorID string   `json:"mirror_id"`
     Args     []string `json:"args"`
+    Options  map[string]bool `json:"options"`
 }

 func (c WorkerCmd) String() string {

@@ -82,8 +95,9 @@ func (c WorkerCmd) String() string {
 // A ClientCmd is the command message send from client
 // to the manager
 type ClientCmd struct {
     Cmd      CmdVerb  `json:"cmd"`
     MirrorID string   `json:"mirror_id"`
     WorkerID string   `json:"worker_id"`
     Args     []string `json:"args"`
+    Options  map[string]bool `json:"options"`
 }
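
The MirrorSchedule/MirrorSchedules structs added here define the JSON payload a worker POSTs to the manager's new /workers/:id/schedules route (see manager/server.go below). A small sketch of the wire format, using local copies of the structs so it runs standalone:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// Local copies of the structs above, just to show the JSON they produce.
type mirrorSchedule struct {
    MirrorName   string    `json:"name"`
    NextSchedule time.Time `json:"next_schedule"`
}

type mirrorSchedules struct {
    Schedules []mirrorSchedule `json:"schedules"`
}

func main() {
    s := mirrorSchedules{Schedules: []mirrorSchedule{
        {MirrorName: "arch-sync1", NextSchedule: time.Now().Add(10 * time.Minute)},
        {MirrorName: "arch-sync2", NextSchedule: time.Now().Add(7 * time.Minute)},
    }}
    b, _ := json.MarshalIndent(s, "", "  ")
    fmt.Println(string(b)) // {"schedules": [{"name": "arch-sync1", "next_schedule": "..."}, ...]}
}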

internal/status_web.go (new file, 72 lines)

@@ -0,0 +1,72 @@
package internal
import (
"encoding/json"
"strconv"
"time"
)
type textTime struct {
time.Time
}
func (t textTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
s := string(b)
t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
*t = textTime{t2}
return err
}
type stampTime struct {
time.Time
}
func (t stampTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
ts, err := strconv.Atoi(string(b))
if err != nil {
return err
}
*t = stampTime{time.Unix(int64(ts), 0)}
return err
}
// WebMirrorStatus is the mirror status to be shown in the web page
type WebMirrorStatus struct {
Name string `json:"name"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate textTime `json:"last_update"`
LastUpdateTs stampTime `json:"last_update_ts"`
LastStarted textTime `json:"last_started"`
LastStartedTs stampTime `json:"last_started_ts"`
LastEnded textTime `json:"last_ended"`
LastEndedTs stampTime `json:"last_ended_ts"`
Scheduled textTime `json:"next_schedule"`
ScheduledTs stampTime `json:"next_schedule_ts"`
Upstream string `json:"upstream"`
Size string `json:"size"` // approximate size
}
func BuildWebMirrorStatus(m MirrorStatus) WebMirrorStatus {
return WebMirrorStatus{
Name: m.Name,
IsMaster: m.IsMaster,
Status: m.Status,
LastUpdate: textTime{m.LastUpdate},
LastUpdateTs: stampTime{m.LastUpdate},
LastStarted: textTime{m.LastStarted},
LastStartedTs: stampTime{m.LastStarted},
LastEnded: textTime{m.LastEnded},
LastEndedTs: stampTime{m.LastEnded},
Scheduled: textTime{m.Scheduled},
ScheduledTs: stampTime{m.Scheduled},
Upstream: m.Upstream,
Size: m.Size,
}
}
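
textTime and stampTime give every instant in WebMirrorStatus two JSON encodings: a human-readable string (last_update, last_started, ...) plus Unix seconds under the paired *_ts keys, so web frontends can pick whichever is convenient. A trimmed-down sketch of the resulting JSON, reduced to a single timestamp pair with an illustrative value:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// demoStatus mimics one timestamp pair of WebMirrorStatus without the
// custom marshaler types, to make the on-the-wire shape explicit.
type demoStatus struct {
    Name         string `json:"name"`
    LastUpdate   string `json:"last_update"`
    LastUpdateTs int64  `json:"last_update_ts"`
}

func main() {
    ts := time.Date(2020, time.May, 25, 19, 21, 27, 0, time.UTC)
    d := demoStatus{
        Name:         "example", // hypothetical mirror name
        LastUpdate:   ts.Format("2006-01-02 15:04:05 -0700"), // same layout as textTime
        LastUpdateTs: ts.Unix(),                              // same value as stampTime
    }
    b, _ := json.Marshal(d)
    fmt.Println(string(b))
    // {"name":"example","last_update":"2020-05-25 19:21:27 +0000","last_update_ts":1590434487}
}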

internal/status_web_test.go (new file, 98 lines)

@@ -0,0 +1,98 @@
package internal
import (
"encoding/json"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func TestStatus(t *testing.T) {
Convey("status json ser-de should work", t, func() {
tz := "Asia/Tokyo"
loc, err := time.LoadLocation(tz)
So(err, ShouldBeNil)
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
m := WebMirrorStatus{
Name: "tunalinux",
Status: Success,
LastUpdate: textTime{t},
LastUpdateTs: stampTime{t},
LastStarted: textTime{t},
LastStartedTs: stampTime{t},
LastEnded: textTime{t},
LastEndedTs: stampTime{t},
Scheduled: textTime{t},
ScheduledTs: stampTime{t},
Size: "5GB",
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
}
b, err := json.Marshal(m)
So(err, ShouldBeNil)
//fmt.Println(string(b))
var m2 WebMirrorStatus
err = json.Unmarshal(b, &m2)
So(err, ShouldBeNil)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
Convey("BuildWebMirrorStatus should work", t, func() {
m := MirrorStatus{
Name: "arch-sync3",
Worker: "testWorker",
IsMaster: true,
Status: Failed,
LastUpdate: time.Now().Add(-time.Minute * 30),
LastStarted: time.Now().Add(-time.Minute * 1),
LastEnded: time.Now(),
Scheduled: time.Now().Add(time.Minute * 5),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
}
var m2 WebMirrorStatus
m2 = BuildWebMirrorStatus(m)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
}


@@ -6,11 +6,37 @@ import (
     "crypto/x509"
     "encoding/json"
     "errors"
+    "fmt"
     "io/ioutil"
     "net/http"
+    "os/exec"
+    "regexp"
     "time"
 )

+var rsyncExitValues = map[int]string{
+    0:  "Success",
+    1:  "Syntax or usage error",
+    2:  "Protocol incompatibility",
+    3:  "Errors selecting input/output files, dirs",
+    4:  "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them; or an option was specified that is supported by the client and not by the server.",
+    5:  "Error starting client-server protocol",
+    6:  "Daemon unable to append to log-file",
+    10: "Error in socket I/O",
+    11: "Error in file I/O",
+    12: "Error in rsync protocol data stream",
+    13: "Errors with program diagnostics",
+    14: "Error in IPC code",
+    20: "Received SIGUSR1 or SIGINT",
+    21: "Some error returned by waitpid()",
+    22: "Error allocating core memory buffers",
+    23: "Partial transfer due to error",
+    24: "Partial transfer due to vanished source files",
+    25: "The --max-delete limit stopped deletions",
+    30: "Timeout in data send/receive",
+    35: "Timeout waiting for daemon connection",
+}
+
 // GetTLSConfig generate tls.Config from CAFile
 func GetTLSConfig(CAFile string) (*tls.Config, error) {
     caCert, err := ioutil.ReadFile(CAFile)

@@ -84,3 +110,46 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
     }
     return resp, json.Unmarshal(body, obj)
 }
+
+// FindAllSubmatchInFile calls re.FindAllSubmatch to find matches in given file
+func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]byte, err error) {
+    if fileName == "/dev/null" {
+        err = errors.New("Invalid log file")
+        return
+    }
+    if content, err := ioutil.ReadFile(fileName); err == nil {
+        matches = re.FindAllSubmatch(content, -1)
+        // fmt.Printf("FindAllSubmatchInFile: %q\n", matches)
+    }
+    return
+}
+
+// ExtractSizeFromLog uses a regexp to extract the size from log files
+func ExtractSizeFromLog(logFile string, re *regexp.Regexp) string {
+    matches, _ := FindAllSubmatchInFile(logFile, re)
+    if matches == nil || len(matches) == 0 {
+        return ""
+    }
+    // return the first capture group of the last occurrence
+    return string(matches[len(matches)-1][1])
+}
+
+// ExtractSizeFromRsyncLog extracts the size from rsync logs
+func ExtractSizeFromRsyncLog(logFile string) string {
+    // (?m) flag enables multi-line mode
+    re := regexp.MustCompile(`(?m)^Total file size: ([0-9\.]+[KMGTP]?) bytes`)
+    return ExtractSizeFromLog(logFile, re)
+}
+
+// TranslateRsyncErrorCode translates the exit code of rsync to a message
+func TranslateRsyncErrorCode(cmdErr error) (exitCode int, msg string) {
+    if exiterr, ok := cmdErr.(*exec.ExitError); ok {
+        exitCode = exiterr.ExitCode()
+        strerr, valid := rsyncExitValues[exitCode]
+        if valid {
+            msg = fmt.Sprintf("rsync error: %s", strerr)
+        }
+    }
+    return
+}
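
A minimal usage sketch for TranslateRsyncErrorCode, assuming the package is importable as github.com/tuna/tunasync/internal and rsync is installed; the deliberately bogus flag is just a quick way to provoke exit code 1:

package main

import (
    "fmt"
    "os/exec"

    "github.com/tuna/tunasync/internal"
)

func main() {
    // rsync rejects the unknown flag and exits with code 1.
    err := exec.Command("rsync", "--bogus-flag").Run()
    if err != nil {
        // Map the exit status to the message from rsyncExitValues.
        code, msg := internal.TranslateRsyncErrorCode(err)
        if msg != "" {
            fmt.Printf("exit code %d: %s\n", code, msg) // exit code 1: rsync error: Syntax or usage error
        }
    }
}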

internal/util_test.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package internal
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
func TestExtractSizeFromRsyncLog(t *testing.T) {
realLogContent := `
Number of files: 998,470 (reg: 925,484, dir: 58,892, link: 14,094)
Number of created files: 1,049 (reg: 1,049)
Number of deleted files: 1,277 (reg: 1,277)
Number of regular files transferred: 5,694
Total file size: 1.33T bytes
Total transferred file size: 2.86G bytes
Literal data: 780.62M bytes
Matched data: 2.08G bytes
File list size: 37.55M
File list generation time: 7.845 seconds
File list transfer time: 0.000 seconds
Total bytes sent: 7.55M
Total bytes received: 823.25M
sent 7.55M bytes received 823.25M bytes 5.11M bytes/sec
total size is 1.33T speedup is 1,604.11
`
Convey("Log parser should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
logFile := filepath.Join(tmpDir, "rs.log")
err = ioutil.WriteFile(logFile, []byte(realLogContent), 0755)
So(err, ShouldBeNil)
res := ExtractSizeFromRsyncLog(logFile)
So(res, ShouldEqual, "1.33T")
})
}

internal/version.go (new file, 4 lines)

@@ -0,0 +1,4 @@
package internal
// Version of the program
const Version string = "0.6.3"


@@ -2,7 +2,7 @@ package manager
 import (
     "github.com/BurntSushi/toml"
-    "gopkg.in/urfave/cli.v1"
+    "github.com/urfave/cli"
 )

 // A Config is the top-level toml-serializaible config struct


@@ -9,7 +9,7 @@ import (
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
"gopkg.in/urfave/cli.v1" "github.com/urfave/cli"
) )
func TestConfig(t *testing.T) { func TestConfig(t *testing.T) {


@@ -14,6 +14,7 @@ type dbAdapter interface {
     Init() error
     ListWorkers() ([]WorkerStatus, error)
     GetWorker(workerID string) (WorkerStatus, error)
+    DeleteWorker(workerID string) error
     CreateWorker(w WorkerStatus) (WorkerStatus, error)
     UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error)
     GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error)

@@ -95,6 +96,19 @@ func (b *boltAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
     return
 }

+func (b *boltAdapter) DeleteWorker(workerID string) (err error) {
+    err = b.db.Update(func(tx *bolt.Tx) error {
+        bucket := tx.Bucket([]byte(_workerBucketKey))
+        v := bucket.Get([]byte(workerID))
+        if v == nil {
+            return fmt.Errorf("invalid workerID %s", workerID)
+        }
+        err := bucket.Delete([]byte(workerID))
+        return err
+    })
+    return
+}
+
 func (b *boltAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
     err := b.db.Update(func(tx *bolt.Tx) error {
         bucket := tx.Bucket([]byte(_workerBucketKey))

@@ -125,7 +139,7 @@ func (b *boltAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus
         bucket := tx.Bucket([]byte(_statusBucketKey))
         v := bucket.Get([]byte(id))
         if v == nil {
-            return fmt.Errorf("no mirror %s exists in worker %s", mirrorID, workerID)
+            return fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
         }
         err := json.Unmarshal(v, &m)
         return err

@@ -182,7 +196,7 @@ func (b *boltAdapter) FlushDisabledJobs() (err error) {
             err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
             continue
         }
-        if m.Status == Disabled {
+        if m.Status == Disabled || len(m.Name) == 0 {
             err = c.Delete()
         }
     }


@@ -40,51 +40,75 @@ func TestBoltAdapter(t *testing.T) {
             So(err, ShouldBeNil)
         }

-        Convey("get exists worker", func() {
+        Convey("get existent worker", func() {
             _, err := boltDB.GetWorker(testWorkerIDs[0])
             So(err, ShouldBeNil)
         })

-        Convey("list exist worker", func() {
+        Convey("list existent workers", func() {
             ws, err := boltDB.ListWorkers()
             So(err, ShouldBeNil)
             So(len(ws), ShouldEqual, 2)
         })

-        Convey("get inexist worker", func() {
+        Convey("get non-existent worker", func() {
             _, err := boltDB.GetWorker("invalid workerID")
             So(err, ShouldNotBeNil)
         })
+
+        Convey("delete existent worker", func() {
+            err := boltDB.DeleteWorker(testWorkerIDs[0])
+            So(err, ShouldBeNil)
+            _, err = boltDB.GetWorker(testWorkerIDs[0])
+            So(err, ShouldNotBeNil)
+            ws, err := boltDB.ListWorkers()
+            So(err, ShouldBeNil)
+            So(len(ws), ShouldEqual, 1)
+        })
+
+        Convey("delete non-existent worker", func() {
+            err := boltDB.DeleteWorker("invalid workerID")
+            So(err, ShouldNotBeNil)
+            ws, err := boltDB.ListWorkers()
+            So(err, ShouldBeNil)
+            So(len(ws), ShouldEqual, 2)
+        })
     })

     Convey("update mirror status", func() {
         status := []MirrorStatus{
             MirrorStatus{
                 Name:       "arch-sync1",
                 Worker:     testWorkerIDs[0],
                 IsMaster:   true,
                 Status:     Success,
                 LastUpdate: time.Now(),
-                Upstream:   "mirrors.tuna.tsinghua.edu.cn",
-                Size:       "3GB",
+                LastStarted: time.Now().Add(-time.Minute),
+                LastEnded:   time.Now(),
+                Upstream:    "mirrors.tuna.tsinghua.edu.cn",
+                Size:        "3GB",
             },
             MirrorStatus{
                 Name:       "arch-sync2",
                 Worker:     testWorkerIDs[1],
                 IsMaster:   true,
                 Status:     Disabled,
-                LastUpdate: time.Now(),
-                Upstream:   "mirrors.tuna.tsinghua.edu.cn",
-                Size:       "4GB",
+                LastUpdate:  time.Now().Add(-time.Hour),
+                LastStarted: time.Now().Add(-time.Minute),
+                LastEnded:   time.Now(),
+                Upstream:    "mirrors.tuna.tsinghua.edu.cn",
+                Size:        "4GB",
             },
             MirrorStatus{
                 Name:       "arch-sync3",
                 Worker:     testWorkerIDs[1],
                 IsMaster:   true,
                 Status:     Success,
-                LastUpdate: time.Now(),
-                Upstream:   "mirrors.tuna.tsinghua.edu.cn",
-                Size:       "4GB",
+                LastUpdate:  time.Now().Add(-time.Minute),
+                LastStarted: time.Now().Add(-time.Second),
+                LastEnded:   time.Now(),
+                Upstream:    "mirrors.tuna.tsinghua.edu.cn",
+                Size:        "4GB",
             },
         }


@@ -1,6 +1,7 @@
 package manager

 import (
+    "errors"
     "fmt"
     "net/http"
     "time"

@@ -83,10 +84,14 @@ func GetTUNASyncManager(cfg *Config) *Manager {
     // workerID should be valid in this route group
     workerValidateGroup := s.engine.Group("/workers", s.workerIDValidator)
     {
+        // delete specified worker
+        workerValidateGroup.DELETE(":id", s.deleteWorker)
         // get job list
         workerValidateGroup.GET(":id/jobs", s.listJobsOfWorker)
         // post job status
         workerValidateGroup.POST(":id/jobs/:job", s.updateJobOfWorker)
+        workerValidateGroup.POST(":id/jobs/:job/size", s.updateMirrorSize)
+        workerValidateGroup.POST(":id/schedules", s.updateSchedulesOfWorker)
     }

     // for tunasynctl to post commands

@@ -133,11 +138,11 @@ func (s *Manager) listAllJobs(c *gin.Context) {
         s.returnErrJSON(c, http.StatusInternalServerError, err)
         return
     }
-    webMirStatusList := []webMirrorStatus{}
+    webMirStatusList := []WebMirrorStatus{}
     for _, m := range mirrorStatusList {
         webMirStatusList = append(
             webMirStatusList,
-            convertMirrorStatus(m),
+            BuildWebMirrorStatus(m),
         )
     }
     c.JSON(http.StatusOK, webMirStatusList)

@@ -157,6 +162,22 @@ func (s *Manager) flushDisabledJobs(c *gin.Context) {
     c.JSON(http.StatusOK, gin.H{_infoKey: "flushed"})
 }

+// deleteWorker deletes one worker by id
+func (s *Manager) deleteWorker(c *gin.Context) {
+    workerID := c.Param("id")
+    err := s.adapter.DeleteWorker(workerID)
+    if err != nil {
+        err := fmt.Errorf("failed to delete worker: %s",
+            err.Error(),
+        )
+        c.Error(err)
+        s.returnErrJSON(c, http.StatusInternalServerError, err)
+        return
+    }
+    logger.Noticef("Worker <%s> deleted", workerID)
+    c.JSON(http.StatusOK, gin.H{_infoKey: "deleted"})
+}
+
 // listWrokers respond with informations of all the workers
 func (s *Manager) listWorkers(c *gin.Context) {
     var workerInfos []WorkerStatus

@@ -220,35 +241,94 @@ func (s *Manager) returnErrJSON(c *gin.Context, code int, err error) {
     })
 }

+func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
+    workerID := c.Param("id")
+    var schedules MirrorSchedules
+    c.BindJSON(&schedules)
+
+    for _, schedule := range schedules.Schedules {
+        mirrorName := schedule.MirrorName
+        if len(mirrorName) == 0 {
+            s.returnErrJSON(
+                c, http.StatusBadRequest,
+                errors.New("Mirror Name should not be empty"),
+            )
+        }
+
+        curStatus, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
+        if err != nil {
+            fmt.Errorf("failed to get job %s of worker %s: %s",
+                mirrorName, workerID, err.Error(),
+            )
+            continue
+        }
+
+        if curStatus.Scheduled == schedule.NextSchedule {
+            // no changes, skip update
+            continue
+        }
+
+        curStatus.Scheduled = schedule.NextSchedule
+        _, err = s.adapter.UpdateMirrorStatus(workerID, mirrorName, curStatus)
+        if err != nil {
+            err := fmt.Errorf("failed to update job %s of worker %s: %s",
+                mirrorName, workerID, err.Error(),
+            )
+            c.Error(err)
+            s.returnErrJSON(c, http.StatusInternalServerError, err)
+            return
+        }
+    }
+    type empty struct{}
+    c.JSON(http.StatusOK, empty{})
+}
+
 func (s *Manager) updateJobOfWorker(c *gin.Context) {
     workerID := c.Param("id")
     var status MirrorStatus
     c.BindJSON(&status)

     mirrorName := status.Name
+    if len(mirrorName) == 0 {
+        s.returnErrJSON(
+            c, http.StatusBadRequest,
+            errors.New("Mirror Name should not be empty"),
+        )
+    }
+
     curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
+
+    curTime := time.Now()
+
+    if status.Status == PreSyncing && curStatus.Status != PreSyncing {
+        status.LastStarted = curTime
+    } else {
+        status.LastStarted = curStatus.LastStarted
+    }
     // Only successful syncing needs last_update
     if status.Status == Success {
-        status.LastUpdate = time.Now()
+        status.LastUpdate = curTime
     } else {
         status.LastUpdate = curStatus.LastUpdate
     }
+    if status.Status == Success || status.Status == Failed {
+        status.LastEnded = curTime
+    } else {
+        status.LastEnded = curStatus.LastEnded
+    }
+
+    // Only message with meaningful size updates the mirror size
+    if len(curStatus.Size) > 0 && curStatus.Size != "unknown" {
+        if len(status.Size) == 0 || status.Size == "unknown" {
+            status.Size = curStatus.Size
+        }
+    }

     // for logging
     switch status.Status {
+    case Success:
+        logger.Noticef("Job [%s] @<%s> success", status.Name, status.Worker)
+    case Failed:
+        logger.Warningf("Job [%s] @<%s> failed", status.Name, status.Worker)
     case Syncing:
         logger.Noticef("Job [%s] @<%s> starts syncing", status.Name, status.Worker)
+    case Disabled:
+        logger.Noticef("Job [%s] @<%s> disabled", status.Name, status.Worker)
+    case Paused:
+        logger.Noticef("Job [%s] @<%s> paused", status.Name, status.Worker)
     default:
-        logger.Infof("Job [%s] @<%s> status: %s", status.Name, status.Worker, status.Status)
+        logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status)
     }

     newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)

@@ -263,6 +343,45 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
     c.JSON(http.StatusOK, newStatus)
 }

+func (s *Manager) updateMirrorSize(c *gin.Context) {
+    workerID := c.Param("id")
+    type SizeMsg struct {
+        Name string `json:"name"`
+        Size string `json:"size"`
+    }
+    var msg SizeMsg
+    c.BindJSON(&msg)
+
+    mirrorName := msg.Name
+    status, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
+    if err != nil {
+        logger.Errorf(
+            "Failed to get status of mirror %s @<%s>: %s",
+            mirrorName, workerID, err.Error(),
+        )
+        s.returnErrJSON(c, http.StatusInternalServerError, err)
+        return
+    }
+
+    // Only message with meaningful size updates the mirror size
+    if len(msg.Size) > 0 || msg.Size != "unknown" {
+        status.Size = msg.Size
+    }
+
+    logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size)
+
+    newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
+    if err != nil {
+        err := fmt.Errorf("failed to update job %s of worker %s: %s",
+            mirrorName, workerID, err.Error(),
+        )
+        c.Error(err)
+        s.returnErrJSON(c, http.StatusInternalServerError, err)
+        return
+    }
+    c.JSON(http.StatusOK, newStatus)
+}
+
 func (s *Manager) handleClientCmd(c *gin.Context) {
     var clientCmd ClientCmd
     c.BindJSON(&clientCmd)

@@ -286,6 +405,7 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
         Cmd:      clientCmd.Cmd,
         MirrorID: clientCmd.MirrorID,
         Args:     clientCmd.Args,
+        Options:  clientCmd.Options,
     }

     // update job status, even if the job did not disable successfully,

@@ -21,9 +21,16 @@ const (
 )

 func TestHTTPServer(t *testing.T) {
+    var listenPort = 5000
     Convey("HTTP server should work", t, func(ctx C) {
+        listenPort++
+        port := listenPort
+        addr := "127.0.0.1"
+        baseURL := fmt.Sprintf("http://%s:%d", addr, port)
         InitLogger(true, true, false)
-        s := GetTUNASyncManager(&Config{Debug: false})
+        s := GetTUNASyncManager(&Config{Debug: true})
+        s.cfg.Server.Addr = addr
+        s.cfg.Server.Port = port
         So(s, ShouldNotBeNil)
         s.setDBAdapter(&mockDBAdapter{
             workerStore: map[string]WorkerStatus{

@@ -32,12 +39,8 @@ func TestHTTPServer(t *testing.T) {
             }},
             statusStore: make(map[string]MirrorStatus),
         })
-        port := rand.Intn(10000) + 20000
-        baseURL := fmt.Sprintf("http://127.0.0.1:%d", port)
-        go func() {
-            s.engine.Run(fmt.Sprintf("127.0.0.1:%d", port))
-        }()
-        time.Sleep(50 * time.Microsecond)
+        go s.Run()
+        time.Sleep(50 * time.Millisecond)
         resp, err := http.Get(baseURL + "/ping")
         So(err, ShouldBeNil)
         So(resp.StatusCode, ShouldEqual, http.StatusOK)

@@ -79,7 +82,34 @@ func TestHTTPServer(t *testing.T) {
             So(len(actualResponseObj), ShouldEqual, 2)
         })

-        Convey("flush disabled jobs", func(ctx C) {
+        Convey("delete an existent worker", func(ctx C) {
+            req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, w.ID), nil)
+            So(err, ShouldBeNil)
+            clt := &http.Client{}
+            resp, err := clt.Do(req)
+            So(err, ShouldBeNil)
+            defer resp.Body.Close()
+            res := map[string]string{}
+            err = json.NewDecoder(resp.Body).Decode(&res)
+            So(err, ShouldBeNil)
+            So(res[_infoKey], ShouldEqual, "deleted")
+        })
+
+        Convey("delete non-existent worker", func(ctx C) {
+            invalidWorker := "test_worker233"
+            req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, invalidWorker), nil)
+            So(err, ShouldBeNil)
+            clt := &http.Client{}
+            resp, err := clt.Do(req)
+            So(err, ShouldBeNil)
+            defer resp.Body.Close()
+            res := map[string]string{}
+            err = json.NewDecoder(resp.Body).Decode(&res)
+            So(err, ShouldBeNil)
+            So(res[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
+        })
+
+        Convey("flush disabled jobs", func(ctx C) {
             req, err := http.NewRequest("DELETE", baseURL+"/jobs/disabled", nil)
             So(err, ShouldBeNil)
             clt := &http.Client{}

@@ -99,11 +129,11 @@ func TestHTTPServer(t *testing.T) {
                 IsMaster: true,
                 Status:   Success,
                 Upstream: "mirrors.tuna.tsinghua.edu.cn",
-                Size:     "3GB",
+                Size:     "unknown",
             }
             resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
-            defer resp.Body.Close()
             So(err, ShouldBeNil)
+            defer resp.Body.Close()
             So(resp.StatusCode, ShouldEqual, http.StatusOK)

             Convey("list mirror status of an existed worker", func(ctx C) {

@@ -121,11 +151,43 @@ func TestHTTPServer(t *testing.T) {
                 So(m.Size, ShouldEqual, status.Size)
                 So(m.IsMaster, ShouldEqual, status.IsMaster)
                 So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
+                So(m.LastStarted.IsZero(), ShouldBeTrue) // hasn't been initialized yet
+                So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
+            })
+
+            // start syncing
+            status.Status = PreSyncing
+            time.Sleep(1 * time.Second)
+            resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
+            So(err, ShouldBeNil)
+            defer resp.Body.Close()
+            So(resp.StatusCode, ShouldEqual, http.StatusOK)
+
+            Convey("update mirror status to PreSync - starting sync", func(ctx C) {
+                var ms []MirrorStatus
+                resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
+                So(err, ShouldBeNil)
+                So(resp.StatusCode, ShouldEqual, http.StatusOK)
+                // err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
+                m := ms[0]
+                So(m.Name, ShouldEqual, status.Name)
+                So(m.Worker, ShouldEqual, status.Worker)
+                So(m.Status, ShouldEqual, status.Status)
+                So(m.Upstream, ShouldEqual, status.Upstream)
+                So(m.Size, ShouldEqual, status.Size)
+                So(m.IsMaster, ShouldEqual, status.IsMaster)
+                So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
+                So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 1*time.Second)
+                So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
+                So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
+                So(time.Now().Sub(m.LastEnded), ShouldBeGreaterThan, 1*time.Second)
             })

             Convey("list all job status of all workers", func(ctx C) {
-                var ms []webMirrorStatus
+                var ms []WebMirrorStatus
                 resp, err := GetJSON(baseURL+"/jobs", &ms, nil)
                 So(err, ShouldBeNil)
                 So(resp.StatusCode, ShouldEqual, http.StatusOK)

@@ -136,21 +198,109 @@ func TestHTTPServer(t *testing.T) {
                 So(m.Upstream, ShouldEqual, status.Upstream)
                 So(m.Size, ShouldEqual, status.Size)
                 So(m.IsMaster, ShouldEqual, status.IsMaster)
-                So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 1*time.Second)
+                So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 3*time.Second)
+                So(time.Now().Sub(m.LastStarted.Time), ShouldBeLessThan, 2*time.Second)
+                So(time.Now().Sub(m.LastEnded.Time), ShouldBeLessThan, 3*time.Second)
             })

+            Convey("Update size of a valid mirror", func(ctx C) {
+                msg := struct {
+                    Name string `json:"name"`
+                    Size string `json:"size"`
+                }{status.Name, "5GB"}
+
+                url := fmt.Sprintf("%s/workers/%s/jobs/%s/size", baseURL, status.Worker, status.Name)
+                resp, err := PostJSON(url, msg, nil)
+                So(err, ShouldBeNil)
+                So(resp.StatusCode, ShouldEqual, http.StatusOK)
+
+                Convey("Get new size of a mirror", func(ctx C) {
+                    var ms []MirrorStatus
+                    resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
+                    So(err, ShouldBeNil)
+                    So(resp.StatusCode, ShouldEqual, http.StatusOK)
+                    // err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
+                    m := ms[0]
+                    So(m.Name, ShouldEqual, status.Name)
+                    So(m.Worker, ShouldEqual, status.Worker)
+                    So(m.Status, ShouldEqual, status.Status)
+                    So(m.Upstream, ShouldEqual, status.Upstream)
+                    So(m.Size, ShouldEqual, "5GB")
+                    So(m.IsMaster, ShouldEqual, status.IsMaster)
+                    So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
+                    So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
+                    So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
+                })
+            })
+
+            Convey("Update schedule of valid mirrors", func(ctx C) {
+                msg := MirrorSchedules{
+                    []MirrorSchedule{
+                        MirrorSchedule{"arch-sync1", time.Now().Add(time.Minute * 10)},
+                        MirrorSchedule{"arch-sync2", time.Now().Add(time.Minute * 7)},
+                    },
+                }
+
+                url := fmt.Sprintf("%s/workers/%s/schedules", baseURL, status.Worker)
+                resp, err := PostJSON(url, msg, nil)
+                So(err, ShouldBeNil)
+                So(resp.StatusCode, ShouldEqual, http.StatusOK)
+            })
+
+            Convey("Update size of an invalid mirror", func(ctx C) {
+                msg := struct {
+                    Name string `json:"name"`
+                    Size string `json:"size"`
+                }{"Invalid mirror", "5GB"}
+
+                url := fmt.Sprintf("%s/workers/%s/jobs/%s/size", baseURL, status.Worker, status.Name)
+                resp, err := PostJSON(url, msg, nil)
+                So(err, ShouldBeNil)
+                So(resp.StatusCode, ShouldEqual, http.StatusInternalServerError)
+            })
+
+            // what if status changed to failed
+            status.Status = Failed
+            time.Sleep(3 * time.Second)
+            resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
+            So(err, ShouldBeNil)
+            defer resp.Body.Close()
+            So(resp.StatusCode, ShouldEqual, http.StatusOK)
+
+            Convey("What if syncing job failed", func(ctx C) {
+                var ms []MirrorStatus
+                resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
+                So(err, ShouldBeNil)
+                So(resp.StatusCode, ShouldEqual, http.StatusOK)
+                // err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
+                m := ms[0]
+                So(m.Name, ShouldEqual, status.Name)
+                So(m.Worker, ShouldEqual, status.Worker)
+                So(m.Status, ShouldEqual, status.Status)
+                So(m.Upstream, ShouldEqual, status.Upstream)
+                So(m.Size, ShouldEqual, status.Size)
+                So(m.IsMaster, ShouldEqual, status.IsMaster)
+                So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 3*time.Second)
+                So(time.Now().Sub(m.LastStarted), ShouldBeGreaterThan, 3*time.Second)
+                So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
+            })
         })

         Convey("update mirror status of an inexisted worker", func(ctx C) {
             invalidWorker := "test_worker2"
             status := MirrorStatus{
                 Name:       "arch-sync2",
                 Worker:     invalidWorker,
                 IsMaster:   true,
                 Status:     Success,
                 LastUpdate: time.Now(),
-                Upstream:   "mirrors.tuna.tsinghua.edu.cn",
-                Size:       "4GB",
+                LastStarted: time.Now(),
+                LastEnded:   time.Now(),
+                Upstream:    "mirrors.tuna.tsinghua.edu.cn",
+                Size:        "4GB",
             }
             resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s",
                 baseURL, status.Worker, status.Name), status, nil)

@@ -162,6 +312,24 @@ func TestHTTPServer(t *testing.T) {
             So(err, ShouldBeNil)
             So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
         })

+        Convey("update schedule of an non-existent worker", func(ctx C) {
+            invalidWorker := "test_worker2"
+            sch := MirrorSchedules{
+                []MirrorSchedule{
+                    MirrorSchedule{"arch-sync1", time.Now().Add(time.Minute * 10)},
+                    MirrorSchedule{"arch-sync2", time.Now().Add(time.Minute * 7)},
+                },
+            }
+            resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/schedules",
+                baseURL, invalidWorker), sch, nil)
+            So(err, ShouldBeNil)
+            So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
+            defer resp.Body.Close()
+            var msg map[string]string
+            err = json.NewDecoder(resp.Body).Decode(&msg)
+            So(err, ShouldBeNil)
+            So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
+        })
+
         Convey("handle client command", func(ctx C) {
             cmdChan := make(chan WorkerCmd, 1)
             workerServer := makeMockWorkerServer(cmdChan)

@@ -180,11 +348,11 @@ func TestHTTPServer(t *testing.T) {
                 // run the mock worker server
                 workerServer.Run(bindAddress)
             }()
-            time.Sleep(50 * time.Microsecond)
+            time.Sleep(50 * time.Millisecond)
             // verify the worker mock server is running
             workerResp, err := http.Get(workerBaseURL + "/ping")
-            defer workerResp.Body.Close()
             So(err, ShouldBeNil)
+            defer workerResp.Body.Close()
             So(workerResp.StatusCode, ShouldEqual, http.StatusOK)

             Convey("when client send wrong cmd", func(ctx C) {

@@ -194,8 +362,8 @@ func TestHTTPServer(t *testing.T) {
                     WorkerID: "not_exist_worker",
                 }
                 resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
-                defer resp.Body.Close()
                 So(err, ShouldBeNil)
+                defer resp.Body.Close()
                 So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
             })

@@ -207,9 +375,8 @@ func TestHTTPServer(t *testing.T) {
                 }
                 resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
-                defer resp.Body.Close()
                 So(err, ShouldBeNil)
+                defer resp.Body.Close()
                 So(resp.StatusCode, ShouldEqual, http.StatusOK)
                 time.Sleep(50 * time.Microsecond)
                 select {

@@ -252,6 +419,11 @@ func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) {
     return w, nil
 }

+func (b *mockDBAdapter) DeleteWorker(workerID string) error {
+    delete(b.workerStore, workerID)
+    return nil
+}
+
 func (b *mockDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
     // _, ok := b.workerStore[w.ID]
     // if ok {


@@ -1,62 +0,0 @@
package manager
import (
"encoding/json"
"strconv"
"time"
. "github.com/tuna/tunasync/internal"
)
type textTime struct {
time.Time
}
func (t textTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
s := string(b)
t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
*t = textTime{t2}
return err
}
type stampTime struct {
time.Time
}
func (t stampTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
ts, err := strconv.Atoi(string(b))
if err != nil {
return err
}
*t = stampTime{time.Unix(int64(ts), 0)}
return err
}
// webMirrorStatus is the mirror status to be shown in the web page
type webMirrorStatus struct {
Name string `json:"name"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate textTime `json:"last_update"`
LastUpdateTs stampTime `json:"last_update_ts"`
Upstream string `json:"upstream"`
Size string `json:"size"` // approximate size
}
func convertMirrorStatus(m MirrorStatus) webMirrorStatus {
return webMirrorStatus{
Name: m.Name,
IsMaster: m.IsMaster,
Status: m.Status,
LastUpdate: textTime{m.LastUpdate},
LastUpdateTs: stampTime{m.LastUpdate},
Upstream: m.Upstream,
Size: m.Size,
}
}


@@ -1,44 +0,0 @@
package manager
import (
"encoding/json"
"testing"
"time"
tunasync "github.com/tuna/tunasync/internal"
. "github.com/smartystreets/goconvey/convey"
)
func TestStatus(t *testing.T) {
Convey("status json ser-de should work", t, func() {
tz := "Asia/Tokyo"
loc, err := time.LoadLocation(tz)
So(err, ShouldBeNil)
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
m := webMirrorStatus{
Name: "tunalinux",
Status: tunasync.Success,
LastUpdate: textTime{t},
LastUpdateTs: stampTime{t},
Size: "5GB",
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
}
b, err := json.Marshal(m)
So(err, ShouldBeNil)
//fmt.Println(string(b))
var m2 webMirrorStatus
err = json.Unmarshal(b, &m2)
So(err, ShouldBeNil)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
}


@@ -15,15 +15,19 @@ type baseProvider struct {
ctx *Context
name string
interval time.Duration
+retry int
+timeout time.Duration
isMaster bool
cmd *cmdJob
+logFileFd *os.File
isRunning atomic.Value
-logFile *os.File
cgroup *cgroupHook
-hooks []jobHook
+zfs *zfsHook
+docker *dockerHook
+hooks []jobHook
}
func (p *baseProvider) Name() string {
@@ -49,6 +53,14 @@ func (p *baseProvider) Interval() time.Duration {
return p.interval
}
+func (p *baseProvider) Retry() int {
+return p.retry
+}
+func (p *baseProvider) Timeout() time.Duration {
+return p.timeout
+}
func (p *baseProvider) IsMaster() bool {
return p.isMaster
}
@@ -77,12 +89,17 @@ func (p *baseProvider) LogFile() string {
return s
}
}
-panic("log dir is impossible to be unavailable")
+panic("log file is impossible to be unavailable")
}
func (p *baseProvider) AddHook(hook jobHook) {
-if cg, ok := hook.(*cgroupHook); ok {
-p.cgroup = cg
+switch v := hook.(type) {
+case *cgroupHook:
+p.cgroup = v
+case *zfsHook:
+p.zfs = v
+case *dockerHook:
+p.docker = v
}
p.hooks = append(p.hooks, hook)
}
@@ -95,24 +112,42 @@ func (p *baseProvider) Cgroup() *cgroupHook {
return p.cgroup
}
+func (p *baseProvider) ZFS() *zfsHook {
+return p.zfs
+}
+func (p *baseProvider) Docker() *dockerHook {
+return p.docker
+}
-func (p *baseProvider) prepareLogFile() error {
+func (p *baseProvider) prepareLogFile(append bool) error {
if p.LogFile() == "/dev/null" {
p.cmd.SetLogFile(nil)
return nil
}
-if p.logFile == nil {
-logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE, 0644)
-if err != nil {
-logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
-return err
-}
-p.logFile = logFile
+appendMode := 0
+if append {
+appendMode = os.O_APPEND
}
-p.cmd.SetLogFile(p.logFile)
+logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE|appendMode, 0644)
+if err != nil {
+logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
+return err
+}
+p.logFileFd = logFile
+p.cmd.SetLogFile(logFile)
return nil
}
+func (p *baseProvider) closeLogFile() (err error) {
+if p.logFileFd != nil {
+err = p.logFileFd.Close()
+p.logFileFd = nil
+}
+return
+}
-func (p *baseProvider) Run() error {
+func (p *baseProvider) Run(started chan empty) error {
panic("Not Implemented")
}
@@ -127,32 +162,27 @@ func (p *baseProvider) IsRunning() bool {
func (p *baseProvider) Wait() error {
defer func() {
-p.Lock()
+logger.Debugf("set isRunning to false: %s", p.Name())
p.isRunning.Store(false)
-if p.logFile != nil {
-p.logFile.Close()
-p.logFile = nil
-}
-p.Unlock()
}()
+logger.Debugf("calling Wait: %s", p.Name())
return p.cmd.Wait()
}
func (p *baseProvider) Terminate() error {
+p.Lock()
+defer p.Unlock()
logger.Debugf("terminating provider: %s", p.Name())
if !p.IsRunning() {
+logger.Warningf("Terminate() called while IsRunning is false: %s", p.Name())
return nil
}
-p.Lock()
-if p.logFile != nil {
-p.logFile.Close()
-p.logFile = nil
-}
-p.Unlock()
err := p.cmd.Terminate()
+p.isRunning.Store(false)
return err
}
+func (p *baseProvider) DataSize() string {
+return ""
+}


@@ -0,0 +1,90 @@
package worker
import (
"fmt"
"os"
"path/filepath"
"github.com/dennwc/btrfs"
)
type btrfsSnapshotHook struct {
provider mirrorProvider
mirrorSnapshotPath string
}
// the user who runs the jobs (typically `tunasync`) should be granted the permission to run btrfs commands
// TODO: check if the filesystem is Btrfs
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
mirrorSnapshotPath := mirror.SnapshotPath
if mirrorSnapshotPath == "" {
mirrorSnapshotPath = filepath.Join(snapshotPath, provider.Name())
}
return &btrfsSnapshotHook{
provider: provider,
mirrorSnapshotPath: mirrorSnapshotPath,
}
}
// check if path `snapshotPath/providerName` exists
// Case 1: Not exists => create a new subvolume
// Case 2: Exists as a subvolume => nothing to do
// Case 3: Exists as a directory => error detected
func (h *btrfsSnapshotHook) preJob() error {
path := h.provider.WorkingDir()
if _, err := os.Stat(path); os.IsNotExist(err) {
// create subvolume
err := btrfs.CreateSubVolume(path)
if err != nil {
logger.Errorf("failed to create Btrfs subvolume %s: %s", path, err.Error())
return err
}
logger.Noticef("created new Btrfs subvolume %s", path)
} else {
if is, err := btrfs.IsSubVolume(path); err != nil {
return err
} else if !is {
return fmt.Errorf("path %s exists but isn't a Btrfs subvolume", path)
}
}
return nil
}
func (h *btrfsSnapshotHook) preExec() error {
return nil
}
func (h *btrfsSnapshotHook) postExec() error {
return nil
}
// delete old snapshot if exists, then create a new snapshot
func (h *btrfsSnapshotHook) postSuccess() error {
if _, err := os.Stat(h.mirrorSnapshotPath); !os.IsNotExist(err) {
isSubVol, err := btrfs.IsSubVolume(h.mirrorSnapshotPath)
if err != nil {
return err
} else if !isSubVol {
return fmt.Errorf("path %s exists and isn't a Btrfs snapshot", h.mirrorSnapshotPath)
}
// is old snapshot => delete it
if err := btrfs.DeleteSubVolume(h.mirrorSnapshotPath); err != nil {
logger.Errorf("failed to delete old Btrfs snapshot %s", h.mirrorSnapshotPath)
return err
}
logger.Noticef("deleted old snapshot %s", h.mirrorSnapshotPath)
}
// create a new writable snapshot
// (the snapshot is writable so that it can be deleted easily)
if err := btrfs.SnapshotSubVolume(h.provider.WorkingDir(), h.mirrorSnapshotPath, false); err != nil {
logger.Errorf("failed to create new Btrfs snapshot %s", h.mirrorSnapshotPath)
return err
}
logger.Noticef("created new Btrfs snapshot %s", h.mirrorSnapshotPath)
return nil
}
// keep the old snapshot => nothing to do
func (h *btrfsSnapshotHook) postFail() error {
return nil
}
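For context, a minimal worker configuration that would enable this hook might look like the following sketch. The key names come from the btrfsSnapshotConfig and mirrorConfig structs added later in this changeset; all paths and the mirror itself are invented examples:

[btrfs_snapshot]
enable = true
snapshot_path = "/data/snapshots"   # illustrative root; snapshots default to <snapshot_path>/<mirror name>

[[mirrors]]
name = "archlinux"
provider = "rsync"
upstream = "rsync://example.org/archlinux/"   # illustrative upstream
snapshot_path = "/data/snapshots/archlinux"   # optional per-mirror override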


@@ -15,35 +15,32 @@ import (
"github.com/codeskyblue/go-sh"
)
-var cgSubsystem = "cpu"
type cgroupHook struct {
+emptyHook
-provider mirrorProvider
basePath string
baseGroup string
created bool
+subsystem string
+memLimit string
}
-func initCgroup(basePath string) {
-if _, err := os.Stat(filepath.Join(basePath, "memory")); err == nil {
-cgSubsystem = "memory"
-return
-}
-logger.Warning("Memory subsystem of cgroup not enabled, fallback to cpu")
-}
-func newCgroupHook(p mirrorProvider, basePath, baseGroup string) *cgroupHook {
+func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit string) *cgroupHook {
if basePath == "" {
basePath = "/sys/fs/cgroup"
}
if baseGroup == "" {
baseGroup = "tunasync"
}
+if subsystem == "" {
+subsystem = "cpu"
+}
return &cgroupHook{
-provider: p,
+emptyHook: emptyHook{
+provider: p,
+},
basePath: basePath,
baseGroup: baseGroup,
+subsystem: subsystem,
}
}
@@ -52,13 +49,15 @@ func (c *cgroupHook) preExec() error {
if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil {
return err
}
-if cgSubsystem != "memory" {
+if c.subsystem != "memory" {
return nil
}
-if c.provider.Type() == provRsync || c.provider.Type() == provTwoStageRsync {
+if c.memLimit != "" {
gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name())
return sh.Command(
-"cgset", "-r", "memory.limit_in_bytes=128M", gname,
+"cgset", "-r",
+fmt.Sprintf("memory.limit_in_bytes=%s", c.memLimit),
+gname,
).Run()
}
return nil
@@ -76,7 +75,7 @@ func (c *cgroupHook) postExec() error {
func (c *cgroupHook) Cgroup() string {
name := c.provider.Name()
-return fmt.Sprintf("%s:%s/%s", cgSubsystem, c.baseGroup, name)
+return fmt.Sprintf("%s:%s/%s", c.subsystem, c.baseGroup, name)
}
func (c *cgroupHook) killAll() error {
@@ -87,7 +86,7 @@ func (c *cgroupHook) killAll() error {
readTaskList := func() ([]int, error) {
taskList := []int{}
-taskFile, err := os.Open(filepath.Join(c.basePath, cgSubsystem, c.baseGroup, name, "tasks"))
+taskFile, err := os.Open(filepath.Join(c.basePath, c.subsystem, c.baseGroup, name, "tasks"))
if err != nil {
return taskList, err
}

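A possible worker configuration for the reworked cgroup hook, assuming the cgroupConfig keys and the per-mirror memory_limit option introduced in this changeset (all values are illustrative, not taken from the repository):

[cgroup]
enable = true
base_path = "/sys/fs/cgroup"   # this is also the built-in default
group = "tunasync"             # this is also the built-in default
subsystem = "memory"           # falls back to "cpu" when omitted

[[mirrors]]
name = "fedora"
provider = "rsync"
upstream = "rsync://example.org/fedora/"   # illustrative upstream
memory_limit = "512M"   # applied via cgset only when the subsystem is "memory"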

@@ -72,15 +72,18 @@ sleep 30
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
-initCgroup("/sys/fs/cgroup")
-cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
+cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "")
provider.AddHook(cg)
err = cg.preExec()
+if err != nil {
+logger.Errorf("Failed to create cgroup")
+return
+}
So(err, ShouldBeNil)
go func() {
-err = provider.Run()
+err := provider.Run(make(chan empty, 1))
ctx.So(err, ShouldNotBeNil)
}()
@@ -129,15 +132,18 @@ sleep 30
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
-initCgroup("/sys/fs/cgroup")
-cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
+cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "512M")
provider.AddHook(cg)
-cg.preExec()
-if cgSubsystem == "memory" {
+err = cg.preExec()
+if err != nil {
+logger.Errorf("Failed to create cgroup")
+return
+}
+if cg.subsystem == "memory" {
memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes"))
So(err, ShouldBeNil)
-So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(128*1024*1024))
+So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
}
cg.postExec()
})


@@ -1,9 +1,13 @@
package worker
import (
+"errors"
+"fmt"
+"regexp"
"time"
"github.com/anmitsu/go-shlex"
+"github.com/tuna/tunasync/internal"
)
type cmdConfig struct {
@@ -11,22 +15,34 @@ type cmdConfig struct {
upstreamURL, command string
workingDir, logDir, logFile string
interval time.Duration
+retry int
+timeout time.Duration
env map[string]string
+failOnMatch string
+sizePattern string
}
type cmdProvider struct {
baseProvider
cmdConfig
command []string
+dataSize string
+failOnMatch *regexp.Regexp
+sizePattern *regexp.Regexp
}
func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
// TODO: check config options
+if c.retry == 0 {
+c.retry = defaultMaxRetry
+}
provider := &cmdProvider{
baseProvider: baseProvider{
name: c.name,
ctx: NewContext(),
interval: c.interval,
+retry: c.retry,
+timeout: c.timeout,
},
cmdConfig: c,
}
@@ -40,6 +56,22 @@ func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
return nil, err
}
provider.command = cmd
+if len(c.failOnMatch) > 0 {
+var err error
+failOnMatch, err := regexp.Compile(c.failOnMatch)
+if err != nil {
+return nil, errors.New("fail-on-match regexp error: " + err.Error())
+}
+provider.failOnMatch = failOnMatch
+}
+if len(c.sizePattern) > 0 {
+var err error
+sizePattern, err := regexp.Compile(c.sizePattern)
+if err != nil {
+return nil, errors.New("size-pattern regexp error: " + err.Error())
+}
+provider.sizePattern = sizePattern
+}
return provider, nil
}
@@ -52,25 +84,57 @@ func (p *cmdProvider) Upstream() string {
return p.upstreamURL
}
-func (p *cmdProvider) Run() error {
+func (p *cmdProvider) DataSize() string {
+return p.dataSize
+}
+func (p *cmdProvider) Run(started chan empty) error {
+p.dataSize = ""
+defer p.closeLogFile()
if err := p.Start(); err != nil {
return err
}
-return p.Wait()
+started <- empty{}
+if err := p.Wait(); err != nil {
+return err
+}
+if p.failOnMatch != nil {
+matches, err := internal.FindAllSubmatchInFile(p.LogFile(), p.failOnMatch)
+logger.Infof("FindAllSubmatchInFile: %q\n", matches)
+if err != nil {
+return err
+}
+if len(matches) != 0 {
+logger.Debug("Fail-on-match: %r", matches)
+return fmt.Errorf("Fail-on-match regexp found %d matches", len(matches))
+}
+}
+if p.sizePattern != nil {
+p.dataSize = internal.ExtractSizeFromLog(p.LogFile(), p.sizePattern)
+}
+return nil
}
func (p *cmdProvider) Start() error {
+p.Lock()
+defer p.Unlock()
+if p.IsRunning() {
+return errors.New("provider is currently running")
+}
env := map[string]string{
"TUNASYNC_MIRROR_NAME": p.Name(),
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
"TUNASYNC_UPSTREAM_URL": p.upstreamURL,
+"TUNASYNC_LOG_DIR": p.LogDir(),
"TUNASYNC_LOG_FILE": p.LogFile(),
}
for k, v := range p.env {
env[k] = v
}
p.cmd = newCmdJob(p, p.command, p.WorkingDir(), env)
-if err := p.prepareLogFile(); err != nil {
+if err := p.prepareLogFile(false); err != nil {
return err
}
@@ -78,5 +142,6 @@ func (p *cmdProvider) Start() error {
return err
}
p.isRunning.Store(true)
+logger.Debugf("set isRunning to true: %s", p.Name())
return nil
}

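The new fail_on_match and size_pattern options could be exercised with a command mirror roughly like the following sketch. The option names come from the mirrorConfig struct in this changeset; the script path, upstream, and both regexps are illustrative examples, not values from the repository:

[[mirrors]]
name = "repo-sync"
provider = "command"
command = "/opt/tunasync/sync.sh"        # illustrative script
upstream = "https://example.org/repo/"   # illustrative upstream
# mark the job as failed whenever the log matches this regexp
fail_on_match = "^Fatal error:"
# capture the mirror size from a log line such as "Total file size: 1.33T bytes"
size_pattern = "Total file size: ([0-9\\.]+[KMGTP]?) bytes"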

@@ -8,6 +8,6 @@ import (
type empty struct{}
-const maxRetry = 2
+const defaultMaxRetry = 2
var logger = logging.MustGetLogger("tunasync")


@@ -6,6 +6,7 @@ import (
"path/filepath"
"github.com/BurntSushi/toml"
+"github.com/imdario/mergo"
)
type providerEnum uint8
@@ -33,12 +34,16 @@ func (p *providerEnum) UnmarshalText(text []byte) error {
// Config represents worker config options
type Config struct {
Global globalConfig `toml:"global"`
Manager managerConfig `toml:"manager"`
Server serverConfig `toml:"server"`
Cgroup cgroupConfig `toml:"cgroup"`
+ZFS zfsConfig `toml:"zfs"`
+BtrfsSnapshot btrfsSnapshotConfig `toml:"btrfs_snapshot"`
+Docker dockerConfig `toml:"docker"`
Include includeConfig `toml:"include"`
-Mirrors []mirrorConfig `toml:"mirrors"`
+MirrorsConf []mirrorConfig `toml:"mirrors"`
+Mirrors []mirrorConfig
}
type globalConfig struct {
@@ -47,6 +52,8 @@ type globalConfig struct {
MirrorDir string `toml:"mirror_dir"`
Concurrent int `toml:"concurrent"`
Interval int `toml:"interval"`
+Retry int `toml:"retry"`
+Timeout int `toml:"timeout"`
ExecOnSuccess []string `toml:"exec_on_success"`
ExecOnFailure []string `toml:"exec_on_failure"`
@@ -54,8 +61,17 @@ type globalConfig struct {
type managerConfig struct {
APIBase string `toml:"api_base"`
+// this option overrides the APIBase
+APIList []string `toml:"api_base_list"`
CACert string `toml:"ca_cert"`
-Token string `toml:"token"`
+// Token string `toml:"token"`
}
+func (mc managerConfig) APIBaseList() []string {
+if len(mc.APIList) > 0 {
+return mc.APIList
+}
+return []string{mc.APIBase}
}
type serverConfig struct {
@@ -67,9 +83,26 @@ type serverConfig struct {
}
type cgroupConfig struct {
Enable bool `toml:"enable"`
BasePath string `toml:"base_path"`
Group string `toml:"group"`
+Subsystem string `toml:"subsystem"`
+}
+type dockerConfig struct {
+Enable bool `toml:"enable"`
+Volumes []string `toml:"volumes"`
+Options []string `toml:"options"`
+}
+type zfsConfig struct {
+Enable bool `toml:"enable"`
+Zpool string `toml:"zpool"`
+}
+type btrfsSnapshotConfig struct {
+Enable bool `toml:"enable"`
+SnapshotPath string `toml:"snapshot_path"`
}
type includeConfig struct {
@@ -81,14 +114,17 @@ type includedMirrorConfig struct {
}
type mirrorConfig struct {
Name string `toml:"name"`
Provider providerEnum `toml:"provider"`
Upstream string `toml:"upstream"`
Interval int `toml:"interval"`
+Retry int `toml:"retry"`
+Timeout int `toml:"timeout"`
MirrorDir string `toml:"mirror_dir"`
+MirrorSubDir string `toml:"mirror_subdir"`
LogDir string `toml:"log_dir"`
Env map[string]string `toml:"env"`
Role string `toml:"role"`
// These two options over-write the global options
ExecOnSuccess []string `toml:"exec_on_success"`
@@ -98,12 +134,27 @@ type mirrorConfig struct {
ExecOnSuccessExtra []string `toml:"exec_on_success_extra"`
ExecOnFailureExtra []string `toml:"exec_on_failure_extra"`
Command string `toml:"command"`
+FailOnMatch string `toml:"fail_on_match"`
+SizePattern string `toml:"size_pattern"`
UseIPv6 bool `toml:"use_ipv6"`
+UseIPv4 bool `toml:"use_ipv4"`
ExcludeFile string `toml:"exclude_file"`
Username string `toml:"username"`
Password string `toml:"password"`
+RsyncOptions []string `toml:"rsync_options"`
+RsyncOverride []string `toml:"rsync_override"`
Stage1Profile string `toml:"stage1_profile"`
+MemoryLimit string `toml:"memory_limit"`
+DockerImage string `toml:"docker_image"`
+DockerVolumes []string `toml:"docker_volumes"`
+DockerOptions []string `toml:"docker_options"`
+SnapshotPath string `toml:"snapshot_path"`
+ChildMirrors []mirrorConfig `toml:"mirrors"`
}
// LoadConfig loads configuration
@@ -130,9 +181,36 @@ func LoadConfig(cfgFile string) (*Config, error) {
logger.Errorf(err.Error())
return nil, err
}
-cfg.Mirrors = append(cfg.Mirrors, incMirCfg.Mirrors...)
+cfg.MirrorsConf = append(cfg.MirrorsConf, incMirCfg.Mirrors...)
}
}
+for _, m := range cfg.MirrorsConf {
+if err := recursiveMirrors(cfg, nil, m); err != nil {
+return nil, err
+}
+}
return cfg, nil
}
func recursiveMirrors(cfg *Config, parent *mirrorConfig, mirror mirrorConfig) error {
var curMir mirrorConfig
if parent != nil {
curMir = *parent
}
curMir.ChildMirrors = nil
if err := mergo.Merge(&curMir, mirror, mergo.WithOverride); err != nil {
return err
}
if mirror.ChildMirrors == nil {
cfg.Mirrors = append(cfg.Mirrors, curMir)
} else {
for _, m := range mirror.ChildMirrors {
if err := recursiveMirrors(cfg, &curMir, m); err != nil {
return err
}
}
}
return nil
}

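Putting the new config items together, a worker file might look like the following sketch. Option names follow the toml tags above and the precedence follows the code (api_base_list overrides api_base, per-mirror retry/timeout override the global ones); the hostnames and values themselves are invented examples:

[global]
name = "test_worker"
mirror_dir = "/data/mirrors"
interval = 240
retry = 3        # global default, overridable per mirror
timeout = 86400  # global default; converted with time.Second in provider.go

[manager]
api_base = "https://manager1:12345"
# when present, this list takes precedence over api_base
api_base_list = ["https://manager1:12345", "https://manager2:12345"]

[[mirrors]]
name = "AOSP"
provider = "command"
retry = 2       # per-mirror override
timeout = 3600  # per-mirror override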

@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"testing"
+"time"
. "github.com/smartystreets/goconvey/convey"
)
@@ -18,6 +19,8 @@ log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
+retry = 3
+timeout = 86400
[manager]
api_base = "https://127.0.0.1:5000"
@@ -35,6 +38,8 @@ name = "AOSP"
provider = "command"
upstream = "https://aosp.google.com/"
interval = 720
+retry = 2
+timeout = 3600
mirror_dir = "/data/git/AOSP"
exec_on_success = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@@ -81,9 +86,9 @@ exec_on_failure = [
tmpDir,
)
-cfgBlob = cfgBlob + incSection
-err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
+curCfgBlob := cfgBlob + incSection
+err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
@@ -116,6 +121,8 @@ use_ipv6 = true
So(err, ShouldBeNil)
So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240)
+So(cfg.Global.Retry, ShouldEqual, 3)
+So(cfg.Global.Timeout, ShouldEqual, 86400)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
@@ -126,6 +133,8 @@ use_ipv6 = true
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720)
+So(m.Retry, ShouldEqual, 2)
+So(m.Timeout, ShouldEqual, 3600)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1]
@@ -153,6 +162,102 @@ use_ipv6 = true
So(len(cfg.Mirrors), ShouldEqual, 6)
})
Convey("Everything should work on nested config file", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
incSection := fmt.Sprintf(
"\n[include]\n"+
"include_mirrors = \"%s/*.conf\"",
tmpDir,
)
curCfgBlob := cfgBlob + incSection
err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
incBlob1 := `
[[mirrors]]
name = "ipv6s"
use_ipv6 = true
[[mirrors.mirrors]]
name = "debians"
mirror_subdir = "debian"
provider = "two-stage-rsync"
stage1_profile = "debian"
[[mirrors.mirrors.mirrors]]
name = "debian-security"
upstream = "rsync://test.host/debian-security/"
[[mirrors.mirrors.mirrors]]
name = "ubuntu"
stage1_profile = "ubuntu"
upstream = "rsync://test.host2/ubuntu/"
[[mirrors.mirrors]]
name = "debian-cd"
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = ioutil.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240)
So(cfg.Global.Retry, ShouldEqual, 3)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
So(cfg.Server.Hostname, ShouldEqual, "worker1.example.com")
m := cfg.Mirrors[0]
So(m.Name, ShouldEqual, "AOSP")
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720)
So(m.Retry, ShouldEqual, 2)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1]
So(m.Name, ShouldEqual, "debian")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
m = cfg.Mirrors[2]
So(m.Name, ShouldEqual, "fedora")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provRsync)
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
m = cfg.Mirrors[3]
So(m.Name, ShouldEqual, "debian-security")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.UseIPv6, ShouldEqual, true)
So(m.Stage1Profile, ShouldEqual, "debian")
m = cfg.Mirrors[4]
So(m.Name, ShouldEqual, "ubuntu")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.UseIPv6, ShouldEqual, true)
So(m.Stage1Profile, ShouldEqual, "ubuntu")
m = cfg.Mirrors[5]
So(m.Name, ShouldEqual, "debian-cd")
So(m.UseIPv6, ShouldEqual, true)
So(m.Provider, ShouldEqual, provRsync)
So(len(cfg.Mirrors), ShouldEqual, 6)
})
Convey("Providers can be inited from a valid config file", t, func() { Convey("Providers can be inited from a valid config file", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync") tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil) So(err, ShouldEqual, nil)
@@ -203,4 +308,92 @@ use_ipv6 = true
So(rp.excludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt") So(rp.excludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
}) })
Convey("MirrorSubdir should work", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
timeout = 86400
retry = 3
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "ipv6s"
use_ipv6 = true
[[mirrors.mirrors]]
name = "debians"
mirror_subdir = "debian"
provider = "two-stage-rsync"
stage1_profile = "debian"
[[mirrors.mirrors.mirrors]]
name = "debian-security"
upstream = "rsync://test.host/debian-security/"
[[mirrors.mirrors.mirrors]]
name = "ubuntu"
stage1_profile = "ubuntu"
upstream = "rsync://test.host2/ubuntu/"
[[mirrors.mirrors]]
name = "debian-cd"
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p := providers["debian-security"]
So(p.Name(), ShouldEqual, "debian-security")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-security")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-security/latest.log")
r2p, ok := p.(*twoStageRsyncProvider)
So(ok, ShouldBeTrue)
So(r2p.stage1Profile, ShouldEqual, "debian")
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/debian-security")
p = providers["ubuntu"]
So(p.Name(), ShouldEqual, "ubuntu")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/ubuntu")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/ubuntu/latest.log")
r2p, ok = p.(*twoStageRsyncProvider)
So(ok, ShouldBeTrue)
So(r2p.stage1Profile, ShouldEqual, "ubuntu")
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/ubuntu")
p = providers["debian-cd"]
So(p.Name(), ShouldEqual, "debian-cd")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-cd")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-cd/latest.log")
rp, ok := p.(*rsyncProvider)
So(ok, ShouldBeTrue)
So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/debian-cd")
So(p.Timeout(), ShouldEqual, 86400*time.Second)
})
}

worker/docker.go

@@ -0,0 +1,96 @@
package worker
import (
"fmt"
"os"
)
type dockerHook struct {
emptyHook
image string
volumes []string
options []string
}
func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
volumes := []string{}
volumes = append(volumes, gCfg.Volumes...)
volumes = append(volumes, mCfg.DockerVolumes...)
options := []string{}
options = append(options, gCfg.Options...)
options = append(options, mCfg.DockerOptions...)
return &dockerHook{
emptyHook: emptyHook{
provider: p,
},
image: mCfg.DockerImage,
volumes: volumes,
options: options,
}
}
func (d *dockerHook) preExec() error {
p := d.provider
logDir := p.LogDir()
logFile := p.LogFile()
workingDir := p.WorkingDir()
if _, err := os.Stat(workingDir); os.IsNotExist(err) {
logger.Debugf("Making dir %s", workingDir)
if err = os.MkdirAll(workingDir, 0755); err != nil {
return fmt.Errorf("Error making dir %s: %s", workingDir, err.Error())
}
}
// Override workingDir
ctx := p.EnterContext()
ctx.Set(
"volumes", []string{
fmt.Sprintf("%s:%s", logDir, logDir),
fmt.Sprintf("%s:%s", logFile, logFile),
fmt.Sprintf("%s:%s", workingDir, workingDir),
},
)
return nil
}
func (d *dockerHook) postExec() error {
// sh.Command(
// "docker", "rm", "-f", d.Name(),
// ).Run()
d.provider.ExitContext()
return nil
}
// Volumes returns the configured volumes and
// runtime-needed volumes, including mirror dirs
// and log files
func (d *dockerHook) Volumes() []string {
vols := make([]string, len(d.volumes))
copy(vols, d.volumes)
p := d.provider
ctx := p.Context()
if ivs, ok := ctx.Get("volumes"); ok {
vs := ivs.([]string)
vols = append(vols, vs...)
}
return vols
}
func (d *dockerHook) LogFile() string {
p := d.provider
ctx := p.Context()
if iv, ok := ctx.Get(_LogFileKey + ":docker"); ok {
v := iv.(string)
return v
}
return p.LogFile()
}
func (d *dockerHook) Name() string {
p := d.provider
return "tunasync-job-" + p.Name()
}
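A sketch of the configuration that would activate this hook, based on the dockerConfig struct and the per-mirror docker_* options in this changeset (image name, script, and volume paths are illustrative):

[docker]
enable = true
volumes = ["/etc/localtime:/etc/localtime:ro"]  # shared by all dockerised mirrors
options = ["--memory=512m"]                     # extra options for the container (illustrative)

[[mirrors]]
name = "git-repo"
provider = "command"
command = "/bin/sync.sh"
docker_image = "alpine:3.8"   # the hook is only attached when docker_image is non-empty
docker_volumes = ["/opt/scripts:/opt/scripts:ro"]
docker_options = ["--cpus=2"]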

worker/docker_test.go

@@ -0,0 +1,127 @@
package worker
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/codeskyblue/go-sh"
. "github.com/smartystreets/goconvey/convey"
)
func cmdRun(p string, args []string) {
cmd := exec.Command(p, args...)
out, err := cmd.CombinedOutput()
if err != nil {
logger.Debugf("cmdRun failed %s", err)
return
}
logger.Debugf("cmdRun: ", string(out))
}
func getDockerByName(name string) (string, error) {
// docker ps -f 'name=$name' --format '{{.Names}}'
out, err := sh.Command(
"docker", "ps", "-a",
"--filter", "name="+name,
"--format", "{{.Names}}",
).Output()
if err == nil {
logger.Debugf("docker ps: '%s'", string(out))
}
return string(out), err
}
func TestDocker(t *testing.T) {
Convey("Docker Should Work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
cmdScript := filepath.Join(tmpDir, "cmd.sh")
tmpFile := filepath.Join(tmpDir, "log_file")
expectedOutput := "HELLO_WORLD"
c := cmdConfig{
name: "tuna-docker",
upstreamURL: "http://mirrors.tuna.moe/",
command: "/bin/cmd.sh",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
env: map[string]string{
"TEST_CONTENT": expectedOutput,
},
}
cmdScriptContent := `#!/bin/sh
echo ${TEST_CONTENT}
sleep 20
`
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
d := &dockerHook{
emptyHook: emptyHook{
provider: provider,
},
image: "alpine:3.8",
volumes: []string{
fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
},
}
provider.AddHook(d)
So(provider.Docker(), ShouldNotBeNil)
err = d.preExec()
So(err, ShouldBeNil)
cmdRun("docker", []string{"images"})
exitedErr := make(chan error, 1)
go func() {
err = provider.Run(make(chan empty, 1))
logger.Debugf("provider.Run() exited")
if err != nil {
logger.Errorf("provider.Run() failed: %v", err)
}
exitedErr <- err
}()
cmdRun("ps", []string{"aux"})
// Wait for docker running
time.Sleep(8 * time.Second)
cmdRun("ps", []string{"aux"})
// assert container running
names, err := getDockerByName(d.Name())
So(err, ShouldBeNil)
// So(names, ShouldEqual, d.Name()+"\n")
err = provider.Terminate()
// So(err, ShouldBeNil)
cmdRun("ps", []string{"aux"})
<-exitedErr
// container should be terminated and removed
names, err = getDockerByName(d.Name())
So(err, ShouldBeNil)
So(names, ShouldEqual, "")
// check log content
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput+"\n")
d.postExec()
})
}


@@ -18,7 +18,6 @@ const (
type execPostHook struct {
emptyHook
-provider mirrorProvider
// exec on success or on failure
execOn uint8
@@ -37,9 +36,11 @@ func newExecPostHook(provider mirrorProvider, execOn uint8, command string) (*ex
}
return &execPostHook{
-provider: provider,
-execOn: execOn,
-command: cmd,
+emptyHook: emptyHook{
+provider: provider,
+},
+execOn: execOn,
+command: cmd,
}, nil
}
@@ -71,6 +72,7 @@ func (h *execPostHook) Do() error {
"TUNASYNC_MIRROR_NAME": p.Name(),
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
"TUNASYNC_UPSTREAM_URL": p.Upstream(),
+"TUNASYNC_LOG_DIR": p.LogDir(),
"TUNASYNC_LOG_FILE": p.LogFile(),
"TUNASYNC_JOB_EXIT_STATUS": exitStatus,
}


@@ -92,7 +92,7 @@ exit 1
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
-for i := 0; i < maxRetry; i++ {
+for i := 0; i < defaultMaxRetry; i++ {
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan


@@ -5,6 +5,7 @@ import (
"fmt"
"sync"
"sync/atomic"
+"time"
tunasync "github.com/tuna/tunasync/internal"
)
@@ -14,12 +15,13 @@ import (
type ctrlAction uint8
const (
jobStart ctrlAction = iota
jobStop // stop syncing keep the job
jobDisable // disable the job (stops goroutine)
jobRestart // restart syncing
jobPing // ensure the goroutine is alive
jobHalt // worker halts
+jobForceStart // ignore concurrent limit
)
type jobMessage struct {
@@ -51,6 +53,7 @@ type mirrorJob struct {
ctrlChan chan ctrlAction
disabled chan empty
state uint32
+size string
}
func newMirrorJob(provider mirrorProvider) *mirrorJob {
@@ -110,7 +113,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
managerChan <- jobMessage{
tunasync.Failed, m.Name(),
fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
-false,
+true,
}
return err
}
@@ -136,7 +139,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
return err
}
-for retry := 0; retry < maxRetry; retry++ {
+for retry := 0; retry < provider.Retry(); retry++ {
stopASAP := false // stop job as soon as possible
if retry > 0 {
@@ -152,26 +155,44 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
var syncErr error
syncDone := make(chan error, 1)
+started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)
go func() {
-err := provider.Run()
-if !stopASAP {
-syncDone <- err
-}
+err := provider.Run(started)
+syncDone <- err
}()
+// Wait until provider started or error happened
select {
+case err := <-syncDone:
+logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
+syncDone <- err // it will be read again later
+case <-started:
+logger.Debug("provider started")
+}
+// Now terminating the provider is feasible
+var termErr error
+timeout := provider.Timeout()
+if timeout <= 0 {
+timeout = 100000 * time.Hour // never time out
+}
+select {
case syncErr = <-syncDone:
logger.Debug("syncing done")
+case <-time.After(timeout):
+logger.Notice("provider timeout")
+stopASAP = true
+termErr = provider.Terminate()
+syncErr = fmt.Errorf("%s timeout after %v", m.Name(), timeout)
case <-kill:
logger.Debug("received kill")
stopASAP = true
-err := provider.Terminate()
-if err != nil {
-logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
-return err
-}
+termErr = provider.Terminate()
syncErr = errors.New("killed by manager")
}
+if termErr != nil {
+logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
+return termErr
+}
// post-exec hooks
herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
@@ -182,26 +203,33 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
if syncErr == nil {
// syncing success
logger.Noticef("succeeded syncing %s", m.Name())
-managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
// post-success hooks
+logger.Debug("post-success hooks")
err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
if err != nil {
return err
}
-return nil
+} else {
+// syncing failed
+logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
+// post-fail hooks
+logger.Debug("post-fail hooks")
+err := runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
+if err != nil {
+return err
+}
+}
+if syncErr == nil {
+// syncing success
+m.size = provider.DataSize()
+managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
+return nil
}
// syncing failed
-logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
-managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == maxRetry-1) && (m.State() == stateReady)}
-// post-fail hooks
-logger.Debug("post-fail hooks")
-err = runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
-if err != nil {
-return err
-}
+managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == provider.Retry()-1) && (m.State() == stateReady)}
// gracefully exit
if stopASAP {
logger.Debug("No retry, exit directly")
@@ -212,22 +240,26 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
return nil
}
-runJob := func(kill <-chan empty, jobDone chan<- empty) {
+runJob := func(kill <-chan empty, jobDone chan<- empty, bypassSemaphore <-chan empty) {
select {
case semaphore <- empty{}:
defer func() { <-semaphore }()
runJobWrapper(kill, jobDone)
+case <-bypassSemaphore:
+logger.Noticef("Concurrent limit ignored by %s", m.Name())
+runJobWrapper(kill, jobDone)
case <-kill:
jobDone <- empty{}
return
}
}
+bypassSemaphore := make(chan empty, 1)
for {
if m.State() == stateReady {
kill := make(chan empty)
jobDone := make(chan empty)
-go runJob(kill, jobDone)
+go runJob(kill, jobDone, bypassSemaphore)
_wait_for_job:
select {
@@ -248,7 +280,14 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
m.SetState(stateReady)
close(kill)
<-jobDone
+time.Sleep(time.Second) // Restart may fail if the process was not exited yet
continue
+case jobForceStart:
+select { //non-blocking
+default:
+case bypassSemaphore <- empty{}:
+}
+fallthrough
case jobStart:
m.SetState(stateReady)
goto _wait_for_job
@@ -272,8 +311,14 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
case jobDisable:
m.SetState(stateDisabled)
return nil
+case jobForceStart:
+select { //non-blocking
+default:
+case bypassSemaphore <- empty{}:
+}
+fallthrough
case jobRestart:
-m.SetState(stateReady)
+fallthrough
case jobStart:
m.SetState(stateReady)
default:


@@ -31,6 +31,7 @@ func TestMirrorJob(t *testing.T) {
logDir: tmpDir,
logFile: tmpFile,
interval: 1 * time.Second,
+timeout: 7 * time.Second,
}
provider, err := newCmdProvider(c)
@@ -41,6 +42,7 @@ func TestMirrorJob(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
+So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("For a normal mirror job", func(ctx C) {
scriptContent := `#!/bin/bash
@@ -112,6 +114,74 @@ func TestMirrorJob(t *testing.T) {
})
Convey("When running long jobs with post-fail hook", func(ctx C) {
scriptContent := `#!/bin/bash
echo '++++++'
echo $TUNASYNC_WORKING_DIR
echo $0 sleeping
sleep 3
echo $TUNASYNC_WORKING_DIR
echo '------'
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
hookScriptFile := filepath.Join(tmpDir, "hook.sh")
err = ioutil.WriteFile(hookScriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
h, err := newExecPostHook(provider, execOnFailure, hookScriptFile)
So(err, ShouldBeNil)
provider.AddHook(h)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("If we kill it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we kill it then start it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
time.Sleep(2 * time.Second)
logger.Debugf("Now starting...\n")
job.ctrlChan <- jobStart
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
job.ctrlChan <- jobDisable
<-job.disabled
})
})
Convey("When running long jobs", func(ctx C) { Convey("When running long jobs", func(ctx C) {
scriptContent := `#!/bin/bash scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR echo $TUNASYNC_WORKING_DIR
@@ -135,6 +205,8 @@ echo $TUNASYNC_WORKING_DIR
msg = <-managerChan msg = <-managerChan
So(msg.status, ShouldEqual, Syncing) So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
job.ctrlChan <- jobStop job.ctrlChan <- jobStop
msg = <-managerChan msg = <-managerChan
@@ -170,8 +242,276 @@ echo $TUNASYNC_WORKING_DIR
job.ctrlChan <- jobDisable job.ctrlChan <- jobDisable
<-job.disabled <-job.disabled
}) })
Convey("If we restart it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobRestart
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
msg = <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
expectedOutput := fmt.Sprintf(
"%s\n%s\n",
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we disable it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobDisable
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
<-job.disabled
})
Convey("If we stop it twice, than start it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
job.ctrlChan <- jobStop // should be ignored
job.ctrlChan <- jobStart
msg = <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
expectedOutput := fmt.Sprintf(
"%s\n%s\n",
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
}) })
Convey("When a job timed out", func(ctx C) {
scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR
sleep 10
echo $TUNASYNC_WORKING_DIR
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("It should be automatically terminated", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
})
})
}
func TestConcurrentMirrorJobs(t *testing.T) {
InitLogger(true, true, false)
Convey("Concurrent MirrorJobs should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
const CONCURRENT = 5
var providers [CONCURRENT]*cmdProvider
var jobs [CONCURRENT]*mirrorJob
for i := 0; i < CONCURRENT; i++ {
c := cmdConfig{
name: fmt.Sprintf("job-%d", i),
upstreamURL: "http://mirrors.tuna.moe/",
command: "sleep 2",
workingDir: tmpDir,
logDir: tmpDir,
logFile: "/dev/null",
interval: 10 * time.Second,
}
var err error
providers[i], err = newCmdProvider(c)
So(err, ShouldBeNil)
jobs[i] = newMirrorJob(providers[i])
}
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, CONCURRENT-2)
countingJobs := func(managerChan chan jobMessage, totalJobs, concurrentCheck int) (peakConcurrent, counterFailed int) {
counterEnded := 0
counterRunning := 0
peakConcurrent = 0
counterFailed = 0
for counterEnded < totalJobs {
msg := <-managerChan
switch msg.status {
case PreSyncing:
counterRunning++
case Syncing:
case Failed:
counterFailed++
fallthrough
case Success:
counterEnded++
counterRunning--
default:
So(0, ShouldEqual, 1)
}
// Test if semaphore works
So(counterRunning, ShouldBeLessThanOrEqualTo, concurrentCheck)
if counterRunning > peakConcurrent {
peakConcurrent = counterRunning
}
}
// select {
// case msg := <-managerChan:
// logger.Errorf("extra message received: %v", msg)
// So(0, ShouldEqual, 1)
// case <-time.After(2 * time.Second):
// }
return
}
Convey("When we run them all", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
}
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
Convey("If we cancel one job", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobRestart
time.Sleep(200 * time.Millisecond)
}
// Cancel the one waiting for semaphore
jobs[len(jobs)-1].ctrlChan <- jobStop
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT-1, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
Convey("If we override the concurrent limit", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(200 * time.Millisecond)
}
jobs[len(jobs)-1].ctrlChan <- jobForceStart
jobs[len(jobs)-2].ctrlChan <- jobForceStart
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT)
So(peakConcurrent, ShouldEqual, CONCURRENT)
So(counterFailed, ShouldEqual, 0)
time.Sleep(1 * time.Second)
// fmt.Println("Restart them")
for _, job := range jobs {
job.ctrlChan <- jobStart
}
peakConcurrent, counterFailed = countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
})
}


@@ -14,12 +14,13 @@ import (
type logLimiter struct {
emptyHook
-provider mirrorProvider
}
func newLogLimiter(provider mirrorProvider) *logLimiter {
return &logLimiter{
-provider: provider,
+emptyHook: emptyHook{
+provider: provider,
+},
}
}


@@ -24,9 +24,9 @@ type mirrorProvider interface {
Type() providerEnum Type() providerEnum
// run mirror job in background // Start then Wait
Run() error Run(started chan empty) error
// run mirror job in background // Start the job
Start() error Start() error
// Wait job to finish // Wait job to finish
Wait() error Wait() error
@@ -36,16 +36,23 @@ type mirrorProvider interface {
IsRunning() bool IsRunning() bool
// Cgroup // Cgroup
Cgroup() *cgroupHook Cgroup() *cgroupHook
// ZFS
ZFS() *zfsHook
// Docker
Docker() *dockerHook
AddHook(hook jobHook) AddHook(hook jobHook)
Hooks() []jobHook Hooks() []jobHook
Interval() time.Duration Interval() time.Duration
Retry() int
Timeout() time.Duration
WorkingDir() string WorkingDir() string
LogDir() string LogDir() string
LogFile() string LogFile() string
IsMaster() bool IsMaster() bool
DataSize() string
// enter context // enter context
EnterContext() *Context EnterContext() *Context
@@ -76,12 +83,18 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
} }
if mirrorDir == "" { if mirrorDir == "" {
mirrorDir = filepath.Join( mirrorDir = filepath.Join(
cfg.Global.MirrorDir, mirror.Name, cfg.Global.MirrorDir, mirror.MirrorSubDir, mirror.Name,
) )
} }
if mirror.Interval == 0 { if mirror.Interval == 0 {
mirror.Interval = cfg.Global.Interval mirror.Interval = cfg.Global.Interval
} }
if mirror.Retry == 0 {
mirror.Retry = cfg.Global.Retry
}
if mirror.Timeout == 0 {
mirror.Timeout = cfg.Global.Timeout
}
logDir = formatLogDir(logDir, mirror) logDir = formatLogDir(logDir, mirror)
// IsMaster // IsMaster
@@ -103,36 +116,46 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
upstreamURL: mirror.Upstream, upstreamURL: mirror.Upstream,
command: mirror.Command, command: mirror.Command,
workingDir: mirrorDir, workingDir: mirrorDir,
failOnMatch: mirror.FailOnMatch,
sizePattern: mirror.SizePattern,
logDir: logDir, logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"), logFile: filepath.Join(logDir, "latest.log"),
interval: time.Duration(mirror.Interval) * time.Minute, interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
env: mirror.Env, env: mirror.Env,
} }
p, err := newCmdProvider(pc) p, err := newCmdProvider(pc)
p.isMaster = isMaster
if err != nil { if err != nil {
panic(err) panic(err)
} }
p.isMaster = isMaster
provider = p provider = p
     case provRsync:
         rc := rsyncConfig{
             name: mirror.Name,
             upstreamURL: mirror.Upstream,
             rsyncCmd: mirror.Command,
             username: mirror.Username,
             password: mirror.Password,
             excludeFile: mirror.ExcludeFile,
+            extraOptions: mirror.RsyncOptions,
+            overriddenOptions: mirror.RsyncOverride,
+            rsyncEnv: mirror.Env,
             workingDir: mirrorDir,
             logDir: logDir,
             logFile: filepath.Join(logDir, "latest.log"),
             useIPv6: mirror.UseIPv6,
+            useIPv4: mirror.UseIPv4,
             interval: time.Duration(mirror.Interval) * time.Minute,
+            retry: mirror.Retry,
+            timeout: time.Duration(mirror.Timeout) * time.Second,
         }
         p, err := newRsyncProvider(rc)
-        p.isMaster = isMaster
         if err != nil {
             panic(err)
         }
+        p.isMaster = isMaster
         provider = p
     case provTwoStageRsync:
         rc := twoStageRsyncConfig{
@@ -143,17 +166,21 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
             username: mirror.Username,
             password: mirror.Password,
             excludeFile: mirror.ExcludeFile,
+            extraOptions: mirror.RsyncOptions,
+            rsyncEnv: mirror.Env,
             workingDir: mirrorDir,
             logDir: logDir,
             logFile: filepath.Join(logDir, "latest.log"),
             useIPv6: mirror.UseIPv6,
             interval: time.Duration(mirror.Interval) * time.Minute,
+            retry: mirror.Retry,
+            timeout: time.Duration(mirror.Timeout) * time.Second,
         }
         p, err := newTwoStageRsyncProvider(rc)
-        p.isMaster = isMaster
         if err != nil {
             panic(err)
         }
+        p.isMaster = isMaster
         provider = p
     default:
         panic(errors.New("Invalid mirror provider"))
@@ -162,10 +189,27 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
     // Add Logging Hook
     provider.AddHook(newLogLimiter(provider))

-    // Add Cgroup Hook
-    if cfg.Cgroup.Enable {
+    // Add ZFS Hook
+    if cfg.ZFS.Enable {
+        provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
+    }
+    // Add Btrfs Snapshot Hook
+    if cfg.BtrfsSnapshot.Enable {
+        provider.AddHook(newBtrfsSnapshotHook(provider, cfg.BtrfsSnapshot.SnapshotPath, mirror))
+    }
+    // Add Docker Hook
+    if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
+        provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))
+    } else if cfg.Cgroup.Enable {
+        // Add Cgroup Hook
         provider.AddHook(
-            newCgroupHook(provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group),
+            newCgroupHook(
+                provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group,
+                cfg.Cgroup.Subsystem, mirror.MemoryLimit,
+            ),
         )
     }
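Note the precedence the wiring above encodes: ZFS and Btrfs snapshot hooks are independent toggles, while Docker and cgroup isolation are mutually exclusive, Docker winning whenever it is enabled and the mirror names an image. A condensed restatement with plain booleans standing in for the config structs:

package main

import "fmt"

// Condensed restatement of the hook selection above.
func pickIsolation(dockerEnabled bool, dockerImage string, cgroupEnabled bool) string {
	if dockerEnabled && len(dockerImage) > 0 {
		return "docker" // containerized job; the cgroup hook is skipped
	}
	if cgroupEnabled {
		return "cgroup" // cgexec wrapping, as before
	}
	return "none"
}

func main() {
	fmt.Println(pickIsolation(true, "alpine:3", true))  // docker
	fmt.Println(pickIsolation(true, "", true))          // cgroup: no image configured
	fmt.Println(pickIsolation(false, "alpine:3", true)) // cgroup
}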


@@ -5,6 +5,7 @@ import (
     "io/ioutil"
     "os"
     "path/filepath"
+    "strconv"
     "testing"
     "time"
@@ -27,6 +28,7 @@ func TestRsyncProvider(t *testing.T) {
         logDir: tmpDir,
         logFile: tmpFile,
         useIPv6: true,
+        timeout: 100 * time.Second,
         interval: 600 * time.Second,
     }
@@ -39,6 +41,7 @@ func TestRsyncProvider(t *testing.T) {
     So(provider.LogDir(), ShouldEqual, c.logDir)
     So(provider.LogFile(), ShouldEqual, c.logFile)
     So(provider.Interval(), ShouldEqual, c.interval)
+    So(provider.Timeout(), ShouldEqual, c.timeout)

     Convey("When entering a context (auto exit)", func() {
         func() {
@@ -73,34 +76,66 @@ func TestRsyncProvider(t *testing.T) {
 echo "syncing to $(pwd)"
 echo $RSYNC_PASSWORD $@
 sleep 1
+echo "Total file size: 1.33T bytes"
 echo "Done"
 exit 0
 `
         err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)

+        targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
         expectedOutput := fmt.Sprintf(
             "syncing to %s\n"+
                 "%s\n"+
+                "Total file size: 1.33T bytes\n"+
                 "Done\n",
-            provider.WorkingDir(),
+            targetDir,
             fmt.Sprintf(
                 "-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
                     "--delete --delete-after --delay-updates --safe-links "+
-                    "--timeout=120 --contimeout=120 -6 %s %s",
+                    "--timeout=120 -6 %s %s",
                 provider.upstreamURL, provider.WorkingDir(),
             ),
         )

-        err = provider.Run()
+        err = provider.Run(make(chan empty, 1))
         So(err, ShouldBeNil)
         loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldEqual, expectedOutput)
         // fmt.Println(string(loggedContent))
+        So(provider.DataSize(), ShouldEqual, "1.33T")
     })
 })
+
+Convey("If the rsync program fails", t, func() {
+    tmpDir, err := ioutil.TempDir("", "tunasync")
+    defer os.RemoveAll(tmpDir)
+    So(err, ShouldBeNil)
+    tmpFile := filepath.Join(tmpDir, "log_file")
+
+    Convey("in the rsyncProvider", func() {
+        c := rsyncConfig{
+            name: "tuna",
+            upstreamURL: "rsync://rsync.tuna.moe/tuna/",
+            workingDir: tmpDir,
+            logDir: tmpDir,
+            logFile: tmpFile,
+            extraOptions: []string{"--somethine-invalid"},
+            interval: 600 * time.Second,
+        }
+        provider, err := newRsyncProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldNotBeNil)
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
+        So(err, ShouldBeNil)
+        So(string(loggedContent), ShouldContainSubstring, "Syntax or usage error")
+    })
+})
 }
 func TestRsyncProviderWithAuthentication(t *testing.T) {
@@ -110,18 +145,21 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
     So(err, ShouldBeNil)
     scriptFile := filepath.Join(tmpDir, "myrsync")
     tmpFile := filepath.Join(tmpDir, "log_file")
+    proxyAddr := "127.0.0.1:1233"

     c := rsyncConfig{
         name: "tuna",
         upstreamURL: "rsync://rsync.tuna.moe/tuna/",
         rsyncCmd: scriptFile,
         username: "tunasync",
         password: "tunasyncpassword",
         workingDir: tmpDir,
+        extraOptions: []string{"--delete-excluded"},
+        rsyncEnv: map[string]string{"RSYNC_PROXY": proxyAddr},
         logDir: tmpDir,
         logFile: tmpFile,
-        useIPv6: true,
+        useIPv4: true,
         interval: 600 * time.Second,
     }

     provider, err := newRsyncProvider(c)
@@ -136,7 +174,7 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
     Convey("Let's try a run", func() {
         scriptContent := `#!/bin/bash
 echo "syncing to $(pwd)"
-echo $USER $RSYNC_PASSWORD $@
+echo $USER $RSYNC_PASSWORD $RSYNC_PROXY $@
 sleep 1
 echo "Done"
 exit 0
@@ -144,20 +182,84 @@ exit 0
         err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
         So(err, ShouldBeNil)

+        targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
         expectedOutput := fmt.Sprintf(
             "syncing to %s\n"+
                 "%s\n"+
                 "Done\n",
-            provider.WorkingDir(),
+            targetDir,
             fmt.Sprintf(
-                "%s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
+                "%s %s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
                     "--delete --delete-after --delay-updates --safe-links "+
-                    "--timeout=120 --contimeout=120 -6 %s %s",
-                provider.username, provider.password, provider.upstreamURL, provider.WorkingDir(),
+                    "--timeout=120 -4 --delete-excluded %s %s",
+                provider.username, provider.password, proxyAddr,
+                provider.upstreamURL, provider.WorkingDir(),
             ),
         )

-        err = provider.Run()
+        err = provider.Run(make(chan empty, 1))
         So(err, ShouldBeNil)
         loggedContent, err := ioutil.ReadFile(provider.LogFile())
         So(err, ShouldBeNil)
         So(string(loggedContent), ShouldEqual, expectedOutput)
         // fmt.Println(string(loggedContent))
     })
 })
 }
+
+func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
+    Convey("Rsync Provider with overridden options should work", t, func() {
+        tmpDir, err := ioutil.TempDir("", "tunasync")
+        defer os.RemoveAll(tmpDir)
+        So(err, ShouldBeNil)
+        scriptFile := filepath.Join(tmpDir, "myrsync")
+        tmpFile := filepath.Join(tmpDir, "log_file")
+
+        c := rsyncConfig{
+            name: "tuna",
+            upstreamURL: "rsync://rsync.tuna.moe/tuna/",
+            rsyncCmd: scriptFile,
+            workingDir: tmpDir,
+            overriddenOptions: []string{"-aHvh", "--no-o", "--no-g", "--stats"},
+            extraOptions: []string{"--delete-excluded"},
+            logDir: tmpDir,
+            logFile: tmpFile,
+            useIPv6: true,
+            interval: 600 * time.Second,
+        }
+
+        provider, err := newRsyncProvider(c)
+        So(err, ShouldBeNil)
+
+        So(provider.Name(), ShouldEqual, c.name)
+        So(provider.WorkingDir(), ShouldEqual, c.workingDir)
+        So(provider.LogDir(), ShouldEqual, c.logDir)
+        So(provider.LogFile(), ShouldEqual, c.logFile)
+        So(provider.Interval(), ShouldEqual, c.interval)
+
+        Convey("Let's try a run", func() {
+            scriptContent := `#!/bin/bash
+echo "syncing to $(pwd)"
+echo $@
+sleep 1
+echo "Done"
+exit 0
+`
+            err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
+            So(err, ShouldBeNil)
+
+            targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
+            expectedOutput := fmt.Sprintf(
+                "syncing to %s\n"+
+                    "-aHvh --no-o --no-g --stats -6 --delete-excluded %s %s\n"+
+                    "Done\n",
+                targetDir,
+                provider.upstreamURL,
+                provider.WorkingDir(),
+            )
+
+            err = provider.Run(make(chan empty, 1))
+            So(err, ShouldBeNil)
+            loggedContent, err := ioutil.ReadFile(provider.LogFile())
+            So(err, ShouldBeNil)
+            So(string(loggedContent), ShouldEqual, expectedOutput)
+            // fmt.Println(string(loggedContent))
+        })
+    })
+}
@@ -221,7 +323,7 @@ echo $AOSP_REPO_BIN
     So(err, ShouldBeNil)
    So(readedScriptContent, ShouldResemble, []byte(scriptContent))

-    err = provider.Run()
+    err = provider.Run(make(chan empty, 1))
     So(err, ShouldBeNil)
     loggedContent, err := ioutil.ReadFile(provider.LogFile())
@@ -237,29 +339,146 @@ echo $AOSP_REPO_BIN
     So(err, ShouldBeNil)
     So(readedScriptContent, ShouldResemble, []byte(scriptContent))

-    err = provider.Run()
+    err = provider.Run(make(chan empty, 1))
     So(err, ShouldNotBeNil)
 })

 Convey("If a long job is killed", func(ctx C) {
     scriptContent := `#!/bin/bash
-sleep 5
+sleep 10
 `
     err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
     So(err, ShouldBeNil)

+    started := make(chan empty, 1)
     go func() {
-        err = provider.Run()
+        err := provider.Run(started)
         ctx.So(err, ShouldNotBeNil)
     }()

+    <-started
+    So(provider.IsRunning(), ShouldBeTrue)
     time.Sleep(1 * time.Second)
     err = provider.Terminate()
     So(err, ShouldBeNil)
 })
 })
+
+Convey("Command Provider without log file should work", t, func(ctx C) {
+    tmpDir, err := ioutil.TempDir("", "tunasync")
+    defer os.RemoveAll(tmpDir)
+    So(err, ShouldBeNil)
+
+    c := cmdConfig{
+        name: "run-ls",
+        upstreamURL: "http://mirrors.tuna.moe/",
+        command: "ls",
+        workingDir: tmpDir,
+        logDir: tmpDir,
+        logFile: "/dev/null",
+        interval: 600 * time.Second,
+    }
+
+    provider, err := newCmdProvider(c)
+    So(err, ShouldBeNil)
+
+    So(provider.IsMaster(), ShouldEqual, false)
+    So(provider.ZFS(), ShouldBeNil)
+    So(provider.Type(), ShouldEqual, provCommand)
+    So(provider.Name(), ShouldEqual, c.name)
+    So(provider.WorkingDir(), ShouldEqual, c.workingDir)
+    So(provider.LogDir(), ShouldEqual, c.logDir)
+    So(provider.LogFile(), ShouldEqual, c.logFile)
+    So(provider.Interval(), ShouldEqual, c.interval)
+
+    Convey("Run the command", func() {
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldBeNil)
+    })
+})
+
+Convey("Command Provider with RegExprs should work", t, func(ctx C) {
+    tmpDir, err := ioutil.TempDir("", "tunasync")
+    defer os.RemoveAll(tmpDir)
+    So(err, ShouldBeNil)
+    tmpFile := filepath.Join(tmpDir, "log_file")
+
+    c := cmdConfig{
+        name: "run-uptime",
+        upstreamURL: "http://mirrors.tuna.moe/",
+        command: "uptime",
+        failOnMatch: "",
+        sizePattern: "",
+        workingDir: tmpDir,
+        logDir: tmpDir,
+        logFile: tmpFile,
+        interval: 600 * time.Second,
+    }
+
+    Convey("when fail-on-match regexp matches", func() {
+        c.failOnMatch = `[a-z]+`
+        provider, err := newCmdProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldNotBeNil)
+        So(provider.DataSize(), ShouldBeEmpty)
+    })
+    Convey("when fail-on-match regexp does not match", func() {
+        c.failOnMatch = `load average_`
+        provider, err := newCmdProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldBeNil)
+    })
+    Convey("when fail-on-match regexp meets /dev/null", func() {
+        c.failOnMatch = `load average_`
+        c.logFile = "/dev/null"
+        provider, err := newCmdProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldNotBeNil)
+    })
+    Convey("when size-pattern regexp matches", func() {
+        c.sizePattern = `load average: ([\d\.]+)`
+        provider, err := newCmdProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldBeNil)
+        So(provider.DataSize(), ShouldNotBeEmpty)
+        _, err = strconv.ParseFloat(provider.DataSize(), 32)
+        So(err, ShouldBeNil)
+    })
+    Convey("when size-pattern regexp does not match", func() {
+        c.sizePattern = `load ave: ([\d\.]+)`
+        provider, err := newCmdProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldBeNil)
+        So(provider.DataSize(), ShouldBeEmpty)
+    })
+    Convey("when size-pattern regexp meets /dev/null", func() {
+        c.sizePattern = `load ave: ([\d\.]+)`
+        c.logFile = "/dev/null"
+        provider, err := newCmdProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 1))
+        So(err, ShouldNotBeNil)
+        So(provider.DataSize(), ShouldBeEmpty)
+    })
+})
 }
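The fail-on-match and size-pattern cases above both boil down to regexp scans of the job's log output, with size-pattern's first capture group becoming DataSize(). A small self-contained sketch of that extraction, using the same uptime-style input the tests rely on:

package main

import (
	"fmt"
	"regexp"
)

// Sketch of the size-pattern extraction exercised above: the first
// capture group of the configured regexp becomes DataSize().
func extractSize(log string, sizePattern string) string {
	re := regexp.MustCompile(sizePattern)
	m := re.FindStringSubmatch(log)
	if len(m) < 2 {
		return "" // no match: DataSize() stays empty
	}
	return m[1]
}

func main() {
	log := "12:00:00 up 1 day, 1 user, load average: 0.42, 0.30, 0.25"
	fmt.Println(extractSize(log, `load average: ([\d\.]+)`)) // "0.42"
	fmt.Println(extractSize(log, `load ave: ([\d\.]+)`))     // ""
}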
 func TestTwoStageRsyncProvider(t *testing.T) {
@@ -280,6 +499,9 @@ func TestTwoStageRsyncProvider(t *testing.T) {
     logFile: tmpFile,
     useIPv6: true,
     excludeFile: tmpFile,
+    extraOptions: []string{"--delete-excluded", "--cache"},
+    username: "hello",
+    password: "world",
 }

 provider, err := newTwoStageRsyncProvider(c)
@@ -303,9 +525,10 @@ exit 0
     err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
     So(err, ShouldBeNil)

-    err = provider.Run()
+    err = provider.Run(make(chan empty, 2))
     So(err, ShouldBeNil)

+    targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
     expectedOutput := fmt.Sprintf(
         "syncing to %s\n"+
             "%s\n"+
@@ -313,18 +536,18 @@ exit 0
             "syncing to %s\n"+
             "%s\n"+
             "Done\n",
-        provider.WorkingDir(),
+        targetDir,
         fmt.Sprintf(
             "-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
-                "--timeout=120 --contimeout=120 --exclude dists/ -6 "+
+                "--timeout=120 --exclude dists/ -6 "+
                 "--exclude-from %s %s %s",
             provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
         ),
-        provider.WorkingDir(),
+        targetDir,
         fmt.Sprintf(
             "-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
                 "--delete --delete-after --delay-updates --safe-links "+
-                "--timeout=120 --contimeout=120 -6 --exclude-from %s %s %s",
+                "--timeout=120 --delete-excluded --cache -6 --exclude-from %s %s %s",
             provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
         ),
     )
@@ -338,32 +561,65 @@ exit 0
 Convey("Try terminating", func(ctx C) {
     scriptContent := `#!/bin/bash
 echo $@
-sleep 4
+sleep 10
 exit 0
 `
     err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
     So(err, ShouldBeNil)

+    started := make(chan empty, 2)
     go func() {
-        err = provider.Run()
+        err := provider.Run(started)
         ctx.So(err, ShouldNotBeNil)
     }()

+    <-started
+    So(provider.IsRunning(), ShouldBeTrue)
     time.Sleep(1 * time.Second)
     err = provider.Terminate()
     So(err, ShouldBeNil)

     expectedOutput := fmt.Sprintf(
         "-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
-            "--timeout=120 --contimeout=120 --exclude dists/ -6 "+
+            "--timeout=120 --exclude dists/ -6 "+
             "--exclude-from %s %s %s\n",
         provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
     )

     loggedContent, err := ioutil.ReadFile(provider.LogFile())
     So(err, ShouldBeNil)
-    So(string(loggedContent), ShouldEqual, expectedOutput)
+    So(string(loggedContent), ShouldStartWith, expectedOutput)
     // fmt.Println(string(loggedContent))
 })
 })
+
+Convey("If the rsync program fails", t, func(ctx C) {
+    tmpDir, err := ioutil.TempDir("", "tunasync")
+    defer os.RemoveAll(tmpDir)
+    So(err, ShouldBeNil)
+    tmpFile := filepath.Join(tmpDir, "log_file")
+
+    Convey("in the twoStageRsyncProvider", func() {
+        c := twoStageRsyncConfig{
+            name: "tuna-two-stage-rsync",
+            upstreamURL: "rsync://0.0.0.1/",
+            stage1Profile: "debian",
+            workingDir: tmpDir,
+            logDir: tmpDir,
+            logFile: tmpFile,
+            excludeFile: tmpFile,
+        }
+
+        provider, err := newTwoStageRsyncProvider(c)
+        So(err, ShouldBeNil)
+
+        err = provider.Run(make(chan empty, 2))
+        So(err, ShouldNotBeNil)
+        loggedContent, err := ioutil.ReadFile(provider.LogFile())
+        So(err, ShouldBeNil)
+        So(string(loggedContent), ShouldContainSubstring, "Error in socket I/O")
+    })
+})
 }


@@ -4,22 +4,30 @@ import (
     "errors"
     "strings"
     "time"
+
+    "github.com/tuna/tunasync/internal"
 )

 type rsyncConfig struct {
     name string
     rsyncCmd string
     upstreamURL, username, password, excludeFile string
+    extraOptions []string
+    overriddenOptions []string
+    rsyncEnv map[string]string
     workingDir, logDir, logFile string
-    useIPv6 bool
+    useIPv6, useIPv4 bool
     interval time.Duration
+    retry int
+    timeout time.Duration
 }

 // An RsyncProvider provides the implementation to rsync-based syncing jobs
 type rsyncProvider struct {
     baseProvider
     rsyncConfig
     options []string
+    dataSize string
 }

 func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
@@ -27,11 +35,16 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
     if !strings.HasSuffix(c.upstreamURL, "/") {
         return nil, errors.New("rsync upstream URL should ends with /")
     }
+    if c.retry == 0 {
+        c.retry = defaultMaxRetry
+    }
     provider := &rsyncProvider{
         baseProvider: baseProvider{
             name: c.name,
             ctx: NewContext(),
             interval: c.interval,
+            retry: c.retry,
+            timeout: c.timeout,
         },
         rsyncConfig: c,
     }
@@ -39,21 +52,38 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
     if c.rsyncCmd == "" {
         provider.rsyncCmd = "rsync"
     }
+    if c.rsyncEnv == nil {
+        provider.rsyncEnv = map[string]string{}
+    }
+    if c.username != "" {
+        provider.rsyncEnv["USER"] = c.username
+    }
+    if c.password != "" {
+        provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
+    }

     options := []string{
         "-aHvh", "--no-o", "--no-g", "--stats",
         "--exclude", ".~tmp~/",
         "--delete", "--delete-after", "--delay-updates",
-        "--safe-links", "--timeout=120", "--contimeout=120",
+        "--safe-links", "--timeout=120",
     }
+    if c.overriddenOptions != nil {
+        options = c.overriddenOptions
+    }

     if c.useIPv6 {
         options = append(options, "-6")
+    } else if c.useIPv4 {
+        options = append(options, "-4")
     }

     if c.excludeFile != "" {
         options = append(options, "--exclude-from", c.excludeFile)
     }
+    if c.extraOptions != nil {
+        options = append(options, c.extraOptions...)
+    }
     provider.options = options

     provider.ctx.Set(_WorkingDirKey, c.workingDir)
@@ -71,28 +101,45 @@ func (p *rsyncProvider) Upstream() string {
     return p.upstreamURL
 }

-func (p *rsyncProvider) Run() error {
+func (p *rsyncProvider) DataSize() string {
+    return p.dataSize
+}
+
+func (p *rsyncProvider) Run(started chan empty) error {
+    p.dataSize = ""
+    defer p.closeLogFile()
     if err := p.Start(); err != nil {
         return err
     }
-    return p.Wait()
+    started <- empty{}
+    if err := p.Wait(); err != nil {
+        code, msg := internal.TranslateRsyncErrorCode(err)
+        if code != 0 {
+            logger.Debug("Rsync exitcode %d (%s)", code, msg)
+            if p.logFileFd != nil {
+                p.logFileFd.WriteString(msg + "\n")
+            }
+        }
+        return err
+    }
+    p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
+    return nil
 }

 func (p *rsyncProvider) Start() error {
+    p.Lock()
+    defer p.Unlock()

-    env := map[string]string{}
-    if p.username != "" {
-        env["USER"] = p.username
-    }
-    if p.password != "" {
-        env["RSYNC_PASSWORD"] = p.password
+    if p.IsRunning() {
+        return errors.New("provider is currently running")
     }
     command := []string{p.rsyncCmd}
     command = append(command, p.options...)
     command = append(command, p.upstreamURL, p.WorkingDir())

-    p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
-    if err := p.prepareLogFile(); err != nil {
+    p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
+    if err := p.prepareLogFile(false); err != nil {
         return err
     }
@@ -100,5 +147,6 @@ func (p *rsyncProvider) Start() error {
         return err
     }
     p.isRunning.Store(true)
+    logger.Debugf("set isRunning to true: %s", p.Name())
     return nil
 }
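TranslateRsyncErrorCode maps rsync's numeric exit status to a human-readable message that Run appends to the log; the tests in this diff assert on code 1 ("Syntax or usage error") and code 10 ("Error in socket I/O"), both straight from rsync(1). A sketch of the idea, not the internal package's actual implementation:

package main

import (
	"fmt"
	"os/exec"
)

// A few of rsync's documented exit codes (see rsync(1)); this table
// is illustrative, not the full mapping used by the internal package.
var rsyncExitCodes = map[int]string{
	1:  "Syntax or usage error",
	10: "Error in socket I/O",
	23: "Partial transfer due to error",
	30: "Timeout in data send/receive",
}

// translate mimics the shape of TranslateRsyncErrorCode: pull the
// exit status out of the error returned by Wait()/Run(), then look it up.
func translate(err error) (int, string) {
	if ee, ok := err.(*exec.ExitError); ok {
		code := ee.ExitCode()
		return code, rsyncExitCodes[code]
	}
	return 0, ""
}

func main() {
	// Requires rsync to be installed; the bogus flag is the same one
	// the provider test uses to force exit code 1.
	err := exec.Command("rsync", "--somethine-invalid").Run()
	if code, msg := translate(err); code != 0 {
		fmt.Printf("rsync exitcode %d (%s)\n", code, msg)
	}
}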


@@ -2,6 +2,7 @@ package worker
 import (
     "errors"
+    "fmt"
     "os"
     "os/exec"
     "strings"
@@ -9,6 +10,7 @@ import (
     "syscall"
     "time"

+    "github.com/codeskyblue/go-sh"
     "golang.org/x/sys/unix"
 )
@@ -31,11 +33,44 @@ type cmdJob struct {
 func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, env map[string]string) *cmdJob {
     var cmd *exec.Cmd

-    if provider.Cgroup() != nil {
+    if d := provider.Docker(); d != nil {
+        c := "docker"
+        args := []string{
+            "run", "--rm",
+            "-a", "STDOUT", "-a", "STDERR",
+            "--name", d.Name(),
+            "-w", workingDir,
+        }
+        // specify user
+        args = append(
+            args, "-u",
+            fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()),
+        )
+        // add volumes
+        for _, vol := range d.Volumes() {
+            logger.Debugf("volume: %s", vol)
+            args = append(args, "-v", vol)
+        }
+        // set env
+        for k, v := range env {
+            kv := fmt.Sprintf("%s=%s", k, v)
+            args = append(args, "-e", kv)
+        }
+        // apply options
+        args = append(args, d.options...)
+        // apply image
+        args = append(args, d.image)
+        // apply command
+        args = append(args, cmdAndArgs...)
+
+        cmd = exec.Command(c, args...)
+    } else if provider.Cgroup() != nil {
         c := "cgexec"
         args := []string{"-g", provider.Cgroup().Cgroup()}
         args = append(args, cmdAndArgs...)
         cmd = exec.Command(c, args...)
     } else {
         if len(cmdAndArgs) == 1 {
             cmd = exec.Command(cmdAndArgs[0])
@@ -48,25 +83,28 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, env map[string]string) *cmdJob {
         }
     }

-    logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
-    if _, err := os.Stat(workingDir); os.IsNotExist(err) {
-        logger.Debugf("Making dir %s", workingDir)
-        if err = os.MkdirAll(workingDir, 0755); err != nil {
-            logger.Errorf("Error making dir %s", workingDir)
+    if provider.Docker() == nil {
+        logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
+        if _, err := os.Stat(workingDir); os.IsNotExist(err) {
+            logger.Debugf("Making dir %s", workingDir)
+            if err = os.MkdirAll(workingDir, 0755); err != nil {
+                logger.Errorf("Error making dir %s: %s", workingDir, err.Error())
+            }
         }
+        cmd.Dir = workingDir
+        cmd.Env = newEnviron(env, true)
     }
-    cmd.Dir = workingDir
-    cmd.Env = newEnviron(env, true)

     return &cmdJob{
         cmd: cmd,
         workingDir: workingDir,
         env: env,
+        provider: provider,
     }
 }

 func (c *cmdJob) Start() error {
+    logger.Debugf("Command start: %v", c.cmd.Args)
     c.finished = make(chan empty, 1)
     return c.cmd.Start()
 }
@@ -95,6 +133,14 @@ func (c *cmdJob) Terminate() error {
     if c.cmd == nil || c.cmd.Process == nil {
         return errProcessNotStarted
     }
+
+    if d := c.provider.Docker(); d != nil {
+        sh.Command(
+            "docker", "stop", "-t", "2", d.Name(),
+        ).Run()
+        return nil
+    }
+
     err := unix.Kill(c.cmd.Process.Pid, syscall.SIGTERM)
     if err != nil {
         return err
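Putting the argument assembly above together: for a hypothetical dockerized mirror, the generated invocation has the shape below (name, image, volume, environment, and command are made-up examples; any d.options from the worker config would be spliced in before the image):

package main

import (
	"fmt"
	"os"
	"strings"
)

// Reconstruction of the argument order newCmdJob produces for a
// dockerized provider; all concrete values here are hypothetical.
func main() {
	args := []string{
		"run", "--rm",
		"-a", "STDOUT", "-a", "STDERR",
		"--name", "tunasync-job-foo",
		"-w", "/data/mirrors/foo",
		"-u", fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid()),
		"-v", "/data/mirrors/foo:/data/mirrors/foo",
		"-e", "RSYNC_PASSWORD=secret",
		// (docker_options from the worker config would go here)
		"alpine:3",
		"sync-foo.sh",
	}
	fmt.Println("docker", strings.Join(args, " "))
}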


@@ -15,6 +15,11 @@ type scheduleQueue struct {
     jobs map[string]bool
 }

+type jobScheduleInfo struct {
+    jobName string
+    nextScheduled time.Time
+}
+
 func timeLessThan(l, r interface{}) bool {
     tl := l.(time.Time)
     tr := r.(time.Time)
@@ -28,6 +33,20 @@ func newScheduleQueue() *scheduleQueue {
     return queue
 }

+func (q *scheduleQueue) GetJobs() (jobs []jobScheduleInfo) {
+    cur := q.list.Iterator()
+    defer cur.Close()
+    for cur.Next() {
+        cj := cur.Value().(*mirrorJob)
+        jobs = append(jobs, jobScheduleInfo{
+            cj.Name(),
+            cur.Key().(time.Time),
+        })
+    }
+    return
+}
+
 func (q *scheduleQueue) AddJob(schedTime time.Time, job *mirrorJob) {
     q.Lock()
     defer q.Unlock()


@@ -5,6 +5,8 @@ import (
     "fmt"
     "strings"
     "time"
+
+    "github.com/tuna/tunasync/internal"
 )

 type twoStageRsyncConfig struct {
@@ -12,9 +14,13 @@ type twoStageRsyncConfig struct {
     rsyncCmd string
     stage1Profile string
     upstreamURL, username, password, excludeFile string
+    extraOptions []string
+    rsyncEnv map[string]string
     workingDir, logDir, logFile string
     useIPv6 bool
     interval time.Duration
+    retry int
+    timeout time.Duration
 }

 // An RsyncProvider provides the implementation to rsync-based syncing jobs
@@ -23,6 +29,7 @@ type twoStageRsyncProvider struct {
     twoStageRsyncConfig
     stage1Options []string
     stage2Options []string
+    dataSize string
 }

 var rsyncStage1Profiles = map[string]([]string){
@@ -38,27 +45,41 @@ func newTwoStageRsyncProvider(c twoStageRsyncConfig) (*twoStageRsyncProvider, error) {
     if !strings.HasSuffix(c.upstreamURL, "/") {
         return nil, errors.New("rsync upstream URL should ends with /")
     }
+    if c.retry == 0 {
+        c.retry = defaultMaxRetry
+    }
     provider := &twoStageRsyncProvider{
         baseProvider: baseProvider{
             name: c.name,
             ctx: NewContext(),
             interval: c.interval,
+            retry: c.retry,
+            timeout: c.timeout,
         },
         twoStageRsyncConfig: c,
         stage1Options: []string{
             "-aHvh", "--no-o", "--no-g", "--stats",
             "--exclude", ".~tmp~/",
-            "--safe-links", "--timeout=120", "--contimeout=120",
+            "--safe-links", "--timeout=120",
         },
         stage2Options: []string{
             "-aHvh", "--no-o", "--no-g", "--stats",
             "--exclude", ".~tmp~/",
             "--delete", "--delete-after", "--delay-updates",
-            "--safe-links", "--timeout=120", "--contimeout=120",
+            "--safe-links", "--timeout=120",
         },
     }

+    if c.rsyncEnv == nil {
+        provider.rsyncEnv = map[string]string{}
+    }
+    if c.username != "" {
+        provider.rsyncEnv["USER"] = c.username
+    }
+    if c.password != "" {
+        provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
+    }
     if c.rsyncCmd == "" {
         provider.rsyncCmd = "rsync"
     }
@@ -78,6 +99,10 @@ func (p *twoStageRsyncProvider) Upstream() string {
     return p.upstreamURL
 }

+func (p *twoStageRsyncProvider) DataSize() string {
+    return p.dataSize
+}
+
 func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
     var options []string
     if stage == 1 {
@@ -92,6 +117,9 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
     } else if stage == 2 {
         options = append(options, p.stage2Options...)
+        if p.extraOptions != nil {
+            options = append(options, p.extraOptions...)
+        }
     } else {
         return []string{}, fmt.Errorf("Invalid stage: %d", stage)
     }
@@ -107,17 +135,15 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
     return options, nil
 }

-func (p *twoStageRsyncProvider) Run() error {
-    defer p.Wait()
-
-    env := map[string]string{}
-    if p.username != "" {
-        env["USER"] = p.username
-    }
-    if p.password != "" {
-        env["RSYNC_PASSWORD"] = p.password
+func (p *twoStageRsyncProvider) Run(started chan empty) error {
+    p.Lock()
+    defer p.Unlock()
+    if p.IsRunning() {
+        return errors.New("provider is currently running")
     }
+    p.dataSize = ""

     stages := []int{1, 2}
     for _, stage := range stages {
         command := []string{p.rsyncCmd}
@@ -128,21 +154,33 @@ func (p *twoStageRsyncProvider) Run() error {
         command = append(command, options...)
         command = append(command, p.upstreamURL, p.WorkingDir())

-        p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
-        if err := p.prepareLogFile(); err != nil {
+        p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
+        if err := p.prepareLogFile(stage > 1); err != nil {
             return err
         }
+        defer p.closeLogFile()

         if err = p.cmd.Start(); err != nil {
             return err
         }
         p.isRunning.Store(true)
+        logger.Debugf("set isRunning to true: %s", p.Name())
+        started <- empty{}

-        err = p.cmd.Wait()
-        p.isRunning.Store(false)
+        p.Unlock()
+        err = p.Wait()
+        p.Lock()
         if err != nil {
+            code, msg := internal.TranslateRsyncErrorCode(err)
+            if code != 0 {
+                logger.Debug("Rsync exitcode %d (%s)", code, msg)
+                if p.logFileFd != nil {
+                    p.logFileFd.WriteString(msg + "\n")
+                }
+            }
             return err
         }
     }
+
+    p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
     return nil
 }
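For reference, the effective rsync invocations after this change, condensed from the test expectations earlier in this diff: stage one uses the profile's excludes (dists/ for the debian profile) and no deletion, stage two is the full sync, and extraOptions (here the test's --delete-excluded --cache) apply to stage two only. Angle-bracket placeholders stand for configured values:

package main

import "fmt"

// The two command lines a "debian" two-stage sync produces after this
// change, condensed from the test expectations above.
func main() {
	stage1 := "rsync -aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links " +
		"--timeout=120 --exclude dists/ -6 --exclude-from <excludeFile> <upstream> <workingDir>"
	stage2 := "rsync -aHvh --no-o --no-g --stats --exclude .~tmp~/ " +
		"--delete --delete-after --delay-updates --safe-links " +
		"--timeout=120 --delete-excluded --cache -6 --exclude-from <excludeFile> <upstream> <workingDir>"
	fmt.Println(stage1)
	fmt.Println(stage2)
}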


@@ -12,8 +12,6 @@ import (
     . "github.com/tuna/tunasync/internal"
 )

-var tunasyncWorker *Worker
-
 // A Worker is a instance of tunasync worker
 type Worker struct {
     L sync.Mutex
@@ -29,10 +27,11 @@ type Worker struct {
     httpClient *http.Client
 }

-// GetTUNASyncWorker returns a singalton worker
-func GetTUNASyncWorker(cfg *Config) *Worker {
-    if tunasyncWorker != nil {
-        return tunasyncWorker
+// NewTUNASyncWorker creates a worker
+func NewTUNASyncWorker(cfg *Config) *Worker {
+    if cfg.Global.Retry == 0 {
+        cfg.Global.Retry = defaultMaxRetry
     }

     w := &Worker{
@@ -55,12 +54,8 @@ func GetTUNASyncWorker(cfg *Config) *Worker {
         w.httpClient = httpClient
     }

-    if cfg.Cgroup.Enable {
-        initCgroup(cfg.Cgroup.BasePath)
-    }
     w.initJobs()
     w.makeHTTPServer()
-    tunasyncWorker = w
     return w
 }
@@ -222,7 +217,11 @@ func (w *Worker) makeHTTPServer() {
         }
         switch cmd.Cmd {
         case CmdStart:
-            job.ctrlChan <- jobStart
+            if cmd.Options["force"] {
+                job.ctrlChan <- jobForceStart
+            } else {
+                job.ctrlChan <- jobStart
+            }
         case CmdRestart:
             job.ctrlChan <- jobRestart
         case CmdStop:
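Client side, the new force path is selected by an option on the command payload. A hypothetical sketch (WorkerCmd's Options field is assumed to be a map keyed by option name, as cmd.Options["force"] above implies; PostJSON and the command types come from the internal package used throughout this diff):

package client

import (
	"net/http"

	. "github.com/tuna/tunasync/internal"
)

// Hypothetical client-side counterpart of the handler above: setting
// Options["force"] makes the worker enqueue jobForceStart instead of
// jobStart.
func ForceStart(workerURL string, httpClient *http.Client, mirror string) error {
	workerCmd := WorkerCmd{
		Cmd:      CmdStart,
		MirrorID: mirror,
		Options:  map[string]bool{"force": true},
	}
	_, err := PostJSON(workerURL, workerCmd, httpClient)
	return err
}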
@@ -309,6 +308,9 @@ func (w *Worker) runSchedule() {
     w.L.Unlock()

+    schedInfo := w.schedule.GetJobs()
+    w.updateSchedInfo(schedInfo)
+
     tick := time.Tick(5 * time.Second)
     for {
         select {
@@ -345,6 +347,9 @@ func (w *Worker) runSchedule() {
                 w.schedule.AddJob(schedTime, job)
             }

+            schedInfo = w.schedule.GetJobs()
+            w.updateSchedInfo(schedInfo)
+
         case <-tick:
             // check schedule every 5 seconds
             if job := w.schedule.Pop(); job != nil {
@@ -389,28 +394,21 @@ func (w *Worker) URL() string {
 }

 func (w *Worker) registorWorker() {
-    url := fmt.Sprintf(
-        "%s/workers",
-        w.cfg.Manager.APIBase,
-    )
-
     msg := WorkerStatus{
         ID: w.Name(),
         URL: w.URL(),
     }

-    if _, err := PostJSON(url, msg, w.httpClient); err != nil {
-        logger.Errorf("Failed to register worker")
+    for _, root := range w.cfg.Manager.APIBaseList() {
+        url := fmt.Sprintf("%s/workers", root)
+        logger.Debugf("register on manager url: %s", url)
+        if _, err := PostJSON(url, msg, w.httpClient); err != nil {
+            logger.Errorf("Failed to register worker")
+        }
     }
 }

 func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
-    url := fmt.Sprintf(
-        "%s/workers/%s/jobs/%s",
-        w.cfg.Manager.APIBase,
-        w.Name(),
-        jobMsg.name,
-    )
-
     p := job.provider
     smsg := MirrorStatus{
         Name: jobMsg.name,
@@ -422,19 +420,49 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
         ErrorMsg: jobMsg.msg,
     }

-    if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
-        logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
+    // Certain Providers (rsync for example) may know the size of mirror,
+    // so we report it to Manager here
+    if len(job.size) != 0 {
+        smsg.Size = job.size
+    }
+
+    for _, root := range w.cfg.Manager.APIBaseList() {
+        url := fmt.Sprintf(
+            "%s/workers/%s/jobs/%s", root, w.Name(), jobMsg.name,
+        )
+        logger.Debugf("reporting on manager url: %s", url)
+        if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
+            logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
+        }
+    }
+}
+
+func (w *Worker) updateSchedInfo(schedInfo []jobScheduleInfo) {
+    var s []MirrorSchedule
+    for _, sched := range schedInfo {
+        s = append(s, MirrorSchedule{
+            MirrorName: sched.jobName,
+            NextSchedule: sched.nextScheduled,
+        })
+    }
+    msg := MirrorSchedules{Schedules: s}
+
+    for _, root := range w.cfg.Manager.APIBaseList() {
+        url := fmt.Sprintf(
+            "%s/workers/%s/schedules", root, w.Name(),
+        )
+        logger.Debugf("reporting on manager url: %s", url)
+        if _, err := PostJSON(url, msg, w.httpClient); err != nil {
+            logger.Errorf("Failed to upload schedules: %s", err.Error())
+        }
     }
 }

 func (w *Worker) fetchJobStatus() []MirrorStatus {
     var mirrorList []MirrorStatus
+    apiBase := w.cfg.Manager.APIBaseList()[0]

-    url := fmt.Sprintf(
-        "%s/workers/%s/jobs",
-        w.cfg.Manager.APIBase,
-        w.Name(),
-    )
+    url := fmt.Sprintf("%s/workers/%s/jobs", apiBase, w.Name())

     if _, err := GetJSON(url, &mirrorList, w.httpClient); err != nil {
         logger.Errorf("Failed to fetch job status: %s", err.Error())
worker/worker_test.go (new file, 255 lines)

@@ -0,0 +1,255 @@
package worker
import (
"net/http"
"strconv"
"testing"
"time"
"github.com/gin-gonic/gin"
. "github.com/smartystreets/goconvey/convey"
. "github.com/tuna/tunasync/internal"
)
type workTestFunc func(*Worker)
var managerPort = 5001
var workerPort = 5002
func makeMockManagerServer(recvData chan interface{}) *gin.Engine {
r := gin.Default()
r.GET("/ping", func(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"_infoKey": "pong"})
})
r.POST("/workers", func(c *gin.Context) {
var _worker WorkerStatus
c.BindJSON(&_worker)
_worker.LastOnline = time.Now()
recvData <- _worker
c.JSON(http.StatusOK, _worker)
})
r.POST("/workers/dut/schedules", func(c *gin.Context) {
var _sch MirrorSchedules
c.BindJSON(&_sch)
recvData <- _sch
c.JSON(http.StatusOK, empty{})
})
r.POST("/workers/dut/jobs/:job", func(c *gin.Context) {
var status MirrorStatus
c.BindJSON(&status)
recvData <- status
c.JSON(http.StatusOK, status)
})
r.GET("/workers/dut/jobs", func(c *gin.Context) {
mirrorStatusList := []MirrorStatus{}
c.JSON(http.StatusOK, mirrorStatusList)
})
return r
}
func startWorkerThenStop(cfg *Config, tester workTestFunc) {
exitedChan := make(chan int)
w := NewTUNASyncWorker(cfg)
So(w, ShouldNotBeNil)
go func() {
w.Run()
exitedChan <- 1
}()
tester(w)
w.Halt()
select {
case exited := <-exitedChan:
So(exited, ShouldEqual, 1)
case <-time.After(2 * time.Second):
So(0, ShouldEqual, 1)
}
}
func sendCommandToWorker(workerURL string, httpClient *http.Client, cmd CmdVerb, mirror string) {
workerCmd := WorkerCmd{
Cmd: cmd,
MirrorID: mirror,
}
logger.Debugf("POST to %s with cmd %s", workerURL, cmd)
_, err := PostJSON(workerURL, workerCmd, httpClient)
So(err, ShouldBeNil)
}
func TestWorker(t *testing.T) {
InitLogger(false, true, false)
recvDataChan := make(chan interface{})
_s := makeMockManagerServer(recvDataChan)
httpServer := &http.Server{
Addr: "localhost:" + strconv.Itoa(managerPort),
Handler: _s,
ReadTimeout: 2 * time.Second,
WriteTimeout: 2 * time.Second,
}
go func() {
err := httpServer.ListenAndServe()
So(err, ShouldBeNil)
}()
// Wait for http server starting
time.Sleep(500 * time.Millisecond)
Convey("Worker should work", t, func(ctx C) {
httpClient, err := CreateHTTPClient("")
So(err, ShouldBeNil)
workerPort++
workerCfg := Config{
Global: globalConfig{
Name: "dut",
LogDir: "/tmp",
MirrorDir: "/tmp",
Concurrent: 2,
Interval: 1,
},
Server: serverConfig{
Hostname: "localhost",
Addr: "127.0.0.1",
Port: workerPort,
},
Manager: managerConfig{
APIBase: "http://localhost:" + strconv.Itoa(managerPort),
},
}
logger.Debugf("worker port %d", workerPort)
Convey("with no job", func(ctx C) {
dummyTester := func(*Worker) {
registered := false
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
registered = true
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(reg.URL, httpClient, CmdStart, "foobar")
} else if sch, ok := data.(MirrorSchedules); ok {
So(len(sch.Schedules), ShouldEqual, 0)
}
case <-time.After(2 * time.Second):
So(registered, ShouldBeTrue)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
Convey("with one job", func(ctx C) {
workerCfg.Mirrors = []mirrorConfig{
mirrorConfig{
Name: "job-ls",
Provider: provCommand,
Command: "ls",
},
}
dummyTester := func(*Worker) {
url := ""
jobRunning := false
lastStatus := SyncStatus(None)
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
url = reg.URL
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(url, httpClient, CmdStart, "job-ls")
} else if sch, ok := data.(MirrorSchedules); ok {
if !jobRunning {
So(len(sch.Schedules), ShouldEqual, 1)
So(sch.Schedules[0].MirrorName, ShouldEqual, "job-ls")
So(sch.Schedules[0].NextSchedule,
ShouldHappenBetween,
time.Now().Add(-2*time.Second),
time.Now().Add(1*time.Minute))
}
} else if status, ok := data.(MirrorStatus); ok {
logger.Noticef("Job %s status %s", status.Name, status.Status.String())
jobRunning = status.Status == PreSyncing || status.Status == Syncing
So(status.Status, ShouldNotEqual, Failed)
lastStatus = status.Status
}
case <-time.After(2 * time.Second):
So(url, ShouldNotEqual, "")
So(jobRunning, ShouldBeFalse)
So(lastStatus, ShouldEqual, Success)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
Convey("with several jobs", func(ctx C) {
workerCfg.Mirrors = []mirrorConfig{
mirrorConfig{
Name: "job-ls-1",
Provider: provCommand,
Command: "ls",
},
mirrorConfig{
Name: "job-fail",
Provider: provCommand,
Command: "non-existent-command-xxxx",
},
mirrorConfig{
Name: "job-ls-2",
Provider: provCommand,
Command: "ls",
},
}
dummyTester := func(*Worker) {
url := ""
lastStatus := make(map[string]SyncStatus)
nextSch := make(map[string]time.Time)
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
url = reg.URL
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(url, httpClient, CmdStart, "job-fail")
sendCommandToWorker(url, httpClient, CmdStart, "job-ls-1")
sendCommandToWorker(url, httpClient, CmdStart, "job-ls-2")
} else if sch, ok := data.(MirrorSchedules); ok {
//So(len(sch.Schedules), ShouldEqual, 3)
for _, item := range sch.Schedules {
nextSch[item.MirrorName] = item.NextSchedule
}
} else if status, ok := data.(MirrorStatus); ok {
logger.Noticef("Job %s status %s", status.Name, status.Status.String())
jobRunning := status.Status == PreSyncing || status.Status == Syncing
if !jobRunning {
if status.Name == "job-fail" {
So(status.Status, ShouldEqual, Failed)
} else {
So(status.Status, ShouldNotEqual, Failed)
}
}
lastStatus[status.Name] = status.Status
}
case <-time.After(2 * time.Second):
So(len(lastStatus), ShouldEqual, 3)
So(len(nextSch), ShouldEqual, 3)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
})
}

worker/zfs_hook.go (new file, 54 lines)

@@ -0,0 +1,54 @@
package worker
import (
"fmt"
"os"
"os/user"
"strings"
"github.com/codeskyblue/go-sh"
)
type zfsHook struct {
emptyHook
zpool string
}
func newZfsHook(provider mirrorProvider, zpool string) *zfsHook {
return &zfsHook{
emptyHook: emptyHook{
provider: provider,
},
zpool: zpool,
}
}
func (z *zfsHook) printHelpMessage() {
zfsDataset := fmt.Sprintf("%s/%s", z.zpool, z.provider.Name())
zfsDataset = strings.ToLower(zfsDataset)
workingDir := z.provider.WorkingDir()
logger.Infof("You may create the ZFS dataset with:")
logger.Infof(" zfs create '%s'", zfsDataset)
logger.Infof(" zfs set mountpoint='%s' '%s'", workingDir, zfsDataset)
usr, err := user.Current()
if err != nil || usr.Uid == "0" {
return
}
logger.Infof(" chown %s '%s'", usr.Uid, workingDir)
}
// check if working directory is a zfs dataset
func (z *zfsHook) preJob() error {
workingDir := z.provider.WorkingDir()
if _, err := os.Stat(workingDir); os.IsNotExist(err) {
logger.Errorf("Directory %s doesn't exist", workingDir)
z.printHelpMessage()
return err
}
if err := sh.Command("mountpoint", "-q", workingDir).Run(); err != nil {
logger.Errorf("%s is not a mount point", workingDir)
z.printHelpMessage()
return err
}
return nil
}
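The hook is purely a precondition gate: preJob only verifies that the working directory exists and is a mountpoint, leaving dataset creation to the operator via the printed hints. The mountpoint test can be reproduced standalone, assuming the mountpoint(1) utility the hook itself shells out to is installed:

package main

import (
	"fmt"
	"os/exec"
)

// Standalone version of the check zfsHook.preJob performs: the
// mountpoint(1) utility exits 0 only if the path is a mount point.
func isMountPoint(dir string) bool {
	return exec.Command("mountpoint", "-q", dir).Run() == nil
}

func main() {
	for _, dir := range []string{"/", "/tmp/not-a-dataset"} {
		fmt.Printf("%s mounted: %v\n", dir, isMountPoint(dir))
	}
}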

worker/zfs_hook_test.go (new file, 48 lines)

@@ -0,0 +1,48 @@
package worker
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func TestZFSHook(t *testing.T) {
Convey("ZFS Hook should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
name: "tuna_zfs_hook_test",
upstreamURL: "http://mirrors.tuna.moe/",
command: "ls",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 1 * time.Second,
}
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
Convey("When working directory doesn't exist", func(ctx C) {
errRm := os.RemoveAll(tmpDir)
So(errRm, ShouldBeNil)
hook := newZfsHook(provider, "test_pool")
err := hook.preJob()
So(err, ShouldNotBeNil)
})
Convey("When working directory is not a mount point", func(ctx C) {
defer os.RemoveAll(tmpDir)
hook := newZfsHook(provider, "test_pool")
err := hook.preJob()
So(err, ShouldNotBeNil)
})
})
}