Mirrored from https://github.com/tuna/tunasync.git (synced 2025-12-06 14:36:47 +00:00)

154 commits

Author SHA1 Message Commit date
z4yx
2afe1f2e06 bump version to 0.6.6 2020-06-17 22:12:11 +08:00
z4yx
1b099520b2 [manager] protect DB with RW lock 2020-06-17 22:10:39 +08:00
z4yx
85b2105a2b [worker] retry registration 2020-06-17 21:34:55 +08:00
zyx
45e5d900fb bump version to 0.6.5 2020-06-08 22:30:28 +08:00
zyx
7b0cd490b7 fix misuse of a variable 2020-06-08 22:23:12 +08:00
zyx
9178966aed bump version to 0.6.4 2020-06-04 09:44:17 +08:00
zyx
b5d2a0ad89 bug fix: jobs not being scheduled after timeout 2020-06-04 09:37:20 +08:00
zyx
d8963c9946 test rsync inside a Docker container 2020-06-03 21:51:04 +08:00
zyx
198afa72cd bug fix: rsync can access the exclude file in Docker (close #59) 2020-06-03 21:50:38 +08:00
zyx
85ce9c1270 wait for docker container removal 2020-06-03 19:47:14 +08:00
zyx
a8a35fc259 Merge branch 'master' of github.com:tuna/tunasync 2020-06-03 13:28:58 +08:00
zyx
c00eb12a75 Two new options for rsync provider
- rsync_no_timeout=true/false # disable --timeout option
- rsync_timeout=n # set --timeout=n
related to issue #121
2020-06-03 13:26:49 +08:00
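A minimal sketch of how these two keys would sit in a `[[mirrors]]` entry of `worker.conf`, assuming they are per-mirror settings like the other rsync options (name, upstream, and the timeout value are illustrative):
```toml
[[mirrors]]
name = "example"
provider = "rsync"
upstream = "rsync://example.org/pub/"
rsync_timeout = 300        # adds --timeout=300 to the rsync command line
# rsync_no_timeout = true  # alternatively, drop the --timeout option entirely
```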
Yuxiang Zhang
95ae9c16a9 Update workers.conf 2020-06-01 16:59:44 +08:00
zyx
0392ef28c7 bump version to 0.6.3 2020-05-25 19:21:27 +08:00
zyx
b2a22a9bbc update editor config 2020-05-25 19:16:53 +08:00
zyx
31862210ba implement the timeout 2020-05-25 19:15:05 +08:00
zyx
e47ba2097e add a timeout field to providers 2020-05-25 18:24:05 +08:00
zyx
e8c7ff3d7f config items of timeout 2020-05-25 18:08:31 +08:00
Yuxiang Zhang
7e7b469f1e Update workers.conf 2020-05-23 15:28:32 +08:00
Yuxiang Zhang
eac66c7554 add config examples of the worker (#118) 2020-05-23 15:23:15 +08:00
z4yx
38b0156fae [bug fix] provider is not terminated if premature stop command received 2020-05-09 18:42:54 +08:00
z4yx
c8e7d29f34 bump version to 0.6.2 2020-04-08 20:12:41 +08:00
Yuxiang Zhang
d40638d738 Merge pull request #116 from BITNP/laststarted
Add MirrorStatus.LastStarted property
2020-04-06 23:01:58 +08:00
Phy
471d865042 Add LastStarted test case 2020-04-05 01:07:46 -04:00
Phy
c1641b6714 Add MirrorStatus.LastStarted property
- status.Status is in PreSyncing, and
- curStatus.Status is not in PreSyncing
2020-04-05 00:12:10 -04:00
z4yx
b8edc1f714 bump version to 0.6 2020-03-29 12:48:29 +08:00
z4yx
001703a059 CI runs slower, give it more time 2020-03-29 12:01:39 +08:00
z4yx
2bbd4afda8 remove logger.Error() 2020-03-29 11:54:39 +08:00
z4yx
e8e6ab6ed6 Merge branch 'wip-newlog' 2020-03-29 11:47:53 +08:00
Yuxiang Zhang
3fed3f1cf3 Merge pull request #114 from tuna/nest_mirror
Support nested mirror config
2020-03-29 11:44:40 +08:00
z4yx
1491b6c42b format the code 2020-03-29 09:06:19 +08:00
Miao Wang
7a9895350b Support nested mirror config 2020-03-29 00:24:58 +08:00
z4yx
95d6acb026 tunasynctl: print command results with plain text instead of logging messages 2020-03-28 17:07:53 +08:00
z4yx
b132192448 Add a debugging log level to tunasynctl 2020-03-28 16:33:56 +08:00
z4yx
91209cab60 translate rsync exit code to error message (solve #20). May help #109 and #110 2020-03-28 16:26:40 +08:00
z4yx
1fb9f85862 closing log files where they were opened 2020-03-28 16:26:40 +08:00
Yuxiang Zhang
d10387e40b Merge pull request #112 from BITNP/cli-logging
Use proper logging for some debug output
2020-03-23 22:21:08 +08:00
Phy
5c01e3fa22 Use fmt.Println for cli JSON output 2020-03-23 10:19:55 -04:00
Phy
a44891d3e8 Set proper logging level on tunasynctl-cmd 2020-03-23 01:21:16 -04:00
Phy
4d461bd172 Use logger to print some debug messages than fmt.print 2020-03-23 01:20:49 -04:00
zyx
c5ed682a49 Bump version to 0.5.1 2020-03-20 10:39:34 +08:00
zyx
2c33380ce0 fix util_test 2020-03-20 10:35:54 +08:00
zyx
70cb22096f Merge branch 'master' of github.ip4.run:tuna/tunasync 2020-03-20 10:30:53 +08:00
zyx
b1f2679fbf [cmd provider] add support of match size in logs 2020-03-20 10:30:44 +08:00
Yuxiang Zhang
92a255fd3c Update tunasync.yml 2020-03-16 22:43:41 +08:00
zyx
aee1a705b7 remove "--contimeout=120" from default rsync options 2020-03-16 22:23:47 +08:00
zyx
c99916cc2a Bump version to 0.4.3 2020-03-16 22:03:40 +08:00
zyx
9eb72c5db0 fix misuse of variables 2020-03-16 21:59:34 +08:00
z4yx
b490c22984 add test of rsyncEnv 2020-03-16 21:16:23 +08:00
z4yx
ae5ff25d20 in case rsyncEnv is nil 2020-03-16 21:11:15 +08:00
z4yx
365f49e6d3 add support of env config for rsync provider 2020-03-16 20:59:08 +08:00
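The `redhat` entry in the workers.conf example further down this page shows this in practice; a trimmed sketch (proxy address illustrative):
```toml
[[mirrors]]
name = "redhat"
provider = "rsync"
upstream = "rsync://ftp.redhat.com/redhat/"
[mirrors.env]                    # environment variables passed to the rsync process
RSYNC_PROXY = "127.0.0.1:8123"
```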
z4yx
fddb793ca1 v0.4.2 2020-03-14 11:30:44 +08:00
z4yx
c41d7a4038 Bring docker test back
commit 4540ba24c72cb2d24e2e04870025dfa233dedf30
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 11:16:13 2020 +0800

    wait longer

commit c8f07b81a7fe5fdef9224e8bc187500c4d67f049
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:55:49 2020 +0800

    try to Terminate

commit 10d2d4b9d0756cf8f60fe27e1e41ae29b5ea6cbe
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:50:26 2020 +0800

    forward the error

commit 38c96ee44d31088b9e6de67ebb745358fac8d49a
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:31:39 2020 +0800

    now enable the assertion

commit 3b3c46a065a035d906d4cc5022d42e30b1f52a08
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:26:40 2020 +0800

    rm un-related info

commit dd7ef7e3d0a0765c1fc48296d70966b3b4d581dd
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 10:12:01 2020 +0800

    print err of provider.Run

commit 49a7b57dbf52d410c0dfe796be9c2f6213884931
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:55:48 2020 +0800

    wait until it exits

commit a3e8f699072e3252b3300c667f1425a966aedb39
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:54:19 2020 +0800

    targeting alpine:3.8

commit f30b8565049bb373a1a91a34ad07c8c3df8e1036
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:47:27 2020 +0800

    see what happens

commit 8c21229a8be8e2ac0737bbc4bb88ba54e9fb7a20
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Sat Mar 14 09:30:50 2020 +0800

    remove one assertion

commit 123368e6ef07aa63c489bb49bdf370d3abdd17bb
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:32:45 2020 +0800

    docker test somehow works now

commit 94fa294a9bbedb569e6dd9cc7e4f27e73ed97443
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:27:12 2020 +0800

    should use -d

commit b35bae2a9cb5e006c513da95377ab9487fc4341a
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:22:25 2020 +0800

    docker run not working??

commit 9aea0036f434d333087f0cff3ce5165a53554e5f
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 23:12:39 2020 +0800

    test if docker works

commit f92578b159587a8bbda296bbf9261fb4c5e2f186
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 17:42:00 2020 +0800

    debugging docker_test

commit b649e32f76549711af597ce3a642309a41a08bf9
Author: z4yx <z4yx@users.noreply.github.com>
Date:   Fri Mar 13 17:27:55 2020 +0800

    Revert "remove docker_test.go"

    This reverts commit a517a4bb64.
2020-03-14 11:23:19 +08:00
z4yx
8b0ef2bb53 fix the test 2020-03-14 11:11:10 +08:00
z4yx
b25be80670 extra options should only be applied to the second stage 2020-03-14 11:01:34 +08:00
z4yx
07cb51076e Bump version to 0.4.1 2020-03-13 17:53:28 +08:00
z4yx
3a2888dd5d Bye Travis! 2020-03-13 17:24:57 +08:00
z4yx
ada881850a maybe we don't have to install docker.io 2020-03-13 17:17:02 +08:00
z4yx
6f51188021 create release on tags 2020-03-13 17:14:49 +08:00
z4yx
a517a4bb64 remove docker_test.go 2020-03-13 17:08:39 +08:00
z4yx
b816803eaf Revert "disable docker_test"
This reverts commit 6d17d6b4ca.
2020-03-13 17:08:19 +08:00
z4yx
6d17d6b4ca disable docker_test 2020-03-13 17:04:36 +08:00
z4yx
51e7f1d573 add TestRsyncProviderWithOverriddenOptions 2020-03-13 17:02:38 +08:00
z4yx
c99095267e [docker test] Wait for docker running 2020-03-13 16:57:07 +08:00
z4yx
5c140035ec [worker test] Wait for http server starting 2020-03-13 16:41:30 +08:00
z4yx
6ef9ccdfe6 unmask docker 2020-03-13 16:00:55 +08:00
z4yx
8df5e41d5b systemctl start docker 2020-03-13 15:57:36 +08:00
z4yx
a38a88cf41 run apt update 2020-03-13 15:49:39 +08:00
z4yx
f603aebec9 add test steps 2020-03-13 15:46:56 +08:00
z4yx
80ad3247a0 fix build 2020-03-13 15:16:39 +08:00
z4yx
02468e21c0 add an option "rsync_override" 2020-03-13 15:12:52 +08:00
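Judging by the accompanying test name (`TestRsyncProviderWithOverriddenOptions`), `rsync_override` appears to replace the provider's default rsync option list rather than append to it. A hedged sketch (all values illustrative):
```toml
[[mirrors]]
name = "example"
provider = "rsync"
upstream = "rsync://example.org/pub/"
rsync_override = [ "-aHvh", "--no-o", "--no-g", "--stats" ]  # used instead of the built-in defaults
```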
z4yx
d48815b817 update the condition 2020-03-13 15:06:26 +08:00
z4yx
07cd7b5f1f update go.mod 2020-03-13 15:04:06 +08:00
z4yx
3f45e8b02b Merge commit 'ad28e8aacc124ffb751cc77a5e3c3a3ad8d0a97c' into wip-ga 2020-03-13 15:03:39 +08:00
Yuxiang Zhang
ed1f20b1e6 Hello Github Actions 2020-03-13 14:59:56 +08:00
z4yx
ad28e8aacc using Go Modules 2020-03-13 13:55:57 +08:00
Yuxiang Zhang
230d63e871 Merge pull request #108 from tuna/wip-fail-on-match (close #87)
[mirror config] job fails on pattern match in log
2020-03-09 21:54:34 +08:00
z4yx
908f098c72 [mirror config] job fails on pattern match in log 2020-03-09 21:48:06 +08:00
Yuxiang Zhang
22cfdfc9c2 [mirror config] extra rsync options (#107)
* support "rsync_options" array in config

* add test for new options

* fix tests
2020-03-09 20:48:09 +08:00
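Both `rsync_options` (#107) and `fail_on_match` (#108) are per-mirror keys; the workers.conf example further down this page uses them in production. A minimal sketch combining the two (upstream and patterns illustrative):
```toml
[[mirrors]]
name = "example"
provider = "rsync"
upstream = "rsync://example.org/pub/"
rsync_options = [ "--delete-excluded" ]  # extra flags appended to the rsync invocation
fail_on_match = "fatal: "                # mark the sync failed if the log matches this pattern
```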
z4yx
36010dc33e Merge branch 'master' of github.com:tuna/tunasync 2019-11-30 00:08:59 +08:00
z4yx
bc416a6088 add tests for post-hooks 2019-11-30 00:08:46 +08:00
z4yx
a065a11b38 change timeout in tests 2019-11-07 12:29:57 +08:00
z4yx
b4fe4db82a Merge remote-tracking branch 'origin/dev' 2019-11-04 23:11:34 +08:00
z4yx
839363aaaa reschedule the job if any hook fails 2019-11-04 22:52:03 +08:00
Yuxiang Zhang
08aee8eb42 Merge pull request #98 from ziqin/feature/btrfs-snapshot
Reimplement Btrfs snapshots hook
2019-08-31 10:57:44 +08:00
Jeeken Wang
501f77ee41 Merge branch 'master' into feature/btrfs-snapshot 2019-08-15 01:26:28 +08:00
z4yx
9e91fd706e Merge branch 'dev' 2019-08-13 23:10:43 +08:00
z4yx
94cf0b4bdb fix possible null dereferencing, reported by #96 2019-08-13 23:07:01 +08:00
WANG Ziqin
8fd2059013 add doc for setup btrfs snapshots 2019-08-02 13:31:33 +08:00
WANG Ziqin
6b56c4254c feat(btrfs_snapshot_hook): reimplemented Btrfs snapshots
TODO: test coverage
2019-08-02 13:31:33 +08:00
Yuxiang Zhang
3872c41607 Merge pull request #97 from ziqin/master
Refine: remove outer `provider`s which shadow the embedded `provider`s provided by `emptyHook`
2019-08-02 09:27:04 +08:00
WANG Ziqin
30259da0f0 fix nil pointer dereference: check err first 2019-08-02 02:15:22 +08:00
WANG Ziqin
4854d9b981 Fix test: initialize dockerHook with embedded provider 2019-07-31 17:29:28 +08:00
WANG Ziqin
06fce98c00 Eliminate duplicate mirrorProvider in Hooks 2019-07-31 16:11:56 +08:00
Jeeken Wang
8408236646 Update "Job Run Process" diagram according to runJobWrapper 2019-07-31 12:26:09 +08:00
z4yx
540eea8aeb set golang version to 1.11 2019-07-05 16:54:29 +08:00
z4yx
a6fc97889d [bug fix] stalled scheduler if post-sync hook runs for a time which is longer than the sync interval 2019-07-05 16:29:00 +08:00
Yuxiang Zhang
5f7d974469 Merge pull request #93 from vgxbj/patch-1
Fix ascii chart for `Job Run Process`
2019-05-30 10:16:22 +08:00
Guō Xīng
3b52f93e7e Fix ascii chart for Job Run Process 2019-05-29 14:32:50 +08:00
zyx
1025189542 fix possible null dereferencing in server_test 2019-04-13 11:13:17 +08:00
zyx
9f91d90fc5 check Retry configuration in providers 2019-04-13 11:01:56 +08:00
zyx
1aa4ae9cc1 Merge remote-tracking branch 'kinosang/master' into wip-test-pr 2019-04-13 02:07:41 +08:00
zyx
d0deeb19a9 extract mirror size from rsync provider automatically 2019-04-13 01:27:35 +08:00
zyx
a283328dc4 increase test coverage of worker 2019-04-12 09:43:57 +08:00
zyx
1890bbed3c add tests for last commit 2019-04-11 12:36:43 +08:00
zyx
ddc9efd155 report next scheduled sync time 2019-04-11 12:36:18 +08:00
zyx
7eb119b892 singleton of worker is not used, so remove it 2019-04-11 10:07:42 +08:00
zyx
96f11f57ed throw an error if executing reload command without worker id 2019-04-09 22:30:08 +08:00
Yuxiang Zhang
3e6e6f9b14 Update tips.md 2019-04-07 21:48:57 +08:00
Yuxiang Zhang
b06cadfe06 Update tips.md 2019-04-07 21:48:00 +08:00
Yuxiang Zhang
9c34372ae4 add link to tips.md 2019-04-07 21:35:40 +08:00
Yuxiang Zhang
ebbfff40f6 Merge pull request #91 from SCU-MingYuan/master
Added some control tips
2019-04-07 21:33:33 +08:00
GaryH4
5eeade22fc Update tips.md 2019-04-07 19:55:13 +08:00
GaryH4
4b3741308b Update tips.md 2019-04-06 23:48:33 +08:00
GaryH4
7d495c1956 Update tips.md 2019-04-06 23:40:43 +08:00
GaryH4
0bf8400077 Added some tips 2019-04-06 23:30:04 +08:00
Yuxiang Zhang
c611759394 Update get_started.md 2019-04-06 11:21:22 +08:00
Yuxiang Zhang
279aa32b68 Update get_started.md 2019-04-06 11:09:24 +08:00
Yuxiang Zhang
025544449a remove section of certificate generation 2019-04-06 10:56:38 +08:00
zyx
90d419ca66 add tests for last commit 2019-03-31 12:16:45 +08:00
zyx
96cb975412 Let user create ZFS dataset manually due to security considerations 2019-03-31 12:09:42 +08:00
王邈
ff3e690497 Revert "change owner of folder to current user after creating zfs dataset (close #89)"
This reverts commit a58e6d37ae and
re-opens #89.

Signed-off-by: 王邈 <shankerwangmiao@gmail.com>
2019-03-26 00:30:06 +08:00
zyx
a58e6d37ae change owner of folder to current user after creating zfs dataset (close #89) 2019-03-25 23:40:04 +08:00
zhang
7a4a8ad486 Merge branch 'master' of github.com:tuna/tunasync 2018-10-25 22:52:21 +08:00
zhang
e1c0c25efa add example of worker config 2018-10-25 22:52:02 +08:00
z4yx
9ac374527a regenerate travis deploy key 2018-10-25 17:27:32 +08:00
z4yx
f03626d4e1 update Get Started document 2018-10-25 17:23:02 +08:00
z4yx
23bf4890cc bump version to v0.3.2 2018-10-25 17:07:04 +08:00
z4yx
2f6a61aee5 increase test coverage 2018-10-25 17:02:05 +08:00
z4yx
b6043142e1 test if it works with golang 1.8 2018-10-25 16:16:04 +08:00
zhang
6241576b12 bug fix: tunasynctl failed to parse datetime when you list jobs of specific worker 2018-06-13 10:28:48 +08:00
bigeagle
ef78563b8c Merge pull request #74 from houbaron/patch-1
Update README.md
2018-05-31 21:25:55 +08:00
bigeagle
ca106f1360 Merge pull request #82 from tuna/dev
New feature: remove a worker with tunasynctl
2018-05-31 21:22:46 +08:00
Miao Wang
628266ac5a Merge pull request #81 from tuna/wip-override-concurrent-limit
New feature: run "tunasynctl start" with "-f" to override the limit of concurrent jobs
2018-05-31 14:22:03 +08:00
Yuxiang Zhang
7e601d9fff New feature: remove a worker with tunasynctl
Fix #78
2018-05-31 12:32:22 +08:00
z4yx
c750aa1871 new feature: run "tunasynctl start" with "-f" to override concurrent job limit 2018-05-30 18:59:24 +08:00
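Usage, for reference (worker and mirror IDs are placeholders):
```shell
$ tunasynctl start -f -w <worker_id> <mirror_name>
```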
Yuxiang Zhang
6cbe91b4f1 new command: jobForceStart 2018-05-30 16:07:07 +08:00
Yuxiang Zhang
89a792986d increase test coverage rate of job & provider 2018-05-30 14:00:10 +08:00
Yuxiang Zhang
0fdb07d061 bug fix: log over-written in twoStageRsyncProvider
solve more DATA RACE problem
2018-05-30 12:28:09 +08:00
Yuxiang Zhang
c5bb172f99 increase test coverage rate of job.go 2018-05-30 11:45:05 +08:00
Yuxiang Zhang
79e6167028 fix race condition on logFile of baseProvider 2018-05-30 01:46:16 +08:00
Miao Wang
285ffb2f2f Merge pull request #80 from tuna/dev
Fix the "list" command of tunasynctl
2018-05-29 21:42:57 +08:00
Yuxiang Zhang
95bb4bbd5e report the last ended time (updated whether successful or not) of jobs 2018-05-29 21:21:03 +08:00
Yuxiang Zhang
6bca9d2cd5 fix TestHTTPServer in manager package 2018-05-29 19:07:01 +08:00
Yuxiang Zhang
4fe7d03e54 Move the WebMirrorStatus to internal package. Fix the list command of tunasynctl 2018-05-29 18:48:33 +08:00
Baron Hou
1fe9499728 Update README.md 2017-09-29 18:14:11 +08:00
bigeagle
a475b044c6 feat(worker): add 'use_ipv4' option for rsync provider 2017-09-08 00:15:48 +08:00
bigeagle
a50a360a91 Revert "feat(worker): add '-4' option to rsync when 'use_ipv6' is false"
This reverts commit d536aca2ac.
2017-09-08 00:12:40 +08:00
bigeagle
d536aca2ac feat(worker): add '-4' option to rsync when 'use_ipv6' is false 2017-09-06 23:22:55 +08:00
bigeagle
28545d61e7 Merge pull request #68 from l2dy/master
Update README.md
2017-05-29 11:03:27 -05:00
Zero King
a87fb0f8b4 Update README.md 2017-05-29 15:42:10 +00:00
Jason Lau
095e7c6320 Merge pull request #65 from felixonmars/patch-1
Fix a typo: Fisrt -> First
2017-03-30 15:31:46 +08:00
Felix Yan
7b441312f4 Fix a typo: Fisrt -> First 2017-03-30 13:27:40 +08:00
7IN0SAN9
563860d424 fix #63 2017-03-27 13:09:56 +08:00
54 changed files with 3986 additions and 596 deletions

54
.github/workflows/release.yml vendored Normal file

@@ -0,0 +1,54 @@
name: release
on:
push:
# Sequence of patterns matched against refs/tags
tags:
- 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.13
uses: actions/setup-go@v1
with:
go-version: 1.13
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
go get -v -t -d ./cmd/tunasync
go get -v -t -d ./cmd/tunasynctl
- name: Build
run: |
make tunasync
make tunasynctl
tar -jcf build/tunasync-linux-bin.tar.bz2 -C build tunasync tunasynctl
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: false
prerelease: false
- name: Upload Release Asset
id: upload-release-asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
asset_path: ./build/tunasync-linux-bin.tar.bz2
asset_name: tunasync-linux-bin.tar.bz2
asset_content_type: application/x-bzip2

73
.github/workflows/tunasync.yml vendored Normal file

@@ -0,0 +1,73 @@
name: tunasync
on: [push]
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.13
uses: actions/setup-go@v1
with:
go-version: 1.13
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
go get -v -t -d ./cmd/tunasync
go get -v -t -d ./cmd/tunasynctl
- name: Build
run: |
make tunasync
make tunasynctl
- name: Keep artifacts
uses: actions/upload-artifact@v1
with:
name: tunasync-bin
path: build/
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Setup test dependencies
run: |
sudo apt-get update
sudo apt-get install -y cgroup-bin
docker pull alpine:3.8
lssubsys -am
sudo cgcreate -a $USER -t $USER -g cpu:tunasync
sudo cgcreate -a $USER -t $USER -g memory:tunasync
- name: Set up Go 1.13
uses: actions/setup-go@v1
with:
go-version: 1.13
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Run Unit tests.
run: make test
- name: Convert coverage to lcov
uses: jandelgado/gcov2lcov-action@v1.0.0
with:
infile: profile.cov
outfile: coverage.lcov
- name: Coveralls
uses: coverallsapp/github-action@v1.0.1
with:
github-token: ${{ secrets.github_token }}
path-to-lcov: coverage.lcov


@@ -1,30 +0,0 @@
#!/bin/bash
function die() {
echo $*
exit 1
}
export GOPATH=`pwd`:$GOPATH
make travis
# Initialize profile.cov
echo "mode: count" > profile.cov
# Initialize error tracking
ERROR=""
# Test each package and append coverage profile info to profile.cov
for pkg in `cat .testpackages.txt`
do
go test -v -covermode=count -coverprofile=profile_tmp.cov $pkg || ERROR="Error testing $pkg"
[ -f profile_tmp.cov ] && {
tail -n +2 profile_tmp.cov >> profile.cov || die "Unable to append coverage for $pkg"
}
done
if [ ! -z "$ERROR" ]
then
die "Encountered error, last error was: $ERROR"
fi


@@ -1,43 +0,0 @@
sudo: required
language: go
go:
- 1.6
before_install:
- sudo apt-get install cgroup-bin
- go get github.com/smartystreets/goconvey
- go get golang.org/x/tools/cmd/cover
- go get -v github.com/mattn/goveralls
os:
- linux
services:
- docker
before_script:
- lssubsys -am
- sudo cgcreate -a $USER -t $USER -g cpu:tunasync
- sudo cgcreate -a $USER -t $USER -g memory:tunasync
- docker pull alpine
script:
- ./.testandcover.bash
after_success:
- goveralls -coverprofile=profile.cov -service=travis-ci
before_deploy: "echo 'ready to deploy?'"
deploy:
provider: releases
file:
- "build/tunasync-linux-bin.tar.gz"
api_key:
secure: "F9kaVaR1mxEh2+EL9Nm8GZmbVY98pXCJA0LGDNrq1C2vU61AUNOeX6yI1mMklHNZPLBqoFDvGN1M5HnJ+xWCFH+KnJgLD2GVIAcAxFNpcNWQe8XKE5heklNsIQNQfuh/rJKM6YzeDB9G5RN4Y76iL4WIAXhNnMm48W6jLnWhf70="
skip_cleanup: true
overwrite: true
on:
tags: true
all_branches: true

13
.vscode/settings.json vendored Normal file

@@ -0,0 +1,13 @@
{
"cSpell.words": [
"Btrfs",
"Debugf",
"Infof",
"Noticef",
"Warningf",
"cgroup",
"mergo",
"tmpl",
"zpool"
]
}


@@ -2,8 +2,6 @@ LDFLAGS="-X main.buildstamp=`date -u '+%s'` -X main.githash=`git rev-parse HEAD`
all: get tunasync tunasynctl
travis: get tunasync tunasynctl travis-package
get:
go get ./cmd/tunasync
go get ./cmd/tunasynctl
@@ -17,5 +15,5 @@ tunasync: build
tunasynctl: build
go build -o build/tunasynctl -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasynctl
travis-package: tunasync tunasynctl
tar zcf build/tunasync-linux-bin.tar.gz -C build tunasync tunasynctl
test:
go test -v -covermode=count -coverprofile=profile.cov ./...


@@ -1,8 +1,8 @@
tunasync
========
[![Build Status](https://travis-ci.org/tuna/tunasync.svg?branch=dev)](https://travis-ci.org/tuna/tunasync)
[![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=dev)](https://coveralls.io/github/tuna/tunasync?branch=dev)
![Build Status](https://github.com/tuna/tunasync/workflows/tunasync/badge.svg)
[![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=master)](https://coveralls.io/github/tuna/tunasync?branch=master)
[![Commitizen friendly](https://img.shields.io/badge/commitizen-friendly-brightgreen.svg)](http://commitizen.github.io/cz-cli/)
![GPLv3](https://img.shields.io/badge/license-GPLv3-blue.svg)
@@ -19,7 +19,7 @@ Pre-built binary for Linux x86_64 is available at [Github releases](https://gith
```
# Architecture
- Manager: Centural instance on status and job management
- Manager: Central instance for status and job management
- Worker: Runs mirror jobs
+------------+ +---+ +---+
@@ -40,84 +40,23 @@ Pre-built binary for Linux x86_64 is available at [Github releases](https://gith
# Job Run Process
PreSyncing Syncing Success
+-----------+ +-----------+ +-------------+ +--------------+
| pre-job +--+->| job run +--->| post-exec +-+-->| post-success |
+-----------+ ^ +-----------+ +-------------+ | +--------------+
| |
| +-----------------+ | Failed
+------+ post-fail |<---------+
+-----------------+
PreSyncing Syncing Success
+-----------+ +----------+ +-----------+ +-------------+ +--------------+
| pre-job +--+->| pre-exec +--->| job run +--->| post-exec +-+-->| post-success |
+-----------+ ^ +----------+ +-----------+ +-------------+ | +--------------+
| |
| +-----------------+ | Failed
+----------------+ post-fail |<---------------+
+-----------------+
```
## Generate Self-Signed Certificate
Fisrt, create root CA
```
openssl genrsa -out rootCA.key 2048
openssl req -x509 -new -nodes -key rootCA.key -days 365 -out rootCA.crt
```
Create host key
```
openssl genrsa -out host.key 2048
```
Now create CSR, before that, write a `req.cnf`
```
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
[req_distinguished_name]
countryName = Country Name (2 letter code)
countryName_default = CN
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default = BJ
localityName = Locality Name (eg, city)
localityName_default = Beijing
organizationalUnitName = Organizational Unit Name (eg, section)
organizationalUnitName_default = TUNA
commonName = Common Name (server FQDN or domain name)
commonName_default = <server_FQDN>
commonName_max = 64
[v3_req]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = <server_FQDN_1>
DNS.2 = <server_FQDN_2>
```
Substitute `<server_FQDN>` with your server's FQDN, then run
```
openssl req -new -key host.key -out host.csr -config req.cnf
```
Finally generate and sign host cert with root CA
```
openssl x509 -req -in host.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out host.crt -days 365 -extensions v3_req -extfile req.cnf
```
## Building
Setup GOPATH like [this](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
Then:
Go version: 1.13
```
go get -d github.com/tuna/tunasync/cmd/tunasync
cd $GOPATH/src/github.com/tuna/tunasync
make
make all
```
If you have multiple `GOPATH`s, replace the `$GOPATH` with your first one.
Binaries in the `build/`.


@@ -11,7 +11,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/pkg/profile"
"gopkg.in/op/go-logging.v1"
"gopkg.in/urfave/cli.v1"
"github.com/urfave/cli"
tunasync "github.com/tuna/tunasync/internal"
"github.com/tuna/tunasync/manager"
@@ -60,7 +60,7 @@ func startWorker(c *cli.Context) error {
os.Exit(1)
}
w := worker.GetTUNASyncWorker(cfg)
w := worker.NewTUNASyncWorker(cfg)
if w == nil {
logger.Errorf("Error intializing TUNA sync worker.")
os.Exit(1)


@@ -11,8 +11,8 @@ import (
"time"
"github.com/BurntSushi/toml"
"github.com/urfave/cli"
"gopkg.in/op/go-logging.v1"
"gopkg.in/urfave/cli.v1"
tunasync "github.com/tuna/tunasync/internal"
)
@@ -32,7 +32,7 @@ const (
userCfgFile = "$HOME/.config/tunasync/ctl.conf" // user-specific conf
)
var logger = logging.MustGetLogger("tunasynctl-cmd")
var logger = logging.MustGetLogger("tunasynctl")
var baseURL string
var client *http.Client
@@ -41,7 +41,7 @@ func initializeWrapper(handler cli.ActionFunc) cli.ActionFunc {
return func(c *cli.Context) error {
err := initialize(c)
if err != nil {
return cli.NewExitError("", 1)
return cli.NewExitError(err.Error(), 1)
}
return handler(c)
}
@@ -55,8 +55,9 @@ type config struct {
func loadConfig(cfgFile string, cfg *config) error {
if cfgFile != "" {
logger.Infof("Loading config: %s", cfgFile)
if _, err := toml.DecodeFile(cfgFile, cfg); err != nil {
logger.Errorf(err.Error())
// logger.Errorf(err.Error())
return err
}
}
@@ -66,7 +67,7 @@ func loadConfig(cfgFile string, cfg *config) error {
func initialize(c *cli.Context) error {
// init logger
tunasync.InitLogger(c.Bool("verbose"), c.Bool("verbose"), false)
tunasync.InitLogger(c.Bool("verbose"), c.Bool("debug"), false)
cfg := new(config)
@@ -76,14 +77,23 @@ func initialize(c *cli.Context) error {
// find config file and load config
if _, err := os.Stat(systemCfgFile); err == nil {
loadConfig(systemCfgFile, cfg)
err = loadConfig(systemCfgFile, cfg)
if err != nil {
return err
}
}
fmt.Println(os.ExpandEnv(userCfgFile))
logger.Debug("user config file: %s", os.ExpandEnv(userCfgFile))
if _, err := os.Stat(os.ExpandEnv(userCfgFile)); err == nil {
loadConfig(os.ExpandEnv(userCfgFile), cfg)
err = loadConfig(os.ExpandEnv(userCfgFile), cfg)
if err != nil {
return err
}
}
if c.String("config") != "" {
loadConfig(c.String("config"), cfg)
err := loadConfig(c.String("config"), cfg)
if err != nil {
return err
}
}
// override config using the command-line arguments
@@ -112,7 +122,7 @@ func initialize(c *cli.Context) error {
client, err = tunasync.CreateHTTPClient(cfg.CACert)
if err != nil {
err = fmt.Errorf("Error initializing HTTP client: %s", err.Error())
logger.Error(err.Error())
// logger.Error(err.Error())
return err
}
@@ -135,14 +145,14 @@ func listWorkers(c *cli.Context) error {
err.Error()),
1)
}
fmt.Print(string(b))
fmt.Println(string(b))
return nil
}
func listJobs(c *cli.Context) error {
// FIXME: there should be an API on manager server side that return MirrorStatus list to tunasynctl
var jobs []tunasync.MirrorStatus
var genericJobs interface{}
if c.Bool("all") {
var jobs []tunasync.WebMirrorStatus
_, err := tunasync.GetJSON(baseURL+listJobsPath, &jobs, client)
if err != nil {
return cli.NewExitError(
@@ -150,8 +160,10 @@ func listJobs(c *cli.Context) error {
"of all jobs from manager server: %s", err.Error()),
1)
}
genericJobs = jobs
} else {
var jobs []tunasync.MirrorStatus
args := c.Args()
if len(args) == 0 {
return cli.NewExitError(
@@ -165,24 +177,32 @@ func listJobs(c *cli.Context) error {
_, err := tunasync.GetJSON(fmt.Sprintf("%s/workers/%s/jobs",
baseURL, workerID), &workerJobs, client)
if err != nil {
logger.Errorf("Filed to correctly get jobs"+
logger.Infof("Failed to correctly get jobs"+
" for worker %s: %s", workerID, err.Error())
}
ans <- workerJobs
}(workerID)
}
for range args {
jobs = append(jobs, <-ans...)
job := <-ans
if job == nil {
return cli.NewExitError(
fmt.Sprintf("Failed to correctly get information "+
"of jobs from at least one manager"),
1)
}
jobs = append(jobs, job...)
}
genericJobs = jobs
}
b, err := json.MarshalIndent(jobs, "", " ")
b, err := json.MarshalIndent(genericJobs, "", " ")
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error printing out informations: %s", err.Error()),
fmt.Sprintf("Error printing out information: %s", err.Error()),
1)
}
fmt.Printf(string(b))
fmt.Println(string(b))
return nil
}
@@ -233,7 +253,53 @@ func updateMirrorSize(c *cli.Context) error {
)
}
logger.Infof("Successfully updated mirror size to %s", mirrorSize)
fmt.Printf("Successfully updated mirror size to %s\n", mirrorSize)
return nil
}
func removeWorker(c *cli.Context) error {
args := c.Args()
if len(args) != 0 {
return cli.NewExitError("Usage: tunasynctl -w <worker-id>", 1)
}
workerID := c.String("worker")
if len(workerID) == 0 {
return cli.NewExitError("Please specify the <worker-id>", 1)
}
url := fmt.Sprintf("%s/workers/%s", baseURL, workerID)
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
logger.Panicf("Invalid HTTP Request: %s", err.Error())
}
resp, err := client.Do(req)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to send request to manager: %s", err.Error()), 1)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Failed to parse response: %s", err.Error()),
1)
}
return cli.NewExitError(fmt.Sprintf("Failed to correctly send"+
" command: HTTP status code is not 200: %s", body),
1)
}
res := map[string]string{}
err = json.NewDecoder(resp.Body).Decode(&res)
if res["message"] == "deleted" {
fmt.Println("Successfully removed the worker")
} else {
return cli.NewExitError("Failed to remove the worker", 1)
}
return nil
}
@@ -265,7 +331,7 @@ func flushDisabledJobs(c *cli.Context) error {
1)
}
logger.Info("Successfully flushed disabled jobs")
fmt.Println("Successfully flushed disabled jobs")
return nil
}
@@ -286,11 +352,16 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
"argument WORKER", 1)
}
options := map[string]bool{}
if c.Bool("force") {
options["force"] = true
}
cmd := tunasync.ClientCmd{
Cmd: cmd,
MirrorID: mirrorID,
WorkerID: c.String("worker"),
Args: argsList,
Options: options,
}
resp, err := tunasync.PostJSON(baseURL+cmdPath, cmd, client)
if err != nil {
@@ -313,7 +384,7 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
" command: HTTP status code is not 200: %s", body),
1)
}
logger.Info("Succesfully send command")
fmt.Println("Successfully send the command")
return nil
}
@@ -321,6 +392,11 @@ func cmdJob(cmd tunasync.CmdVerb) cli.ActionFunc {
func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
return func(c *cli.Context) error {
if c.String("worker") == "" {
return cli.NewExitError("Please specify the worker with -w <worker-id>", 1)
}
cmd := tunasync.ClientCmd{
Cmd: cmd,
WorkerID: c.String("worker"),
@@ -346,7 +422,7 @@ func cmdWorker(cmd tunasync.CmdVerb) cli.ActionFunc {
" command: HTTP status code is not 200: %s", body),
1)
}
logger.Info("Succesfully send command")
fmt.Println("Successfully send the command")
return nil
}
@@ -403,6 +479,10 @@ func main() {
Name: "verbose, v",
Usage: "Enable verbosely logging",
},
cli.BoolFlag{
Name: "debug",
Usage: "Enable debugging logging",
},
}
cmdFlags := []cli.Flag{
cli.StringFlag{
@@ -411,6 +491,11 @@ func main() {
},
}
forceStartFlag := cli.BoolFlag{
Name: "force, f",
Usage: "Override the concurrent limit",
}
app.Commands = []cli.Command{
{
Name: "list",
@@ -436,6 +521,18 @@ func main() {
Flags: commonFlags,
Action: initializeWrapper(listWorkers),
},
{
Name: "rm-worker",
Usage: "Remove a worker",
Flags: append(
commonFlags,
cli.StringFlag{
Name: "worker, w",
Usage: "worker-id of the worker to be removed",
},
),
Action: initializeWrapper(removeWorker),
},
{
Name: "set-size",
Usage: "Set mirror size",
@@ -451,7 +548,7 @@ func main() {
{
Name: "start",
Usage: "Start a job",
Flags: append(commonFlags, cmdFlags...),
Flags: append(append(commonFlags, cmdFlags...), forceStartFlag),
Action: initializeWrapper(cmdJob(tunasync.CmdStart)),
},
{


@@ -42,7 +42,7 @@ interval = 1
[manager]
api_base = "http://localhost:12345"
token = "some_token"
token = ""
ca_cert = ""
[cgroup]
@@ -90,6 +90,30 @@ $ tunasync worker --config ~/tunasync_demo/worker.conf
In this example, the mirror data lives under `/tmp/tunasync/`.
### Control
Check the sync status:
```
$ tunasynctl list -p 12345 --all
```
tunasynctl also supports a config file, which may be placed at either `/etc/tunasync/ctl.conf` or `~/.config/tunasync/ctl.conf`; values in the latter override those in the former.
The config file contents are:
```
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
```
### Security
The worker and manager communicate over HTTP(S). If both run on the same machine, HTTPS is unnecessary: leave the manager's `ssl_key` and `ssl_cert` empty, leave the worker's `ca_cert` empty, and let `api_base` start with `http://`.
For encrypted communication, the manager must specify `ssl_key` and `ssl_cert`, the worker must specify `ca_cert`, and `api_base` should start with `https://`.
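A minimal sketch of the two ends of an HTTPS setup, assuming the manager's TLS keys live in a `[server]` section as they do in the worker config (file paths and hostname are illustrative):
```toml
# manager side
[server]
ssl_cert = "/etc/tunasync/host.crt"  # server certificate
ssl_key  = "/etc/tunasync/host.key"  # server private key

# worker side
[manager]
api_base = "https://manager.example.com:12345"  # note the https:// scheme
ca_cert  = "/etc/tunasync/rootCA.crt"           # CA that signed the manager's certificate
```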
## Going further
See also:
@@ -100,3 +124,7 @@ $ tunasync worker --help
```
Take a look at the log directory.
Some example worker configurations: [workers.conf](workers.conf)
Operations you may find useful: [tips.md](tips.md)

93
docs/zh_CN/tips.md Normal file

@@ -0,0 +1,93 @@
## Remove a mirror from a worker
First make sure you have written a config file for tunasynctl at `~/.config/tunasync/ctl.conf`:
```toml
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
```
Then:
```shell
$ tunasynctl disable -w <worker_id> <mirror_name>
$ tunasynctl flush
```
## Hot-reload `worker.conf`
```shell
$ tunasynctl reload -w <worker_id>
```
e.g. to remove the `elvish` mirror from `test_worker`:
1. Delete the directory that stores the mirror data
2. Delete the corresponding `mirror` section from `worker.conf`
3. Then run:
```shell
$ tunasynctl reload -w test_worker
$ tunasynctl disable -w test_worker elvish
$ tunasynctl flush
```
4. (Optional) Finally, delete the logs in the log directory
## Remove a worker
```shell
$ tunasynctl rm-worker -w <worker_id>
```
e.g.
```shell
$ tunasynctl rm-worker -w test_worker
```
## Update a mirror's size
```shell
$ tunasynctl set-size -w <worker_id> <mirror_name> <size>
```
The trailing <size> argument is set by the operator, or generated by some scheduled script.
Since `du -s` is time-consuming, the mirror size can instead be read directly from the rsync log file.
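A hedged sketch of that shortcut, assuming the sync log ends with rsync's usual size summary (the log path is a placeholder):
```shell
# read the size rsync reported, instead of running du -s over the whole tree
$ grep -i 'total size' <path_to_latest_rsync_log>
```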
## Btrfs filesystem snapshots
If the mirror files are stored on a partition using the Btrfs filesystem, the snapshot feature provided by Btrfs can be enabled. For each mirror, tunasync updates its snapshot after every successful sync.
Add the following to `worker.conf` to enable Btrfs snapshots:
```toml
[btrfs_snapshot]
enable = true
snapshot_path = "/path/to/snapshot/directory"
```
Here `snapshot_path` is the directory where the snapshots live. If the snapshot is what gets published, each sync is atomic from the mirror users' perspective, so users never receive files that are still in an intermediate (not fully synced) state.
A snapshot path can also be set per mirror in `[[mirrors]]`, e.g.:
```toml
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elvish.io/elvish/"
interval = 1440
snapshot_path = "/data/publish/elvish"
```
**Tip:**
If the user running tunasync has no root privileges, make sure that user has write and execute permissions on both the mirror directory and the snapshot directory, and mount the Btrfs partition with the [`user_subvol_rm_allowed` option](https://btrfs.wiki.kernel.org/index.php/Manpage/btrfs(5)#MOUNT_OPTIONS).

817
docs/zh_CN/workers.conf Normal file

@@ -0,0 +1,817 @@
# /home/scripts in this example points to https://github.com/tuna/tunasync-scripts/
[global]
name = "mirror_worker"
log_dir = "/srv/tunasync/log/tunasync/{{.Name}}"
mirror_dir = "/srv/tunasync"
concurrent = 10
interval = 1
[manager]
api_base = "http://localhost:12345"
token = "some_token"
ca_cert = ""
[cgroup]
enable = false
base_path = "/sys/fs/cgroup"
group = "tunasync"
[server]
hostname = "localhost"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = ""
ssl_key = ""
[[mirrors]]
name = "adobe-fonts"
interval = 1440
provider = "command"
upstream = "https://github.com/adobe-fonts"
command = "/home/scripts/adobe-fonts.sh"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "AdoptOpenJDK"
interval = 5760
provider = "command"
command = "/home/scripts/adoptopenjdk.py"
upstream = "https://adoptopenjdk.jfrog.io/adoptopenjdk"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "alpine"
provider = "rsync"
upstream = "rsync://rsync.alpinelinux.org/alpine/"
memory_limit = "256M"
[[mirrors]]
name = "anaconda"
provider = "command"
upstream = "https://repo.continuum.io/"
command = "/home/scripts/anaconda.py --delete"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "apache"
provider = "rsync"
upstream = "rsync://rsync.apache.org/apache-dist/"
use_ipv4 = true
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "armbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/apt/"
memory_limit = "256M"
[[mirrors]]
name = "armbian-releases"
provider = "rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/dl/"
memory_limit = "256M"
[[mirrors]]
name = "bananian"
provider = "command"
upstream = "https://dl.bananian.org/"
command = "/home/scripts/lftp.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "bioconductor"
provider = "rsync"
upstream = "master.bioconductor.org:./"
rsync_options = [ "--rsh=ssh -i /root/id_rsa -o PasswordAuthentication=no -l sync" ]
exclude_file = "/etc/excludes/bioconductor.txt"
memory_limit = "256M"
[[mirrors]]
name = "blender"
provider = "rsync"
upstream = "rsync://mirrors.dotsrc.org/blender/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/blender.txt"
interval = 1440
memory_limit = "256M"
[[mirrors]]
name = "chakra"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/packages/"
memory_limit = "256M"
[[mirrors]]
name = "chakra-releases"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/releases/"
memory_limit = "256M"
[[mirrors]]
name = "chef"
interval = 1440
provider = "command"
upstream = "https://packages.chef.io/repos"
command = "/home/scripts/chef.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "clickhouse"
interval = 2880
provider = "rsync"
upstream = "rsync://repo.yandex.ru/yandexrepo/clickhouse/"
exclude_file = "/etc/excludes/clickhouse.txt"
memory_limit = "256M"
[[mirrors]]
name = "clojars"
provider = "command"
upstream = "s3://clojars-repo-production/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.dualstack.us-east-2.amazonaws.com"
#TUNASYNC_S3_ENDPOINT = "https://s3.us-east-2.amazonaws.com"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "CPAN"
provider = "rsync"
upstream = "rsync://cpan-rsync.perl.org/CPAN/"
memory_limit = "256M"
[[mirrors]]
name = "CRAN"
provider = "rsync"
upstream = "rsync://cran.r-project.org/CRAN/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "CTAN"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/CTAN/"
memory_limit = "256M"
[[mirrors]]
name = "dart-pub"
provider = "command"
upstream = "https://pub.dev/api"
command = "/home/scripts/pub.sh"
interval = 30
docker_image = "tunathu/pub-mirror:latest"
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/dart-pub"
[[mirrors]]
name = "debian"
provider = "command"
upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
command = "/home/scripts/debian.sh sync:archive:debian"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/ftpsync"
docker_volumes = [
"/etc/misc/ftpsync-debian.conf:/ftpsync/etc/ftpsync-debian.conf:ro",
"/log/ftpsync:/home/log/tunasync/ftpsync",
]
[mirrors.env]
FTPSYNC_LOG_DIR = "/home/log/tunasync/ftpsync"
[[mirrors]]
name = "docker-ce"
provider = "command"
upstream = "https://download.docker.com/"
command = "timeout 3h /home/scripts/docker-ce.py --workers 10 --fast-skip"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ELK"
interval = 1440
provider = "command"
upstream = "https://packages.elastic.co"
command = "/home/scripts/ELK.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment varialbes
[mirrors.env]
WGET_OPTIONS = "-6"
[[mirrors]]
name = "elasticstack"
interval = 1440
provider = "command"
upstream = "https://artifacts.elastic.co/"
command = "/home/scripts/elastic.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "erlang-solutions"
interval = 1440
provider = "command"
upstream = "https://packages.erlang-solutions.com"
command = "/home/scripts/erlang.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "flutter"
interval = 1440
provider = "command"
upstream = "https://storage.googleapis.com/flutter_infra/"
command = "/home/scripts/flutter.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "github-release"
provider = "command"
upstream = "https://api.github.com/repos/"
command = "/home/scripts/github-release.py --workers 5"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
GITHUB_TOKEN = "xxxxx"
[[mirrors]]
name = "gitlab-ce"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ce/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-ee"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ee/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-runner"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/runner/gitlab-runner"
command = "/home/scripts/gitlab-runner.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "grafana"
interval = 1440
provider = "command"
upstream = "https://packages.grafana.com/oss"
command = "/home/scripts/grafana.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "hackage"
provider = "command"
command = "/home/scripts/hackage.sh"
upstream = "https://hackage.haskell.org/"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew-bottles"
provider = "command"
upstream = "https://homebrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment varialbes
[mirrors.env]
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "influxdata"
interval = 1440
provider = "command"
upstream = "https://repos.influxdata.com"
command = "/home/scripts/influxdata.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "kali"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ftp.nluug.nl/kali/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "kali-images"
provider = "rsync"
upstream = "rsync://ftp.nluug.nl/kali-images/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "KaOS"
provider = "rsync"
upstream = "rsync://kaosx.tk/kaos/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kernel"
provider = "rsync"
upstream = "rsync://rsync.kernel.org/pub/linux/kernel/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kicad"
provider = "command"
upstream = "s3://kicad-downloads/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.cern.ch"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "kodi"
provider = "rsync"
upstream = "rsync://mirror.yandex.ru/mirrors/xbmc/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
use_ipv6 = true
[[mirrors]]
name = "kubernetes"
interval = 2880
provider = "command"
upstream = "http://packages.cloud.google.com"
command = "/home/scripts/kubernetes.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "linuxbrew-bottles"
provider = "command"
upstream = "https://linuxbrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment varialbes
[mirrors.env]
RUN_LINUXBREW = "true"
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "linuxmint"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://mirrors.kernel.org/linuxmint-packages/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "lxc-images"
provider = "command"
upstream = "https://us.images.linuxcontainers.org/"
command = "/home/scripts/lxc-images.sh"
docker_image = "tunathu/tunasync-scripts:latest"
interval = 720
[[mirrors]]
name = "lyx"
provider = "command"
upstream = "ftp://ftp.lyx.org/pub/lyx/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "mongodb"
interval = 1440
provider = "command"
upstream = "https://repo.mongodb.org"
command = "/home/scripts/mongodb.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "msys2"
provider = "command"
upstream = "http://repo.msys2.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "mysql"
interval = 30
provider = "command"
upstream = "https://repo.mysql.com"
command = "/home/scripts/mysql.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment varialbes
[mirrors.env]
USE_IPV6 = "1"
[[mirrors]]
name = "nix"
interval = 1440
provider = "command"
upstream = "s3://nix-releases/nix/"
command = "/home/scripts/nix.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
MIRROR_BASE_URL = 'https://mirrors.tuna.tsinghua.edu.cn/nix/'
[[mirrors]]
name = "nix-channels"
interval = 300
provider = "command"
upstream = "https://nixos.org/channels"
command = "timeout 20h /home/scripts/nix-channels.py"
docker_image = "tunathu/nix-channels:latest"
docker_options = [
"--cpus", "20",
]
[[mirrors]]
name = "nodesource"
provider = "command"
upstream = "https://deb.nodesource.com/"
command = "/home/scripts/nodesource.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "openresty"
provider = "command"
upstream = "https://openresty.org/package/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "packagist"
provider = "command"
upstream = "http://packagist.org/"
command = "/home/scripts/packagist.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "proxmox"
interval = 1440
provider = "command"
upstream = "http://download.proxmox.com"
command = "/home/scripts/proxmox.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "pypi"
provider = "command"
upstream = "https://pypi.python.org/"
command = "/home/scripts/pypi.sh"
docker_image = "tunathu/bandersnatch:latest"
interval = 5
[[mirrors]]
name = "qt"
provider = "rsync"
upstream = "rsync://master.qt-project.org/qt-all/"
exclude_file = "/etc/excludes/qt.txt"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "raspberrypi"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://apt-repo.raspberrypi.org/archive/debian/"
memory_limit = "256M"
[[mirrors]]
name = "raspbian-images"
interval = 5760
provider = "command"
upstream = "https://downloads.raspberrypi.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x ^icons/$ -c --only-missing -v --no-perms"
[[mirrors]]
name = "raspbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://archive.raspbian.org/archive/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "redhat"
provider = "rsync"
upstream = "rsync://ftp.redhat.com/redhat/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
exclude_file = "/etc/excludes/redhat.txt"
interval = 1440
[mirrors.env]
RSYNC_PROXY="127.0.0.1:8123"
[[mirrors]]
name = "remi"
interval = 1440
provider = "command"
upstream = "rsync://rpms.remirepo.net"
command = "/home/scripts/remi.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "repo-ck"
provider = "command"
upstream = "http://repo-ck.com"
command = "/home/scripts/repo-ck.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ros"
provider = "rsync"
upstream = "rsync://mirror.umd.edu/packages.ros.org/ros/"
memory_limit = "256M"
[[mirrors]]
name = "ros2"
interval = 1440
provider = "command"
upstream = "http://packages.ros.org/ros2"
command = "/home/scripts/ros2.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "rubygems"
provider = "command"
upstream = "https://rubygems.org"
command = "/home/scripts/rubygems.sh"
docker_image = "tunathu/rubygems-mirror"
interval = 60
# set environment varialbes
[mirrors.env]
INIT = "0"
[[mirrors]]
name = "rudder"
interval = 2880
provider = "command"
upstream = "https://repository.rudder.io"
command = "/home/scripts/rudder.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "rustup"
provider = "command"
upstream = "https://rustup.rs/"
command = "/home/scripts/rustup.sh"
interval = 1440
docker_image = "tunathu/rustup-mirror:latest"
docker_volumes = [
]
docker_options = [
]
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/rustup"
[[mirrors]]
name = "saltstack"
interval = 1440 # required on http://repo.saltstack.com/#mirror
provider = "command"
upstream = "s3://s3/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.repo.saltstack.com"
TUNASYNC_AWS_OPTIONS = "--delete --exact-timestamps"
[[mirrors]]
name = "solus"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/solus/"
rsync_options = [ "--exclude", "/shannon", "--exclude", "/unstable" ]
memory_limit = "256M"
[[mirrors]]
name = "stackage"
provider = "command"
command = "/home/scripts/stackage.py"
upstream = "https://www.stackage.org/"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment varialbes
[mirrors.env]
GIT_COMMITTER_NAME = "TUNA mirrors"
GIT_COMMITTER_EMAIL = "mirrors@tuna.tsinghua.edu.cn"
[[mirrors]]
name = "steamos"
interval = 1440
provider = "command"
upstream = "http://repo.steampowered.com"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer --exclude icons/ "
[[mirrors]]
name = "termux"
interval = 1440
provider = "command"
upstream = "https://dl.bintray.com/termux/termux-packages-24/"
command = "/home/scripts/termux.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ubuntu"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://archive.ubuntu.com/ubuntu/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "ubuntu-ports"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ports.ubuntu.com/ubuntu-ports/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/ubuntu-ports-exclude.txt"
memory_limit = "256M"
[[mirrors]]
name = "virtualbox"
interval = 1440
provider = "command"
upstream = "http://download.virtualbox.org/virtualbox"
command = "/home/scripts/virtualbox.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "winehq"
provider = "command"
upstream = "ftp://ftp.winehq.org/pub/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x wine-builds.old/ -x /\\..+"
[[mirrors]]
name = "zabbix"
provider = "rsync"
upstream = "rsync://repo.zabbix.com/mirror/"
rsync_options = [ "--delete-excluded", "--chmod=o+r,Do+x,Fa-x" ]
memory_limit = "256M"
[[mirrors]]
name = "AOSP"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://android.googlesource.com/mirror/manifest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "lineageOS"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://github.com/LineageOS/mirror"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "chromiumos"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/cros.sh"
upstream = "https://chromium.googlesource.com"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
USE_BITMAP_INDEX = "1"
CONCURRENT_JOBS = "20"
[[mirrors]]
name = "crates.io-index.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "https://github.com/rust-lang/crates.io-index.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "flutter-sdk.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/flutter/flutter.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gcc.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://gcc.gnu.org/git/gcc.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gentoo-portage.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/gentoo-mirror/gentoo.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "git-repo"
provider = "command"
command = "/home/tunasync-scripts/git-repo.sh"
upstream = "https://gerrit.googlesource.com/git-repo"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew"
provider = "command"
command = "/home/tunasync-scripts/homebrew.sh"
upstream = "https://github.com/Homebrew"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "CocoaPods"
provider = "command"
command = "/home/tunasync-scripts/cocoapods.sh"
upstream = "https://github.com/CocoaPods"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "pybombs"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/pybombs.sh"
upstream = "https://github.com/scateu/pybombs-mirror/"
docker_image = "tunathu/tunasync-scripts:latest"
docker_volumes = ["/home/pybombs-mirror:/opt/pybombs-mirror"]
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[mirrors.env]
PYBOMBS_MIRROR_SCRIPT_PATH = "/opt/pybombs-mirror"
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/pybombs"
[[mirrors]]
name = "llvm"
provider = "command"
command = "/home/tunasync-scripts/llvm.sh"
upstream = "https://git.llvm.org/git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
# vim: ft=toml

21
go.mod Normal file

@@ -0,0 +1,21 @@
module github.com/tuna/tunasync
go 1.13
require (
github.com/BurntSushi/toml v0.3.1
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239
github.com/boltdb/bolt v1.3.1
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035
github.com/gin-gonic/gin v1.5.0
github.com/imdario/mergo v0.3.9
github.com/mattn/goveralls v0.0.5 // indirect
github.com/pkg/profile v1.4.0
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46
github.com/smartystreets/goconvey v1.6.4
github.com/urfave/cli v1.22.3
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 // indirect
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
)

go.sum · Normal file · 104 lines

@@ -0,0 +1,104 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27 h1:HHUr4P/aKh4quafGxDT9LDasjGdlGkzLbfmmrlng3kA=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035 h1:4e+UEZaKPx0ZEiCMPUHMV51RGwbb1VJGCYqRFn/qmWM=
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA=
github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg=
github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.5.0 h1:fi+bqFAx/oLK54somfCtEZs9HeH1LHVoEPUgARpTqyc=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/goveralls v0.0.5 h1:spfq8AyZ0cCk57Za6/juJ5btQxeE1FaEGMdfcI+XO48=
github.com/mattn/goveralls v0.0.5/go.mod h1:Xg2LHi51faXLyKXwsndxiW6uxEEQT9+3sjGzzwU4xy0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli v1.22.3 h1:FpNT6zq26xNpHZy08emi755QwzLPs6Pukqjlc7RfOMU=
github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200113040837-eac381796e91/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 h1:6TB4+MaZlkcSsJDu+BS5yxSEuZIYhjWz+jhbSLEZylI=
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvRQyEAKbw1xc=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@@ -24,9 +24,12 @@ func InitLogger(verbose, debug, withSystemd bool) {
if debug {
logging.SetLevel(logging.DEBUG, "tunasync")
logging.SetLevel(logging.DEBUG, "tunasynctl")
} else if verbose {
logging.SetLevel(logging.INFO, "tunasync")
logging.SetLevel(logging.INFO, "tunasynctl")
} else {
logging.SetLevel(logging.NOTICE, "tunasync")
logging.SetLevel(logging.NOTICE, "tunasynctl")
}
}
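The three-way branch above maps CLI flags to go-logging levels for the two logger modules. A hedged usage sketch — the module name matches the code above, while output stays on the library's default backend:

package main

import (
    logging "gopkg.in/op/go-logging.v1"
)

var log = logging.MustGetLogger("tunasync")

func main() {
    // equivalent of running with --verbose: INFO and above pass through
    logging.SetLevel(logging.INFO, "tunasync")
    log.Debug("suppressed at INFO level")
    log.Info("emitted")
    log.Notice("emitted")
}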


@@ -8,14 +8,17 @@ import (
// A MirrorStatus represents a msg when
// a worker has done syncing
type MirrorStatus struct {
Name string `json:"name"`
Worker string `json:"worker"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate time.Time `json:"last_update"`
Upstream string `json:"upstream"`
Size string `json:"size"`
ErrorMsg string `json:"error_msg"`
Name string `json:"name"`
Worker string `json:"worker"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate time.Time `json:"last_update"`
LastStarted time.Time `json:"last_started"`
LastEnded time.Time `json:"last_ended"`
Scheduled time.Time `json:"next_schedule"`
Upstream string `json:"upstream"`
Size string `json:"size"`
ErrorMsg string `json:"error_msg"`
}
// A WorkerStatus is the information struct that describe
@@ -27,6 +30,15 @@ type WorkerStatus struct {
LastOnline time.Time `json:"last_online"` // last seen
}
type MirrorSchedules struct {
Schedules []MirrorSchedule `json:"schedules"`
}
type MirrorSchedule struct {
MirrorName string `json:"name"`
NextSchedule time.Time `json:"next_schedule"`
}
// A CmdVerb is an action to a job or worker
type CmdVerb uint8
@@ -67,9 +79,10 @@ func (c CmdVerb) String() string {
// A WorkerCmd is the command message send from the
// manager to a worker
type WorkerCmd struct {
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
Args []string `json:"args"`
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
Args []string `json:"args"`
Options map[string]bool `json:"options"`
}
func (c WorkerCmd) String() string {
@@ -82,8 +95,9 @@ func (c WorkerCmd) String() string {
// A ClientCmd is the command message send from client
// to the manager
type ClientCmd struct {
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
WorkerID string `json:"worker_id"`
Args []string `json:"args"`
Cmd CmdVerb `json:"cmd"`
MirrorID string `json:"mirror_id"`
WorkerID string `json:"worker_id"`
Args []string `json:"args"`
Options map[string]bool `json:"options"`
}
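These structs are the manager/worker wire format; the timestamps added here (last_started, last_ended, next_schedule) ride along in the same JSON body. A stand-alone sketch of the resulting encoding, with SyncStatus stubbed and only a subset of fields reproduced:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// SyncStatus stands in for internal.SyncStatus in this sketch.
type SyncStatus uint8

type MirrorStatus struct {
    Name        string     `json:"name"`
    Worker      string     `json:"worker"`
    Status      SyncStatus `json:"status"`
    LastStarted time.Time  `json:"last_started"`
    LastEnded   time.Time  `json:"last_ended"`
}

func main() {
    b, _ := json.Marshal(MirrorStatus{Name: "demo", Worker: "w1", LastStarted: time.Now()})
    fmt.Println(string(b)) // time.Time fields serialize as RFC 3339 strings
}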

internal/status_web.go · Normal file · 72 lines

@@ -0,0 +1,72 @@
package internal
import (
"encoding/json"
"strconv"
"time"
)
type textTime struct {
time.Time
}
func (t textTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
s := string(b)
t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
*t = textTime{t2}
return err
}
type stampTime struct {
time.Time
}
func (t stampTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
ts, err := strconv.Atoi(string(b))
if err != nil {
return err
}
*t = stampTime{time.Unix(int64(ts), 0)}
return err
}
// WebMirrorStatus is the mirror status to be shown in the web page
type WebMirrorStatus struct {
Name string `json:"name"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate textTime `json:"last_update"`
LastUpdateTs stampTime `json:"last_update_ts"`
LastStarted textTime `json:"last_started"`
LastStartedTs stampTime `json:"last_started_ts"`
LastEnded textTime `json:"last_ended"`
LastEndedTs stampTime `json:"last_ended_ts"`
Scheduled textTime `json:"next_schedule"`
ScheduledTs stampTime `json:"next_schedule_ts"`
Upstream string `json:"upstream"`
Size string `json:"size"` // approximate size
}
func BuildWebMirrorStatus(m MirrorStatus) WebMirrorStatus {
return WebMirrorStatus{
Name: m.Name,
IsMaster: m.IsMaster,
Status: m.Status,
LastUpdate: textTime{m.LastUpdate},
LastUpdateTs: stampTime{m.LastUpdate},
LastStarted: textTime{m.LastStarted},
LastStartedTs: stampTime{m.LastStarted},
LastEnded: textTime{m.LastEnded},
LastEndedTs: stampTime{m.LastEnded},
Scheduled: textTime{m.Scheduled},
ScheduledTs: stampTime{m.Scheduled},
Upstream: m.Upstream,
Size: m.Size,
}
}
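textTime and stampTime wrap one time.Time so the same instant serializes twice in WebMirrorStatus: once human-readable, once as a Unix stamp. Exercising just the marshal side of that trick:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type textTime struct{ time.Time }

func (t textTime) MarshalJSON() ([]byte, error) {
    return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}

type stampTime struct{ time.Time }

func (t stampTime) MarshalJSON() ([]byte, error) {
    return json.Marshal(t.Unix())
}

func main() {
    now := time.Now()
    text, _ := json.Marshal(textTime{now})
    stamp, _ := json.Marshal(stampTime{now})
    // e.g. "2020-06-17 22:12:11 +0800" and 1592403131
    fmt.Println(string(text), string(stamp))
}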

internal/status_web_test.go · Normal file · 98 lines

@@ -0,0 +1,98 @@
package internal
import (
"encoding/json"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func TestStatus(t *testing.T) {
Convey("status json ser-de should work", t, func() {
tz := "Asia/Tokyo"
loc, err := time.LoadLocation(tz)
So(err, ShouldBeNil)
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
m := WebMirrorStatus{
Name: "tunalinux",
Status: Success,
LastUpdate: textTime{t},
LastUpdateTs: stampTime{t},
LastStarted: textTime{t},
LastStartedTs: stampTime{t},
LastEnded: textTime{t},
LastEndedTs: stampTime{t},
Scheduled: textTime{t},
ScheduledTs: stampTime{t},
Size: "5GB",
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
}
b, err := json.Marshal(m)
So(err, ShouldBeNil)
//fmt.Println(string(b))
var m2 WebMirrorStatus
err = json.Unmarshal(b, &m2)
So(err, ShouldBeNil)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
Convey("BuildWebMirrorStatus should work", t, func() {
m := MirrorStatus{
Name: "arch-sync3",
Worker: "testWorker",
IsMaster: true,
Status: Failed,
LastUpdate: time.Now().Add(-time.Minute * 30),
LastStarted: time.Now().Add(-time.Minute * 1),
LastEnded: time.Now(),
Scheduled: time.Now().Add(time.Minute * 5),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
}
var m2 WebMirrorStatus
m2 = BuildWebMirrorStatus(m)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.LastEndedTs.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
So(m2.Scheduled.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.ScheduledTs.Unix(), ShouldEqual, m.Scheduled.Unix())
So(m2.Scheduled.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.ScheduledTs.UnixNano(), ShouldEqual, m.Scheduled.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
}


@@ -6,11 +6,37 @@ import (
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os/exec"
"regexp"
"time"
)
var rsyncExitValues = map[int]string{
0: "Success",
1: "Syntax or usage error",
2: "Protocol incompatibility",
3: "Errors selecting input/output files, dirs",
4: "Requested action not supported: an attempt was made to manipulate 64-bit files on a platform that cannot support them; or an option was specified that is supported by the client and not by the server.",
5: "Error starting client-server protocol",
6: "Daemon unable to append to log-file",
10: "Error in socket I/O",
11: "Error in file I/O",
12: "Error in rsync protocol data stream",
13: "Errors with program diagnostics",
14: "Error in IPC code",
20: "Received SIGUSR1 or SIGINT",
21: "Some error returned by waitpid()",
22: "Error allocating core memory buffers",
23: "Partial transfer due to error",
24: "Partial transfer due to vanished source files",
25: "The --max-delete limit stopped deletions",
30: "Timeout in data send/receive",
35: "Timeout waiting for daemon connection",
}
// GetTLSConfig generate tls.Config from CAFile
func GetTLSConfig(CAFile string) (*tls.Config, error) {
caCert, err := ioutil.ReadFile(CAFile)
@@ -84,3 +110,46 @@ func GetJSON(url string, obj interface{}, client *http.Client) (*http.Response,
}
return resp, json.Unmarshal(body, obj)
}
// FindAllSubmatchInFile calls re.FindAllSubmatch to find matches in given file
func FindAllSubmatchInFile(fileName string, re *regexp.Regexp) (matches [][][]byte, err error) {
if fileName == "/dev/null" {
err = errors.New("Invalid log file")
return
}
if content, err := ioutil.ReadFile(fileName); err == nil {
matches = re.FindAllSubmatch(content, -1)
// fmt.Printf("FindAllSubmatchInFile: %q\n", matches)
}
return
}
// ExtractSizeFromLog uses a regexp to extract the size from log files
func ExtractSizeFromLog(logFile string, re *regexp.Regexp) string {
matches, _ := FindAllSubmatchInFile(logFile, re)
if matches == nil || len(matches) == 0 {
return ""
}
// return the first capture group of the last occurrence
return string(matches[len(matches)-1][1])
}
// ExtractSizeFromRsyncLog extracts the size from rsync logs
func ExtractSizeFromRsyncLog(logFile string) string {
// (?m) flag enables multi-line mode
re := regexp.MustCompile(`(?m)^Total file size: ([0-9\.]+[KMGTP]?) bytes`)
return ExtractSizeFromLog(logFile, re)
}
// TranslateRsyncErrorCode translates the exit code of rsync to a message
func TranslateRsyncErrorCode(cmdErr error) (exitCode int, msg string) {
if exiterr, ok := cmdErr.(*exec.ExitError); ok {
exitCode = exiterr.ExitCode()
strerr, valid := rsyncExitValues[exitCode]
if valid {
msg = fmt.Sprintf("rsync error: %s", strerr)
}
}
return
}
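TranslateRsyncErrorCode only inspects the *exec.ExitError returned by the standard library, so the lookup can be exercised without a real rsync failure. A sketch that fakes exit code 23 (partial transfer) with a shell one-liner:

package main

import (
    "fmt"
    "os/exec"
)

func main() {
    // simulate a sync process exiting with rsync's code 23
    err := exec.Command("sh", "-c", "exit 23").Run()
    if exiterr, ok := err.(*exec.ExitError); ok {
        // TranslateRsyncErrorCode would map 23 to
        // "Partial transfer due to error"
        fmt.Println("exit code:", exiterr.ExitCode())
    }
}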

internal/util_test.go · Normal file · 42 lines

@@ -0,0 +1,42 @@
package internal
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
func TestExtractSizeFromRsyncLog(t *testing.T) {
realLogContent := `
Number of files: 998,470 (reg: 925,484, dir: 58,892, link: 14,094)
Number of created files: 1,049 (reg: 1,049)
Number of deleted files: 1,277 (reg: 1,277)
Number of regular files transferred: 5,694
Total file size: 1.33T bytes
Total transferred file size: 2.86G bytes
Literal data: 780.62M bytes
Matched data: 2.08G bytes
File list size: 37.55M
File list generation time: 7.845 seconds
File list transfer time: 0.000 seconds
Total bytes sent: 7.55M
Total bytes received: 823.25M
sent 7.55M bytes received 823.25M bytes 5.11M bytes/sec
total size is 1.33T speedup is 1,604.11
`
Convey("Log parser should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
logFile := filepath.Join(tmpDir, "rs.log")
err = ioutil.WriteFile(logFile, []byte(realLogContent), 0755)
So(err, ShouldBeNil)
res := ExtractSizeFromRsyncLog(logFile)
So(res, ShouldEqual, "1.33T")
})
}


@@ -1,3 +1,4 @@
package internal
const Version string = "0.2-dev"
// Version of the program
const Version string = "0.6.6"


@@ -2,7 +2,7 @@ package manager
import (
"github.com/BurntSushi/toml"
"gopkg.in/urfave/cli.v1"
"github.com/urfave/cli"
)
// A Config is the top-level toml-serializaible config struct


@@ -9,7 +9,7 @@ import (
"github.com/BurntSushi/toml"
. "github.com/smartystreets/goconvey/convey"
"gopkg.in/urfave/cli.v1"
"github.com/urfave/cli"
)
func TestConfig(t *testing.T) {


@@ -14,6 +14,7 @@ type dbAdapter interface {
Init() error
ListWorkers() ([]WorkerStatus, error)
GetWorker(workerID string) (WorkerStatus, error)
DeleteWorker(workerID string) error
CreateWorker(w WorkerStatus) (WorkerStatus, error)
UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error)
GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error)
@@ -95,6 +96,19 @@ func (b *boltAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
return
}
func (b *boltAdapter) DeleteWorker(workerID string) (err error) {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
v := bucket.Get([]byte(workerID))
if v == nil {
return fmt.Errorf("invalid workerID %s", workerID)
}
err := bucket.Delete([]byte(workerID))
return err
})
return
}
func (b *boltAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
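DeleteWorker above follows bolt's usual shape: a read-check-delete inside one Update transaction, so the existence check and the delete cannot race. The same shape stand-alone — the database path and bucket name here are illustrative:

package main

import (
    "fmt"

    "github.com/boltdb/bolt"
)

func main() {
    db, err := bolt.Open("/tmp/demo.db", 0600, nil)
    if err != nil {
        panic(err)
    }
    defer db.Close()

    err = db.Update(func(tx *bolt.Tx) error {
        bucket, err := tx.CreateBucketIfNotExists([]byte("workers"))
        if err != nil {
            return err
        }
        if v := bucket.Get([]byte("w1")); v == nil {
            return fmt.Errorf("invalid workerID %s", "w1")
        }
        return bucket.Delete([]byte("w1"))
    })
    fmt.Println(err) // "invalid workerID w1" on a fresh bucket
}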


@@ -40,51 +40,75 @@ func TestBoltAdapter(t *testing.T) {
So(err, ShouldBeNil)
}
Convey("get exists worker", func() {
Convey("get existent worker", func() {
_, err := boltDB.GetWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
})
Convey("list exist worker", func() {
Convey("list existent workers", func() {
ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
Convey("get inexist worker", func() {
Convey("get non-existent worker", func() {
_, err := boltDB.GetWorker("invalid workerID")
So(err, ShouldNotBeNil)
})
Convey("delete existent worker", func() {
err := boltDB.DeleteWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
_, err = boltDB.GetWorker(testWorkerIDs[0])
So(err, ShouldNotBeNil)
ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 1)
})
Convey("delete non-existent worker", func() {
err := boltDB.DeleteWorker("invalid workerID")
So(err, ShouldNotBeNil)
ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
})
Convey("update mirror status", func() {
status := []MirrorStatus{
MirrorStatus{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
MirrorStatus{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now().Add(-time.Hour),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
MirrorStatus{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now().Add(-time.Minute),
LastStarted: time.Now().Add(-time.Second),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
}


@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"net/http"
"sync"
"time"
"github.com/gin-gonic/gin"
@@ -23,6 +24,7 @@ type Manager struct {
cfg *Config
engine *gin.Engine
adapter dbAdapter
rwmu sync.RWMutex
httpClient *http.Client
}
@@ -84,11 +86,14 @@ func GetTUNASyncManager(cfg *Config) *Manager {
// workerID should be valid in this route group
workerValidateGroup := s.engine.Group("/workers", s.workerIDValidator)
{
// delete specified worker
workerValidateGroup.DELETE(":id", s.deleteWorker)
// get job list
workerValidateGroup.GET(":id/jobs", s.listJobsOfWorker)
// post job status
workerValidateGroup.POST(":id/jobs/:job", s.updateJobOfWorker)
workerValidateGroup.POST(":id/jobs/:job/size", s.updateMirrorSize)
workerValidateGroup.POST(":id/schedules", s.updateSchedulesOfWorker)
}
// for tunasynctl to post commands
@@ -124,9 +129,11 @@ func (s *Manager) Run() {
}
}
// listAllJobs repond with all jobs of specified workers
// listAllJobs respond with all jobs of specified workers
func (s *Manager) listAllJobs(c *gin.Context) {
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListAllMirrorStatus()
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list all mirror status: %s",
err.Error(),
@@ -135,11 +142,11 @@ func (s *Manager) listAllJobs(c *gin.Context) {
s.returnErrJSON(c, http.StatusInternalServerError, err)
return
}
webMirStatusList := []webMirrorStatus{}
webMirStatusList := []WebMirrorStatus{}
for _, m := range mirrorStatusList {
webMirStatusList = append(
webMirStatusList,
convertMirrorStatus(m),
BuildWebMirrorStatus(m),
)
}
c.JSON(http.StatusOK, webMirStatusList)
@@ -147,7 +154,9 @@ func (s *Manager) listAllJobs(c *gin.Context) {
// flushDisabledJobs deletes all jobs that marks as deleted
func (s *Manager) flushDisabledJobs(c *gin.Context) {
s.rwmu.Lock()
err := s.adapter.FlushDisabledJobs()
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to flush disabled jobs: %s",
err.Error(),
@@ -159,10 +168,30 @@ func (s *Manager) flushDisabledJobs(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{_infoKey: "flushed"})
}
// listWrokers respond with informations of all the workers
// deleteWorker deletes one worker by id
func (s *Manager) deleteWorker(c *gin.Context) {
workerID := c.Param("id")
s.rwmu.Lock()
err := s.adapter.DeleteWorker(workerID)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to delete worker: %s",
err.Error(),
)
c.Error(err)
s.returnErrJSON(c, http.StatusInternalServerError, err)
return
}
logger.Noticef("Worker <%s> deleted", workerID)
c.JSON(http.StatusOK, gin.H{_infoKey: "deleted"})
}
// listWorkers respond with information of all the workers
func (s *Manager) listWorkers(c *gin.Context) {
var workerInfos []WorkerStatus
s.rwmu.RLock()
workers, err := s.adapter.ListWorkers()
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list workers: %s",
err.Error(),
@@ -204,7 +233,9 @@ func (s *Manager) registerWorker(c *gin.Context) {
// listJobsOfWorker respond with all the jobs of the specified worker
func (s *Manager) listJobsOfWorker(c *gin.Context) {
workerID := c.Param("id")
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListMirrorStatus(workerID)
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list jobs of worker %s: %s",
workerID, err.Error(),
@@ -222,6 +253,52 @@ func (s *Manager) returnErrJSON(c *gin.Context, code int, err error) {
})
}
func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
workerID := c.Param("id")
var schedules MirrorSchedules
c.BindJSON(&schedules)
for _, schedule := range schedules.Schedules {
mirrorName := schedule.MirrorName
if len(mirrorName) == 0 {
s.returnErrJSON(
c, http.StatusBadRequest,
errors.New("Mirror Name should not be empty"),
)
}
s.rwmu.RLock()
curStatus, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil {
fmt.Errorf("failed to get job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
)
continue
}
if curStatus.Scheduled == schedule.NextSchedule {
// no changes, skip update
continue
}
curStatus.Scheduled = schedule.NextSchedule
s.rwmu.Lock()
_, err = s.adapter.UpdateMirrorStatus(workerID, mirrorName, curStatus)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
)
c.Error(err)
s.returnErrJSON(c, http.StatusInternalServerError, err)
return
}
}
type empty struct{}
c.JSON(http.StatusOK, empty{})
}
func (s *Manager) updateJobOfWorker(c *gin.Context) {
workerID := c.Param("id")
var status MirrorStatus
@@ -234,14 +311,28 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
)
}
s.rwmu.RLock()
curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
curTime := time.Now()
if status.Status == PreSyncing && curStatus.Status != PreSyncing {
status.LastStarted = curTime
} else {
status.LastStarted = curStatus.LastStarted
}
// Only successful syncing needs last_update
if status.Status == Success {
status.LastUpdate = time.Now()
status.LastUpdate = curTime
} else {
status.LastUpdate = curStatus.LastUpdate
}
if status.Status == Success || status.Status == Failed {
status.LastEnded = curTime
} else {
status.LastEnded = curStatus.LastEnded
}
// Only message with meaningful size updates the mirror size
if len(curStatus.Size) > 0 && curStatus.Size != "unknown" {
@@ -258,7 +349,9 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status)
}
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
@@ -280,7 +373,9 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
c.BindJSON(&msg)
mirrorName := msg.Name
s.rwmu.RLock()
status, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil {
logger.Errorf(
"Failed to get status of mirror %s @<%s>: %s",
@@ -297,7 +392,9 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size)
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
@@ -320,7 +417,9 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
return
}
s.rwmu.RLock()
w, err := s.adapter.GetWorker(workerID)
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("worker %s is not registered yet", workerID)
s.returnErrJSON(c, http.StatusBadRequest, err)
@@ -332,11 +431,14 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
Cmd: clientCmd.Cmd,
MirrorID: clientCmd.MirrorID,
Args: clientCmd.Args,
Options: clientCmd.Options,
}
// update job status, even if the job did not disable successfully,
// this status should be set as disabled
s.rwmu.RLock()
curStat, _ := s.adapter.GetMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID)
s.rwmu.RUnlock()
changed := false
switch clientCmd.Cmd {
case CmdDisable:
@@ -347,7 +449,9 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
changed = true
}
if changed {
s.rwmu.Lock()
s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat)
s.rwmu.Unlock()
}
logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID)


@@ -7,6 +7,7 @@ import (
"math/rand"
"net/http"
"strings"
"sync/atomic"
"testing"
"time"
@@ -21,9 +22,16 @@ const (
)
func TestHTTPServer(t *testing.T) {
var listenPort = 5000
Convey("HTTP server should work", t, func(ctx C) {
listenPort++
port := listenPort
addr := "127.0.0.1"
baseURL := fmt.Sprintf("http://%s:%d", addr, port)
InitLogger(true, true, false)
s := GetTUNASyncManager(&Config{Debug: false})
s := GetTUNASyncManager(&Config{Debug: true})
s.cfg.Server.Addr = addr
s.cfg.Server.Port = port
So(s, ShouldNotBeNil)
s.setDBAdapter(&mockDBAdapter{
workerStore: map[string]WorkerStatus{
@@ -32,12 +40,8 @@ func TestHTTPServer(t *testing.T) {
}},
statusStore: make(map[string]MirrorStatus),
})
port := rand.Intn(10000) + 20000
baseURL := fmt.Sprintf("http://127.0.0.1:%d", port)
go func() {
s.engine.Run(fmt.Sprintf("127.0.0.1:%d", port))
}()
time.Sleep(50 * time.Microsecond)
go s.Run()
time.Sleep(50 * time.Millisecond)
resp, err := http.Get(baseURL + "/ping")
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
@@ -61,6 +65,34 @@ func TestHTTPServer(t *testing.T) {
So(msg[_errorKey], ShouldEqual, fmt.Sprintf("failed to list jobs of worker %s: %s", _magicBadWorkerID, "database fail"))
})
Convey("when register multiple workers", func(ctx C) {
N := 10
var cnt uint32
for i := 0; i < N; i++ {
go func(id int) {
w := WorkerStatus{
ID: fmt.Sprintf("worker%d", id),
}
resp, err := PostJSON(baseURL+"/workers", w, nil)
ctx.So(err, ShouldBeNil)
ctx.So(resp.StatusCode, ShouldEqual, http.StatusOK)
atomic.AddUint32(&cnt, 1)
}(i)
}
time.Sleep(2 * time.Second)
So(cnt, ShouldEqual, N)
Convey("list all workers", func(ctx C) {
resp, err := http.Get(baseURL + "/workers")
So(err, ShouldBeNil)
defer resp.Body.Close()
var actualResponseObj []WorkerStatus
err = json.NewDecoder(resp.Body).Decode(&actualResponseObj)
So(err, ShouldBeNil)
So(len(actualResponseObj), ShouldEqual, N+1)
})
})
Convey("when register a worker", func(ctx C) {
w := WorkerStatus{
ID: "test_worker1",
@@ -79,7 +111,34 @@ func TestHTTPServer(t *testing.T) {
So(len(actualResponseObj), ShouldEqual, 2)
})
Convey("flush disabled jobs", func(ctx C) {
Convey("delete an existent worker", func(ctx C) {
req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, w.ID), nil)
So(err, ShouldBeNil)
clt := &http.Client{}
resp, err := clt.Do(req)
So(err, ShouldBeNil)
defer resp.Body.Close()
res := map[string]string{}
err = json.NewDecoder(resp.Body).Decode(&res)
So(err, ShouldBeNil)
So(res[_infoKey], ShouldEqual, "deleted")
})
Convey("delete non-existent worker", func(ctx C) {
invalidWorker := "test_worker233"
req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/workers/%s", baseURL, invalidWorker), nil)
So(err, ShouldBeNil)
clt := &http.Client{}
resp, err := clt.Do(req)
So(err, ShouldBeNil)
defer resp.Body.Close()
res := map[string]string{}
err = json.NewDecoder(resp.Body).Decode(&res)
So(err, ShouldBeNil)
So(res[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
})
Convey("flush disabled jobs", func(ctx C) {
req, err := http.NewRequest("DELETE", baseURL+"/jobs/disabled", nil)
So(err, ShouldBeNil)
clt := &http.Client{}
@@ -102,8 +161,8 @@ func TestHTTPServer(t *testing.T) {
Size: "unknown",
}
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
defer resp.Body.Close()
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusOK)
Convey("list mirror status of an existed worker", func(ctx C) {
@@ -121,11 +180,43 @@ func TestHTTPServer(t *testing.T) {
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
So(m.LastStarted.IsZero(), ShouldBeTrue) // hasn't been initialized yet
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
})
// start syncing
status.Status = PreSyncing
time.Sleep(1 * time.Second)
resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusOK)
Convey("update mirror status to PreSync - starting sync", func(ctx C) {
var ms []MirrorStatus
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
m := ms[0]
So(m.Name, ShouldEqual, status.Name)
So(m.Worker, ShouldEqual, status.Worker)
So(m.Status, ShouldEqual, status.Status)
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 1*time.Second)
So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeGreaterThan, 1*time.Second)
})
Convey("list all job status of all workers", func(ctx C) {
var ms []webMirrorStatus
var ms []WebMirrorStatus
resp, err := GetJSON(baseURL+"/jobs", &ms, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
@@ -136,7 +227,9 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 1*time.Second)
So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastStarted.Time), ShouldBeLessThan, 2*time.Second)
So(time.Now().Sub(m.LastEnded.Time), ShouldBeLessThan, 3*time.Second)
})
@@ -165,10 +258,26 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, "5GB")
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
})
})
Convey("Update schedule of valid mirrors", func(ctx C) {
msg := MirrorSchedules{
[]MirrorSchedule{
MirrorSchedule{"arch-sync1", time.Now().Add(time.Minute * 10)},
MirrorSchedule{"arch-sync2", time.Now().Add(time.Minute * 7)},
},
}
url := fmt.Sprintf("%s/workers/%s/schedules", baseURL, status.Worker)
resp, err := PostJSON(url, msg, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
})
Convey("Update size of an invalid mirror", func(ctx C) {
msg := struct {
Name string `json:"name"`
@@ -180,18 +289,47 @@ func TestHTTPServer(t *testing.T) {
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusInternalServerError)
})
// what if status changed to failed
status.Status = Failed
time.Sleep(3 * time.Second)
resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusOK)
Convey("What if syncing job failed", func(ctx C) {
var ms []MirrorStatus
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
m := ms[0]
So(m.Name, ShouldEqual, status.Name)
So(m.Worker, ShouldEqual, status.Worker)
So(m.Status, ShouldEqual, status.Status)
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 3*time.Second)
So(time.Now().Sub(m.LastStarted), ShouldBeGreaterThan, 3*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
})
})
Convey("update mirror status of an inexisted worker", func(ctx C) {
invalidWorker := "test_worker2"
status := MirrorStatus{
Name: "arch-sync2",
Worker: invalidWorker,
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
Name: "arch-sync2",
Worker: invalidWorker,
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now(),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
}
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s",
baseURL, status.Worker, status.Name), status, nil)
@@ -203,6 +341,24 @@ func TestHTTPServer(t *testing.T) {
So(err, ShouldBeNil)
So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
})
Convey("update schedule of an non-existent worker", func(ctx C) {
invalidWorker := "test_worker2"
sch := MirrorSchedules{
[]MirrorSchedule{
MirrorSchedule{"arch-sync1", time.Now().Add(time.Minute * 10)},
MirrorSchedule{"arch-sync2", time.Now().Add(time.Minute * 7)},
},
}
resp, err := PostJSON(fmt.Sprintf("%s/workers/%s/schedules",
baseURL, invalidWorker), sch, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
defer resp.Body.Close()
var msg map[string]string
err = json.NewDecoder(resp.Body).Decode(&msg)
So(err, ShouldBeNil)
So(msg[_errorKey], ShouldEqual, "invalid workerID "+invalidWorker)
})
Convey("handle client command", func(ctx C) {
cmdChan := make(chan WorkerCmd, 1)
workerServer := makeMockWorkerServer(cmdChan)
@@ -221,11 +377,11 @@ func TestHTTPServer(t *testing.T) {
// run the mock worker server
workerServer.Run(bindAddress)
}()
time.Sleep(50 * time.Microsecond)
time.Sleep(50 * time.Millisecond)
// verify the worker mock server is running
workerResp, err := http.Get(workerBaseURL + "/ping")
defer workerResp.Body.Close()
So(err, ShouldBeNil)
defer workerResp.Body.Close()
So(workerResp.StatusCode, ShouldEqual, http.StatusOK)
Convey("when client send wrong cmd", func(ctx C) {
@@ -235,8 +391,8 @@ func TestHTTPServer(t *testing.T) {
WorkerID: "not_exist_worker",
}
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
defer resp.Body.Close()
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusBadRequest)
})
@@ -248,9 +404,8 @@ func TestHTTPServer(t *testing.T) {
}
resp, err := PostJSON(baseURL+"/cmd", clientCmd, nil)
defer resp.Body.Close()
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusOK)
time.Sleep(50 * time.Microsecond)
select {
@@ -293,6 +448,11 @@ func (b *mockDBAdapter) GetWorker(workerID string) (WorkerStatus, error) {
return w, nil
}
func (b *mockDBAdapter) DeleteWorker(workerID string) error {
delete(b.workerStore, workerID)
return nil
}
func (b *mockDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
// _, ok := b.workerStore[w.ID]
// if ok {


@@ -1,62 +0,0 @@
package manager
import (
"encoding/json"
"strconv"
"time"
. "github.com/tuna/tunasync/internal"
)
type textTime struct {
time.Time
}
func (t textTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Format("2006-01-02 15:04:05 -0700"))
}
func (t *textTime) UnmarshalJSON(b []byte) error {
s := string(b)
t2, err := time.Parse(`"2006-01-02 15:04:05 -0700"`, s)
*t = textTime{t2}
return err
}
type stampTime struct {
time.Time
}
func (t stampTime) MarshalJSON() ([]byte, error) {
return json.Marshal(t.Unix())
}
func (t *stampTime) UnmarshalJSON(b []byte) error {
ts, err := strconv.Atoi(string(b))
if err != nil {
return err
}
*t = stampTime{time.Unix(int64(ts), 0)}
return err
}
// webMirrorStatus is the mirror status to be shown in the web page
type webMirrorStatus struct {
Name string `json:"name"`
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate textTime `json:"last_update"`
LastUpdateTs stampTime `json:"last_update_ts"`
Upstream string `json:"upstream"`
Size string `json:"size"` // approximate size
}
func convertMirrorStatus(m MirrorStatus) webMirrorStatus {
return webMirrorStatus{
Name: m.Name,
IsMaster: m.IsMaster,
Status: m.Status,
LastUpdate: textTime{m.LastUpdate},
LastUpdateTs: stampTime{m.LastUpdate},
Upstream: m.Upstream,
Size: m.Size,
}
}


@@ -1,44 +0,0 @@
package manager
import (
"encoding/json"
"testing"
"time"
tunasync "github.com/tuna/tunasync/internal"
. "github.com/smartystreets/goconvey/convey"
)
func TestStatus(t *testing.T) {
Convey("status json ser-de should work", t, func() {
tz := "Asia/Tokyo"
loc, err := time.LoadLocation(tz)
So(err, ShouldBeNil)
t := time.Date(2016, time.April, 16, 23, 8, 10, 0, loc)
m := webMirrorStatus{
Name: "tunalinux",
Status: tunasync.Success,
LastUpdate: textTime{t},
LastUpdateTs: stampTime{t},
Size: "5GB",
Upstream: "rsync://mirrors.tuna.tsinghua.edu.cn/tunalinux/",
}
b, err := json.Marshal(m)
So(err, ShouldBeNil)
//fmt.Println(string(b))
var m2 webMirrorStatus
err = json.Unmarshal(b, &m2)
So(err, ShouldBeNil)
// fmt.Printf("%#v", m2)
So(m2.Name, ShouldEqual, m.Name)
So(m2.Status, ShouldEqual, m.Status)
So(m2.LastUpdate.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.Size, ShouldEqual, m.Size)
So(m2.Upstream, ShouldEqual, m.Upstream)
})
}


@@ -15,13 +15,14 @@ type baseProvider struct {
ctx *Context
name string
interval time.Duration
retry int
timeout time.Duration
isMaster bool
cmd *cmdJob
logFileFd *os.File
isRunning atomic.Value
logFile *os.File
cgroup *cgroupHook
zfs *zfsHook
docker *dockerHook
@@ -52,6 +53,14 @@ func (p *baseProvider) Interval() time.Duration {
return p.interval
}
func (p *baseProvider) Retry() int {
return p.retry
}
func (p *baseProvider) Timeout() time.Duration {
return p.timeout
}
func (p *baseProvider) IsMaster() bool {
return p.isMaster
}
@@ -111,24 +120,34 @@ func (p *baseProvider) Docker() *dockerHook {
return p.docker
}
func (p *baseProvider) prepareLogFile() error {
func (p *baseProvider) prepareLogFile(append bool) error {
if p.LogFile() == "/dev/null" {
p.cmd.SetLogFile(nil)
return nil
}
if p.logFile == nil {
logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
return err
}
p.logFile = logFile
appendMode := 0
if append {
appendMode = os.O_APPEND
}
p.cmd.SetLogFile(p.logFile)
logFile, err := os.OpenFile(p.LogFile(), os.O_WRONLY|os.O_CREATE|appendMode, 0644)
if err != nil {
logger.Errorf("Error opening logfile %s: %s", p.LogFile(), err.Error())
return err
}
p.logFileFd = logFile
p.cmd.SetLogFile(logFile)
return nil
}
func (p *baseProvider) Run() error {
func (p *baseProvider) closeLogFile() (err error) {
if p.logFileFd != nil {
err = p.logFileFd.Close()
p.logFileFd = nil
}
return
}
func (p *baseProvider) Run(started chan empty) error {
panic("Not Implemented")
}
@@ -143,32 +162,27 @@ func (p *baseProvider) IsRunning() bool {
func (p *baseProvider) Wait() error {
defer func() {
p.Lock()
logger.Debugf("set isRunning to false: %s", p.Name())
p.isRunning.Store(false)
if p.logFile != nil {
p.logFile.Close()
p.logFile = nil
}
p.Unlock()
}()
logger.Debugf("calling Wait: %s", p.Name())
return p.cmd.Wait()
}
func (p *baseProvider) Terminate() error {
p.Lock()
defer p.Unlock()
logger.Debugf("terminating provider: %s", p.Name())
if !p.IsRunning() {
logger.Warningf("Terminate() called while IsRunning is false: %s", p.Name())
return nil
}
p.Lock()
if p.logFile != nil {
p.logFile.Close()
p.logFile = nil
}
p.Unlock()
err := p.cmd.Terminate()
p.isRunning.Store(false)
return err
}
func (p *baseProvider) DataSize() string {
return ""
}
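prepareLogFile now takes an append flag: O_WRONLY|O_CREATE alone rewrites the file from offset zero, while retries pass append=true so O_APPEND preserves the previous attempt's output. The flag arithmetic in isolation (the path is illustrative):

package main

import (
    "fmt"
    "os"
)

// openLog mirrors prepareLogFile's flag handling: plain open by
// default, O_APPEND when a retry should keep earlier output.
func openLog(path string, appendLog bool) (*os.File, error) {
    mode := 0
    if appendLog {
        mode = os.O_APPEND
    }
    return os.OpenFile(path, os.O_WRONLY|os.O_CREATE|mode, 0644)
}

func main() {
    f, err := openLog("/tmp/demo.log", true)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer f.Close()
    f.WriteString("second attempt, appended\n")
}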


@@ -0,0 +1,90 @@
package worker
import (
"fmt"
"os"
"path/filepath"
"github.com/dennwc/btrfs"
)
type btrfsSnapshotHook struct {
provider mirrorProvider
mirrorSnapshotPath string
}
// the user who runs the jobs (typically `tunasync`) should be granted the permission to run btrfs commands
// TODO: check if the filesystem is Btrfs
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
mirrorSnapshotPath := mirror.SnapshotPath
if mirrorSnapshotPath == "" {
mirrorSnapshotPath = filepath.Join(snapshotPath, provider.Name())
}
return &btrfsSnapshotHook{
provider: provider,
mirrorSnapshotPath: mirrorSnapshotPath,
}
}
// check if path `snapshotPath/providerName` exists
// Case 1: Not exists => create a new subvolume
// Case 2: Exists as a subvolume => nothing to do
// Case 3: Exists as a directory => error detected
func (h *btrfsSnapshotHook) preJob() error {
path := h.provider.WorkingDir()
if _, err := os.Stat(path); os.IsNotExist(err) {
// create subvolume
err := btrfs.CreateSubVolume(path)
if err != nil {
logger.Errorf("failed to create Btrfs subvolume %s: %s", path, err.Error())
return err
}
logger.Noticef("created new Btrfs subvolume %s", path)
} else {
if is, err := btrfs.IsSubVolume(path); err != nil {
return err
} else if !is {
return fmt.Errorf("path %s exists but isn't a Btrfs subvolume", path)
}
}
return nil
}
func (h *btrfsSnapshotHook) preExec() error {
return nil
}
func (h *btrfsSnapshotHook) postExec() error {
return nil
}
// delete old snapshot if exists, then create a new snapshot
func (h *btrfsSnapshotHook) postSuccess() error {
if _, err := os.Stat(h.mirrorSnapshotPath); !os.IsNotExist(err) {
isSubVol, err := btrfs.IsSubVolume(h.mirrorSnapshotPath)
if err != nil {
return err
} else if !isSubVol {
return fmt.Errorf("path %s exists and isn't a Btrfs snapshot", h.mirrorSnapshotPath)
}
// is old snapshot => delete it
if err := btrfs.DeleteSubVolume(h.mirrorSnapshotPath); err != nil {
logger.Errorf("failed to delete old Btrfs snapshot %s", h.mirrorSnapshotPath)
return err
}
logger.Noticef("deleted old snapshot %s", h.mirrorSnapshotPath)
}
// create a new writable snapshot
// (the snapshot is writable so that it can be deleted easily)
if err := btrfs.SnapshotSubVolume(h.provider.WorkingDir(), h.mirrorSnapshotPath, false); err != nil {
logger.Errorf("failed to create new Btrfs snapshot %s", h.mirrorSnapshotPath)
return err
}
logger.Noticef("created new Btrfs snapshot %s", h.mirrorSnapshotPath)
return nil
}
// keep the old snapshot => nothing to do
func (h *btrfsSnapshotHook) postFail() error {
return nil
}
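The hook drives three calls from github.com/dennwc/btrfs, all visible above. Exercised directly — this assumes a Btrfs mount and a user permitted to run the operations, and both paths are illustrative:

package main

import (
    "fmt"

    "github.com/dennwc/btrfs"
)

func main() {
    work := "/mnt/btrfs/mirror/demo"
    snap := "/mnt/btrfs/snapshots/demo"

    if err := btrfs.CreateSubVolume(work); err != nil {
        fmt.Println("create:", err)
        return
    }
    // writable snapshot (readonly=false), matching the hook above
    if err := btrfs.SnapshotSubVolume(work, snap, false); err != nil {
        fmt.Println("snapshot:", err)
        return
    }
    if err := btrfs.DeleteSubVolume(snap); err != nil {
        fmt.Println("delete:", err)
    }
}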


@@ -17,7 +17,6 @@ import (
type cgroupHook struct {
emptyHook
provider mirrorProvider
basePath string
baseGroup string
created bool
@@ -36,7 +35,9 @@ func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit st
subsystem = "cpu"
}
return &cgroupHook{
provider: p,
emptyHook: emptyHook{
provider: p,
},
basePath: basePath,
baseGroup: baseGroup,
subsystem: subsystem,


@@ -83,7 +83,7 @@ sleep 30
So(err, ShouldBeNil)
go func() {
err = provider.Run()
err := provider.Run(make(chan empty, 1))
ctx.So(err, ShouldNotBeNil)
}()


@@ -1,9 +1,13 @@
package worker
import (
"errors"
"fmt"
"regexp"
"time"
"github.com/anmitsu/go-shlex"
"github.com/tuna/tunasync/internal"
)
type cmdConfig struct {
@@ -11,22 +15,34 @@ type cmdConfig struct {
upstreamURL, command string
workingDir, logDir, logFile string
interval time.Duration
retry int
timeout time.Duration
env map[string]string
failOnMatch string
sizePattern string
}
type cmdProvider struct {
baseProvider
cmdConfig
command []string
command []string
dataSize string
failOnMatch *regexp.Regexp
sizePattern *regexp.Regexp
}
func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
// TODO: check config options
if c.retry == 0 {
c.retry = defaultMaxRetry
}
provider := &cmdProvider{
baseProvider: baseProvider{
name: c.name,
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
cmdConfig: c,
}
@@ -40,6 +56,22 @@ func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
return nil, err
}
provider.command = cmd
if len(c.failOnMatch) > 0 {
var err error
failOnMatch, err := regexp.Compile(c.failOnMatch)
if err != nil {
return nil, errors.New("fail-on-match regexp error: " + err.Error())
}
provider.failOnMatch = failOnMatch
}
if len(c.sizePattern) > 0 {
var err error
sizePattern, err := regexp.Compile(c.sizePattern)
if err != nil {
return nil, errors.New("size-pattern regexp error: " + err.Error())
}
provider.sizePattern = sizePattern
}
return provider, nil
}
@@ -52,14 +84,45 @@ func (p *cmdProvider) Upstream() string {
return p.upstreamURL
}
func (p *cmdProvider) Run() error {
func (p *cmdProvider) DataSize() string {
return p.dataSize
}
func (p *cmdProvider) Run(started chan empty) error {
p.dataSize = ""
defer p.closeLogFile()
if err := p.Start(); err != nil {
return err
}
return p.Wait()
started <- empty{}
if err := p.Wait(); err != nil {
return err
}
if p.failOnMatch != nil {
matches, err := internal.FindAllSubmatchInFile(p.LogFile(), p.failOnMatch)
logger.Infof("FindAllSubmatchInFile: %q\n", matches)
if err != nil {
return err
}
if len(matches) != 0 {
logger.Debug("Fail-on-match: %r", matches)
return fmt.Errorf("Fail-on-match regexp found %d matches", len(matches))
}
}
if p.sizePattern != nil {
p.dataSize = internal.ExtractSizeFromLog(p.LogFile(), p.sizePattern)
}
return nil
}
func (p *cmdProvider) Start() error {
p.Lock()
defer p.Unlock()
if p.IsRunning() {
return errors.New("provider is currently running")
}
env := map[string]string{
"TUNASYNC_MIRROR_NAME": p.Name(),
"TUNASYNC_WORKING_DIR": p.WorkingDir(),
@@ -71,7 +134,7 @@ func (p *cmdProvider) Start() error {
env[k] = v
}
p.cmd = newCmdJob(p, p.command, p.WorkingDir(), env)
if err := p.prepareLogFile(); err != nil {
if err := p.prepareLogFile(false); err != nil {
return err
}
@@ -79,5 +142,6 @@ func (p *cmdProvider) Start() error {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
return nil
}
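Run's new signature takes a started channel: it sends exactly one value after Start succeeds and before blocking in Wait, presumably so the caller only begins timeout accounting once the process is actually up. The handshake in isolation:

package main

import (
    "fmt"
    "time"
)

type empty struct{}

// run mimics cmdProvider.Run: signal `started` only after
// startup succeeds, then block until the work finishes.
func run(started chan empty) error {
    time.Sleep(10 * time.Millisecond) // pretend Start()
    started <- empty{}
    time.Sleep(50 * time.Millisecond) // pretend Wait()
    return nil
}

func main() {
    started := make(chan empty, 1)
    done := make(chan error, 1)
    go func() { done <- run(started) }()
    <-started // a timeout clock could begin here
    fmt.Println("provider is running; result:", <-done)
}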


@@ -1,6 +1,6 @@
package worker
// put global viables and types here
// put global variables and types here
import (
"gopkg.in/op/go-logging.v1"
@@ -8,6 +8,6 @@ import (
type empty struct{}
const maxRetry = 2
const defaultMaxRetry = 2
var logger = logging.MustGetLogger("tunasync")


@@ -6,6 +6,7 @@ import (
"path/filepath"
"github.com/BurntSushi/toml"
"github.com/imdario/mergo"
)
type providerEnum uint8
@@ -33,14 +34,16 @@ func (p *providerEnum) UnmarshalText(text []byte) error {
// Config represents worker config options
type Config struct {
Global globalConfig `toml:"global"`
Manager managerConfig `toml:"manager"`
Server serverConfig `toml:"server"`
Cgroup cgroupConfig `toml:"cgroup"`
ZFS zfsConfig `toml:"zfs"`
Docker dockerConfig `toml:"docker"`
Include includeConfig `toml:"include"`
Mirrors []mirrorConfig `toml:"mirrors"`
Global globalConfig `toml:"global"`
Manager managerConfig `toml:"manager"`
Server serverConfig `toml:"server"`
Cgroup cgroupConfig `toml:"cgroup"`
ZFS zfsConfig `toml:"zfs"`
BtrfsSnapshot btrfsSnapshotConfig `toml:"btrfs_snapshot"`
Docker dockerConfig `toml:"docker"`
Include includeConfig `toml:"include"`
MirrorsConf []mirrorConfig `toml:"mirrors"`
Mirrors []mirrorConfig
}
type globalConfig struct {
@@ -49,6 +52,8 @@ type globalConfig struct {
MirrorDir string `toml:"mirror_dir"`
Concurrent int `toml:"concurrent"`
Interval int `toml:"interval"`
Retry int `toml:"retry"`
Timeout int `toml:"timeout"`
ExecOnSuccess []string `toml:"exec_on_success"`
ExecOnFailure []string `toml:"exec_on_failure"`
@@ -95,6 +100,11 @@ type zfsConfig struct {
Zpool string `toml:"zpool"`
}
type btrfsSnapshotConfig struct {
Enable bool `toml:"enable"`
SnapshotPath string `toml:"snapshot_path"`
}
type includeConfig struct {
IncludeMirrors string `toml:"include_mirrors"`
}
@@ -104,14 +114,17 @@ type includedMirrorConfig struct {
}
type mirrorConfig struct {
Name string `toml:"name"`
Provider providerEnum `toml:"provider"`
Upstream string `toml:"upstream"`
Interval int `toml:"interval"`
MirrorDir string `toml:"mirror_dir"`
LogDir string `toml:"log_dir"`
Env map[string]string `toml:"env"`
Role string `toml:"role"`
Name string `toml:"name"`
Provider providerEnum `toml:"provider"`
Upstream string `toml:"upstream"`
Interval int `toml:"interval"`
Retry int `toml:"retry"`
Timeout int `toml:"timeout"`
MirrorDir string `toml:"mirror_dir"`
MirrorSubDir string `toml:"mirror_subdir"`
LogDir string `toml:"log_dir"`
Env map[string]string `toml:"env"`
Role string `toml:"role"`
// These two options over-write the global options
ExecOnSuccess []string `toml:"exec_on_success"`
@@ -121,18 +134,29 @@ type mirrorConfig struct {
ExecOnSuccessExtra []string `toml:"exec_on_success_extra"`
ExecOnFailureExtra []string `toml:"exec_on_failure_extra"`
Command string `toml:"command"`
UseIPv6 bool `toml:"use_ipv6"`
ExcludeFile string `toml:"exclude_file"`
Username string `toml:"username"`
Password string `toml:"password"`
Stage1Profile string `toml:"stage1_profile"`
Command string `toml:"command"`
FailOnMatch string `toml:"fail_on_match"`
SizePattern string `toml:"size_pattern"`
UseIPv6 bool `toml:"use_ipv6"`
UseIPv4 bool `toml:"use_ipv4"`
ExcludeFile string `toml:"exclude_file"`
Username string `toml:"username"`
Password string `toml:"password"`
RsyncNoTimeo bool `toml:"rsync_no_timeout"`
RsyncTimeout int `toml:"rsync_timeout"`
RsyncOptions []string `toml:"rsync_options"`
RsyncOverride []string `toml:"rsync_override"`
Stage1Profile string `toml:"stage1_profile"`
MemoryLimit string `toml:"memory_limit"`
DockerImage string `toml:"docker_image"`
DockerVolumes []string `toml:"docker_volumes"`
DockerOptions []string `toml:"docker_options"`
SnapshotPath string `toml:"snapshot_path"`
ChildMirrors []mirrorConfig `toml:"mirrors"`
}
// LoadConfig loads configuration
@@ -159,9 +183,36 @@ func LoadConfig(cfgFile string) (*Config, error) {
logger.Errorf(err.Error())
return nil, err
}
cfg.Mirrors = append(cfg.Mirrors, incMirCfg.Mirrors...)
cfg.MirrorsConf = append(cfg.MirrorsConf, incMirCfg.Mirrors...)
}
}
for _, m := range cfg.MirrorsConf {
if err := recursiveMirrors(cfg, nil, m); err != nil {
return nil, err
}
}
return cfg, nil
}
func recursiveMirrors(cfg *Config, parent *mirrorConfig, mirror mirrorConfig) error {
var curMir mirrorConfig
if parent != nil {
curMir = *parent
}
curMir.ChildMirrors = nil
if err := mergo.Merge(&curMir, mirror, mergo.WithOverride); err != nil {
return err
}
if mirror.ChildMirrors == nil {
cfg.Mirrors = append(cfg.Mirrors, curMir)
} else {
for _, m := range mirror.ChildMirrors {
if err := recursiveMirrors(cfg, &curMir, m); err != nil {
return err
}
}
}
return nil
}
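
Note: the inheritance step rests on mergo: each child starts as a copy of its parent, and the child's own non-zero fields win via WithOverride. A standalone sketch of that merge semantics with a trimmed-down struct (the field set is illustrative):

package main

import (
    "fmt"

    "github.com/imdario/mergo"
)

type mirror struct {
    Name          string
    Provider      string
    UseIPv6       bool
    Stage1Profile string
}

func main() {
    parent := mirror{Name: "ipv6s", UseIPv6: true, Stage1Profile: "debian"}
    child := mirror{Name: "ubuntu", Provider: "two-stage-rsync", Stage1Profile: "ubuntu"}

    merged := parent // inherit everything from the parent first
    // WithOverride lets the child's non-zero fields replace inherited ones;
    // zero values (like the child's false UseIPv6) keep the parent's value.
    if err := mergo.Merge(&merged, child, mergo.WithOverride); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", merged)
    // {Name:ubuntu Provider:two-stage-rsync UseIPv6:true Stage1Profile:ubuntu}
}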


@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
@@ -18,6 +19,8 @@ log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
[manager]
api_base = "https://127.0.0.1:5000"
@@ -35,6 +38,8 @@ name = "AOSP"
provider = "command"
upstream = "https://aosp.google.com/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/git/AOSP"
exec_on_success = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@@ -81,9 +86,9 @@ exec_on_failure = [
tmpDir,
)
cfgBlob = cfgBlob + incSection
curCfgBlob := cfgBlob + incSection
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob), 0644)
err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
@@ -116,6 +121,8 @@ use_ipv6 = true
So(err, ShouldBeNil)
So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240)
So(cfg.Global.Retry, ShouldEqual, 3)
So(cfg.Global.Timeout, ShouldEqual, 86400)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
@@ -126,6 +133,8 @@ use_ipv6 = true
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720)
So(m.Retry, ShouldEqual, 2)
So(m.Timeout, ShouldEqual, 3600)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1]
@@ -153,6 +162,102 @@ use_ipv6 = true
So(len(cfg.Mirrors), ShouldEqual, 6)
})
Convey("Everything should work on nested config file", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil)
defer os.RemoveAll(tmpDir)
incSection := fmt.Sprintf(
"\n[include]\n"+
"include_mirrors = \"%s/*.conf\"",
tmpDir,
)
curCfgBlob := cfgBlob + incSection
err = ioutil.WriteFile(tmpfile.Name(), []byte(curCfgBlob), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
incBlob1 := `
[[mirrors]]
name = "ipv6s"
use_ipv6 = true
[[mirrors.mirrors]]
name = "debians"
mirror_subdir = "debian"
provider = "two-stage-rsync"
stage1_profile = "debian"
[[mirrors.mirrors.mirrors]]
name = "debian-security"
upstream = "rsync://test.host/debian-security/"
[[mirrors.mirrors.mirrors]]
name = "ubuntu"
stage1_profile = "ubuntu"
upstream = "rsync://test.host2/ubuntu/"
[[mirrors.mirrors]]
name = "debian-cd"
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = ioutil.WriteFile(filepath.Join(tmpDir, "nest.conf"), []byte(incBlob1), 0644)
So(err, ShouldEqual, nil)
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240)
So(cfg.Global.Retry, ShouldEqual, 3)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
So(cfg.Server.Hostname, ShouldEqual, "worker1.example.com")
m := cfg.Mirrors[0]
So(m.Name, ShouldEqual, "AOSP")
So(m.MirrorDir, ShouldEqual, "/data/git/AOSP")
So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720)
So(m.Retry, ShouldEqual, 2)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1]
So(m.Name, ShouldEqual, "debian")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
m = cfg.Mirrors[2]
So(m.Name, ShouldEqual, "fedora")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provRsync)
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
m = cfg.Mirrors[3]
So(m.Name, ShouldEqual, "debian-security")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.UseIPv6, ShouldEqual, true)
So(m.Stage1Profile, ShouldEqual, "debian")
m = cfg.Mirrors[4]
So(m.Name, ShouldEqual, "ubuntu")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.UseIPv6, ShouldEqual, true)
So(m.Stage1Profile, ShouldEqual, "ubuntu")
m = cfg.Mirrors[5]
So(m.Name, ShouldEqual, "debian-cd")
So(m.UseIPv6, ShouldEqual, true)
So(m.Provider, ShouldEqual, provRsync)
So(len(cfg.Mirrors), ShouldEqual, 6)
})
Convey("Providers can be inited from a valid config file", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
@@ -203,4 +308,92 @@ use_ipv6 = true
So(rp.excludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
})
Convey("MirrorSubdir should work", t, func() {
tmpfile, err := ioutil.TempFile("", "tunasync")
So(err, ShouldEqual, nil)
defer os.Remove(tmpfile.Name())
cfgBlob1 := `
[global]
name = "test_worker"
log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
timeout = 86400
retry = 3
[manager]
api_base = "https://127.0.0.1:5000"
token = "some_token"
[server]
hostname = "worker1.example.com"
listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = "/etc/tunasync.d/worker1.cert"
ssl_key = "/etc/tunasync.d/worker1.key"
[[mirrors]]
name = "ipv6s"
use_ipv6 = true
[[mirrors.mirrors]]
name = "debians"
mirror_subdir = "debian"
provider = "two-stage-rsync"
stage1_profile = "debian"
[[mirrors.mirrors.mirrors]]
name = "debian-security"
upstream = "rsync://test.host/debian-security/"
[[mirrors.mirrors.mirrors]]
name = "ubuntu"
stage1_profile = "ubuntu"
upstream = "rsync://test.host2/ubuntu/"
[[mirrors.mirrors]]
name = "debian-cd"
provider = "rsync"
upstream = "rsync://test.host3/debian-cd/"
`
err = ioutil.WriteFile(tmpfile.Name(), []byte(cfgBlob1), 0644)
So(err, ShouldEqual, nil)
defer tmpfile.Close()
cfg, err := LoadConfig(tmpfile.Name())
So(err, ShouldBeNil)
providers := map[string]mirrorProvider{}
for _, m := range cfg.Mirrors {
p := newMirrorProvider(m, cfg)
providers[p.Name()] = p
}
p := providers["debian-security"]
So(p.Name(), ShouldEqual, "debian-security")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-security")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-security/latest.log")
r2p, ok := p.(*twoStageRsyncProvider)
So(ok, ShouldBeTrue)
So(r2p.stage1Profile, ShouldEqual, "debian")
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/debian-security")
p = providers["ubuntu"]
So(p.Name(), ShouldEqual, "ubuntu")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/ubuntu")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/ubuntu/latest.log")
r2p, ok = p.(*twoStageRsyncProvider)
So(ok, ShouldBeTrue)
So(r2p.stage1Profile, ShouldEqual, "ubuntu")
So(r2p.WorkingDir(), ShouldEqual, "/data/mirrors/debian/ubuntu")
p = providers["debian-cd"]
So(p.Name(), ShouldEqual, "debian-cd")
So(p.LogDir(), ShouldEqual, "/var/log/tunasync/debian-cd")
So(p.LogFile(), ShouldEqual, "/var/log/tunasync/debian-cd/latest.log")
rp, ok := p.(*rsyncProvider)
So(ok, ShouldBeTrue)
So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/debian-cd")
So(p.Timeout(), ShouldEqual, 86400*time.Second)
})
}


@@ -3,30 +3,38 @@ package worker
import (
"fmt"
"os"
"time"
"github.com/codeskyblue/go-sh"
)
type dockerHook struct {
emptyHook
provider mirrorProvider
image string
volumes []string
options []string
image string
volumes []string
options []string
}
func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
volumes := []string{}
volumes = append(volumes, gCfg.Volumes...)
volumes = append(volumes, mCfg.DockerVolumes...)
if len(mCfg.ExcludeFile) > 0 {
arg := fmt.Sprintf("%s:%s:ro", mCfg.ExcludeFile, mCfg.ExcludeFile)
volumes = append(volumes, arg)
}
options := []string{}
options = append(options, gCfg.Options...)
options = append(options, mCfg.DockerOptions...)
return &dockerHook{
provider: p,
image: mCfg.DockerImage,
volumes: volumes,
options: options,
emptyHook: emptyHook{
provider: p,
},
image: mCfg.DockerImage,
volumes: volumes,
options: options,
}
}
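
Note: the exclude file is bind-mounted read-only at the identical path inside the container, so the --exclude-from argument passed to rsync needs no rewriting. The argument construction in isolation:

package main

import "fmt"

// excludeVolume maps the host exclude file to the same path in the
// container, read-only, matching the volumes logic above.
func excludeVolume(excludeFile string) string {
    return fmt.Sprintf("%s:%s:ro", excludeFile, excludeFile)
}

func main() {
    fmt.Println(excludeVolume("/etc/tunasync.d/fedora-exclude.txt"))
    // /etc/tunasync.d/fedora-exclude.txt:/etc/tunasync.d/fedora-exclude.txt:ro
}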
@@ -59,6 +67,27 @@ func (d *dockerHook) postExec() error {
// sh.Command(
// "docker", "rm", "-f", d.Name(),
// ).Run()
name := d.Name()
retry := 10
for ; retry > 0; retry-- {
out, err := sh.Command(
"docker", "ps", "-a",
"--filter", "name=^"+name+"$",
"--format", "{{.Status}}",
).Output()
if err != nil {
logger.Errorf("docker ps failed: %v", err)
break
}
if len(out) == 0 {
break
}
logger.Debugf("container %s still exists: '%s'", name, string(out))
time.Sleep(1 * time.Second)
}
if retry == 0 {
logger.Warningf("container %s not removed automatically, next sync may fail", name)
}
d.provider.ExitContext()
return nil
}
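
Note: containers are removed asynchronously after their process exits, so postExec polls docker ps until the name disappears rather than racing the next sync. A standalone sketch of that polling loop using os/exec instead of go-sh (the container name is illustrative):

package main

import (
    "fmt"
    "os/exec"
    "strings"
    "time"
)

// waitRemoved polls `docker ps -a` until no container named name
// remains, or gives up after retries one-second attempts.
func waitRemoved(name string, retries int) bool {
    for ; retries > 0; retries-- {
        out, err := exec.Command(
            "docker", "ps", "-a",
            "--filter", "name=^"+name+"$",
            "--format", "{{.Status}}",
        ).Output()
        if err != nil || len(strings.TrimSpace(string(out))) == 0 {
            return true // gone, or docker ps itself failed
        }
        time.Sleep(1 * time.Second)
    }
    return false
}

func main() {
    if !waitRemoved("tunasync-job", 10) {
        fmt.Println("container not removed, next sync may fail")
    }
}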


@@ -4,6 +4,7 @@ import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
@@ -12,13 +13,27 @@ import (
. "github.com/smartystreets/goconvey/convey"
)
func cmdRun(p string, args []string) {
cmd := exec.Command(p, args...)
out, err := cmd.CombinedOutput()
if err != nil {
logger.Debugf("cmdRun failed %s", err)
return
}
logger.Debugf("cmdRun: ", string(out))
}
func getDockerByName(name string) (string, error) {
// docker ps -f 'name=$name' --format '{{.Names}}'
out, err := sh.Command(
"docker", "ps",
"docker", "ps", "-a",
"--filter", "name="+name,
"--format", "{{.Names}}",
).Output()
if err == nil {
logger.Debugf("docker ps: '%s'", string(out))
}
return string(out), err
}
@@ -46,7 +61,7 @@ func TestDocker(t *testing.T) {
cmdScriptContent := `#!/bin/sh
echo ${TEST_CONTENT}
sleep 10
sleep 20
`
err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
@@ -55,8 +70,10 @@ sleep 10
So(err, ShouldBeNil)
d := &dockerHook{
provider: provider,
image: "alpine",
emptyHook: emptyHook{
provider: provider,
},
image: "alpine:3.8",
volumes: []string{
fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
},
@@ -67,12 +84,27 @@ sleep 10
err = d.preExec()
So(err, ShouldBeNil)
cmdRun("docker", []string{"images"})
exitedErr := make(chan error, 1)
go func() {
err = provider.Run()
ctx.So(err, ShouldNotBeNil)
err = provider.Run(make(chan empty, 1))
logger.Debugf("provider.Run() exited")
if err != nil {
logger.Errorf("provider.Run() failed: %v", err)
}
exitedErr <- err
}()
time.Sleep(1 * time.Second)
// Wait for docker running
for wait := 0; wait < 8; wait++ {
names, err := getDockerByName(d.Name())
So(err, ShouldBeNil)
if names != "" {
break
}
time.Sleep(1 * time.Second)
}
// cmdRun("ps", []string{"aux"})
// assert container running
names, err := getDockerByName(d.Name())
@@ -82,6 +114,9 @@ sleep 10
err = provider.Terminate()
So(err, ShouldBeNil)
// cmdRun("ps", []string{"aux"})
<-exitedErr
// container should be terminated and removed
names, err = getDockerByName(d.Name())
So(err, ShouldBeNil)


@@ -18,7 +18,6 @@ const (
type execPostHook struct {
emptyHook
provider mirrorProvider
// exec on success or on failure
execOn uint8
@@ -37,9 +36,11 @@ func newExecPostHook(provider mirrorProvider, execOn uint8, command string) (*ex
}
return &execPostHook{
provider: provider,
execOn: execOn,
command: cmd,
emptyHook: emptyHook{
provider: provider,
},
execOn: execOn,
command: cmd,
}, nil
}


@@ -92,7 +92,7 @@ exit 1
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
for i := 0; i < maxRetry; i++ {
for i := 0; i < defaultMaxRetry; i++ {
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan


@@ -5,6 +5,7 @@ import (
"fmt"
"sync"
"sync/atomic"
"time"
tunasync "github.com/tuna/tunasync/internal"
)
@@ -14,12 +15,13 @@ import (
type ctrlAction uint8
const (
jobStart ctrlAction = iota
jobStop // stop syncing keep the job
jobDisable // disable the job (stops goroutine)
jobRestart // restart syncing
jobPing // ensure the goroutine is alive
jobHalt // worker halts
jobStart ctrlAction = iota
jobStop // stop syncing keep the job
jobDisable // disable the job (stops goroutine)
jobRestart // restart syncing
jobPing // ensure the goroutine is alive
jobHalt // worker halts
jobForceStart // ignore concurrent limit
)
type jobMessage struct {
@@ -51,6 +53,7 @@ type mirrorJob struct {
ctrlChan chan ctrlAction
disabled chan empty
state uint32
size string
}
func newMirrorJob(provider mirrorProvider) *mirrorJob {
@@ -110,7 +113,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
managerChan <- jobMessage{
tunasync.Failed, m.Name(),
fmt.Sprintf("error exec hook %s: %s", hookname, err.Error()),
false,
true,
}
return err
}
@@ -136,7 +139,7 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
return err
}
for retry := 0; retry < maxRetry; retry++ {
for retry := 0; retry < provider.Retry(); retry++ {
stopASAP := false // stop job as soon as possible
if retry > 0 {
@@ -152,26 +155,43 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
var syncErr error
syncDone := make(chan error, 1)
started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)
go func() {
err := provider.Run()
if !stopASAP {
syncDone <- err
}
err := provider.Run(started)
syncDone <- err
}()
select { // Wait until provider started or error happened
case err := <-syncDone:
logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
syncDone <- err // it will be read again later
case <-started:
logger.Debug("provider started")
}
// Now terminating the provider is feasible
var termErr error
timeout := provider.Timeout()
if timeout <= 0 {
timeout = 100000 * time.Hour // never time out
}
select {
case syncErr = <-syncDone:
logger.Debug("syncing done")
case <-time.After(timeout):
logger.Notice("provider timeout")
termErr = provider.Terminate()
syncErr = fmt.Errorf("%s timeout after %v", m.Name(), timeout)
case <-kill:
logger.Debug("received kill")
stopASAP = true
err := provider.Terminate()
if err != nil {
logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
return err
}
termErr = provider.Terminate()
syncErr = errors.New("killed by manager")
}
if termErr != nil {
logger.Errorf("failed to terminate provider %s: %s", m.Name(), termErr.Error())
return termErr
}
// post-exec hooks
herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
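
Note: the timeout fix is the standard Go select pattern: run the provider in a goroutine, then wait on completion, a timer, or a kill signal, and terminate on the latter two. The pattern in isolation (the never-time-out sentinel mirrors the code above):

package main

import (
    "errors"
    "fmt"
    "time"
)

func runWithTimeout(run func() error, timeout time.Duration, kill <-chan struct{}) error {
    done := make(chan error, 1)
    go func() { done <- run() }()

    if timeout <= 0 {
        timeout = 100000 * time.Hour // effectively: never time out
    }
    select {
    case err := <-done:
        return err // finished (or failed) on its own
    case <-time.After(timeout):
        return fmt.Errorf("timeout after %v", timeout) // would Terminate() here
    case <-kill:
        return errors.New("killed by manager") // would Terminate() here
    }
}

func main() {
    err := runWithTimeout(func() error {
        time.Sleep(2 * time.Second)
        return nil
    }, 1*time.Second, make(chan struct{}))
    fmt.Println(err) // timeout after 1s
}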
@@ -182,26 +202,33 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
if syncErr == nil {
// syncing success
logger.Noticef("succeeded syncing %s", m.Name())
managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
// post-success hooks
logger.Debug("post-success hooks")
err := runHooks(rHooks, func(h jobHook) error { return h.postSuccess() }, "post-success")
if err != nil {
return err
}
return nil
} else {
// syncing failed
logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
// post-fail hooks
logger.Debug("post-fail hooks")
err := runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
if err != nil {
return err
}
}
if syncErr == nil {
// syncing success
m.size = provider.DataSize()
managerChan <- jobMessage{tunasync.Success, m.Name(), "", (m.State() == stateReady)}
return nil
}
// syncing failed
logger.Warningf("failed syncing %s: %s", m.Name(), syncErr.Error())
managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == maxRetry-1) && (m.State() == stateReady)}
managerChan <- jobMessage{tunasync.Failed, m.Name(), syncErr.Error(), (retry == provider.Retry()-1) && (m.State() == stateReady)}
// post-fail hooks
logger.Debug("post-fail hooks")
err = runHooks(rHooks, func(h jobHook) error { return h.postFail() }, "post-fail")
if err != nil {
return err
}
// gracefully exit
if stopASAP {
logger.Debug("No retry, exit directly")
@@ -212,22 +239,26 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
return nil
}
runJob := func(kill <-chan empty, jobDone chan<- empty) {
runJob := func(kill <-chan empty, jobDone chan<- empty, bypassSemaphore <-chan empty) {
select {
case semaphore <- empty{}:
defer func() { <-semaphore }()
runJobWrapper(kill, jobDone)
case <-bypassSemaphore:
logger.Noticef("Concurrent limit ignored by %s", m.Name())
runJobWrapper(kill, jobDone)
case <-kill:
jobDone <- empty{}
return
}
}
bypassSemaphore := make(chan empty, 1)
for {
if m.State() == stateReady {
kill := make(chan empty)
jobDone := make(chan empty)
go runJob(kill, jobDone)
go runJob(kill, jobDone, bypassSemaphore)
_wait_for_job:
select {
@@ -248,7 +279,14 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
m.SetState(stateReady)
close(kill)
<-jobDone
time.Sleep(time.Second) // Restart may fail if the process was not exited yet
continue
case jobForceStart:
select { //non-blocking
default:
case bypassSemaphore <- empty{}:
}
fallthrough
case jobStart:
m.SetState(stateReady)
goto _wait_for_job
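
Note: jobForceStart races a one-slot bypass channel against the shared semaphore; a forced job consumes the bypass token instead of a semaphore slot, so the concurrency limit is skipped for exactly one run. A minimal sketch of that race (names are illustrative):

package main

import (
    "fmt"
    "time"
)

func runJob(id int, semaphore chan struct{}, bypass <-chan struct{}) {
    select {
    case semaphore <- struct{}{}: // normal path: take a slot
        defer func() { <-semaphore }()
        fmt.Printf("job %d: running within the limit\n", id)
    case <-bypass: // forced path: ignore the limit once
        fmt.Printf("job %d: concurrent limit ignored\n", id)
    }
    time.Sleep(100 * time.Millisecond) // the actual sync would happen here
}

func main() {
    semaphore := make(chan struct{}, 1) // limit: one concurrent job
    bypass := make(chan struct{}, 1)
    bypass <- struct{}{} // a jobForceStart enqueues this token

    go runJob(1, semaphore, bypass)
    go runJob(2, semaphore, bypass)
    time.Sleep(time.Second)
}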
@@ -272,8 +310,14 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
case jobDisable:
m.SetState(stateDisabled)
return nil
case jobForceStart:
select { //non-blocking
default:
case bypassSemaphore <- empty{}:
}
fallthrough
case jobRestart:
m.SetState(stateReady)
fallthrough
case jobStart:
m.SetState(stateReady)
default:


@@ -31,6 +31,7 @@ func TestMirrorJob(t *testing.T) {
logDir: tmpDir,
logFile: tmpFile,
interval: 1 * time.Second,
timeout: 7 * time.Second,
}
provider, err := newCmdProvider(c)
@@ -41,6 +42,7 @@ func TestMirrorJob(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("For a normal mirror job", func(ctx C) {
scriptContent := `#!/bin/bash
@@ -112,6 +114,74 @@ func TestMirrorJob(t *testing.T) {
})
Convey("When running long jobs with post-fail hook", func(ctx C) {
scriptContent := `#!/bin/bash
echo '++++++'
echo $TUNASYNC_WORKING_DIR
echo $0 sleeping
sleep 3
echo $TUNASYNC_WORKING_DIR
echo '------'
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
hookScriptFile := filepath.Join(tmpDir, "hook.sh")
err = ioutil.WriteFile(hookScriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
h, err := newExecPostHook(provider, execOnFailure, hookScriptFile)
So(err, ShouldBeNil)
provider.AddHook(h)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("If we kill it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we kill it then start it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
time.Sleep(2 * time.Second)
logger.Debugf("Now starting...\n")
job.ctrlChan <- jobStart
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
job.ctrlChan <- jobDisable
<-job.disabled
})
})
Convey("When running long jobs", func(ctx C) {
scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR
@@ -135,6 +205,8 @@ echo $TUNASYNC_WORKING_DIR
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
job.ctrlChan <- jobStop
msg = <-managerChan
@@ -170,8 +242,299 @@ echo $TUNASYNC_WORKING_DIR
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we restart it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobRestart
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
msg = <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
expectedOutput := fmt.Sprintf(
"%s\n%s\n",
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("If we disable it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobDisable
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
<-job.disabled
})
Convey("If we stop it twice, than start it", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStop
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldEqual, "killed by manager")
job.ctrlChan <- jobStop // should be ignored
job.ctrlChan <- jobStart
msg = <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Success)
expectedOutput := fmt.Sprintf(
"%s\n%s\n",
provider.WorkingDir(), provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
})
Convey("When a job timed out", func(ctx C) {
scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR
sleep 10
echo $TUNASYNC_WORKING_DIR
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("It should be automatically terminated", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("It should be retried", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
for i := 0; i < defaultMaxRetry; i++ {
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldContainSubstring, "timeout after")
// re-schedule after last try
So(msg.schedule, ShouldEqual, i == defaultMaxRetry-1)
}
job.ctrlChan <- jobDisable
<-job.disabled
})
})
})
}
func TestConcurrentMirrorJobs(t *testing.T) {
InitLogger(true, true, false)
Convey("Concurrent MirrorJobs should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
const CONCURRENT = 5
var providers [CONCURRENT]*cmdProvider
var jobs [CONCURRENT]*mirrorJob
for i := 0; i < CONCURRENT; i++ {
c := cmdConfig{
name: fmt.Sprintf("job-%d", i),
upstreamURL: "http://mirrors.tuna.moe/",
command: "sleep 2",
workingDir: tmpDir,
logDir: tmpDir,
logFile: "/dev/null",
interval: 10 * time.Second,
}
var err error
providers[i], err = newCmdProvider(c)
So(err, ShouldBeNil)
jobs[i] = newMirrorJob(providers[i])
}
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, CONCURRENT-2)
countingJobs := func(managerChan chan jobMessage, totalJobs, concurrentCheck int) (peakConcurrent, counterFailed int) {
counterEnded := 0
counterRunning := 0
peakConcurrent = 0
counterFailed = 0
for counterEnded < totalJobs {
msg := <-managerChan
switch msg.status {
case PreSyncing:
counterRunning++
case Syncing:
case Failed:
counterFailed++
fallthrough
case Success:
counterEnded++
counterRunning--
default:
So(0, ShouldEqual, 1)
}
// Test if semaphore works
So(counterRunning, ShouldBeLessThanOrEqualTo, concurrentCheck)
if counterRunning > peakConcurrent {
peakConcurrent = counterRunning
}
}
// select {
// case msg := <-managerChan:
// logger.Errorf("extra message received: %v", msg)
// So(0, ShouldEqual, 1)
// case <-time.After(2 * time.Second):
// }
return
}
Convey("When we run them all", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
}
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
Convey("If we cancel one job", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobRestart
time.Sleep(200 * time.Millisecond)
}
// Cancel the one waiting for semaphore
jobs[len(jobs)-1].ctrlChan <- jobStop
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT-1, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
Convey("If we override the concurrent limit", func(ctx C) {
for _, job := range jobs {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(200 * time.Millisecond)
}
jobs[len(jobs)-1].ctrlChan <- jobForceStart
jobs[len(jobs)-2].ctrlChan <- jobForceStart
peakConcurrent, counterFailed := countingJobs(managerChan, CONCURRENT, CONCURRENT)
So(peakConcurrent, ShouldEqual, CONCURRENT)
So(counterFailed, ShouldEqual, 0)
time.Sleep(1 * time.Second)
// fmt.Println("Restart them")
for _, job := range jobs {
job.ctrlChan <- jobStart
}
peakConcurrent, counterFailed = countingJobs(managerChan, CONCURRENT, CONCURRENT-2)
So(peakConcurrent, ShouldEqual, CONCURRENT-2)
So(counterFailed, ShouldEqual, 0)
for _, job := range jobs {
job.ctrlChan <- jobDisable
<-job.disabled
}
})
})
}


@@ -14,12 +14,13 @@ import (
type logLimiter struct {
emptyHook
provider mirrorProvider
}
func newLogLimiter(provider mirrorProvider) *logLimiter {
return &logLimiter{
provider: provider,
emptyHook: emptyHook{
provider: provider,
},
}
}


@@ -24,9 +24,9 @@ type mirrorProvider interface {
Type() providerEnum
// run mirror job in background
Run() error
// run mirror job in background
// Start then Wait
Run(started chan empty) error
// Start the job
Start() error
// Wait job to finish
Wait() error
@@ -45,11 +45,14 @@ type mirrorProvider interface {
Hooks() []jobHook
Interval() time.Duration
Retry() int
Timeout() time.Duration
WorkingDir() string
LogDir() string
LogFile() string
IsMaster() bool
DataSize() string
// enter context
EnterContext() *Context
@@ -80,12 +83,18 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
}
if mirrorDir == "" {
mirrorDir = filepath.Join(
cfg.Global.MirrorDir, mirror.Name,
cfg.Global.MirrorDir, mirror.MirrorSubDir, mirror.Name,
)
}
if mirror.Interval == 0 {
mirror.Interval = cfg.Global.Interval
}
if mirror.Retry == 0 {
mirror.Retry = cfg.Global.Retry
}
if mirror.Timeout == 0 {
mirror.Timeout = cfg.Global.Timeout
}
logDir = formatLogDir(logDir, mirror)
// IsMaster
@@ -107,57 +116,75 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
upstreamURL: mirror.Upstream,
command: mirror.Command,
workingDir: mirrorDir,
failOnMatch: mirror.FailOnMatch,
sizePattern: mirror.SizePattern,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
env: mirror.Env,
}
p, err := newCmdProvider(pc)
p.isMaster = isMaster
if err != nil {
panic(err)
}
p.isMaster = isMaster
provider = p
case provRsync:
rc := rsyncConfig{
name: mirror.Name,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
interval: time.Duration(mirror.Interval) * time.Minute,
name: mirror.Name,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
extraOptions: mirror.RsyncOptions,
rsyncNeverTimeout: mirror.RsyncNoTimeo,
rsyncTimeoutValue: mirror.RsyncTimeout,
overriddenOptions: mirror.RsyncOverride,
rsyncEnv: mirror.Env,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
useIPv4: mirror.UseIPv4,
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
}
p, err := newRsyncProvider(rc)
p.isMaster = isMaster
if err != nil {
panic(err)
}
p.isMaster = isMaster
provider = p
case provTwoStageRsync:
rc := twoStageRsyncConfig{
name: mirror.Name,
stage1Profile: mirror.Stage1Profile,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
interval: time.Duration(mirror.Interval) * time.Minute,
name: mirror.Name,
stage1Profile: mirror.Stage1Profile,
upstreamURL: mirror.Upstream,
rsyncCmd: mirror.Command,
username: mirror.Username,
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
extraOptions: mirror.RsyncOptions,
rsyncNeverTimeout: mirror.RsyncNoTimeo,
rsyncTimeoutValue: mirror.RsyncTimeout,
rsyncEnv: mirror.Env,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
}
p, err := newTwoStageRsyncProvider(rc)
p.isMaster = isMaster
if err != nil {
panic(err)
}
p.isMaster = isMaster
provider = p
default:
panic(errors.New("Invalid mirror provider"))
@@ -171,6 +198,11 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
}
// Add Btrfs Snapshot Hook
if cfg.BtrfsSnapshot.Enable {
provider.AddHook(newBtrfsSnapshotHook(provider, cfg.BtrfsSnapshot.SnapshotPath, mirror))
}
// Add Docker Hook
if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))


@@ -5,6 +5,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
"time"
@@ -27,6 +28,7 @@ func TestRsyncProvider(t *testing.T) {
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
timeout: 100 * time.Second,
interval: 600 * time.Second,
}
@@ -39,6 +41,7 @@ func TestRsyncProvider(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("When entering a context (auto exit)", func() {
func() {
@@ -73,34 +76,66 @@ func TestRsyncProvider(t *testing.T) {
echo "syncing to $(pwd)"
echo $RSYNC_PASSWORD $@
sleep 1
echo "Total file size: 1.33T bytes"
echo "Done"
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"%s\n"+
"Total file size: 1.33T bytes\n"+
"Done\n",
provider.WorkingDir(),
targetDir,
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 --contimeout=120 -6 %s %s",
"--timeout=120 -6 %s %s",
provider.upstreamURL, provider.WorkingDir(),
),
)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
So(provider.DataSize(), ShouldEqual, "1.33T")
})
})
Convey("If the rsync program fails", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
Convey("in the rsyncProvider", func() {
c := rsyncConfig{
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
extraOptions: []string{"--something-invalid"},
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldContainSubstring, "Syntax or usage error")
})
})
}
func TestRsyncProviderWithAuthentication(t *testing.T) {
@@ -110,18 +145,22 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
proxyAddr := "127.0.0.1:1233"
c := rsyncConfig{
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
username: "tunasync",
password: "tunasyncpassword",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
username: "tunasync",
password: "tunasyncpassword",
workingDir: tmpDir,
extraOptions: []string{"--delete-excluded"},
rsyncTimeoutValue: 30,
rsyncEnv: map[string]string{"RSYNC_PROXY": proxyAddr},
logDir: tmpDir,
logFile: tmpFile,
useIPv4: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
@@ -136,7 +175,7 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
Convey("Let's try a run", func() {
scriptContent := `#!/bin/bash
echo "syncing to $(pwd)"
echo $USER $RSYNC_PASSWORD $@
echo $USER $RSYNC_PASSWORD $RSYNC_PROXY $@
sleep 1
echo "Done"
exit 0
@@ -144,20 +183,22 @@ exit 0
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"%s\n"+
"Done\n",
provider.WorkingDir(),
targetDir,
fmt.Sprintf(
"%s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"%s %s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 --contimeout=120 -6 %s %s",
provider.username, provider.password, provider.upstreamURL, provider.WorkingDir(),
"--timeout=30 -4 --delete-excluded %s %s",
provider.username, provider.password, proxyAddr,
provider.upstreamURL, provider.WorkingDir(),
),
)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
@@ -168,6 +209,141 @@ exit 0
})
}
func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
Convey("Rsync Provider with overridden options should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
tmpFile := filepath.Join(tmpDir, "log_file")
c := rsyncConfig{
name: "tuna",
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
rsyncNeverTimeout: true,
overriddenOptions: []string{"-aHvh", "--no-o", "--no-g", "--stats"},
extraOptions: []string{"--delete-excluded"},
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
interval: 600 * time.Second,
}
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
So(provider.Name(), ShouldEqual, c.name)
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
Convey("Let's try a run", func() {
scriptContent := `#!/bin/bash
echo "syncing to $(pwd)"
echo $@
sleep 1
echo "Done"
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"-aHvh --no-o --no-g --stats -6 --delete-excluded %s %s\n"+
"Done\n",
targetDir,
provider.upstreamURL,
provider.WorkingDir(),
)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
// fmt.Println(string(loggedContent))
})
})
}
func TestRsyncProviderWithDocker(t *testing.T) {
Convey("Rsync in Docker should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
excludeFile := filepath.Join(tmpDir, "exclude.txt")
g := &Config{
Global: globalConfig{
Retry: 2,
},
Docker: dockerConfig{
Enable: true,
Volumes: []string{
scriptFile + ":/bin/myrsync",
"/etc/gai.conf:/etc/gai.conf:ro",
},
},
}
c := mirrorConfig{
Name: "tuna",
Provider: provRsync,
Upstream: "rsync://rsync.tuna.moe/tuna/",
Command: "/bin/myrsync",
ExcludeFile: excludeFile,
DockerImage: "alpine:3.8",
LogDir: tmpDir,
MirrorDir: tmpDir,
UseIPv6: true,
Timeout: 100,
Interval: 600,
}
provider := newMirrorProvider(c, g)
So(provider.Type(), ShouldEqual, provRsync)
So(provider.Name(), ShouldEqual, c.Name)
So(provider.WorkingDir(), ShouldEqual, c.MirrorDir)
So(provider.LogDir(), ShouldEqual, c.LogDir)
cmdScriptContent := `#!/bin/sh
#echo "$@"
while [[ $# -gt 0 ]]; do
if [[ "$1" = "--exclude-from" ]]; then
cat "$2"
shift
fi
shift
done
`
err = ioutil.WriteFile(scriptFile, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
err = ioutil.WriteFile(excludeFile, []byte("__some_pattern"), 0755)
So(err, ShouldBeNil)
for _, hook := range provider.Hooks() {
err = hook.preExec()
So(err, ShouldBeNil)
}
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
for _, hook := range provider.Hooks() {
err = hook.postExec()
So(err, ShouldBeNil)
}
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, "__some_pattern")
})
}
func TestCmdProvider(t *testing.T) {
Convey("Command Provider should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
@@ -221,7 +397,7 @@ echo $AOSP_REPO_BIN
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
@@ -237,29 +413,146 @@ echo $AOSP_REPO_BIN
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
})
Convey("If a long job is killed", func(ctx C) {
scriptContent := `#!/bin/bash
sleep 5
sleep 10
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 1)
go func() {
err = provider.Run()
err := provider.Run(started)
ctx.So(err, ShouldNotBeNil)
}()
<-started
So(provider.IsRunning(), ShouldBeTrue)
time.Sleep(1 * time.Second)
err = provider.Terminate()
So(err, ShouldBeNil)
})
})
Convey("Command Provider without log file should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
c := cmdConfig{
name: "run-ls",
upstreamURL: "http://mirrors.tuna.moe/",
command: "ls",
workingDir: tmpDir,
logDir: tmpDir,
logFile: "/dev/null",
interval: 600 * time.Second,
}
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
So(provider.IsMaster(), ShouldEqual, false)
So(provider.ZFS(), ShouldBeNil)
So(provider.Type(), ShouldEqual, provCommand)
So(provider.Name(), ShouldEqual, c.name)
So(provider.WorkingDir(), ShouldEqual, c.workingDir)
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
Convey("Run the command", func() {
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
})
Convey("Command Provider with RegExprs should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
name: "run-uptime",
upstreamURL: "http://mirrors.tuna.moe/",
command: "uptime",
failOnMatch: "",
sizePattern: "",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 600 * time.Second,
}
Convey("when fail-on-match regexp matches", func() {
c.failOnMatch = `[a-z]+`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
Convey("when fail-on-match regexp does not match", func() {
c.failOnMatch = `load average_`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
Convey("when fail-on-match regexp meets /dev/null", func() {
c.failOnMatch = `load average_`
c.logFile = "/dev/null"
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
})
Convey("when size-pattern regexp matches", func() {
c.sizePattern = `load average: ([\d\.]+)`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
So(provider.DataSize(), ShouldNotBeEmpty)
_, err = strconv.ParseFloat(provider.DataSize(), 32)
So(err, ShouldBeNil)
})
Convey("when size-pattern regexp does not match", func() {
c.sizePattern = `load ave: ([\d\.]+)`
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
Convey("when size-pattern regexp meets /dev/null", func() {
c.sizePattern = `load ave: ([\d\.]+)`
c.logFile = "/dev/null"
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
})
}
func TestTwoStageRsyncProvider(t *testing.T) {
@@ -271,15 +564,19 @@ func TestTwoStageRsyncProvider(t *testing.T) {
tmpFile := filepath.Join(tmpDir, "log_file")
c := twoStageRsyncConfig{
name: "tuna-two-stage-rsync",
upstreamURL: "rsync://mirrors.tuna.moe/",
stage1Profile: "debian",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
excludeFile: tmpFile,
name: "tuna-two-stage-rsync",
upstreamURL: "rsync://mirrors.tuna.moe/",
stage1Profile: "debian",
rsyncCmd: scriptFile,
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
excludeFile: tmpFile,
rsyncTimeoutValue: 30,
extraOptions: []string{"--delete-excluded", "--cache"},
username: "hello",
password: "world",
}
provider, err := newTwoStageRsyncProvider(c)
@@ -303,9 +600,10 @@ exit 0
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 2))
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
expectedOutput := fmt.Sprintf(
"syncing to %s\n"+
"%s\n"+
@@ -313,18 +611,18 @@ exit 0
"syncing to %s\n"+
"%s\n"+
"Done\n",
provider.WorkingDir(),
targetDir,
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
"--exclude dists/ --timeout=30 -6 "+
"--exclude-from %s %s %s",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
),
provider.WorkingDir(),
targetDir,
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 --contimeout=120 -6 --exclude-from %s %s %s",
"--delete-excluded --cache --timeout=30 -6 --exclude-from %s %s %s",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
),
)
@@ -338,32 +636,65 @@ exit 0
Convey("Try terminating", func(ctx C) {
scriptContent := `#!/bin/bash
echo $@
sleep 4
sleep 10
exit 0
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 2)
go func() {
err = provider.Run()
err := provider.Run(started)
ctx.So(err, ShouldNotBeNil)
}()
<-started
So(provider.IsRunning(), ShouldBeTrue)
time.Sleep(1 * time.Second)
err = provider.Terminate()
So(err, ShouldBeNil)
expectedOutput := fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
"--timeout=120 --contimeout=120 --exclude dists/ -6 "+
"--exclude dists/ --timeout=30 -6 "+
"--exclude-from %s %s %s\n",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
So(string(loggedContent), ShouldStartWith, expectedOutput)
// fmt.Println(string(loggedContent))
})
})
Convey("If the rsync program fails", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
Convey("in the twoStageRsyncProvider", func() {
c := twoStageRsyncConfig{
name: "tuna-two-stage-rsync",
upstreamURL: "rsync://0.0.0.1/",
stage1Profile: "debian",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
excludeFile: tmpFile,
}
provider, err := newTwoStageRsyncProvider(c)
So(err, ShouldBeNil)
err = provider.Run(make(chan empty, 2))
So(err, ShouldNotBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldContainSubstring, "Error in socket I/O")
})
})
}


@@ -2,24 +2,35 @@ package worker
import (
"errors"
"fmt"
"strings"
"time"
"github.com/tuna/tunasync/internal"
)
type rsyncConfig struct {
name string
rsyncCmd string
upstreamURL, username, password, excludeFile string
extraOptions []string
overriddenOptions []string
rsyncNeverTimeout bool
rsyncTimeoutValue int
rsyncEnv map[string]string
workingDir, logDir, logFile string
useIPv6 bool
useIPv6, useIPv4 bool
interval time.Duration
retry int
timeout time.Duration
}
// An RsyncProvider provides the implementation to rsync-based syncing jobs
type rsyncProvider struct {
baseProvider
rsyncConfig
options []string
options []string
dataSize string
}
func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
@@ -27,11 +38,16 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
if !strings.HasSuffix(c.upstreamURL, "/") {
return nil, errors.New("rsync upstream URL should ends with /")
}
if c.retry == 0 {
c.retry = defaultMaxRetry
}
provider := &rsyncProvider{
baseProvider: baseProvider{
name: c.name,
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
rsyncConfig: c,
}
@@ -39,21 +55,46 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
if c.rsyncCmd == "" {
provider.rsyncCmd = "rsync"
}
if c.rsyncEnv == nil {
provider.rsyncEnv = map[string]string{}
}
if c.username != "" {
provider.rsyncEnv["USER"] = c.username
}
if c.password != "" {
provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
}
options := []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--exclude", ".~tmp~/",
"--delete", "--delete-after", "--delay-updates",
"--safe-links", "--timeout=120", "--contimeout=120",
"--safe-links",
}
if c.overriddenOptions != nil {
options = c.overriddenOptions
}
if !c.rsyncNeverTimeout {
timeo := 120
if c.rsyncTimeoutValue > 0 {
timeo = c.rsyncTimeoutValue
}
options = append(options, fmt.Sprintf("--timeout=%d", timeo))
}
if c.useIPv6 {
options = append(options, "-6")
} else if c.useIPv4 {
options = append(options, "-4")
}
if c.excludeFile != "" {
options = append(options, "--exclude-from", c.excludeFile)
}
if c.extraOptions != nil {
options = append(options, c.extraOptions...)
}
provider.options = options
provider.ctx.Set(_WorkingDirKey, c.workingDir)
@@ -71,28 +112,45 @@ func (p *rsyncProvider) Upstream() string {
return p.upstreamURL
}
func (p *rsyncProvider) Run() error {
func (p *rsyncProvider) DataSize() string {
return p.dataSize
}
func (p *rsyncProvider) Run(started chan empty) error {
p.dataSize = ""
defer p.closeLogFile()
if err := p.Start(); err != nil {
return err
}
return p.Wait()
started <- empty{}
if err := p.Wait(); err != nil {
code, msg := internal.TranslateRsyncErrorCode(err)
if code != 0 {
logger.Debug("Rsync exitcode %d (%s)", code, msg)
if p.logFileFd != nil {
p.logFileFd.WriteString(msg + "\n")
}
}
return err
}
p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
return nil
}
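
Note: translating rsync exit codes amounts to mapping the codes documented in rsync(1) to readable messages before they are appended to the log. A sketch with a few entries (TranslateRsyncErrorCode in internal/ plays this role; its actual table may differ):

package main

import "fmt"

// A few exit codes from rsync(1); code 0 means success.
var rsyncErrors = map[int]string{
    1:  "Syntax or usage error",
    5:  "Error starting client-server protocol",
    10: "Error in socket I/O",
    23: "Partial transfer due to error",
    30: "Timeout in data send/receive",
}

func translate(code int) string {
    if msg, ok := rsyncErrors[code]; ok {
        return msg
    }
    return fmt.Sprintf("rsync exited with code %d", code)
}

func main() {
    fmt.Println(translate(10)) // Error in socket I/O
}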
func (p *rsyncProvider) Start() error {
p.Lock()
defer p.Unlock()
env := map[string]string{}
if p.username != "" {
env["USER"] = p.username
}
if p.password != "" {
env["RSYNC_PASSWORD"] = p.password
if p.IsRunning() {
return errors.New("provider is currently running")
}
command := []string{p.rsyncCmd}
command = append(command, p.options...)
command = append(command, p.upstreamURL, p.WorkingDir())
p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
if err := p.prepareLogFile(); err != nil {
p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
if err := p.prepareLogFile(false); err != nil {
return err
}
@@ -100,5 +158,6 @@ func (p *rsyncProvider) Start() error {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
return nil
}
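
Note: the option list is now assembled in layers: the default set (or rsync_override replacing it wholesale), then --timeout unless rsync_no_timeout is set, then -6/-4, --exclude-from, and finally rsync_options appended. The layering in isolation (struct and field names shortened):

package main

import "fmt"

type rsyncOpts struct {
    override     []string // rsync_override: replaces the default set
    neverTimeout bool     // rsync_no_timeout
    timeoutValue int      // rsync_timeout
    useIPv6      bool
    useIPv4      bool
    excludeFile  string
    extra        []string // rsync_options: appended last
}

func buildOptions(c rsyncOpts) []string {
    options := []string{
        "-aHvh", "--no-o", "--no-g", "--stats",
        "--exclude", ".~tmp~/",
        "--delete", "--delete-after", "--delay-updates",
        "--safe-links",
    }
    if c.override != nil {
        options = c.override
    }
    if !c.neverTimeout {
        timeo := 120 // the default, as before this change
        if c.timeoutValue > 0 {
            timeo = c.timeoutValue
        }
        options = append(options, fmt.Sprintf("--timeout=%d", timeo))
    }
    if c.useIPv6 {
        options = append(options, "-6")
    } else if c.useIPv4 {
        options = append(options, "-4")
    }
    if c.excludeFile != "" {
        options = append(options, "--exclude-from", c.excludeFile)
    }
    return append(options, c.extra...)
}

func main() {
    fmt.Println(buildOptions(rsyncOpts{
        timeoutValue: 30,
        useIPv4:      true,
        extra:        []string{"--delete-excluded"},
    }))
}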


@@ -104,7 +104,7 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
}
func (c *cmdJob) Start() error {
// logger.Debugf("Command start: %v", c.cmd.Args)
logger.Debugf("Command start: %v", c.cmd.Args)
c.finished = make(chan empty, 1)
return c.cmd.Start()
}


@@ -15,6 +15,11 @@ type scheduleQueue struct {
jobs map[string]bool
}
type jobScheduleInfo struct {
jobName string
nextScheduled time.Time
}
func timeLessThan(l, r interface{}) bool {
tl := l.(time.Time)
tr := r.(time.Time)
@@ -28,6 +33,20 @@ func newScheduleQueue() *scheduleQueue {
return queue
}
func (q *scheduleQueue) GetJobs() (jobs []jobScheduleInfo) {
cur := q.list.Iterator()
defer cur.Close()
for cur.Next() {
cj := cur.Value().(*mirrorJob)
jobs = append(jobs, jobScheduleInfo{
cj.Name(),
cur.Key().(time.Time),
})
}
return
}
func (q *scheduleQueue) AddJob(schedTime time.Time, job *mirrorJob) {
q.Lock()
defer q.Unlock()


@@ -5,6 +5,8 @@ import (
"fmt"
"strings"
"time"
"github.com/tuna/tunasync/internal"
)
type twoStageRsyncConfig struct {
@@ -12,9 +14,15 @@ type twoStageRsyncConfig struct {
rsyncCmd string
stage1Profile string
upstreamURL, username, password, excludeFile string
extraOptions []string
rsyncNeverTimeout bool
rsyncTimeoutValue int
rsyncEnv map[string]string
workingDir, logDir, logFile string
useIPv6 bool
interval time.Duration
retry int
timeout time.Duration
}
// An RsyncProvider provides the implementation to rsync-based syncing jobs
@@ -23,6 +31,7 @@ type twoStageRsyncProvider struct {
twoStageRsyncConfig
stage1Options []string
stage2Options []string
dataSize string
}
var rsyncStage1Profiles = map[string]([]string){
@@ -38,27 +47,41 @@ func newTwoStageRsyncProvider(c twoStageRsyncConfig) (*twoStageRsyncProvider, er
if !strings.HasSuffix(c.upstreamURL, "/") {
return nil, errors.New("rsync upstream URL should ends with /")
}
if c.retry == 0 {
c.retry = defaultMaxRetry
}
provider := &twoStageRsyncProvider{
baseProvider: baseProvider{
name: c.name,
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
twoStageRsyncConfig: c,
stage1Options: []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--exclude", ".~tmp~/",
"--safe-links", "--timeout=120", "--contimeout=120",
"--safe-links",
},
stage2Options: []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--exclude", ".~tmp~/",
"--delete", "--delete-after", "--delay-updates",
"--safe-links", "--timeout=120", "--contimeout=120",
"--safe-links",
},
}
if c.rsyncEnv == nil {
provider.rsyncEnv = map[string]string{}
}
if c.username != "" {
provider.rsyncEnv["USER"] = c.username
}
if c.password != "" {
provider.rsyncEnv["RSYNC_PASSWORD"] = c.password
}
if c.rsyncCmd == "" {
provider.rsyncCmd = "rsync"
}
@@ -78,6 +101,10 @@ func (p *twoStageRsyncProvider) Upstream() string {
return p.upstreamURL
}
func (p *twoStageRsyncProvider) DataSize() string {
return p.dataSize
}
func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
var options []string
if stage == 1 {
@@ -92,10 +119,21 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
} else if stage == 2 {
options = append(options, p.stage2Options...)
if p.extraOptions != nil {
options = append(options, p.extraOptions...)
}
} else {
return []string{}, fmt.Errorf("Invalid stage: %d", stage)
}
if !p.rsyncNeverTimeout {
timeo := 120
if p.rsyncTimeoutValue > 0 {
timeo = p.rsyncTimeoutValue
}
options = append(options, fmt.Sprintf("--timeout=%d", timeo))
}
if p.useIPv6 {
options = append(options, "-6")
}
@@ -107,17 +145,15 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
return options, nil
}
func (p *twoStageRsyncProvider) Run() error {
defer p.Wait()
func (p *twoStageRsyncProvider) Run(started chan empty) error {
p.Lock()
defer p.Unlock()
env := map[string]string{}
if p.username != "" {
env["USER"] = p.username
}
if p.password != "" {
env["RSYNC_PASSWORD"] = p.password
if p.IsRunning() {
return errors.New("provider is currently running")
}
p.dataSize = ""
stages := []int{1, 2}
for _, stage := range stages {
command := []string{p.rsyncCmd}
@@ -128,21 +164,33 @@ func (p *twoStageRsyncProvider) Run() error {
command = append(command, options...)
command = append(command, p.upstreamURL, p.WorkingDir())
p.cmd = newCmdJob(p, command, p.WorkingDir(), env)
if err := p.prepareLogFile(); err != nil {
p.cmd = newCmdJob(p, command, p.WorkingDir(), p.rsyncEnv)
if err := p.prepareLogFile(stage > 1); err != nil {
return err
}
defer p.closeLogFile()
if err = p.cmd.Start(); err != nil {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
started <- empty{}
err = p.cmd.Wait()
p.isRunning.Store(false)
p.Unlock()
err = p.Wait()
p.Lock()
if err != nil {
code, msg := internal.TranslateRsyncErrorCode(err)
if code != 0 {
logger.Debugf("Rsync exitcode %d (%s)", code, msg)
if p.logFileFd != nil {
p.logFileFd.WriteString(msg + "\n")
}
}
return err
}
}
p.dataSize = internal.ExtractSizeFromRsyncLog(p.LogFile())
return nil
}
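// Illustrative only (made-up URL and path): with the defaults above and
// upstreamURL "rsync://example.org/pub/", stage 2 runs roughly
//   rsync -aHvh --no-o --no-g --stats --exclude .~tmp~/ \
//     --delete --delete-after --delay-updates --safe-links \
//     --timeout=120 rsync://example.org/pub/ /path/to/working-dir
// with USER/RSYNC_PASSWORD passed via rsyncEnv when credentials are set.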

worker/worker.go

@@ -12,8 +12,6 @@ import (
. "github.com/tuna/tunasync/internal"
)
var tunasyncWorker *Worker
// A Worker is an instance of a tunasync worker
type Worker struct {
L sync.Mutex
@@ -29,10 +27,11 @@ type Worker struct {
httpClient *http.Client
}
// GetTUNASyncWorker returns a singleton worker
func GetTUNASyncWorker(cfg *Config) *Worker {
if tunasyncWorker != nil {
return tunasyncWorker
// NewTUNASyncWorker creates a worker
func NewTUNASyncWorker(cfg *Config) *Worker {
if cfg.Global.Retry == 0 {
cfg.Global.Retry = defaultMaxRetry
}
w := &Worker{
@@ -57,7 +56,6 @@ func GetTUNASyncWorker(cfg *Config) *Worker {
w.initJobs()
w.makeHTTPServer()
tunasyncWorker = w
return w
}
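// Editor's note: with the package-level singleton removed, construction is
// explicit. Usage sketch, mirroring worker_test.go below:
//   w := NewTUNASyncWorker(cfg)
//   go w.Run()
//   // ... later ...
//   w.Halt()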
@@ -219,7 +217,11 @@ func (w *Worker) makeHTTPServer() {
}
switch cmd.Cmd {
case CmdStart:
job.ctrlChan <- jobStart
if cmd.Options["force"] {
job.ctrlChan <- jobForceStart
} else {
job.ctrlChan <- jobStart
}
case CmdRestart:
job.ctrlChan <- jobRestart
case CmdStop:
@@ -306,6 +308,9 @@ func (w *Worker) runSchedule() {
w.L.Unlock()
schedInfo := w.schedule.GetJobs()
w.updateSchedInfo(schedInfo)
tick := time.Tick(5 * time.Second)
for {
select {
@@ -342,6 +347,9 @@ func (w *Worker) runSchedule() {
w.schedule.AddJob(schedTime, job)
}
schedInfo = w.schedule.GetJobs()
w.updateSchedInfo(schedInfo)
case <-tick:
// check schedule every 5 seconds
if job := w.schedule.Pop(); job != nil {
@@ -394,8 +402,17 @@ func (w *Worker) registorWorker() {
for _, root := range w.cfg.Manager.APIBaseList() {
url := fmt.Sprintf("%s/workers", root)
logger.Debugf("register on manager url: %s", url)
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
logger.Errorf("Failed to register worker")
for retry := 10; retry > 0; {
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
logger.Errorf("Failed to register worker")
retry--
if retry > 0 {
time.Sleep(1 * time.Second)
logger.Noticef("Retrying... (%d)", retry)
}
} else {
break
}
}
}
}
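// Editor's sketch, not part of the diff: the retry loop above factored out
// for readability. Up to 10 attempts, one second apart, stopping at the
// first success; only the last error is kept.
func postWithRetry(url string, msg interface{}, client *http.Client) error {
	var err error
	for retry := 10; retry > 0; retry-- {
		if _, err = PostJSON(url, msg, client); err == nil {
			return nil
		}
		if retry > 1 {
			time.Sleep(1 * time.Second)
		}
	}
	return err
}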
@@ -412,6 +429,12 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
ErrorMsg: jobMsg.msg,
}
// Certain providers (rsync, for example) may know the size of the mirror,
// so we report it to the Manager here
if len(job.size) != 0 {
smsg.Size = job.size
}
for _, root := range w.cfg.Manager.APIBaseList() {
url := fmt.Sprintf(
"%s/workers/%s/jobs/%s", root, w.Name(), jobMsg.name,
@@ -423,6 +446,27 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
}
}
func (w *Worker) updateSchedInfo(schedInfo []jobScheduleInfo) {
var s []MirrorSchedule
for _, sched := range schedInfo {
s = append(s, MirrorSchedule{
MirrorName: sched.jobName,
NextSchedule: sched.nextScheduled,
})
}
msg := MirrorSchedules{Schedules: s}
for _, root := range w.cfg.Manager.APIBaseList() {
url := fmt.Sprintf(
"%s/workers/%s/schedules", root, w.Name(),
)
logger.Debugf("reporting on manager url: %s", url)
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
logger.Errorf("Failed to upload schedules: %s", err.Error())
}
}
}
func (w *Worker) fetchJobStatus() []MirrorStatus {
var mirrorList []MirrorStatus
apiBase := w.cfg.Manager.APIBaseList()[0]

worker/worker_test.go (new file, 255 lines)

@@ -0,0 +1,255 @@
package worker
import (
"net/http"
"strconv"
"testing"
"time"
"github.com/gin-gonic/gin"
. "github.com/smartystreets/goconvey/convey"
. "github.com/tuna/tunasync/internal"
)
type workTestFunc func(*Worker)
var managerPort = 5001
var workerPort = 5002
func makeMockManagerServer(recvData chan interface{}) *gin.Engine {
r := gin.Default()
r.GET("/ping", func(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"_infoKey": "pong"})
})
r.POST("/workers", func(c *gin.Context) {
var _worker WorkerStatus
c.BindJSON(&_worker)
_worker.LastOnline = time.Now()
recvData <- _worker
c.JSON(http.StatusOK, _worker)
})
r.POST("/workers/dut/schedules", func(c *gin.Context) {
var _sch MirrorSchedules
c.BindJSON(&_sch)
recvData <- _sch
c.JSON(http.StatusOK, empty{})
})
r.POST("/workers/dut/jobs/:job", func(c *gin.Context) {
var status MirrorStatus
c.BindJSON(&status)
recvData <- status
c.JSON(http.StatusOK, status)
})
r.GET("/workers/dut/jobs", func(c *gin.Context) {
mirrorStatusList := []MirrorStatus{}
c.JSON(http.StatusOK, mirrorStatusList)
})
return r
}
func startWorkerThenStop(cfg *Config, tester workTestFunc) {
exitedChan := make(chan int)
w := NewTUNASyncWorker(cfg)
So(w, ShouldNotBeNil)
go func() {
w.Run()
exitedChan <- 1
}()
tester(w)
w.Halt()
select {
case exited := <-exitedChan:
So(exited, ShouldEqual, 1)
case <-time.After(2 * time.Second):
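// reached only if the worker did not exit within 2 seconds; fail the test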
So(0, ShouldEqual, 1)
}
}
func sendCommandToWorker(workerURL string, httpClient *http.Client, cmd CmdVerb, mirror string) {
workerCmd := WorkerCmd{
Cmd: cmd,
MirrorID: mirror,
}
logger.Debugf("POST to %s with cmd %s", workerURL, cmd)
_, err := PostJSON(workerURL, workerCmd, httpClient)
So(err, ShouldBeNil)
}
func TestWorker(t *testing.T) {
InitLogger(false, true, false)
recvDataChan := make(chan interface{})
_s := makeMockManagerServer(recvDataChan)
httpServer := &http.Server{
Addr: "localhost:" + strconv.Itoa(managerPort),
Handler: _s,
ReadTimeout: 2 * time.Second,
WriteTimeout: 2 * time.Second,
}
go func() {
err := httpServer.ListenAndServe()
So(err, ShouldBeNil)
}()
// Wait for the HTTP server to start
time.Sleep(500 * time.Millisecond)
Convey("Worker should work", t, func(ctx C) {
httpClient, err := CreateHTTPClient("")
So(err, ShouldBeNil)
workerPort++
workerCfg := Config{
Global: globalConfig{
Name: "dut",
LogDir: "/tmp",
MirrorDir: "/tmp",
Concurrent: 2,
Interval: 1,
},
Server: serverConfig{
Hostname: "localhost",
Addr: "127.0.0.1",
Port: workerPort,
},
Manager: managerConfig{
APIBase: "http://localhost:" + strconv.Itoa(managerPort),
},
}
logger.Debugf("worker port %d", workerPort)
Convey("with no job", func(ctx C) {
dummyTester := func(*Worker) {
registered := false
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
registered = true
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(reg.URL, httpClient, CmdStart, "foobar")
} else if sch, ok := data.(MirrorSchedules); ok {
So(len(sch.Schedules), ShouldEqual, 0)
}
case <-time.After(2 * time.Second):
So(registered, ShouldBeTrue)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
Convey("with one job", func(ctx C) {
workerCfg.Mirrors = []mirrorConfig{
mirrorConfig{
Name: "job-ls",
Provider: provCommand,
Command: "ls",
},
}
dummyTester := func(*Worker) {
url := ""
jobRunning := false
lastStatus := SyncStatus(None)
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
url = reg.URL
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(url, httpClient, CmdStart, "job-ls")
} else if sch, ok := data.(MirrorSchedules); ok {
if !jobRunning {
So(len(sch.Schedules), ShouldEqual, 1)
So(sch.Schedules[0].MirrorName, ShouldEqual, "job-ls")
So(sch.Schedules[0].NextSchedule,
ShouldHappenBetween,
time.Now().Add(-2*time.Second),
time.Now().Add(1*time.Minute))
}
} else if status, ok := data.(MirrorStatus); ok {
logger.Noticef("Job %s status %s", status.Name, status.Status.String())
jobRunning = status.Status == PreSyncing || status.Status == Syncing
So(status.Status, ShouldNotEqual, Failed)
lastStatus = status.Status
}
case <-time.After(2 * time.Second):
So(url, ShouldNotEqual, "")
So(jobRunning, ShouldBeFalse)
So(lastStatus, ShouldEqual, Success)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
Convey("with several jobs", func(ctx C) {
workerCfg.Mirrors = []mirrorConfig{
mirrorConfig{
Name: "job-ls-1",
Provider: provCommand,
Command: "ls",
},
mirrorConfig{
Name: "job-fail",
Provider: provCommand,
Command: "non-existent-command-xxxx",
},
mirrorConfig{
Name: "job-ls-2",
Provider: provCommand,
Command: "ls",
},
}
dummyTester := func(*Worker) {
url := ""
lastStatus := make(map[string]SyncStatus)
nextSch := make(map[string]time.Time)
for {
select {
case data := <-recvDataChan:
if reg, ok := data.(WorkerStatus); ok {
So(reg.ID, ShouldEqual, "dut")
url = reg.URL
time.Sleep(500 * time.Millisecond)
sendCommandToWorker(url, httpClient, CmdStart, "job-fail")
sendCommandToWorker(url, httpClient, CmdStart, "job-ls-1")
sendCommandToWorker(url, httpClient, CmdStart, "job-ls-2")
} else if sch, ok := data.(MirrorSchedules); ok {
//So(len(sch.Schedules), ShouldEqual, 3)
for _, item := range sch.Schedules {
nextSch[item.MirrorName] = item.NextSchedule
}
} else if status, ok := data.(MirrorStatus); ok {
logger.Noticef("Job %s status %s", status.Name, status.Status.String())
jobRunning := status.Status == PreSyncing || status.Status == Syncing
if !jobRunning {
if status.Name == "job-fail" {
So(status.Status, ShouldEqual, Failed)
} else {
So(status.Status, ShouldNotEqual, Failed)
}
}
lastStatus[status.Name] = status.Status
}
case <-time.After(2 * time.Second):
So(len(lastStatus), ShouldEqual, 3)
So(len(nextSch), ShouldEqual, 3)
return
}
}
}
startWorkerThenStop(&workerCfg, dummyTester)
})
})
}

worker/zfs_hook.go

@@ -3,6 +3,7 @@ package worker
import (
"fmt"
"os"
"os/user"
"strings"
"github.com/codeskyblue/go-sh"
@@ -10,36 +11,44 @@ import (
type zfsHook struct {
emptyHook
provider mirrorProvider
zpool string
zpool string
}
func newZfsHook(provider mirrorProvider, zpool string) *zfsHook {
return &zfsHook{
provider: provider,
zpool: zpool,
emptyHook: emptyHook{
provider: provider,
},
zpool: zpool,
}
}
// print instructions for creating the ZFS dataset of a new mirror
func (z *zfsHook) printHelpMessage() {
zfsDataset := fmt.Sprintf("%s/%s", z.zpool, z.provider.Name())
zfsDataset = strings.ToLower(zfsDataset)
workingDir := z.provider.WorkingDir()
logger.Infof("You may create the ZFS dataset with:")
logger.Infof(" zfs create '%s'", zfsDataset)
logger.Infof(" zfs set mountpoint='%s' '%s'", workingDir, zfsDataset)
usr, err := user.Current()
if err != nil || usr.Uid == "0" {
return
}
logger.Infof(" chown %s '%s'", usr.Uid, workingDir)
}
// check if working directory is a zfs dataset
func (z *zfsHook) preJob() error {
workingDir := z.provider.WorkingDir()
if _, err := os.Stat(workingDir); os.IsNotExist(err) {
// sudo zfs create $zfsDataset
// sudo zfs set mountpoint=${absPath} ${zfsDataset}
zfsDataset := fmt.Sprintf("%s/%s", z.zpool, z.provider.Name())
// Unknown issue of ZFS:
// dataset name should not contain upper case letters
zfsDataset = strings.ToLower(zfsDataset)
logger.Infof("Creating ZFS dataset %s", zfsDataset)
if err := sh.Command("sudo", "zfs", "create", zfsDataset).Run(); err != nil {
return err
}
logger.Infof("Mount ZFS dataset %s to %s", zfsDataset, workingDir)
if err := sh.Command("sudo", "zfs", "set", "mountpoint="+workingDir, zfsDataset).Run(); err != nil {
return err
}
logger.Errorf("Directory %s doesn't exist", workingDir)
z.printHelpMessage()
return err
}
if err := sh.Command("mountpoint", "-q", workingDir).Run(); err != nil {
logger.Errorf("%s is not a mount point", workingDir)
z.printHelpMessage()
return err
}
return nil
}
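// Editor's sketch, not part of the diff: the dataset-name derivation shared
// by printHelpMessage and preJob. Names are lower-cased because dataset
// creation has been observed to fail with upper-case letters.
func zfsDatasetName(zpool, mirrorName string) string {
	return strings.ToLower(fmt.Sprintf("%s/%s", zpool, mirrorName))
}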

worker/zfs_hook_test.go (new file, 48 lines)

@@ -0,0 +1,48 @@
package worker
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func TestZFSHook(t *testing.T) {
Convey("ZFS Hook should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
So(err, ShouldBeNil)
tmpFile := filepath.Join(tmpDir, "log_file")
c := cmdConfig{
name: "tuna_zfs_hook_test",
upstreamURL: "http://mirrors.tuna.moe/",
command: "ls",
workingDir: tmpDir,
logDir: tmpDir,
logFile: tmpFile,
interval: 1 * time.Second,
}
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
Convey("When working directory doesn't exist", func(ctx C) {
errRm := os.RemoveAll(tmpDir)
So(errRm, ShouldBeNil)
hook := newZfsHook(provider, "test_pool")
err := hook.preJob()
So(err, ShouldNotBeNil)
})
Convey("When working directory is not a mount point", func(ctx C) {
defer os.RemoveAll(tmpDir)
hook := newZfsHook(provider, "test_pool")
err := hook.preJob()
So(err, ShouldNotBeNil)
})
})
}
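To run these worker tests locally, an invocation along these lines should
work (illustrative; note that the ZFS test only exercises the failure paths,
so no ZFS pool is needed):

go test -v ./worker -run 'TestWorker|TestZFSHook'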