Mirrored from https://github.com/tuna/tunasync.git. Synced 2025-12-06 14:36:47 +00:00

93 commits

Author SHA1 Message Date
Miao Wang
c07aaffe65 Merge branch 'cgroupv2', and bump version to v0.8.0
Signed-off-by: Miao Wang <shankerwangmiao@gmail.com>
2021-09-02 22:22:26 +08:00
Miao Wang
1804a31b6a Bugfix: change the order of options in 2f9e96a
Signed-off-by: Miao Wang <shankerwangmiao@gmail.com>
2021-09-02 22:19:05 +08:00
Miao Wang
98fcb6249b doc: add docs for cgroup 2021-09-02 18:04:28 +08:00
Miao Wang
531b09c21a ci: minor fixes 2021-08-02 17:50:30 +08:00
Miao Wang
f8b6ea9c4e cgroupv2: add test for reexec 2021-07-29 14:37:10 +08:00
Miao Wang
84fcc8e76b CI: add tests in cgroupv2 2021-07-27 22:32:12 +08:00
Miao Wang
222d98d6ae CI: beautify goconvey output 2021-07-27 21:28:14 +08:00
dependabot[bot]
3e225ae940 Bump github.com/gin-gonic/gin from 1.5.0 to 1.7.0 (#157)
Bumps [github.com/gin-gonic/gin](https://github.com/gin-gonic/gin) from 1.5.0 to 1.7.0.
- [Release notes](https://github.com/gin-gonic/gin/releases)
- [Changelog](https://github.com/gin-gonic/gin/blob/master/CHANGELOG.md)
- [Commits](https://github.com/gin-gonic/gin/compare/v1.5.0...v1.7.0)

---
updated-dependencies:
- dependency-name: github.com/gin-gonic/gin
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2021-07-27 13:12:09 +08:00
Miao Wang
8080ed6648 CI: merge cov files of multiple runs 2021-07-27 12:11:45 +08:00
Miao Wang
1bc0285905 cgroupv2: bypass RequireDevices check 2021-07-27 02:16:51 +08:00
Miao Wang
c26e9fb64a cgroupv2: add tests on cgv1 for implicit cgroup 2021-07-27 00:36:40 +08:00
Miao Wang
e719dc443b cgroupv2: improve unit test 2021-07-26 23:10:16 +08:00
Miao Wang
0f05c69c36 [WIP] cgroupv2: support start with cgroupv2 2021-07-25 16:24:13 +08:00
Miao Wang
28d160a7f0 [WIP] Cgroupv2: fix test 2021-07-25 14:54:30 +08:00
Miao Wang
2c4d2d6ae0 [WIP] cgroupv2: add cgroup add and del 2021-07-25 04:15:28 +08:00
Miao Wang
02a144744f [WIP] cgroupv2: add init cgroup 2021-07-25 02:58:12 +08:00
Miao Wang
3ba70c6c71 cgroupv2: create cgroup manager on startup (WIP) 2021-07-23 18:46:39 +08:00
Miao Wang
2949b9c58c cgroupv2: change newCgroupHook interface 2021-07-23 15:29:28 +08:00
Miao Wang
27c33f86b3 cgroup: add reexec hook 2021-07-22 19:09:00 +08:00
Miao Wang
f0ccdc47dc docker: support memory limit 2021-07-12 22:26:13 +08:00
Miao Wang
3ce5c2ede3 change type of memlimit from string to int64 2021-07-12 21:44:43 +08:00
Miao Wang
80f0161eb0 CI: change package name to cgroup-tools 2021-06-14 12:23:40 +08:00
Qi Xiao
38a94758d2 Update URL of Elvish 2021-01-25 22:02:11 +00:00
Qi Xiao
ecc54bf6b7 Update rsync upstream of Elvish 2021-01-25 22:01:42 +00:00
♥️
60beeb6ccd add useIPv4 option to two stage rsync (#150) 2021-01-07 14:35:21 +08:00
z4yx
a54c969081 Bump version to 0.7.1 2020-10-23 21:27:53 +08:00
Yuxiang Zhang
5f5dba7154 Merge pull request #142 from tuna/more-db-backends
More db backends: badger and leveldb
2020-10-23 21:24:55 +08:00
Jiajie Chen
32c4d38449 Fix wrong usage of val in leveldb backend 2020-10-22 20:12:01 +08:00
Jiajie Chen
7dfe6a632c Add docs for badger and leveldb backends 2020-10-17 14:18:00 +08:00
Jiajie Chen
94154742a7 Add leveldb to db backend and fix error wrapping 2020-10-17 14:16:16 +08:00
Jiajie Chen
932dc69ae8 Add badger to db backend 2020-10-17 12:07:55 +08:00
z4yx
1f963e21aa Bump version to 0.7.0 2020-10-15 21:27:55 +08:00
Yuxiang Zhang
7629b09917 Merge pull request #141 from tuna/add-redis-db-backend
Add redis db backend
2020-10-15 21:25:51 +08:00
jiegec
4e426c891e Fix error logging in server.go 2020-10-15 07:33:02 +08:00
jiegec
992044d402 Small code cleanup 2020-10-13 23:11:07 +08:00
jiegec
3c7ee8f9fd Add mock test for redis backend 2020-10-13 23:05:39 +08:00
jiegec
d341c0c99d Rearrange and fix db tests 2020-10-13 23:01:46 +08:00
jiegec
90b4e5debb Fix DeleteWorker behavior to match tests 2020-10-13 22:49:49 +08:00
jiegec
7dd61ae8ca Add kv abstraction layer for bolt and redis 2020-10-13 22:41:58 +08:00
jiegec
5880ed92dc Use ParseURL from redis library 2020-10-13 19:35:32 +08:00
jiegec
fd4c07fdb5 Add redis backend to docs 2020-10-13 14:54:25 +08:00
jiegec
a137f0676a Add redis backend for db 2020-10-13 14:50:19 +08:00
jiegec
a2887da2dd Move bolt db adapter to separate file 2020-10-13 14:27:41 +08:00
Chen
136e01f1cd Merge pull request #140 from tuna/use-string-for-cmd-verb
Use string for command verb, so it is easier to read
2020-10-10 20:51:19 +08:00
Jiajie Chen
cd73602988 Use string for command verb in json, so it is easier to read 2020-10-10 20:43:13 +08:00
Yuxiang Zhang
2a8fa5636e Merge pull request #139 from tuna/support-non-linux-platforms
Mock btrfs hook on non-Linux platforms like darwin
2020-09-17 21:29:51 +08:00
Chen
94b9b20626 Improve docs (#138) 2020-09-17 12:43:01 +08:00
Jiajie Chen
5a9c6b9020 Mock btrfs hook on non-Linux platforms like darwin 2020-09-17 12:27:46 +08:00
Yuxiang Zhang
75ee481cfa Merge pull request #137 from tuna/rsync-risk-tmp
add --filter "risk .~tmp~/" to rsync options
2020-09-16 08:10:36 +08:00
Miao Wang
2f9e96a75a add --filter "risk .~tmp~/" to rsync options
This option deletes the `.~tmp~/` folder upon a successful sync,
so `--delete-excluded` is no longer needed.
2020-09-15 20:01:24 +08:00
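For context, rsync's `risk` filter rule marks matching paths as deletable even though they are excluded from the transfer; a hedged sketch of an equivalent manual invocation (upstream and paths illustrative):

```shell
# .~tmp~/ is excluded from the transfer, but the risk rule still
# lets --delete remove it on the destination without --delete-excluded
rsync -av --delete \
  --exclude '.~tmp~/' \
  --filter 'risk .~tmp~/' \
  rsync://upstream.example.org/module/ /data/mirror/module/
```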
Yuxiang Zhang
aa36b96828 Merge pull request #136 from tuna/show-url-in-worker-list
Display worker url in listWorkers
2020-09-15 19:34:57 +08:00
Miao Wang
e9ce7fc87a CI: change gid of files in the tar archive to 0 2020-09-14 20:42:12 +08:00
jiegec
3fd71d777b Display worker url in listWorkers 2020-09-14 14:50:12 +08:00
Chen
984f8a1eb5 Update two stage rsync profiles (#135)
* Update two stage rsync profiles

* Fix tests of two stage rsync provider
2020-09-14 14:48:20 +08:00
Miao Wang
a4d94cae07 bump to version 0.6.7 2020-09-11 18:21:15 +08:00
Miao Wang
8ebace4d9a Add support for multiarch builds 2020-09-11 17:59:33 +08:00
Yuxiang Zhang
b578237df8 Merge pull request #134 from tuna/worker-last-online-register
Worker last online and last register
2020-09-10 23:08:57 +08:00
Jiajie Chen
9f7f18c2c4 Fix missing method in mock test 2020-09-10 21:58:31 +08:00
Jiajie Chen
fd274cc976 Refresh worker LastOnline when worker updates 2020-09-10 21:51:33 +08:00
Jiajie Chen
b4b81ef7e9 Fix typo: registor -> register 2020-09-10 21:32:22 +08:00
Jiajie Chen
c8600d094e Add LastRegister to WorkerStatus 2020-09-10 21:31:31 +08:00
z4yx
2ba3a27fa3 ignore the SIGTERM failure 2020-09-06 19:23:26 +08:00
Yuxiang Zhang
b34238c097 Merge pull request #130 from tuna/add-bolt-open-timeout
Add 5 seconds timeout for bolt
2020-08-05 12:41:29 +08:00
Jiajie Chen
16e458f354 Add 5 seconds timeout for bolt 2020-08-03 14:46:45 +08:00
Yuxiang Zhang
16b4df1ec2 Merge pull request #127 from hxsf/patch-1
fix example with docker_image
2020-06-30 09:47:28 +08:00
呼啸随风
e3c8cded6c fix example with docker_image
If `docker.enable` is not `true`, the worker ignores the docker provider's config and just execs the command,
so we need to document it.
2020-06-30 09:45:57 +08:00
Yuxiang Zhang
3809df6cfb Merge pull request #126 from lrh3321/master
Add `--format` and `--status` for tunasynctl
2020-06-22 12:58:15 +08:00
zack.liu
600874ae54 Add --format and --status for tunasynctl 2020-06-22 11:25:18 +08:00
z4yx
2afe1f2e06 bump version to 0.6.6 2020-06-17 22:12:11 +08:00
z4yx
1b099520b2 [manager] protect DB with RW lock 2020-06-17 22:10:39 +08:00
z4yx
85b2105a2b [worker] retry registration 2020-06-17 21:34:55 +08:00
zyx
45e5d900fb bump version to 0.6.5 2020-06-08 22:30:28 +08:00
zyx
7b0cd490b7 fix misuse of a variable 2020-06-08 22:23:12 +08:00
zyx
9178966aed bump version to 0.6.4 2020-06-04 09:44:17 +08:00
zyx
b5d2a0ad89 bug fix: jobs not being scheduled after timeout 2020-06-04 09:37:20 +08:00
zyx
d8963c9946 test rsync inside a Docker container 2020-06-03 21:51:04 +08:00
zyx
198afa72cd bug fix: rsync can access the exclude file in Docker (close #59) 2020-06-03 21:50:38 +08:00
zyx
85ce9c1270 wait for docker container removal 2020-06-03 19:47:14 +08:00
zyx
a8a35fc259 Merge branch 'master' of github.com:tuna/tunasync 2020-06-03 13:28:58 +08:00
zyx
c00eb12a75 Two new options for rsync provider
- rsync_no_timeout=true/false # disable --timeout option
- rsync_timeout=n # set --timeout=n
related to issue #121
2020-06-03 13:26:49 +08:00
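A hedged sketch of how these two options would appear in a worker mirror entry (values illustrative):

```toml
[[mirrors]]
name = "example"
provider = "rsync"
upstream = "rsync://example.org/pub/"
rsync_timeout = 120        # adds --timeout=120 to the rsync invocation
# rsync_no_timeout = true  # alternatively, drop the --timeout option entirely
```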
Yuxiang Zhang
95ae9c16a9 Update workers.conf 2020-06-01 16:59:44 +08:00
zyx
0392ef28c7 bump version to 0.6.3 2020-05-25 19:21:27 +08:00
zyx
b2a22a9bbc update editor config 2020-05-25 19:16:53 +08:00
zyx
31862210ba implement the timeout 2020-05-25 19:15:05 +08:00
zyx
e47ba2097e add a timeout field to providers 2020-05-25 18:24:05 +08:00
zyx
e8c7ff3d7f config items of timeout 2020-05-25 18:08:31 +08:00
Yuxiang Zhang
7e7b469f1e Update workers.conf 2020-05-23 15:28:32 +08:00
Yuxiang Zhang
eac66c7554 add config examples of the worker (#118) 2020-05-23 15:23:15 +08:00
z4yx
38b0156fae [bug fix] provider is not terminated if a premature stop command is received 2020-05-09 18:42:54 +08:00
z4yx
c8e7d29f34 bump version to 0.6.2 2020-04-08 20:12:41 +08:00
Yuxiang Zhang
d40638d738 Merge pull request #116 from BITNP/laststarted
Add MirrorStatus.LastStarted property
2020-04-06 23:01:58 +08:00
Phy
471d865042 Add LastStarted test case 2020-04-05 01:07:46 -04:00
Phy
c1641b6714 Add MirrorStatus.LastStarted property
LastStarted is updated when:
- status.Status is in PreSyncing, and
- curStatus.Status is not in PreSyncing
2020-04-05 00:12:10 -04:00
48 files changed, 3372 insertions(+), 657 deletions(-)


@@ -21,16 +21,12 @@ jobs:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Get dependencies
run: |
go get -v -t -d ./cmd/tunasync
go get -v -t -d ./cmd/tunasynctl
- name: Build
run: |
make tunasync
make tunasynctl
tar -jcf build/tunasync-linux-bin.tar.bz2 -C build tunasync tunasynctl
for i in linux-amd64 linux-arm64; do
make ARCH=$i all
tar -cz --numeric-owner --owner root --group root -f tunasync-$i-bin.tar.gz -C build-$i tunasync tunasynctl
done
- name: Create Release
id: create_release
@@ -42,13 +38,9 @@ jobs:
release_name: Release ${{ github.ref }}
draft: false
prerelease: false
- name: Upload Release Asset
id: upload-release-asset
uses: actions/upload-release-asset@v1
- name: Upload Release Assets
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
asset_path: ./build/tunasync-linux-bin.tar.bz2
asset_name: tunasync-linux-bin.tar.bz2
asset_content_type: application/x-bzip2
TAG_NAME: ${{ github.ref }}
run: |
hub release edit $(find . -type f -name "tunasync-*.tar.gz" -printf "-a %p ") -m "" "${TAG_NAME##*/}"


@@ -9,10 +9,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.13
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.13
go-version: 1.16
id: go
- name: Check out code into the Go module directory
@@ -32,38 +32,205 @@ jobs:
uses: actions/upload-artifact@v1
with:
name: tunasync-bin
path: build/
path: build-linux-amd64/
test:
name: Test
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
steps:
- name: Setup test dependencies
run: |
sudo apt-get update
sudo apt-get install -y cgroup-bin
sudo apt-get install -y cgroup-tools
docker pull alpine:3.8
lssubsys -am
sudo cgcreate -a $USER -t $USER -g cpu:tunasync
sudo cgcreate -a $USER -t $USER -g memory:tunasync
- name: Set up Go 1.13
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.13
go-version: 1.16
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Run Unit tests.
run: make test
run: |
go install github.com/wadey/gocovmerge@latest
TERM=xterm-256color make test
- name: Run Additional Unit tests.
run: |
make build-test-worker
sudo cgexec -g "*:/" bash -c "echo 0 > /sys/fs/cgroup/systemd/tasks; exec sudo -u $USER env USECURCGROUP=1 TERM=xterm-256color cgexec -g cpu,memory:tunasync ./worker.test -test.v=true -test.coverprofile profile2.cov -test.run TestCgroup"
touch /tmp/dummy_exec
chmod +x /tmp/dummy_exec
run_test_reexec (){
case="$1"
shift
argv0="$1"
shift
(TESTREEXEC="$case" TERM=xterm-256color exec -a "$argv0" ./worker.test -test.v=true -test.coverprofile "profile5_$case.cov" -test.run TestReexec -- "$@")
}
run_test_reexec 1 tunasync-exec __dummy__
run_test_reexec 2 tunasync-exec /tmp/dummy_exec
run_test_reexec 3 tunasync-exec /tmp/dummy_exec 3< <(echo -n "abrt")
run_test_reexec 4 tunasync-exec /tmp/dummy_exec 3< <(echo -n "cont")
run_test_reexec 5 tunasync-exec2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
with:
driver-opts: network=host
- name: Cache Docker layers
uses: actions/cache@v2
if: github.event_name == 'push'
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Cache Docker layers
uses: actions/cache@v2
if: github.event_name == 'pull_request'
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-pr-${{ github.event.pull_request.head.user.login }}-buildx-
${{ runner.os }}-buildx-
- name: Cache Docker layers
if: github.event_name != 'push' && github.event_name != 'pull_request'
run: |
echo "I do not know how to setup cache"
exit -1
- name: Prepare cache directory
run: |
mkdir -p /tmp/.buildx-cache
- name: Build Docker image for uml rootfs
uses: docker/build-push-action@v2
with:
context: .umlrootfs
file: .umlrootfs/Dockerfile
push: true
tags: localhost:5000/umlrootfs
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Fetch and install uml package
run: |
sudo apt-get update
sudo apt-get install -y debian-archive-keyring
sudo ln -sf /usr/share/keyrings/debian-archive-keyring.gpg /etc/apt/trusted.gpg.d/
echo "deb http://deb.debian.org/debian buster main" | sudo tee /etc/apt/sources.list.d/buster.list
sudo apt-get update
apt-get download user-mode-linux/buster
sudo rm /etc/apt/sources.list.d/buster.list
sudo apt-get update
sudo mv user-mode-linux_*.deb /tmp/uml.deb
sudo apt-get install --no-install-recommends -y /tmp/uml.deb
sudo rm /tmp/uml.deb
sudo apt-get install --no-install-recommends -y rsh-redone-client
- name: Prepare uml environment
run: |
docker container create --name umlrootfs localhost:5000/umlrootfs
sudo mkdir -p umlrootfs
docker container export umlrootfs | sudo tar -xv -C umlrootfs
docker container rm umlrootfs
sudo cp -a --target-directory=umlrootfs/lib/ /usr/lib/uml/modules
/bin/echo -e "127.0.0.1 localhost\n254.255.255.1 host" | sudo tee umlrootfs/etc/hosts
sudo ip tuntap add dev umltap mode tap
sudo ip addr add 254.255.255.1/24 dev umltap
sudo ip link set umltap up
- name: Start Uml
run: |
start_uml () {
sudo bash -c 'linux root=/dev/root rootflags=/ rw rootfstype=hostfs mem=2G eth0=tuntap,umltap hostfs="$PWD/umlrootfs" con1=pts systemd.unified_cgroup_hierarchy=1 & pid=$!; echo "UMLINUX_PID=$pid" >> '"$GITHUB_ENV"
}
( start_uml )
started=0
for i in $(seq 1 60); do
if ping -c 1 -w 1 254.255.255.2; then
started=1
break
fi
done
if [ "$started" != "1" ]; then
echo "Failed to wait Umlinux online"
exit 1
fi
- name: Prepare Uml Environment
run: |
CUSER="$(id --user --name)"
CUID="$(id --user)"
CGID="$(id --group)"
sudo chroot umlrootfs bash --noprofile --norc -eo pipefail << EOF
groupadd --gid "${CGID?}" "${CUSER?}"
useradd --create-home --home-dir "/home/${CUSER}" --gid "${CGID?}" \
--uid "${CUID?}" --shell "\$(which bash)" "${CUSER?}"
EOF
ln ./worker.test "umlrootfs/home/${CUSER}/worker.test"
- name: Run Tests in Cgroupv2
run: |
CUSER="$(id --user --name)"
sudo rsh 254.255.255.2 bash --noprofile --norc -eo pipefail << EOF
cd "/home/${CUSER}"
mkdir -p /sys/fs/cgroup/tunasync
TERM=xterm-256color ./worker.test -test.v=true -test.coverprofile \
profile3.cov -test.run TestCgroup
rmdir /sys/fs/cgroup/tunasync
systemd-run --service-type=oneshot --uid="${CUSER}" --pipe --wait \
--property=Delegate=yes --setenv=USECURCGROUP=1 \
--setenv=TERM=xterm-256color --same-dir \
"\${PWD}/worker.test" -test.v=true -test.coverprofile \
profile4.cov -test.run TestCgroup
EOF
- name: Stop Uml
run: |
sudo rsh 254.255.255.2 systemctl poweroff
sleep 10
if [ -e "/proc/$UMLINUX_PID" ]; then
sleep 10
if [ -e "/proc/$UMLINUX_PID" ]; then
sudo kill -TERM "$UMLINUX_PID" || true
sleep 1
fi
fi
if [ -e "/proc/$UMLINUX_PID" ]; then
sleep 10
if [ -e "/proc/$UMLINUX_PID" ]; then
sudo kill -KILL "$UMLINUX_PID" || true
sleep 1
fi
fi
- name: Combine coverage files
run : |
CUSER="$(id --user --name)"
"${HOME}/go/bin/gocovmerge" profile.cov profile2.cov \
"umlrootfs/home/${CUSER}/profile3.cov" \
"umlrootfs/home/${CUSER}/profile4.cov" \
profile5_*.cov > profile-all.cov
- name: Convert coverage to lcov
uses: jandelgado/gcov2lcov-action@v1.0.0
with:
infile: profile.cov
infile: profile-all.cov
outfile: coverage.lcov
- name: Coveralls

.gitignore (vendored): 1 line changed

@@ -1 +1,2 @@
/build
/build-*

.umlrootfs/Dockerfile (new file): 13 lines

@@ -0,0 +1,13 @@
FROM debian:buster
RUN apt-get update && apt-get install -y systemd rsh-redone-server ifupdown sudo kmod
RUN echo "host" > /root/.rhosts && \
chmod 600 /root/.rhosts && \
/bin/echo -e "auto eth0\niface eth0 inet static\naddress 254.255.255.2/24" > /etc/network/interfaces.d/eth0 && \
sed -i '/pam_securetty/d' /etc/pam.d/rlogin && \
cp /usr/share/systemd/tmp.mount /etc/systemd/system && \
systemctl enable tmp.mount
RUN echo "deb http://deb.debian.org/debian experimental main" >> /etc/apt/sources.list && \
apt-get update && \
apt-get install -y make && \
apt-get install -y -t experimental cgroup-tools

.vscode/settings.json (vendored, new file): 13 lines

@@ -0,0 +1,13 @@
{
"cSpell.words": [
"Btrfs",
"Debugf",
"Infof",
"Noticef",
"Warningf",
"cgroup",
"mergo",
"tmpl",
"zpool"
]
}


@@ -1,19 +1,25 @@
LDFLAGS="-X main.buildstamp=`date -u '+%s'` -X main.githash=`git rev-parse HEAD`"
ARCH ?= linux-amd64
ARCH_LIST = $(subst -, ,$(ARCH))
GOOS = $(word 1, $(ARCH_LIST))
GOARCH = $(word 2, $(ARCH_LIST))
BUILDBIN = tunasync tunasynctl
all: get tunasync tunasynctl
all: $(BUILDBIN)
get:
go get ./cmd/tunasync
go get ./cmd/tunasynctl
build-$(ARCH):
mkdir -p $@
build:
mkdir -p build
$(BUILDBIN): % : build-$(ARCH) build-$(ARCH)/%
tunasync: build
go build -o build/tunasync -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasync
tunasynctl: build
go build -o build/tunasynctl -ldflags ${LDFLAGS} github.com/tuna/tunasync/cmd/tunasynctl
$(BUILDBIN:%=build-$(ARCH)/%) : build-$(ARCH)/% : cmd/%
GOOS=$(GOOS) GOARCH=$(GOARCH) go get ./$<
GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ -ldflags ${LDFLAGS} github.com/tuna/tunasync/$<
test:
go test -v -covermode=count -coverprofile=profile.cov ./...
build-test-worker:
go test -c -covermode=count ./worker
.PHONY: all test $(BUILDBIN) build-test-worker
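A hedged usage sketch of the multiarch targets introduced by this Makefile (output directories per the diff above):

```shell
# default ARCH is linux-amd64; binaries land in build-linux-amd64/
make all
# cross-build for ARM64; binaries land in build-linux-arm64/
make ARCH=linux-arm64 all
```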


@@ -1,5 +1,4 @@
tunasync
========
# tunasync
![Build Status](https://github.com/tuna/tunasync/workflows/tunasync/badge.svg)
[![Coverage Status](https://coveralls.io/repos/github/tuna/tunasync/badge.svg?branch=master)](https://coveralls.io/github/tuna/tunasync?branch=master)
@@ -12,11 +11,11 @@ tunasync
## Download
Pre-built binary for Linux x86_64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
Pre-built binary for Linux x86_64 and ARM64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
## Design
```
```text
# Architecture
- Manager: Central instance for status and job management
@@ -50,13 +49,12 @@ PreSyncing Syncing Succe
+-----------------+
```
## Building
Go version: 1.13
```
make all
```shell
> make all
```
Binaries in the `build/`.
Binaries in the `build-linux-amd64/`.


@@ -12,6 +12,7 @@ import (
"github.com/pkg/profile"
"gopkg.in/op/go-logging.v1"
"github.com/urfave/cli"
"github.com/moby/moby/pkg/reexec"
tunasync "github.com/tuna/tunasync/internal"
"github.com/tuna/tunasync/manager"
@@ -109,6 +110,10 @@ func startWorker(c *cli.Context) error {
func main() {
if reexec.Init() {
return
}
cli.VersionPrinter = func(c *cli.Context) {
var builddate string
if buildstamp == "" {


@@ -8,6 +8,7 @@ import (
"os"
"strconv"
"strings"
"text/template"
"time"
"github.com/BurntSushi/toml"
@@ -160,8 +161,31 @@ func listJobs(c *cli.Context) error {
"of all jobs from manager server: %s", err.Error()),
1)
}
if statusStr := c.String("status"); statusStr != "" {
filteredJobs := make([]tunasync.WebMirrorStatus, 0, len(jobs))
var statuses []tunasync.SyncStatus
for _, s := range strings.Split(statusStr, ",") {
var status tunasync.SyncStatus
err = status.UnmarshalJSON([]byte("\"" + strings.TrimSpace(s) + "\""))
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error parsing status: %s", err.Error()),
1)
}
statuses = append(statuses, status)
}
for _, job := range jobs {
for _, s := range statuses {
if job.Status == s {
filteredJobs = append(filteredJobs, job)
break
}
}
}
genericJobs = filteredJobs
} else {
genericJobs = jobs
}
} else {
var jobs []tunasync.MirrorStatus
args := c.Args()
@@ -196,6 +220,37 @@ func listJobs(c *cli.Context) error {
genericJobs = jobs
}
if format := c.String("format"); format != "" {
tpl := template.New("")
_, err := tpl.Parse(format)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error parsing format template: %s", err.Error()),
1)
}
switch jobs := genericJobs.(type) {
case []tunasync.WebMirrorStatus:
for _, job := range jobs {
err = tpl.Execute(os.Stdout, job)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error printing out information: %s", err.Error()),
1)
}
fmt.Println()
}
case []tunasync.MirrorStatus:
for _, job := range jobs {
err = tpl.Execute(os.Stdout, job)
if err != nil {
return cli.NewExitError(
fmt.Sprintf("Error printing out information: %s", err.Error()),
1)
}
fmt.Println()
}
}
} else {
b, err := json.MarshalIndent(genericJobs, "", " ")
if err != nil {
return cli.NewExitError(
@@ -203,6 +258,8 @@ func listJobs(c *cli.Context) error {
1)
}
fmt.Println(string(b))
}
return nil
}
@@ -506,6 +563,14 @@ func main() {
Name: "all, a",
Usage: "List all jobs of all workers",
},
cli.StringFlag{
Name: "status, s",
Usage: "Filter output based on status provided",
},
cli.StringFlag{
Name: "format, f",
Usage: "Pretty-print containers using a Go template",
},
}...),
Action: initializeWrapper(listJobs),
},
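A hedged usage sketch of the two flags added above (the status names and the `Name`/`Status` template fields are assumptions based on the code shown, not documented guarantees):

```shell
# show only jobs in the given states (comma-separated)
tunasynctl list --all --status failed,syncing
# custom output via a Go template
tunasynctl list --all --format '{{.Name}}: {{.Status}}'
```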

docs/cgroup.md (new file): 141 lines

@@ -0,0 +1,141 @@
# About Tunasync and cgroup
Optionally, tunasync can be integrated with cgroup to better control and track the processes started by mirror jobs. Limiting the memory usage of a mirror job also requires cgroup support.
## How are cgroups utilized in tunasync?
If cgroup support is enabled globally, all mirror jobs, except those running in docker containers, run in separate cgroups. If `mem_limit` is specified, it is applied to the cgroup. For jobs running in docker containers, `mem_limit` is applied via the `docker run` command.
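As an illustration, a hedged sketch of a mirror entry with a memory limit (the `memory_limit` key is taken from the example workers.conf later in this changeset; values illustrative):

``` toml
[[mirrors]]
name = "alpine"
provider = "rsync"
upstream = "rsync://rsync.alpinelinux.org/alpine/"
memory_limit = "256M"  # enforced via cgroup; for docker jobs, applied via `docker run`
```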
## Tl;dr: What's the recommended configuration?
### If you are using v1 (legacy, hybrid) cgroup hierarchy:
`tunasync-worker.service`:
```
[Unit]
Description = TUNA mirrors sync worker
After=network.target
[Service]
Type=simple
User=tunasync
PermissionsStartOnly=true
ExecStartPre=/usr/bin/cgcreate -t tunasync -a tunasync -g memory:tunasync
ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
ExecReload=/bin/kill -SIGHUP $MAINPID
ExecStopPost=/usr/bin/cgdelete memory:tunasync
[Install]
WantedBy=multi-user.target
```
`worker.conf`:
``` toml
[cgroup]
enable = true
group = "tunasync"
```
### If you are using v2 (unified) cgroup hierarchy:
`tunasync-worker.service`:
```
[Unit]
Description = TUNA mirrors sync worker
After=network.target
[Service]
Type=simple
User=tunasync
ExecStart=/home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
ExecReload=/bin/kill -SIGHUP $MAINPID
Delegate=yes
[Install]
WantedBy=multi-user.target
```
`worker.conf`:
``` toml
[cgroup]
enable = true
```
## Two versions of cgroups
For various reasons, the kernel provides two mutually incompatible versions of cgroups. Most current Linux distributions adopt systemd as the init system, which relies on cgroup and is responsible for initializing it, so the cgroup version in use is mainly decided by systemd. Since version 243, the "unified" cgroup hierarchy has been the default.
Tunasync automatically detects which cgroup version is in use and enables the corresponding interface, but because systemd behaves slightly differently in the two cases, different tunasync configurations are recommended.
## Two modes of group name discovery
Two modes of group name discovery are provided: implicit mode and manual mode.
### Manual Mode
In this mode, the administrator should: 1. manually create an empty cgroup (for the cgroup v2 unified hierarchy) or empty cgroups with the same name under certain controller subsystems (for the cgroup v1 hybrid hierarchy); 2. change the ownership of the cgroups to the user the tunasync worker runs as; and 3. specify the path in the configuration. On start, tunasync automatically detects which controllers are enabled (for v1) or enables the needed controllers (for v2).
Example 1:
``` bash
# suppose we have cgroup v1
sudo mkdir -p /sys/fs/cgroup/cpu/test/tunasync
sudo mkdir -p /sys/fs/cgroup/memory/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/cpu/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/memory/test/tunasync
# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
tunasync worker -c /path/to/worker.conf
```
In the above scenario, tunasync detects that the enabled subsystem controllers are cpu and memory. When running a mirror job named `foo`, sub-cgroups are created in both `/sys/fs/cgroup/cpu/test/tunasync/foo` and `/sys/fs/cgroup/memory/test/tunasync/foo`.
Example 2 (not recommended):
``` bash
# suppose we have cgroup v2
sudo mkdir -p /sys/fs/cgroup/test/tunasync
sudo chown -R tunasync:tunasync /sys/fs/cgroup/test/tunasync
# in worker.conf, we have group = "/test/tunasync" or "test/tunasync"
tunasync worker -c /path/to/worker.conf
```
In the above scenario, tunasync will directly use the cgroup `/sys/fs/cgroup/test/tunasync`. In most cases this will not work: by the design of cgroup v2, since tunasync is not running as root, it lacks the permission to move the processes it starts into the correct cgroup. Moving a process between groups requires the operating process to have write permission on the common ancestor of the source and target groups. This example therefore only demonstrates the functionality; you should avoid this setup.
### Implicit mode
In this mode, tunasync uses the cgroup it is currently running in and creates sub-groups for jobs inside that group. Tunasync first creates a sub-group named `__worker` and moves itself into it, to avoid having processes in non-leaf cgroups.
Typically this mode is combined with the `Delegate=yes` option in tunasync's systemd service configuration, which permits the service to self-manage the cgroup it is running in. For security reasons, systemd does not grant a non-root service write permission on its running cgroup under the v1 (legacy, hybrid) hierarchy, so this mode is mainly useful with the v2 hierarchy.
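For a one-off run, delegation can also be obtained with a transient unit; a hedged sketch (flags per systemd-run's documented interface, paths as in the unit file above):

``` bash
systemd-run --property=Delegate=yes --uid=tunasync \
  /home/bin/tunasync worker -c /etc/tunasync/worker.conf --with-systemd
```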
## Configuration
``` toml
[cgroup]
enable = true
base_path = "/sys/fs/cgroup"
group = "tunasync"
subsystem = "memory"
```
The definitions of the above options are:
* `enable`: `Bool`, specifies whether cgroup is enabled. When cgroup is disabled, `memory_limit` for non-docker jobs will be ignored, and the following options are also ignored.
* `group`: `String`, specifies the cgroup tunasync will use. When not provided, or provided as an empty string, cgroup discovery works in "Implicit mode", i.e. sub-cgroups are created in the cgroup tunasync is currently running in. Otherwise, discovery works in "Manual mode", where tunasync creates sub-cgroups in the specified cgroup.
* `base_path`: `String`, ignored. It originally specified the mount path of the cgroup filesystem; the cgroup filesystem is now required to be mounted at its default path (`/sys/fs/cgroup`).
* `subsystem`: `String`, ignored. It originally specified which cgroupv1 controller was enabled; it is now meaningless since discovery is automatic.
## References
* https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
* https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/index.html
* https://systemd.io/CGROUP_DELEGATION/
* https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Delegate=


@@ -1,4 +1,5 @@
# Getting started with tunasync
date: 2016-10-31 00:50:00
[tunasync](https://github.com/tuna/tunasync) is the mirroring solution currently used by the [Tsinghua University TUNA mirror site](https://mirrors.tuna.tsinghua.edu.cn).
@@ -7,32 +8,32 @@ date: 2016-10-31 00:50:00
In this example:
- only the [elvish](https://elvish.io) project is mirrored
- https is disabled
- cgroup support is disabled
- only the [elvish](https://elv.sh) project is mirrored
- https is disabled
- cgroup support is disabled
## Getting tunasync
### Binary package
Download `tunasync-linux-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).
Download `tunasync-linux-amd64-bin.tar.gz` from [Github Releases](https://github.com/tuna/tunasync/releases/latest).
### Building from source
```
$ make
```shell
> make
```
## Configuration
```
$ mkdir ~/tunasync_demo
$ mkdir /tmp/tunasync
```shell
> mkdir ~/tunasync_demo
> mkdir /tmp/tunasync
```
`~/tunasync_demo/worker.conf`:
Edit `~/tunasync_demo/worker.conf`:
```
```conf
[global]
name = "test_worker"
log_dir = "/tmp/tunasync/log/tunasync/{{.Name}}"
@@ -60,13 +61,13 @@ ssl_key = ""
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elvish.io/elvish/"
upstream = "rsync://rsync.elv.sh/elvish/"
use_ipv6 = false
```
`~/tunasync_demo/manager.conf`:
Edit `~/tunasync_demo/manager.conf`:
```
```conf
debug = false
[server]
@@ -81,28 +82,30 @@ db_file = "/tmp/tunasync/manager.db"
ca_cert = ""
```
Besides bolt, the badger, leveldb, and redis database backends are also supported. For badger and leveldb, just change db_type. To use redis as the database backend, change db_type to redis and set db_file below to the redis server address: `redis://user:password@host:port/db_number`
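For example, a hedged manager.conf fragment for the redis backend (address and credentials illustrative):

```conf
db_type = "redis"
db_file = "redis://user:password@127.0.0.1:6379/0"
```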
### Running
```
$ tunasync manager --config ~/tunasync_demo/manager.conf
$ tunasync worker --config ~/tunasync_demo/worker.conf
```shell
> tunasync manager --config ~/tunasync_demo/manager.conf
> tunasync worker --config ~/tunasync_demo/worker.conf
```
In this example, the mirrored data is stored in `/tmp/tunasync/`
### Control
Check the sync status:
```
$ tunasynctl list -p 12345 --all
```shell
> tunasynctl list -p 12345 --all
```
tunasynctl also supports configuration files. The file can live at `/etc/tunasync/ctl.conf` or `~/.config/tunasync/ctl.conf`; values in the latter override those in the former.
The configuration file contents are:
```
```conf
manager_addr = "127.0.0.1"
manager_port = 12345
ca_cert = ""
@@ -118,13 +121,13 @@ The worker and manager communicate over http(s); if your worker and manager are both
You can consult:
```
$ tunasync manager --help
$ tunasync worker --help
```shell
> tunasync manager --help
> tunasync worker --help
```
Take a look at the log directory.
Some example worker configurations: [workers.conf](workers.conf)
Operations you may find useful: [tips.md](tips.md)


@@ -83,7 +83,7 @@ snapshot_path = "/path/to/snapshot/directory"
[[mirrors]]
name = "elvish"
provider = "rsync"
upstream = "rsync://rsync.elvish.io/elvish/"
upstream = "rsync://rsync.elv.sh/elvish/"
interval = 1440
snapshot_path = "/data/publish/elvish"
```


@@ -1,10 +1,16 @@
# /home/scripts in this example points to https://github.com/tuna/tunasync-scripts/
[global]
name = "mirror_worker"
log_dir = "/srv/tunasync/log/tunasync/{{.Name}}"
mirror_dir = "/srv/tunasync"
concurrent = 10
interval = 1
interval = 120
# ensure the exec user is added to the `docker` group
[docker]
# the `command` provider can use docker_image and docker_volumes
enable = true
[manager]
api_base = "http://localhost:12345"
@@ -22,52 +28,637 @@ listen_addr = "127.0.0.1"
listen_port = 6000
ssl_cert = ""
ssl_key = ""
[[mirrors]]
name = "adobe-fonts"
interval = 1440
provider = "command"
upstream = "https://github.com/adobe-fonts"
#https://github.com/tuna/tunasync-scripts/blob/master/adobe-fonts.sh
command = "/home/scripts/adobe-fonts.sh"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "AdoptOpenJDK"
interval = 5760
provider = "command"
command = "/home/scripts/adoptopenjdk.py"
upstream = "https://adoptopenjdk.jfrog.io/adoptopenjdk"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "alpine"
provider = "rsync"
upstream = "rsync://rsync.alpinelinux.org/alpine/"
memory_limit = "256M"
[[mirrors]]
name = "anaconda"
provider = "command"
upstream = "https://repo.continuum.io/"
#https://github.com/tuna/tunasync-scripts/blob/master/anaconda.py
command = "/home/scripts/anaconda.py"
command = "/home/scripts/anaconda.py --delete"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "apache"
provider = "rsync"
upstream = "rsync://rsync.apache.org/apache-dist/"
use_ipv4 = true
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "armbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/apt/"
memory_limit = "256M"
[[mirrors]]
name = "armbian-releases"
provider = "rsync"
stage1_profile = "debian"
upstream = "rsync://rsync.armbian.com/dl/"
memory_limit = "256M"
[[mirrors]]
name = "bananian"
provider = "command"
upstream = "https://dl.bananian.org/"
command = "/home/scripts/lftp.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gnu"
name = "bioconductor"
provider = "rsync"
upstream = "rsync://mirrors.ocf.berkeley.edu/gnu/"
upstream = "master.bioconductor.org:./"
rsync_options = [ "--rsh=ssh -i /root/id_rsa -o PasswordAuthentication=no -l sync" ]
exclude_file = "/etc/excludes/bioconductor.txt"
memory_limit = "256M"
[[mirrors]]
name = "blender"
provider = "rsync"
upstream = "rsync://mirrors.dotsrc.org/blender/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/blender.txt"
interval = 1440
memory_limit = "256M"
[[mirrors]]
name = "chakra"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/packages/"
memory_limit = "256M"
[[mirrors]]
name = "chakra-releases"
provider = "rsync"
upstream = "rsync://rsync.chakralinux.org/releases/"
memory_limit = "256M"
[[mirrors]]
name = "chef"
interval = 1440
provider = "command"
upstream = "https://packages.chef.io/repos"
command = "/home/scripts/chef.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "clickhouse"
interval = 2880
provider = "rsync"
upstream = "rsync://repo.yandex.ru/yandexrepo/clickhouse/"
exclude_file = "/etc/excludes/clickhouse.txt"
memory_limit = "256M"
[[mirrors]]
name = "clojars"
provider = "command"
upstream = "s3://clojars-repo-production/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.dualstack.us-east-2.amazonaws.com"
#TUNASYNC_S3_ENDPOINT = "https://s3.us-east-2.amazonaws.com"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "CPAN"
provider = "rsync"
upstream = "rsync://cpan-rsync.perl.org/CPAN/"
memory_limit = "256M"
[[mirrors]]
name = "CRAN"
provider = "rsync"
upstream = "rsync://cran.r-project.org/CRAN/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "CTAN"
provider = "rsync"
upstream = "rsync://mirrors.rit.edu/CTAN/"
memory_limit = "256M"
[[mirrors]]
name = "dart-pub"
provider = "command"
upstream = "https://pub.dev/api"
command = "/home/scripts/pub.sh"
interval = 30
docker_image = "tunathu/pub-mirror:latest"
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/dart-pub"
[[mirrors]]
name = "debian"
provider = "command"
upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
command = "/home/scripts/debian.sh sync:archive:debian"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/ftpsync"
docker_volumes = [
"/etc/misc/ftpsync-debian.conf:/ftpsync/etc/ftpsync-debian.conf:ro",
"/log/ftpsync:/home/log/tunasync/ftpsync",
]
[mirrors.env]
FTPSYNC_LOG_DIR = "/home/log/tunasync/ftpsync"
[[mirrors]]
name = "docker-ce"
provider = "command"
upstream = "https://download.docker.com/"
command = "timeout 3h /home/scripts/docker-ce.py --workers 10 --fast-skip"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ELK"
interval = 1440
provider = "command"
upstream = "https://packages.elastic.co"
command = "/home/scripts/ELK.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
WGET_OPTIONS = "-6"
[[mirrors]]
name = "elasticstack"
interval = 1440
provider = "command"
upstream = "https://artifacts.elastic.co/"
command = "/home/scripts/elastic.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "erlang-solutions"
interval = 1440
provider = "command"
upstream = "https://packages.erlang-solutions.com"
command = "/home/scripts/erlang.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "flutter"
interval = 1440
provider = "command"
upstream = "https://storage.googleapis.com/flutter_infra/"
command = "/home/scripts/flutter.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "github-release"
provider = "command"
upstream = "https://api.github.com/repos/"
command = "/home/scripts/github-release.py --workers 5"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
interval = 720
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
GITHUB_TOKEN = "xxxxx"
[[mirrors]]
name = "gitlab-ce"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ce/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-ee"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/gitlab/gitlab-ee/"
command = "/home/scripts/gitlab-ce.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "gitlab-runner"
interval = 1440
provider = "command"
upstream = "https://packages.gitlab.com/runner/gitlab-runner"
command = "/home/scripts/gitlab-runner.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "grafana"
interval = 1440
provider = "command"
upstream = "https://packages.grafana.com/oss"
command = "/home/scripts/grafana.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "hackage"
provider = "command"
command = "/home/scripts/hackage.sh"
upstream = "https://hackage.haskell.org/"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew-bottles"
provider = "command"
upstream = "https://homebrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment variables
[mirrors.env]
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "influxdata"
interval = 1440
provider = "command"
upstream = "https://repos.influxdata.com"
command = "/home/scripts/influxdata.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "kali"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ftp.nluug.nl/kali/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "kali-images"
provider = "rsync"
upstream = "rsync://ftp.nluug.nl/kali-images/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "KaOS"
provider = "rsync"
upstream = "rsync://kaosx.tk/kaos/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kernel"
provider = "rsync"
upstream = "rsync://rsync.kernel.org/pub/linux/kernel/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "kicad"
provider = "command"
upstream = "s3://kicad-downloads/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.cern.ch"
TUNASYNC_AWS_OPTIONS = "--delete --exclude index.html"
[[mirrors]]
name = "kodi"
provider = "rsync"
upstream = "rsync://mirror.yandex.ru/mirrors/xbmc/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
use_ipv6 = true
[[mirrors]]
name = "kubernetes"
interval = 2880
provider = "command"
upstream = "http://packages.cloud.google.com"
command = "/home/scripts/kubernetes.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "linuxbrew-bottles"
provider = "command"
upstream = "https://linuxbrew.bintray.com"
command = "/home/scripts/linuxbrew-bottles.sh"
docker_image = "tunathu/homebrew-mirror"
# set environment variables
[mirrors.env]
RUN_LINUXBREW = "true"
HOMEBREW_REPO = "https://neomirrors.tuna.tsinghua.edu.cn/git/homebrew"
[[mirrors]]
name = "linuxmint"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://mirrors.kernel.org/linuxmint-packages/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "lxc-images"
provider = "command"
upstream = "https://us.images.linuxcontainers.org/"
command = "/home/scripts/lxc-images.sh"
docker_image = "tunathu/tunasync-scripts:latest"
interval = 720
[[mirrors]]
name = "lyx"
provider = "command"
upstream = "ftp://ftp.lyx.org/pub/lyx/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "mongodb"
interval = 1440
provider = "command"
upstream = "https://repo.mongodb.org"
command = "/home/scripts/mongodb.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "msys2"
provider = "command"
upstream = "http://repo.msys2.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "mysql"
interval = 30
provider = "command"
upstream = "https://repo.mysql.com"
command = "/home/scripts/mysql.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
USE_IPV6 = "1"
[[mirrors]]
name = "nix"
interval = 1440
provider = "command"
upstream = "s3://nix-releases/nix/"
command = "/home/scripts/nix.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
MIRROR_BASE_URL = 'https://mirrors.tuna.tsinghua.edu.cn/nix/'
[[mirrors]]
name = "nix-channels"
interval = 300
provider = "command"
upstream = "https://nixos.org/channels"
command = "timeout 20h /home/scripts/nix-channels.py"
docker_image = "tunathu/nix-channels:latest"
docker_options = [
"--cpus", "20",
]
[[mirrors]]
name = "nodesource"
provider = "command"
upstream = "https://deb.nodesource.com/"
command = "/home/scripts/nodesource.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "openresty"
provider = "command"
upstream = "https://openresty.org/package/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer"
[[mirrors]]
name = "packagist"
provider = "command"
upstream = "http://packagist.org/"
command = "/home/scripts/packagist.sh"
interval = 1440
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "proxmox"
interval = 1440
provider = "command"
upstream = "http://download.proxmox.com"
command = "/home/scripts/proxmox.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "pypi"
provider = "command"
upstream = "https://pypi.python.org/"
#https://github.com/tuna/tunasync-scripts/blob/master/pypi.sh
command = "/home/scripts/pypi.sh"
docker_image = "tunathu/tunasync-scripts:latest"
docker_image = "tunathu/bandersnatch:latest"
interval = 5
[[mirrors]]
name = "qt"
provider = "rsync"
upstream = "rsync://master.qt-project.org/qt-all/"
exclude_file = "/etc/excludes/qt.txt"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "raspberrypi"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://apt-repo.raspberrypi.org/archive/debian/"
memory_limit = "256M"
[[mirrors]]
name = "raspbian-images"
interval = 5760
provider = "command"
upstream = "https://downloads.raspberrypi.org/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x ^icons/$ -c --only-missing -v --no-perms"
[[mirrors]]
name = "raspbian"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://archive.raspbian.org/archive/"
rsync_options = [ "--delete-excluded" ] # delete .~tmp~ folders
memory_limit = "256M"
[[mirrors]]
name = "redhat"
provider = "rsync"
upstream = "rsync://ftp.redhat.com/redhat/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
exclude_file = "/etc/excludes/redhat.txt"
interval = 1440
[mirrors.env]
RSYNC_PROXY="127.0.0.1:8123"
[[mirrors]]
name = "remi"
interval = 1440
provider = "command"
upstream = "rsync://rpms.remirepo.net"
command = "/home/scripts/remi.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "repo-ck"
provider = "command"
upstream = "http://repo-ck.com"
command = "/home/scripts/repo-ck.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ros"
provider = "rsync"
upstream = "rsync://mirror.umd.edu/packages.ros.org/ros/"
memory_limit = "256M"
[[mirrors]]
name = "ros2"
interval = 1440
provider = "command"
upstream = "http://packages.ros.org/ros2"
command = "/home/scripts/ros2.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "rubygems"
provider = "command"
upstream = "https://rubygems.org"
command = "/home/scripts/rubygems.sh"
docker_image = "tunathu/rubygems-mirror"
interval = 60
# set environment variables
[mirrors.env]
INIT = "0"
[[mirrors]]
name = "rudder"
interval = 2880
provider = "command"
upstream = "https://repository.rudder.io"
command = "/home/scripts/rudder.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "debian"
interval = 720
name = "rustup"
provider = "command"
upstream = "https://rustup.rs/"
command = "/home/scripts/rustup.sh"
interval = 1440
docker_image = "tunathu/rustup-mirror:latest"
docker_volumes = [
]
docker_options = [
]
[mirrors.env]
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/rustup"
[[mirrors]]
name = "saltstack"
interval = 1440 # required on http://repo.saltstack.com/#mirror
provider = "command"
upstream = "s3://s3/"
command = "/home/scripts/s3.sh"
docker_image = "tunathu/ftpsync:latest"
[mirrors.env]
TUNASYNC_S3_ENDPOINT = "https://s3.repo.saltstack.com"
TUNASYNC_AWS_OPTIONS = "--delete --exact-timestamps"
[[mirrors]]
name = "solus"
provider = "rsync"
upstream = "rsync://mirrors.tuna.tsinghua.edu.cn/debian/"
upstream = "rsync://mirrors.rit.edu/solus/"
rsync_options = [ "--exclude", "/shannon", "--exclude", "/unstable" ]
memory_limit = "256M"
[[mirrors]]
name = "stackage"
provider = "command"
command = "/home/scripts/stackage.py"
upstream = "https://www.stackage.org/"
docker_image = "tunathu/tunasync-scripts:latest"
# set environment variables
[mirrors.env]
GIT_COMMITTER_NAME = "TUNA mirrors"
GIT_COMMITTER_EMAIL = "mirrors@tuna.tsinghua.edu.cn"
[[mirrors]]
name = "steamos"
interval = 1440
provider = "command"
upstream = "http://repo.steampowered.com"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "--only-newer --exclude icons/ "
[[mirrors]]
name = "termux"
interval = 1440
provider = "command"
upstream = "https://dl.bintray.com/termux/termux-packages-24/"
command = "/home/scripts/termux.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "ubuntu"
provider = "two-stage-rsync"
@@ -76,4 +667,156 @@ upstream = "rsync://archive.ubuntu.com/ubuntu/"
rsync_options = [ "--delete-excluded" ]
memory_limit = "256M"
[[mirrors]]
name = "ubuntu-ports"
provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ports.ubuntu.com/ubuntu-ports/"
rsync_options = [ "--delete-excluded" ]
exclude_file = "/etc/excludes/ubuntu-ports-exclude.txt"
memory_limit = "256M"
[[mirrors]]
name = "virtualbox"
interval = 1440
provider = "command"
upstream = "http://download.virtualbox.org/virtualbox"
command = "/home/scripts/virtualbox.sh"
size_pattern = "size-sum: ([0-9\\.]+[KMGTP])"
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "winehq"
provider = "command"
upstream = "ftp://ftp.winehq.org/pub/"
command = "/home/scripts/lftp.sh"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
TUNASYNC_LFTP_OPTIONS = "-x wine-builds.old/ -x /\\..+"
[[mirrors]]
name = "zabbix"
provider = "rsync"
upstream = "rsync://repo.zabbix.com/mirror/"
rsync_options = [ "--delete-excluded", "--chmod=o+r,Do+x,Fa-x" ]
memory_limit = "256M"
[[mirrors]]
name = "AOSP"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://android.googlesource.com/mirror/manifest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "lineageOS"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/aosp.sh"
upstream = "https://github.com/LineageOS/mirror"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
REPO = "/usr/local/bin/aosp-repo"
REPO_URL = "https://mirrors.tuna.tsinghua.edu.cn/git/git-repo"
USE_BITMAP_INDEX = "1"
[[mirrors]]
name = "chromiumos"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/cros.sh"
upstream = "https://chromium.googlesource.com"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[mirrors.env]
USE_BITMAP_INDEX = "1"
CONCURRENT_JOBS = "20"
[[mirrors]]
name = "crates.io-index.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "https://github.com/rust-lang/crates.io-index.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "flutter-sdk.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/flutter/flutter.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gcc.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://gcc.gnu.org/git/gcc.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "gentoo-portage.git"
provider = "command"
command = "/home/tunasync-scripts/git.sh"
upstream = "git://github.com/gentoo-mirror/gentoo.git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
[[mirrors]]
name = "git-repo"
provider = "command"
command = "/home/tunasync-scripts/git-repo.sh"
upstream = "https://gerrit.googlesource.com/git-repo"
size_pattern = "size-pack: ([0-9\\.]+[KMGTP])"
fail_on_match = "fatal: "
docker_image = "tunathu/tunasync-scripts:latest"
[[mirrors]]
name = "homebrew"
provider = "command"
command = "/home/tunasync-scripts/homebrew.sh"
upstream = "https://github.com/Homebrew"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "CocoaPods"
provider = "command"
command = "/home/tunasync-scripts/cocoapods.sh"
upstream = "https://github.com/CocoaPods"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[[mirrors]]
name = "pybombs"
interval = 720
provider = "command"
command = "/home/tunasync-scripts/pybombs.sh"
upstream = "https://github.com/scateu/pybombs-mirror/"
docker_image = "tunathu/tunasync-scripts:latest"
docker_volumes = ["/home/pybombs-mirror:/opt/pybombs-mirror"]
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
[mirrors.env]
PYBOMBS_MIRROR_SCRIPT_PATH = "/opt/pybombs-mirror"
MIRROR_BASE_URL = "https://mirrors.tuna.tsinghua.edu.cn/pybombs"
[[mirrors]]
name = "llvm"
provider = "command"
command = "/home/tunasync-scripts/llvm.sh"
upstream = "https://git.llvm.org/git"
docker_image = "tunathu/tunasync-scripts:latest"
size_pattern = "Total size is ([0-9\\.]+[KMGTP]?)"
# vim: ft=toml

go.mod: 25 lines changed

@@ -4,18 +4,35 @@ go 1.13
require (
github.com/BurntSushi/toml v0.3.1
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239
github.com/boltdb/bolt v1.3.1
github.com/cilium/ebpf v0.6.2 // indirect
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27
github.com/containerd/cgroups v1.0.2-0.20210729163027-ddda8a174e9a
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035
github.com/gin-gonic/gin v1.5.0
github.com/dgraph-io/badger/v2 v2.2007.2
github.com/docker/go-units v0.4.0
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/gin-gonic/gin v1.7.0
github.com/go-redis/redis/v8 v8.3.0
github.com/gomodule/redigo v1.8.2 // indirect
github.com/imdario/mergo v0.3.9
github.com/mattn/goveralls v0.0.5 // indirect
github.com/moby/moby v20.10.7+incompatible
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.4.0
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/smartystreets/assertions v1.2.0 // indirect
github.com/smartystreets/goconvey v1.6.4
github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli v1.22.3
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 // indirect
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
gotest.tools/v3 v3.0.3 // indirect
)

go.sum: 252 lines changed

@@ -1,15 +1,44 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27 h1:HHUr4P/aKh4quafGxDT9LDasjGdlGkzLbfmmrlng3kA=
github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/containerd/cgroups v1.0.2-0.20210729163027-ddda8a174e9a h1:Se756mbFRj+3RITm/9NYHknEo1TJEpCV8jHI2e8QOEo=
github.com/containerd/cgroups v1.0.2-0.20210729163027-ddda8a174e9a/go.mod h1:M9MzGh4G4yzSq0e3Bf6tQCoDsvGewJdfhIix9CRaOWo=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -17,88 +46,249 @@ github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035 h1:4e+UEZaKPx0ZEiCMPU
github.com/dennwc/btrfs v0.0.0-20190517175702-d917b30ff035/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA=
github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg=
github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0=
github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k=
github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.5.0 h1:fi+bqFAx/oLK54somfCtEZs9HeH1LHVoEPUgARpTqyc=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU=
github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-redis/redis/v8 v8.3.0 h1:Xrwvn8+QqUYD1MbQmda3cVR2U9li5XbtRFkKZN5Y0hk=
github.com/go-redis/redis/v8 v8.3.0/go.mod h1:a2xkpBM7NJUN5V5kiF46X5Ltx4WeXJ9757X/ScKUBdE=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/goveralls v0.0.5 h1:spfq8AyZ0cCk57Za6/juJ5btQxeE1FaEGMdfcI+XO48=
github.com/mattn/goveralls v0.0.5/go.mod h1:Xg2LHi51faXLyKXwsndxiW6uxEEQT9+3sjGzzwU4xy0=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/moby v20.10.7+incompatible h1:mMDsIjUeon2FpxCJz0Xj32wzRcTbGLVzG1uEbPalok4=
github.com/moby/moby v20.10.7+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.3 h1:FpNT6zq26xNpHZy08emi755QwzLPs6Pukqjlc7RfOMU=
github.com/urfave/cli v1.22.3/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA=
go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200113040837-eac381796e91/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2 h1:6TB4+MaZlkcSsJDu+BS5yxSEuZIYhjWz+jhbSLEZylI=
golang.org/x/tools v0.0.0-20200312194400-c312e98713c2/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvRQyEAKbw1xc=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=


@@ -1,6 +1,8 @@
package internal
import (
"bytes"
"encoding/json"
"fmt"
"time"
)
@@ -13,6 +15,7 @@ type MirrorStatus struct {
IsMaster bool `json:"is_master"`
Status SyncStatus `json:"status"`
LastUpdate time.Time `json:"last_update"`
LastStarted time.Time `json:"last_started"`
LastEnded time.Time `json:"last_ended"`
Scheduled time.Time `json:"next_schedule"`
Upstream string `json:"upstream"`
@@ -27,6 +30,7 @@ type WorkerStatus struct {
URL string `json:"url"` // worker url
Token string `json:"token"` // session token
LastOnline time.Time `json:"last_online"` // last seen
LastRegister time.Time `json:"last_register"` // last register time
}
type MirrorSchedules struct {
@@ -58,21 +62,45 @@ const (
)
func (c CmdVerb) String() string {
switch c {
case CmdStart:
return "start"
case CmdStop:
return "stop"
case CmdDisable:
return "disable"
case CmdRestart:
return "restart"
case CmdPing:
return "ping"
case CmdReload:
return "reload"
mapping := map[CmdVerb]string{
CmdStart: "start",
CmdStop: "stop",
CmdDisable: "disable",
CmdRestart: "restart",
CmdPing: "ping",
CmdReload: "reload",
}
return "unknown"
return mapping[c]
}
func NewCmdVerbFromString(s string) CmdVerb {
mapping := map[string]CmdVerb{
"start": CmdStart,
"stop": CmdStop,
"disable": CmdDisable,
"restart": CmdRestart,
"ping": CmdPing,
"reload": CmdReload,
}
return mapping[s]
}
// Marshal and Unmarshal for CmdVerb
func (s CmdVerb) MarshalJSON() ([]byte, error) {
buffer := bytes.NewBufferString(`"`)
buffer.WriteString(s.String())
buffer.WriteString(`"`)
return buffer.Bytes(), nil
}
func (s *CmdVerb) UnmarshalJSON(b []byte) error {
var j string
err := json.Unmarshal(b, &j)
if err != nil {
return err
}
*s = NewCmdVerbFromString(j)
return nil
}
// A WorkerCmd is the command message send from the
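
The hunk above replaces the verb-to-string switch with lookup maps and adds JSON (un)marshalling, so a CmdVerb travels over the wire as its string name. Below is a self-contained sketch of that round-trip; the two-verb enum is a trimmed stand-in for illustration, not the package's full set:

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed stand-in for internal.CmdVerb: two verbs instead of six.
type CmdVerb uint8

const (
    CmdStart CmdVerb = iota
    CmdStop
)

func (c CmdVerb) String() string {
    return map[CmdVerb]string{CmdStart: "start", CmdStop: "stop"}[c]
}

func NewCmdVerbFromString(s string) CmdVerb {
    return map[string]CmdVerb{"start": CmdStart, "stop": CmdStop}[s]
}

// Same shape as the methods added in the hunk above.
func (c CmdVerb) MarshalJSON() ([]byte, error) {
    return []byte(`"` + c.String() + `"`), nil
}

func (c *CmdVerb) UnmarshalJSON(b []byte) error {
    var s string
    if err := json.Unmarshal(b, &s); err != nil {
        return err
    }
    *c = NewCmdVerbFromString(s)
    return nil
}

func main() {
    b, _ := json.Marshal(CmdStop)
    fmt.Println(string(b)) // "stop"
    var v CmdVerb
    _ = json.Unmarshal(b, &v)
    fmt.Println(v == CmdStop) // true
}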


@@ -43,6 +43,8 @@ type WebMirrorStatus struct {
Status SyncStatus `json:"status"`
LastUpdate textTime `json:"last_update"`
LastUpdateTs stampTime `json:"last_update_ts"`
LastStarted textTime `json:"last_started"`
LastStartedTs stampTime `json:"last_started_ts"`
LastEnded textTime `json:"last_ended"`
LastEndedTs stampTime `json:"last_ended_ts"`
Scheduled textTime `json:"next_schedule"`
@@ -58,6 +60,8 @@ func BuildWebMirrorStatus(m MirrorStatus) WebMirrorStatus {
Status: m.Status,
LastUpdate: textTime{m.LastUpdate},
LastUpdateTs: stampTime{m.LastUpdate},
LastStarted: textTime{m.LastStarted},
LastStartedTs: stampTime{m.LastStarted},
LastEnded: textTime{m.LastEnded},
LastEndedTs: stampTime{m.LastEnded},
Scheduled: textTime{m.Scheduled},
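
Every added field comes as a pair: a human-readable timestamp plus a _ts unix stamp, encoded by the package's textTime and stampTime wrappers. Their definitions are not part of this diff; the sketch below is one plausible minimal version of the dual-encoding idea, assuming RFC3339 for the text form:

package main

import (
    "encoding/json"
    "fmt"
    "strconv"
    "time"
)

// Hypothetical minimal textTime/stampTime: the same instant is
// serialized twice, once human-readable and once as a unix stamp.
type textTime struct{ time.Time }

func (t textTime) MarshalJSON() ([]byte, error) {
    return json.Marshal(t.Format(time.RFC3339))
}

type stampTime struct{ time.Time }

func (t stampTime) MarshalJSON() ([]byte, error) {
    return []byte(strconv.FormatInt(t.Unix(), 10)), nil
}

func main() {
    now := time.Now()
    text, _ := json.Marshal(textTime{now})
    stamp, _ := json.Marshal(stampTime{now})
    fmt.Println(string(text), string(stamp)) // e.g. "2021-09-02T22:22:26+08:00" 1630592546
}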


@@ -19,6 +19,8 @@ func TestStatus(t *testing.T) {
Status: Success,
LastUpdate: textTime{t},
LastUpdateTs: stampTime{t},
LastStarted: textTime{t},
LastStartedTs: stampTime{t},
LastEnded: textTime{t},
LastEndedTs: stampTime{t},
Scheduled: textTime{t},
@@ -40,6 +42,10 @@ func TestStatus(t *testing.T) {
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())
@@ -58,6 +64,7 @@ func TestStatus(t *testing.T) {
IsMaster: true,
Status: Failed,
LastUpdate: time.Now().Add(-time.Minute * 30),
LastStarted: time.Now().Add(-time.Minute * 1),
LastEnded: time.Now(),
Scheduled: time.Now().Add(time.Minute * 5),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
@@ -73,6 +80,10 @@ func TestStatus(t *testing.T) {
So(m2.LastUpdateTs.Unix(), ShouldEqual, m.LastUpdate.Unix())
So(m2.LastUpdate.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastUpdateTs.UnixNano(), ShouldEqual, m.LastUpdate.UnixNano())
So(m2.LastStarted.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStartedTs.Unix(), ShouldEqual, m.LastStarted.Unix())
So(m2.LastStarted.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastStartedTs.UnixNano(), ShouldEqual, m.LastStarted.UnixNano())
So(m2.LastEnded.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEndedTs.Unix(), ShouldEqual, m.LastEnded.Unix())
So(m2.LastEnded.UnixNano(), ShouldEqual, m.LastEnded.UnixNano())


@@ -1,4 +1,4 @@
package internal
// Version of the program
const Version string = "0.6.0"
const Version string = "0.8.0"


@@ -29,6 +29,7 @@ type FileConfig struct {
CACert string `toml:"ca_cert"`
}
// LoadConfig loads config from specified file
func LoadConfig(cfgFile string, c *cli.Context) (*Config, error) {
cfg := new(Config)


@@ -4,8 +4,13 @@ import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/boltdb/bolt"
"github.com/dgraph-io/badger/v2"
"github.com/go-redis/redis/v8"
"github.com/pkg/errors"
"github.com/syndtr/goleveldb/leveldb"
. "github.com/tuna/tunasync/internal"
)
@@ -16,6 +21,7 @@ type dbAdapter interface {
GetWorker(workerID string) (WorkerStatus, error)
DeleteWorker(workerID string) error
CreateWorker(w WorkerStatus) (WorkerStatus, error)
RefreshWorker(workerID string) (WorkerStatus, error)
UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error)
GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error)
ListMirrorStatus(workerID string) ([]MirrorStatus, error)
@@ -24,21 +30,14 @@ type dbAdapter interface {
Close() error
}
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
if dbType == "bolt" {
innerDB, err := bolt.Open(dbFile, 0600, nil)
if err != nil {
return nil, err
}
db := boltAdapter{
db: innerDB,
dbFile: dbFile,
}
err = db.Init()
return &db, err
}
// unsupported db-type
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
// interface for a kv database
type kvAdapter interface {
InitBucket(bucket string) error
Get(bucket string, key string) ([]byte, error)
GetAll(bucket string) (map[string][]byte, error)
Put(bucket string, key string, value []byte) error
Delete(bucket string, key string) error
Close() error
}
const (
@@ -46,166 +45,222 @@ const (
_statusBucketKey = "mirror_status"
)
type boltAdapter struct {
db *bolt.DB
dbFile string
func makeDBAdapter(dbType string, dbFile string) (dbAdapter, error) {
if dbType == "bolt" {
innerDB, err := bolt.Open(dbFile, 0600, &bolt.Options{
Timeout: 5 * time.Second,
})
if err != nil {
return nil, err
}
db := boltAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "redis" {
opt, err := redis.ParseURL(dbFile)
if err != nil {
return nil, fmt.Errorf("bad redis url: %s", err)
}
innerDB := redis.NewClient(opt)
db := redisAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "badger" {
innerDB, err := badger.Open(badger.DefaultOptions(dbFile))
if err != nil {
return nil, err
}
db := badgerAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
} else if dbType == "leveldb" {
innerDB, err := leveldb.OpenFile(dbFile, nil)
if err != nil {
return nil, err
}
db := leveldbAdapter{
db: innerDB,
}
kv := kvDBAdapter{
db: &db,
}
err = kv.Init()
return &kv, err
}
// unsupported db-type
return nil, fmt.Errorf("unsupported db-type: %s", dbType)
}
func (b *boltAdapter) Init() (err error) {
return b.db.Update(func(tx *bolt.Tx) error {
_, err = tx.CreateBucketIfNotExists([]byte(_workerBucketKey))
// use the underlying kv database to store data
type kvDBAdapter struct {
db kvAdapter
}
func (b *kvDBAdapter) Init() error {
err := b.db.InitBucket(_workerBucketKey)
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
_, err = tx.CreateBucketIfNotExists([]byte(_statusBucketKey))
err = b.db.InitBucket(_statusBucketKey)
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _statusBucketKey, err.Error())
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
return nil
})
return err
}
func (b *boltAdapter) ListWorkers() (ws []WorkerStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
c := bucket.Cursor()
func (b *kvDBAdapter) ListWorkers() (ws []WorkerStatus, err error) {
var workers map[string][]byte
workers, err = b.db.GetAll(_workerBucketKey)
var w WorkerStatus
for k, v := c.First(); k != nil; k, v = c.Next() {
for _, v := range workers {
jsonErr := json.Unmarshal(v, &w)
if jsonErr != nil {
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
err = errors.Wrap(err, jsonErr.Error())
continue
}
ws = append(ws, w)
}
return err
})
return
}
func (b *boltAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
v := bucket.Get([]byte(workerID))
func (b *kvDBAdapter) GetWorker(workerID string) (w WorkerStatus, err error) {
var v []byte
v, err = b.db.Get(_workerBucketKey, workerID)
if v == nil {
err = fmt.Errorf("invalid workerID %s", workerID)
} else {
err = json.Unmarshal(v, &w)
}
return
}
func (b *kvDBAdapter) DeleteWorker(workerID string) error {
v, _ := b.db.Get(_workerBucketKey, workerID)
if v == nil {
return fmt.Errorf("invalid workerID %s", workerID)
}
err := json.Unmarshal(v, &w)
return err
})
return
return b.db.Delete(_workerBucketKey, workerID)
}
func (b *boltAdapter) DeleteWorker(workerID string) (err error) {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
v := bucket.Get([]byte(workerID))
if v == nil {
return fmt.Errorf("invalid workerID %s", workerID)
}
err := bucket.Delete([]byte(workerID))
return err
})
return
}
func (b *boltAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_workerBucketKey))
func (b *kvDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
v, err := json.Marshal(w)
if err != nil {
return err
if err == nil {
err = b.db.Put(_workerBucketKey, w.ID, v)
}
err = bucket.Put([]byte(w.ID), v)
return err
})
return w, err
}
func (b *boltAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
func (b *kvDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
w, err = b.GetWorker(workerID)
if err == nil {
w.LastOnline = time.Now()
w, err = b.CreateWorker(w)
}
return w, err
}
func (b *kvDBAdapter) UpdateMirrorStatus(workerID, mirrorID string, status MirrorStatus) (MirrorStatus, error) {
id := mirrorID + "/" + workerID
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
v, err := json.Marshal(status)
err = bucket.Put([]byte(id), v)
return err
})
if err == nil {
err = b.db.Put(_statusBucketKey, id, v)
}
return status, err
}
func (b *boltAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
func (b *kvDBAdapter) GetMirrorStatus(workerID, mirrorID string) (m MirrorStatus, err error) {
id := mirrorID + "/" + workerID
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
v := bucket.Get([]byte(id))
var v []byte
v, err = b.db.Get(_statusBucketKey, id)
if v == nil {
return fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
err = fmt.Errorf("no mirror '%s' exists in worker '%s'", mirrorID, workerID)
} else if err == nil {
err = json.Unmarshal(v, &m)
}
err := json.Unmarshal(v, &m)
return err
})
return
}
func (b *boltAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
c := bucket.Cursor()
func (b *kvDBAdapter) ListMirrorStatus(workerID string) (ms []MirrorStatus, err error) {
var vals map[string][]byte
vals, err = b.db.GetAll(_statusBucketKey)
if err != nil {
return
}
for k, v := range vals {
if wID := strings.Split(k, "/")[1]; wID == workerID {
var m MirrorStatus
for k, v := c.First(); k != nil; k, v = c.Next() {
if wID := strings.Split(string(k), "/")[1]; wID == workerID {
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
err = errors.Wrap(err, jsonErr.Error())
continue
}
ms = append(ms, m)
}
}
return err
})
return
}
func (b *boltAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
c := bucket.Cursor()
func (b *kvDBAdapter) ListAllMirrorStatus() (ms []MirrorStatus, err error) {
var vals map[string][]byte
vals, err = b.db.GetAll(_statusBucketKey)
if err != nil {
return
}
for _, v := range vals {
var m MirrorStatus
for k, v := c.First(); k != nil; k, v = c.Next() {
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
err = errors.Wrap(err, jsonErr.Error())
continue
}
ms = append(ms, m)
}
return err
})
return
}
func (b *boltAdapter) FlushDisabledJobs() (err error) {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(_statusBucketKey))
c := bucket.Cursor()
func (b *kvDBAdapter) FlushDisabledJobs() (err error) {
var vals map[string][]byte
vals, err = b.db.GetAll(_statusBucketKey)
if err != nil {
return
}
for k, v := range vals {
var m MirrorStatus
for k, v := c.First(); k != nil; k, v = c.Next() {
jsonErr := json.Unmarshal(v, &m)
if jsonErr != nil {
err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
err = errors.Wrap(err, jsonErr.Error())
continue
}
if m.Status == Disabled || len(m.Name) == 0 {
err = c.Delete()
deleteErr := b.db.Delete(_statusBucketKey, k)
if deleteErr != nil {
err = errors.Wrap(err, deleteErr.Error())
}
}
}
return err
})
return
}
func (b *boltAdapter) Close() error {
func (b *kvDBAdapter) Close() error {
if b.db != nil {
return b.db.Close()
}
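
Taken together, this refactor narrows each storage backend to the small kvAdapter interface and keeps all bucket naming and JSON handling in a single kvDBAdapter, so adding a backend means implementing six methods. A minimal sketch of the same composition, using a hypothetical map-backed backend purely for illustration:

package main

import "fmt"

// kv mirrors the narrow kvAdapter interface (trimmed to two methods).
type kv interface {
    Put(bucket, key string, value []byte) error
    Get(bucket, key string) ([]byte, error)
}

// memKV is a hypothetical in-memory backend, for illustration only.
type memKV struct{ m map[string][]byte }

func (s *memKV) Put(bucket, key string, v []byte) error {
    s.m[bucket+key] = v
    return nil
}

func (s *memKV) Get(bucket, key string) ([]byte, error) {
    return s.m[bucket+key], nil
}

// store plays the role of kvDBAdapter: backend-agnostic logic on top.
type store struct{ db kv }

func (s *store) SaveWorker(id string, blob []byte) error {
    return s.db.Put("workers/", id, blob)
}

func main() {
    s := store{db: &memKV{m: map[string][]byte{}}}
    _ = s.SaveWorker("w1", []byte(`{"id":"w1"}`))
    v, _ := s.db.Get("workers/", "w1")
    fmt.Println(string(v)) // {"id":"w1"}
}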

manager/db_badger.go

@@ -0,0 +1,67 @@
package manager
import (
"github.com/dgraph-io/badger/v2"
)
// implement kv interface backed by badger
type badgerAdapter struct {
db *badger.DB
}
func (b *badgerAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *badgerAdapter) Get(bucket string, key string) (v []byte, err error) {
b.db.View(func(tx *badger.Txn) error {
var item *badger.Item
item, err = tx.Get([]byte(bucket + key))
if item != nil {
v, err = item.ValueCopy(nil)
}
return nil
})
return
}
func (b *badgerAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
b.db.View(func(tx *badger.Txn) error {
it := tx.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
prefix := []byte(bucket)
m = make(map[string][]byte)
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
k := string(item.Key())
actualKey := k[len(bucket):]
var v []byte
v, err = item.ValueCopy(nil)
m[actualKey] = v
}
return nil
})
return
}
func (b *badgerAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Update(func(tx *badger.Txn) error {
err := tx.Set([]byte(bucket+key), value)
return err
})
return err
}
func (b *badgerAdapter) Delete(bucket string, key string) error {
err := b.db.Update(func(tx *badger.Txn) error {
err := tx.Delete([]byte(bucket + key))
return err
})
return err
}
func (b *badgerAdapter) Close() error {
return b.db.Close()
}
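
badger has no native buckets, so the adapter emulates them by concatenating the bucket name onto every key and iterating by prefix. A runnable sketch of that idea, assuming badger v2's in-memory mode (available in the v2 series pinned by go.mod) so no scratch directory is needed:

package main

import (
    "fmt"

    "github.com/dgraph-io/badger/v2"
)

func main() {
    // In-memory instance keeps the sketch self-contained.
    db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
    if err != nil {
        panic(err)
    }
    defer db.Close()

    bucket, key := "mirror_status", "job1/worker1"
    _ = db.Update(func(tx *badger.Txn) error {
        // Buckets are emulated by key prefix, as in badgerAdapter.Put.
        return tx.Set([]byte(bucket+key), []byte(`{"status":"success"}`))
    })
    _ = db.View(func(tx *badger.Txn) error {
        item, err := tx.Get([]byte(bucket + key))
        if err != nil {
            return err
        }
        v, _ := item.ValueCopy(nil)
        fmt.Println(string(v)) // {"status":"success"}
        return nil
    })
}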

manager/db_bolt.go

@@ -0,0 +1,66 @@
package manager
import (
"fmt"
"github.com/boltdb/bolt"
)
// implement kv interface backed by boltdb
type boltAdapter struct {
db *bolt.DB
}
func (b *boltAdapter) InitBucket(bucket string) (err error) {
return b.db.Update(func(tx *bolt.Tx) error {
_, err = tx.CreateBucketIfNotExists([]byte(bucket))
if err != nil {
return fmt.Errorf("create bucket %s error: %s", _workerBucketKey, err.Error())
}
return nil
})
}
func (b *boltAdapter) Get(bucket string, key string) (v []byte, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
v = bucket.Get([]byte(key))
return nil
})
return
}
func (b *boltAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
c := bucket.Cursor()
m = make(map[string][]byte)
for k, v := c.First(); k != nil; k, v = c.Next() {
m[string(k)] = v
}
return nil
})
return
}
func (b *boltAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
err := bucket.Put([]byte(key), value)
return err
})
return err
}
func (b *boltAdapter) Delete(bucket string, key string) error {
err := b.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(bucket))
err := bucket.Delete([]byte(key))
return err
})
return err
}
func (b *boltAdapter) Close() error {
return b.db.Close()
}

manager/db_leveldb.go

@@ -0,0 +1,51 @@
package manager
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
)
// implement kv interface backed by leveldb
type leveldbAdapter struct {
db *leveldb.DB
}
func (b *leveldbAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *leveldbAdapter) Get(bucket string, key string) (v []byte, err error) {
v, err = b.db.Get([]byte(bucket+key), nil)
return
}
func (b *leveldbAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
it := b.db.NewIterator(util.BytesPrefix([]byte(bucket)), nil)
defer it.Release()
m = make(map[string][]byte)
for it.Next() {
k := string(it.Key())
actualKey := k[len(bucket):]
// it.Value() changes on next iteration
val := it.Value()
v := make([]byte, len(val))
copy(v, val)
m[actualKey] = v
}
return
}
func (b *leveldbAdapter) Put(bucket string, key string, value []byte) error {
err := b.db.Put([]byte(bucket+key), []byte(value), nil)
return err
}
func (b *leveldbAdapter) Delete(bucket string, key string) error {
err := b.db.Delete([]byte(bucket+key), nil)
return err
}
func (b *leveldbAdapter) Close() error {
return b.db.Close()
}
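
leveldb gets the same prefix treatment, with one wrinkle the comment above calls out: the iterator reuses its buffers, so values (and keys) must be copied before the next iteration. A sketch against goleveldb's in-memory storage:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/storage"
    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    // In-memory storage keeps the sketch self-contained.
    db, err := leveldb.Open(storage.NewMemStorage(), nil)
    if err != nil {
        panic(err)
    }
    defer db.Close()

    bucket := "workers/"
    _ = db.Put([]byte(bucket+"w1"), []byte("a"), nil)
    _ = db.Put([]byte(bucket+"w2"), []byte("b"), nil)

    it := db.NewIterator(util.BytesPrefix([]byte(bucket)), nil)
    defer it.Release()
    for it.Next() {
        // Key()/Value() buffers are reused between iterations; copy
        // before keeping them (GetAll above copies for the same reason).
        k := string(it.Key())[len(bucket):]
        v := append([]byte(nil), it.Value()...)
        fmt.Println(k, string(v))
    }
}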

manager/db_redis.go

@@ -0,0 +1,54 @@
package manager
import (
"context"
"github.com/go-redis/redis/v8"
)
// implement kv interface backed by redis
type redisAdapter struct {
db *redis.Client
}
var ctx = context.Background()
func (b *redisAdapter) InitBucket(bucket string) (err error) {
// no-op
return
}
func (b *redisAdapter) Get(bucket string, key string) (v []byte, err error) {
var val string
val, err = b.db.HGet(ctx, bucket, key).Result()
if err == nil {
v = []byte(val)
}
return
}
func (b *redisAdapter) GetAll(bucket string) (m map[string][]byte, err error) {
var val map[string]string
val, err = b.db.HGetAll(ctx, bucket).Result()
if err == nil && val != nil {
m = make(map[string][]byte)
for k, v := range val {
m[k] = []byte(v)
}
}
return
}
func (b *redisAdapter) Put(bucket string, key string, value []byte) error {
_, err := b.db.HSet(ctx, bucket, key, string(value)).Result()
return err
}
func (b *redisAdapter) Delete(bucket string, key string) error {
_, err := b.db.HDel(ctx, bucket, key).Result()
return err
}
func (b *redisAdapter) Close() error {
return b.db.Close()
}
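
For redis, each bucket maps onto a single hash, so GetAll becomes one HGETALL. A sketch of the same calls, run against miniredis (already a test dependency in the go.mod above) so it needs no live server:

package main

import (
    "context"
    "fmt"

    "github.com/alicebob/miniredis"
    "github.com/go-redis/redis/v8"
)

func main() {
    mr, err := miniredis.Run()
    if err != nil {
        panic(err)
    }
    defer mr.Close()

    ctx := context.Background()
    db := redis.NewClient(&redis.Options{Addr: mr.Addr()})

    // One redis hash per bucket, as in redisAdapter.
    db.HSet(ctx, "workers", "w1", `{"id":"w1"}`)
    v, _ := db.HGet(ctx, "workers", "w1").Result()
    fmt.Println(v) // {"id":"w1"}

    all, _ := db.HGetAll(ctx, "workers").Result()
    fmt.Println(len(all)) // 1
}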


@@ -2,17 +2,167 @@ package manager
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/alicebob/miniredis"
. "github.com/smartystreets/goconvey/convey"
. "github.com/tuna/tunasync/internal"
)
func TestBoltAdapter(t *testing.T) {
func SortMirrorStatus(status []MirrorStatus) {
sort.Slice(status, func(l, r int) bool {
return status[l].Name < status[r].Name
})
}
func DBAdapterTest(db dbAdapter) {
var err error
testWorkerIDs := []string{"test_worker1", "test_worker2"}
Convey("create worker", func() {
for _, id := range testWorkerIDs {
w := WorkerStatus{
ID: id,
Token: "token_" + id,
LastOnline: time.Now(),
LastRegister: time.Now(),
}
w, err = db.CreateWorker(w)
So(err, ShouldBeNil)
}
Convey("get existent worker", func() {
_, err := db.GetWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
})
Convey("list existent workers", func() {
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
Convey("get non-existent worker", func() {
_, err := db.GetWorker("invalid workerID")
So(err, ShouldNotBeNil)
})
Convey("delete existent worker", func() {
err := db.DeleteWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
_, err = db.GetWorker(testWorkerIDs[0])
So(err, ShouldNotBeNil)
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 1)
})
Convey("delete non-existent worker", func() {
err := db.DeleteWorker("invalid workerID")
So(err, ShouldNotBeNil)
ws, err := db.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
})
Convey("update mirror status", func() {
status := []MirrorStatus{
MirrorStatus{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
MirrorStatus{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now().Add(-time.Hour),
LastStarted: time.Now().Add(-time.Minute),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
MirrorStatus{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now().Add(-time.Minute),
LastStarted: time.Now().Add(-time.Second),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
}
SortMirrorStatus(status)
for _, s := range status {
_, err := db.UpdateMirrorStatus(s.Worker, s.Name, s)
So(err, ShouldBeNil)
}
Convey("get mirror status", func() {
m, err := db.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal(status[0])
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(m)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list mirror status", func() {
ms, err := db.ListMirrorStatus(testWorkerIDs[0])
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list all mirror status", func() {
ms, err := db.ListAllMirrorStatus()
So(err, ShouldBeNil)
SortMirrorStatus(ms)
expectedJSON, err := json.Marshal(status)
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("flush disabled jobs", func() {
ms, err := db.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 3)
err = db.FlushDisabledJobs()
So(err, ShouldBeNil)
ms, err = db.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 2)
})
})
return
}
func TestDBAdapter(t *testing.T) {
Convey("boltAdapter should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
@@ -28,135 +178,60 @@ func TestBoltAdapter(t *testing.T) {
So(err, ShouldBeNil)
}()
testWorkerIDs := []string{"test_worker1", "test_worker2"}
Convey("create worker", func() {
for _, id := range testWorkerIDs {
w := WorkerStatus{
ID: id,
Token: "token_" + id,
LastOnline: time.Now(),
}
w, err = boltDB.CreateWorker(w)
So(err, ShouldBeNil)
}
Convey("get existent worker", func() {
_, err := boltDB.GetWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
DBAdapterTest(boltDB)
})
Convey("list existent workers", func() {
ws, err := boltDB.ListWorkers()
Convey("redisAdapter should work", t, func() {
mr, err := miniredis.Run()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
addr := fmt.Sprintf("redis://%s", mr.Addr())
redisDB, err := makeDBAdapter("redis", addr)
So(err, ShouldBeNil)
defer func() {
// close redisDB
err := redisDB.Close()
So(err, ShouldBeNil)
mr.Close()
}()
DBAdapterTest(redisDB)
})
Convey("get non-existent worker", func() {
_, err := boltDB.GetWorker("invalid workerID")
So(err, ShouldNotBeNil)
Convey("badgerAdapter should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
dbType, dbFile := "badger", filepath.Join(tmpDir, "badger.db")
badgerDB, err := makeDBAdapter(dbType, dbFile)
So(err, ShouldBeNil)
defer func() {
// close badgerDB
err := badgerDB.Close()
So(err, ShouldBeNil)
}()
DBAdapterTest(badgerDB)
})
Convey("delete existent worker", func() {
err := boltDB.DeleteWorker(testWorkerIDs[0])
So(err, ShouldBeNil)
_, err = boltDB.GetWorker(testWorkerIDs[0])
So(err, ShouldNotBeNil)
ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 1)
})
Convey("delete non-existent worker", func() {
err := boltDB.DeleteWorker("invalid workerID")
So(err, ShouldNotBeNil)
ws, err := boltDB.ListWorkers()
So(err, ShouldBeNil)
So(len(ws), ShouldEqual, 2)
})
})
Convey("update mirror status", func() {
status := []MirrorStatus{
MirrorStatus{
Name: "arch-sync1",
Worker: testWorkerIDs[0],
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "3GB",
},
MirrorStatus{
Name: "arch-sync2",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Disabled,
LastUpdate: time.Now().Add(-time.Hour),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
MirrorStatus{
Name: "arch-sync3",
Worker: testWorkerIDs[1],
IsMaster: true,
Status: Success,
LastUpdate: time.Now().Add(-time.Second),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
},
}
for _, s := range status {
_, err := boltDB.UpdateMirrorStatus(s.Worker, s.Name, s)
Convey("leveldbAdapter should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
}
dbType, dbFile := "leveldb", filepath.Join(tmpDir, "leveldb.db")
leveldbDB, err := makeDBAdapter(dbType, dbFile)
So(err, ShouldBeNil)
Convey("get mirror status", func() {
m, err := boltDB.GetMirrorStatus(testWorkerIDs[0], status[0].Name)
defer func() {
// close leveldbDB
err := leveldbDB.Close()
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal(status[0])
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(m)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list mirror status", func() {
ms, err := boltDB.ListMirrorStatus(testWorkerIDs[0])
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal([]MirrorStatus{status[0]})
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("list all mirror status", func() {
ms, err := boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
expectedJSON, err := json.Marshal(status)
So(err, ShouldBeNil)
actualJSON, err := json.Marshal(ms)
So(err, ShouldBeNil)
So(string(actualJSON), ShouldEqual, string(expectedJSON))
})
Convey("flush disabled jobs", func() {
ms, err := boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 3)
err = boltDB.FlushDisabledJobs()
So(err, ShouldBeNil)
ms, err = boltDB.ListAllMirrorStatus()
So(err, ShouldBeNil)
So(len(ms), ShouldEqual, 2)
})
})
}()
DBAdapterTest(leveldbDB)
})
}


@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"net/http"
"sync"
"time"
"github.com/gin-gonic/gin"
@@ -23,6 +24,7 @@ type Manager struct {
cfg *Config
engine *gin.Engine
adapter dbAdapter
rwmu sync.RWMutex
httpClient *http.Client
}
@@ -127,9 +129,11 @@ func (s *Manager) Run() {
}
}
// listAllJobs repond with all jobs of specified workers
// listAllJobs respond with all jobs of specified workers
func (s *Manager) listAllJobs(c *gin.Context) {
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListAllMirrorStatus()
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list all mirror status: %s",
err.Error(),
@@ -150,7 +154,9 @@ func (s *Manager) listAllJobs(c *gin.Context) {
// flushDisabledJobs deletes all jobs that marks as deleted
func (s *Manager) flushDisabledJobs(c *gin.Context) {
s.rwmu.Lock()
err := s.adapter.FlushDisabledJobs()
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to flush disabled jobs: %s",
err.Error(),
@@ -165,7 +171,9 @@ func (s *Manager) flushDisabledJobs(c *gin.Context) {
// deleteWorker deletes one worker by id
func (s *Manager) deleteWorker(c *gin.Context) {
workerID := c.Param("id")
s.rwmu.Lock()
err := s.adapter.DeleteWorker(workerID)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to delete worker: %s",
err.Error(),
@@ -178,10 +186,12 @@ func (s *Manager) deleteWorker(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{_infoKey: "deleted"})
}
// listWrokers respond with informations of all the workers
// listWorkers respond with information of all the workers
func (s *Manager) listWorkers(c *gin.Context) {
var workerInfos []WorkerStatus
s.rwmu.RLock()
workers, err := s.adapter.ListWorkers()
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list workers: %s",
err.Error(),
@@ -194,7 +204,10 @@ func (s *Manager) listWorkers(c *gin.Context) {
workerInfos = append(workerInfos,
WorkerStatus{
ID: w.ID,
URL: w.URL,
Token: "REDACTED",
LastOnline: w.LastOnline,
LastRegister: w.LastRegister,
})
}
c.JSON(http.StatusOK, workerInfos)
@@ -205,6 +218,7 @@ func (s *Manager) registerWorker(c *gin.Context) {
var _worker WorkerStatus
c.BindJSON(&_worker)
_worker.LastOnline = time.Now()
_worker.LastRegister = time.Now()
newWorker, err := s.adapter.CreateWorker(_worker)
if err != nil {
err := fmt.Errorf("failed to register worker: %s",
@@ -223,7 +237,9 @@ func (s *Manager) registerWorker(c *gin.Context) {
// listJobsOfWorker respond with all the jobs of the specified worker
func (s *Manager) listJobsOfWorker(c *gin.Context) {
workerID := c.Param("id")
s.rwmu.RLock()
mirrorStatusList, err := s.adapter.ListMirrorStatus(workerID)
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("failed to list jobs of worker %s: %s",
workerID, err.Error(),
@@ -255,9 +271,12 @@ func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
)
}
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
curStatus, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil {
fmt.Errorf("failed to get job %s of worker %s: %s",
logger.Errorf("failed to get job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
)
continue
@@ -269,7 +288,9 @@ func (s *Manager) updateSchedulesOfWorker(c *gin.Context) {
}
curStatus.Scheduled = schedule.NextSchedule
s.rwmu.Lock()
_, err = s.adapter.UpdateMirrorStatus(workerID, mirrorName, curStatus)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
@@ -295,16 +316,26 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
)
}
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
curStatus, _ := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
curTime := time.Now()
if status.Status == PreSyncing && curStatus.Status != PreSyncing {
status.LastStarted = curTime
} else {
status.LastStarted = curStatus.LastStarted
}
// Only successful syncing needs last_update
if status.Status == Success {
status.LastUpdate = time.Now()
status.LastUpdate = curTime
} else {
status.LastUpdate = curStatus.LastUpdate
}
if status.Status == Success || status.Status == Failed {
status.LastEnded = time.Now()
status.LastEnded = curTime
} else {
status.LastEnded = curStatus.LastEnded
}
@@ -324,7 +355,9 @@ func (s *Manager) updateJobOfWorker(c *gin.Context) {
logger.Noticef("Job [%s] @<%s> %s", status.Name, status.Worker, status.Status)
}
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
@@ -346,7 +379,10 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
c.BindJSON(&msg)
mirrorName := msg.Name
s.rwmu.RLock()
s.adapter.RefreshWorker(workerID)
status, err := s.adapter.GetMirrorStatus(workerID, mirrorName)
s.rwmu.RUnlock()
if err != nil {
logger.Errorf(
"Failed to get status of mirror %s @<%s>: %s",
@@ -363,7 +399,9 @@ func (s *Manager) updateMirrorSize(c *gin.Context) {
logger.Noticef("Mirror size of [%s] @<%s>: %s", status.Name, status.Worker, status.Size)
s.rwmu.Lock()
newStatus, err := s.adapter.UpdateMirrorStatus(workerID, mirrorName, status)
s.rwmu.Unlock()
if err != nil {
err := fmt.Errorf("failed to update job %s of worker %s: %s",
mirrorName, workerID, err.Error(),
@@ -386,7 +424,9 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
return
}
s.rwmu.RLock()
w, err := s.adapter.GetWorker(workerID)
s.rwmu.RUnlock()
if err != nil {
err := fmt.Errorf("worker %s is not registered yet", workerID)
s.returnErrJSON(c, http.StatusBadRequest, err)
@@ -403,7 +443,9 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
// update job status, even if the job did not disable successfully,
// this status should be set as disabled
s.rwmu.RLock()
curStat, _ := s.adapter.GetMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID)
s.rwmu.RUnlock()
changed := false
switch clientCmd.Cmd {
case CmdDisable:
@@ -414,7 +456,9 @@ func (s *Manager) handleClientCmd(c *gin.Context) {
changed = true
}
if changed {
s.rwmu.Lock()
s.adapter.UpdateMirrorStatus(clientCmd.WorkerID, clientCmd.MirrorID, curStat)
s.rwmu.Unlock()
}
logger.Noticef("Posting command '%s %s' to <%s>", clientCmd.Cmd, clientCmd.MirrorID, clientCmd.WorkerID)

View file

@@ -7,6 +7,7 @@ import (
"math/rand"
"net/http"
"strings"
"sync/atomic"
"testing"
"time"
@@ -64,6 +65,34 @@ func TestHTTPServer(t *testing.T) {
So(msg[_errorKey], ShouldEqual, fmt.Sprintf("failed to list jobs of worker %s: %s", _magicBadWorkerID, "database fail"))
})
Convey("when register multiple workers", func(ctx C) {
N := 10
var cnt uint32
for i := 0; i < N; i++ {
go func(id int) {
w := WorkerStatus{
ID: fmt.Sprintf("worker%d", id),
}
resp, err := PostJSON(baseURL+"/workers", w, nil)
ctx.So(err, ShouldBeNil)
ctx.So(resp.StatusCode, ShouldEqual, http.StatusOK)
atomic.AddUint32(&cnt, 1)
}(i)
}
time.Sleep(2 * time.Second)
So(cnt, ShouldEqual, N)
Convey("list all workers", func(ctx C) {
resp, err := http.Get(baseURL + "/workers")
So(err, ShouldBeNil)
defer resp.Body.Close()
var actualResponseObj []WorkerStatus
err = json.NewDecoder(resp.Body).Decode(&actualResponseObj)
So(err, ShouldBeNil)
So(len(actualResponseObj), ShouldEqual, N+1)
})
})
Convey("when register a worker", func(ctx C) {
w := WorkerStatus{
ID: "test_worker1",
@@ -151,10 +180,41 @@ func TestHTTPServer(t *testing.T) {
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
So(m.LastStarted.IsZero(), ShouldBeTrue) // hasn't been initialized yet
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
})
// start syncing
status.Status = PreSyncing
time.Sleep(1 * time.Second)
resp, err = PostJSON(fmt.Sprintf("%s/workers/%s/jobs/%s", baseURL, status.Worker, status.Name), status, nil)
So(err, ShouldBeNil)
defer resp.Body.Close()
So(resp.StatusCode, ShouldEqual, http.StatusOK)
Convey("update mirror status to PreSync - starting sync", func(ctx C) {
var ms []MirrorStatus
resp, err := GetJSON(baseURL+"/workers/test_worker1/jobs", &ms, nil)
So(err, ShouldBeNil)
So(resp.StatusCode, ShouldEqual, http.StatusOK)
// err = json.NewDecoder(resp.Body).Decode(&mirrorStatusList)
m := ms[0]
So(m.Name, ShouldEqual, status.Name)
So(m.Worker, ShouldEqual, status.Worker)
So(m.Status, ShouldEqual, status.Status)
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 1*time.Second)
So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeGreaterThan, 1*time.Second)
})
Convey("list all job status of all workers", func(ctx C) {
var ms []WebMirrorStatus
resp, err := GetJSON(baseURL+"/jobs", &ms, nil)
@@ -167,8 +227,9 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 1*time.Second)
So(time.Now().Sub(m.LastEnded.Time), ShouldBeLessThan, 1*time.Second)
So(time.Now().Sub(m.LastUpdate.Time), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastStarted.Time), ShouldBeLessThan, 2*time.Second)
So(time.Now().Sub(m.LastEnded.Time), ShouldBeLessThan, 3*time.Second)
})
@@ -197,8 +258,9 @@ func TestHTTPServer(t *testing.T) {
So(m.Upstream, ShouldEqual, status.Upstream)
So(m.Size, ShouldEqual, "5GB")
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 1*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
So(time.Now().Sub(m.LastUpdate), ShouldBeLessThan, 3*time.Second)
So(time.Now().Sub(m.LastStarted), ShouldBeLessThan, 2*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 3*time.Second)
})
})
@@ -251,6 +313,7 @@ func TestHTTPServer(t *testing.T) {
So(m.Size, ShouldEqual, status.Size)
So(m.IsMaster, ShouldEqual, status.IsMaster)
So(time.Now().Sub(m.LastUpdate), ShouldBeGreaterThan, 3*time.Second)
So(time.Now().Sub(m.LastStarted), ShouldBeGreaterThan, 3*time.Second)
So(time.Now().Sub(m.LastEnded), ShouldBeLessThan, 1*time.Second)
})
})
@@ -263,6 +326,7 @@ func TestHTTPServer(t *testing.T) {
IsMaster: true,
Status: Success,
LastUpdate: time.Now(),
LastStarted: time.Now(),
LastEnded: time.Now(),
Upstream: "mirrors.tuna.tsinghua.edu.cn",
Size: "4GB",
@@ -398,6 +462,15 @@ func (b *mockDBAdapter) CreateWorker(w WorkerStatus) (WorkerStatus, error) {
return w, nil
}
func (b *mockDBAdapter) RefreshWorker(workerID string) (w WorkerStatus, err error) {
w, err = b.GetWorker(workerID)
if err == nil {
w.LastOnline = time.Now()
w, err = b.CreateWorker(w)
}
return w, err
}
func (b *mockDBAdapter) GetMirrorStatus(workerID, mirrorID string) (MirrorStatus, error) {
id := mirrorID + "/" + workerID
status, ok := b.statusStore[id]
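
The concurrent-registration test above counts successes with atomic.AddUint32 and then sleeps for a fixed two seconds before asserting. A sync.WaitGroup makes the same shape deterministic; this is an alternative sketch, not what the diff does:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const N = 10
	var cnt uint32
	var wg sync.WaitGroup
	for i := 0; i < N; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// POST /workers for worker<id> would go here
			atomic.AddUint32(&cnt, 1)
		}(i)
	}
	wg.Wait() // deterministic: no fixed two-second sleep
	fmt.Println(cnt == N)
}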

View file

@@ -16,6 +16,7 @@ type baseProvider struct {
name string
interval time.Duration
retry int
timeout time.Duration
isMaster bool
cmd *cmdJob
@@ -56,6 +57,10 @@ func (p *baseProvider) Retry() int {
return p.retry
}
func (p *baseProvider) Timeout() time.Duration {
return p.timeout
}
func (p *baseProvider) IsMaster() bool {
return p.isMaster
}
@@ -142,7 +147,7 @@ func (p *baseProvider) closeLogFile() (err error) {
return
}
func (p *baseProvider) Run() error {
func (p *baseProvider) Run(started chan empty) error {
panic("Not Implemented")
}
@@ -169,6 +174,7 @@ func (p *baseProvider) Terminate() error {
defer p.Unlock()
logger.Debugf("terminating provider: %s", p.Name())
if !p.IsRunning() {
logger.Warningf("Terminate() called while IsRunning is false: %s", p.Name())
return nil
}
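
Run's new started parameter lets the job runner distinguish "never launched" from "launched, then failed": providers send on the channel only after Start() has succeeded, so Terminate() always has a live process to act on. A minimal sketch of that contract:

package main

import (
	"errors"
	"fmt"
)

// run sketches the new Run(started) contract: signal started only after
// a successful Start, so "failed to launch" and "failed while syncing"
// are distinguishable to the caller.
func run(started chan struct{}, start, wait func() error) error {
	if err := start(); err != nil {
		return err // no started signal: the job never launched
	}
	started <- struct{}{} // from here on, Terminate() is meaningful
	return wait()
}

func main() {
	started := make(chan struct{}, 1)
	err := run(started,
		func() error { return nil },
		func() error { return errors.New("sync failed") },
	)
	<-started
	fmt.Println(err)
}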

View file

@@ -1,3 +1,5 @@
// +build linux
package worker
import (

View file

@@ -0,0 +1,30 @@
// +build !linux
package worker
type btrfsSnapshotHook struct {
}
func newBtrfsSnapshotHook(provider mirrorProvider, snapshotPath string, mirror mirrorConfig) *btrfsSnapshotHook {
return &btrfsSnapshotHook{}
}
func (h *btrfsSnapshotHook) postExec() error {
return nil
}
func (h *btrfsSnapshotHook) postFail() error {
return nil
}
func (h *btrfsSnapshotHook) postSuccess() error {
return nil
}
func (h *btrfsSnapshotHook) preExec() error {
return nil
}
func (h *btrfsSnapshotHook) preJob() error {
return nil
}

View file

@@ -1,64 +1,296 @@
package worker
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"time"
"golang.org/x/sys/unix"
"github.com/codeskyblue/go-sh"
"github.com/moby/moby/pkg/reexec"
cgv1 "github.com/containerd/cgroups"
cgv2 "github.com/containerd/cgroups/v2"
contspecs "github.com/opencontainers/runtime-spec/specs-go"
)
type cgroupHook struct {
emptyHook
basePath string
baseGroup string
created bool
subsystem string
memLimit string
cgCfg cgroupConfig
memLimit MemBytes
cgMgrV1 cgv1.Cgroup
cgMgrV2 *cgv2.Manager
}
func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit string) *cgroupHook {
if basePath == "" {
basePath = "/sys/fs/cgroup"
type execCmd string
const (
cmdCont execCmd = "cont"
cmdAbrt execCmd = "abrt"
)
func init () {
reexec.Register("tunasync-exec", waitExec)
}
func waitExec () {
binary, err := exec.LookPath(os.Args[1])
if err != nil {
panic(err)
}
pipe := os.NewFile(3, "pipe")
if pipe != nil {
if _, err := pipe.Stat(); err == nil {
cmdBytes, err := ioutil.ReadAll(pipe)
if err != nil {
panic(err)
}
if err := pipe.Close(); err != nil {
}
cmd := execCmd(string(cmdBytes))
switch cmd {
case cmdAbrt:
fallthrough
default:
panic("Exited on request")
case cmdCont:
}
}
}
args := os.Args[1:]
env := os.Environ()
if err := syscall.Exec(binary, args, env); err != nil {
panic(err)
}
panic("Exec failed.")
}
func initCgroup(cfg *cgroupConfig) (error) {
logger.Debugf("Initializing cgroup")
baseGroup := cfg.Group
//subsystem := cfg.Subsystem
// If baseGroup is empty, it implies using the cgroup of the current process
// otherwise, it refers to a absolute group path
if baseGroup != "" {
baseGroup = filepath.Join("/", baseGroup)
}
cfg.isUnified = cgv1.Mode() == cgv1.Unified
if cfg.isUnified {
logger.Debugf("Cgroup V2 detected")
g := baseGroup
if g == "" {
logger.Debugf("Detecting my cgroup path")
var err error
if g, err = cgv2.NestedGroupPath(""); err != nil {
return err
}
}
logger.Infof("Using cgroup path: %s", g)
var err error
if cfg.cgMgrV2, err = cgv2.LoadManager("/sys/fs/cgroup", g); err != nil {
return err
}
if baseGroup == "" {
baseGroup = "tunasync"
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV2.NewChild("__worker", nil);
if err != nil {
return err
}
if subsystem == "" {
subsystem = "cpu"
for {
logger.Debugf("Reading pids")
procs, err := cfg.cgMgrV2.Procs(false)
if err != nil {
logger.Errorf("Cannot read pids in that group")
return err
}
if len(procs) == 0 {
break
}
for _, p := range(procs) {
if err := wkrMgr.AddProc(p); err != nil{
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
return err
}
}
}
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV2.NewChild("__test", nil);
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
}
if err := testMgr.Delete(); err != nil {
return err
}
procs, err := cfg.cgMgrV2.Procs(false)
if err != nil {
logger.Errorf("Cannot read pids in that group")
return err
}
if len(procs) != 0 {
return fmt.Errorf("There are remaining processes in cgroup %s", baseGroup)
}
}
} else {
logger.Debugf("Cgroup V1 detected")
var pather cgv1.Path
if baseGroup != "" {
pather = cgv1.StaticPath(baseGroup)
} else {
pather = (func(p cgv1.Path) (cgv1.Path){
return func(subsys cgv1.Name) (string, error){
path, err := p(subsys);
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
}
logger.Infof("Loading cgroup")
var err error
if cfg.cgMgrV1, err = cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error{
cfg.InitCheck = cgv1.AllowAny
return nil
}); err != nil {
return err
}
logger.Debugf("Available subsystems:")
for _, subsys := range(cfg.cgMgrV1.Subsystems()) {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Debugf("%s: %s", subsys.Name(), p)
}
if baseGroup == "" {
logger.Debugf("Creating a sub group and move all processes into it")
wkrMgr, err := cfg.cgMgrV1.New("__worker", &contspecs.LinuxResources{});
if err != nil {
return err
}
for _, subsys := range(cfg.cgMgrV1.Subsystems()) {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
for {
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
return err
}
if len(procs) == 0 {
break
}
for _, proc := range(procs) {
if err := wkrMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
return err
}
}
}
}
}
} else {
logger.Debugf("Trying to create a sub group in that group")
testMgr, err := cfg.cgMgrV1.New("__test", &contspecs.LinuxResources{});
if err != nil {
logger.Errorf("Cannot create a sub group in the cgroup")
return err
}
if err := testMgr.Delete(); err != nil {
return err
}
for _, subsys := range(cfg.cgMgrV1.Subsystems()) {
logger.Debugf("Reading pids for subsystem %s", subsys.Name())
procs, err := cfg.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
p, err := pather(subsys.Name())
if err != nil {
return err
}
logger.Errorf("Cannot read pids in group %s of subsystem %s", p, subsys.Name())
return err
}
if len(procs) != 0 {
p, err := pather(subsys.Name())
if err != nil {
return err
}
return fmt.Errorf("There are remaining processes in cgroup %s of subsystem %s", p, subsys.Name())
}
}
}
}
return nil
}
func newCgroupHook(p mirrorProvider, cfg cgroupConfig, memLimit MemBytes) *cgroupHook {
return &cgroupHook{
emptyHook: emptyHook{
provider: p,
},
basePath: basePath,
baseGroup: baseGroup,
subsystem: subsystem,
cgCfg: cfg,
memLimit: memLimit,
}
}
func (c *cgroupHook) preExec() error {
c.created = true
if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil {
if c.cgCfg.isUnified {
logger.Debugf("Creating v2 cgroup for task %s", c.provider.Name())
var resSet *cgv2.Resources
if c.memLimit != 0 {
resSet = &cgv2.Resources {
Memory: &cgv2.Memory{
Max: func(i int64) *int64 { return &i }(c.memLimit.Value()),
},
}
}
subMgr, err := c.cgCfg.cgMgrV2.NewChild(c.provider.Name(), resSet)
if err != nil {
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
if c.subsystem != "memory" {
return nil
c.cgMgrV2 = subMgr
} else {
logger.Debugf("Creating v1 cgroup for task %s", c.provider.Name())
var resSet contspecs.LinuxResources
if c.memLimit != 0 {
resSet = contspecs.LinuxResources {
Memory: &contspecs.LinuxMemory{
Limit: func(i int64) *int64 { return &i }(c.memLimit.Value()),
},
}
if c.memLimit != "" {
gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name())
return sh.Command(
"cgset", "-r",
fmt.Sprintf("memory.limit_in_bytes=%s", c.memLimit),
gname,
).Run()
}
subMgr, err := c.cgCfg.cgMgrV1.New(c.provider.Name(), &resSet)
if err != nil {
logger.Errorf("Failed to create cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV1 = subMgr
}
return nil
}
@@ -69,36 +301,59 @@ func (c *cgroupHook) postExec() error {
logger.Errorf("Error killing tasks: %s", err.Error())
}
c.created = false
return sh.Command("cgdelete", c.Cgroup()).Run()
}
func (c *cgroupHook) Cgroup() string {
name := c.provider.Name()
return fmt.Sprintf("%s:%s/%s", c.subsystem, c.baseGroup, name)
if c.cgCfg.isUnified {
logger.Debugf("Deleting v2 cgroup for task %s", c.provider.Name())
if err := c.cgMgrV2.Delete(); err != nil {
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV2 = nil
} else {
logger.Debugf("Deleting v1 cgroup for task %s", c.provider.Name())
if err := c.cgMgrV1.Delete(); err != nil {
logger.Errorf("Failed to delete cgroup for task %s: %s", c.provider.Name(), err.Error())
return err
}
c.cgMgrV1 = nil
}
return nil
}
func (c *cgroupHook) killAll() error {
if !c.created {
if c.cgCfg.isUnified {
if c.cgMgrV2 == nil {
return nil
}
name := c.provider.Name()
} else {
if c.cgMgrV1 == nil {
return nil
}
}
readTaskList := func() ([]int, error) {
taskList := []int{}
taskFile, err := os.Open(filepath.Join(c.basePath, c.subsystem, c.baseGroup, name, "tasks"))
if err != nil {
return taskList, err
if c.cgCfg.isUnified {
procs, err := c.cgMgrV2.Procs(false)
if (err != nil) {
return []int{}, err
}
defer taskFile.Close()
scanner := bufio.NewScanner(taskFile)
for scanner.Scan() {
pid, err := strconv.Atoi(scanner.Text())
if err != nil {
return taskList, err
for _, proc := range procs {
taskList = append(taskList, int(proc))
}
} else {
taskSet := make(map[int]struct{})
for _, subsys := range(c.cgMgrV1.Subsystems()) {
procs, err := c.cgMgrV1.Processes(subsys.Name(), false)
if err != nil {
return []int{}, err
}
for _, proc := range(procs) {
taskSet[proc.Pid] = struct{}{}
}
}
for proc := range(taskSet) {
taskList = append(taskList, proc)
}
taskList = append(taskList, pid)
}
return taskList, nil
}
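
The re-exec'd child (waitExec) and the parent coordinate over an inherited pipe: the child blocks reading fd 3 until the parent has placed its pid into the cgroup, then the parent writes cmdCont to release it (or cmdAbrt to make it bail out). A self-contained sketch of the same fd-3 handshake, using os/exec directly instead of the moby reexec wrapper:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
)

func main() {
	if os.Getenv("ROLE") == "child" {
		pipe := os.NewFile(3, "pipe") // ExtraFiles[0] appears as fd 3
		cmdBytes, err := ioutil.ReadAll(pipe)
		if err != nil {
			panic(err)
		}
		pipe.Close()
		if string(cmdBytes) != "cont" {
			panic("Exited on request")
		}
		fmt.Println("child: released, would exec the real command now")
		return
	}
	pipeR, pipeW, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	child := exec.Command(os.Args[0])
	child.Env = append(os.Environ(), "ROLE=child")
	child.Stdout = os.Stdout
	child.ExtraFiles = []*os.File{pipeR} // becomes fd 3 in the child
	if err := child.Start(); err != nil {
		panic(err)
	}
	pipeR.Close() // parent keeps only the write end
	// ... the real code adds child.Process.Pid to the cgroup here ...
	pipeW.WriteString("cont") // release the child
	pipeW.Close()             // child's ReadAll needs this EOF
	child.Wait()
}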

View file

@@ -3,17 +3,101 @@ package worker
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"errors"
"syscall"
cgv1 "github.com/containerd/cgroups"
cgv2 "github.com/containerd/cgroups/v2"
units "github.com/docker/go-units"
"github.com/moby/moby/pkg/reexec"
. "github.com/smartystreets/goconvey/convey"
)
func init() {
_, testReexec := os.LookupEnv("TESTREEXEC")
if ! testReexec {
reexec.Init()
}
}
func TestReexec(t *testing.T) {
testCase, testReexec := os.LookupEnv("TESTREEXEC")
if ! testReexec {
return
}
for len(os.Args) > 1 {
thisArg := os.Args[1]
os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
if thisArg == "--" {
break
}
}
switch testCase {
case "1":
Convey("Reexec should panic when command not found", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldPanicWith, exec.ErrNotFound)
})
case "2":
Convey("Reexec should run when fd 3 is not open", t, func(ctx C){
So((func() error{
pipe := os.NewFile(3, "pipe")
if pipe == nil {
return errors.New("pipe is nil")
} else {
_, err := pipe.Stat()
return err
}
})(), ShouldNotBeNil)
So(func(){
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "3":
Convey("Reexec should fail when fd 3 is sent with abrt cmd", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldPanicWith, "Exited on request")
})
case "4":
Convey("Reexec should run when fd 3 is sent with cont cmd", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldPanicWith, syscall.ENOEXEC)
})
case "5":
Convey("Reexec should not be triggered when argv[0] is not reexec", t, func(ctx C){
So(func(){
reexec.Init()
}, ShouldNotPanic)
})
}
}
func TestCgroup(t *testing.T) {
Convey("Cgroup Should Work", t, func(ctx C) {
var cgcf *cgroupConfig
Convey("init cgroup", t, func(ctx C){
_, useCurrentCgroup := os.LookupEnv("USECURCGROUP")
cgcf = &cgroupConfig{BasePath: "/sys/fs/cgroup", Group: "tunasync", Subsystem: "cpu"}
if useCurrentCgroup {
cgcf.Group = ""
}
err := initCgroup(cgcf)
So(err, ShouldBeNil)
if cgcf.isUnified {
So(cgcf.cgMgrV2, ShouldNotBeNil)
} else {
So(cgcf.cgMgrV1, ShouldNotBeNil)
}
Convey("Cgroup Should Work", func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
@@ -72,18 +156,14 @@ sleep 30
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "")
cg := newCgroupHook(provider, *cgcf, 0)
provider.AddHook(cg)
err = cg.preExec()
if err != nil {
logger.Errorf("Failed to create cgroup")
return
}
So(err, ShouldBeNil)
go func() {
err = provider.Run()
err := provider.Run(make(chan empty, 1))
ctx.So(err, ShouldNotBeNil)
}()
@@ -111,7 +191,7 @@ sleep 30
})
Convey("Rsync Memory Should Be Limited", t, func() {
Convey("Rsync Memory Should Be Limited", func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
@@ -132,19 +212,112 @@ sleep 30
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "512M")
cg := newCgroupHook(provider, *cgcf, 512 * units.MiB)
provider.AddHook(cg)
err = cg.preExec()
if err != nil {
logger.Errorf("Failed to create cgroup")
return
So(err, ShouldBeNil)
if cgcf.isUnified {
cgpath := filepath.Join(cgcf.BasePath, cgcf.Group, provider.Name())
if useCurrentCgroup {
group, err := cgv2.NestedGroupPath(filepath.Join("..", provider.Name()))
So(err, ShouldBeNil)
cgpath = filepath.Join(cgcf.BasePath, group)
}
if cg.subsystem == "memory" {
memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes"))
memoLimit, err := ioutil.ReadFile(filepath.Join(cgpath, "memory.max"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
} else {
for _, subsys := range(cg.cgMgrV1.Subsystems()) {
if subsys.Name() == cgv1.Memory {
cgpath := filepath.Join(cgcf.Group, provider.Name())
if useCurrentCgroup {
p, err := cgv1.NestedPath(filepath.Join("..", provider.Name()))(cgv1.Memory)
So(err, ShouldBeNil)
cgpath = p
}
memoLimit, err := ioutil.ReadFile(filepath.Join(cgcf.BasePath, "memory", cgpath, "memory.limit_in_bytes"))
So(err, ShouldBeNil)
So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
}
}
}
cg.postExec()
So(cg.cgMgrV1, ShouldBeNil)
})
Reset(func() {
if cgcf.isUnified {
if cgcf.Group == "" {
wkrg, err := cgv2.NestedGroupPath("");
So(err, ShouldBeNil)
wkrMgr, err := cgv2.LoadManager("/sys/fs/cgroup", wkrg);
allCtrls, err := wkrMgr.Controllers()
So(err, ShouldBeNil)
err = wkrMgr.ToggleControllers(allCtrls, cgv2.Disable)
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV2
for {
logger.Debugf("Restoring pids")
procs, err := wkrMgr.Procs(false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, p := range(procs) {
if err := origMgr.AddProc(p); err != nil{
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
} else {
if cgcf.Group == "" {
pather := (func(p cgv1.Path) (cgv1.Path){
return func(subsys cgv1.Name) (string, error){
path, err := p(subsys);
if err != nil {
return "", err
}
if path == "/" {
return "", cgv1.ErrControllerNotActive
}
return path, err
}
})(cgv1.NestedPath(""))
wkrMgr, err := cgv1.Load(cgv1.V1, pather, func(cfg *cgv1.InitConfig) error{
cfg.InitCheck = cgv1.AllowAny
return nil
})
So(err, ShouldBeNil)
origMgr := cgcf.cgMgrV1
for _, subsys := range(wkrMgr.Subsystems()){
for {
procs, err := wkrMgr.Processes(subsys.Name(), false)
So(err, ShouldBeNil)
if len(procs) == 0 {
break
}
for _, proc := range(procs) {
if err := origMgr.Add(proc); err != nil {
if errors.Is(err, syscall.ESRCH) {
logger.Debugf("Write pid %d to sub group failed: process vanished, ignoring")
} else {
So(err, ShouldBeNil)
}
}
}
}
}
err = wkrMgr.Delete()
So(err, ShouldBeNil)
}
}
})
})
}
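
The Reset block above is how the test hands processes back and deletes its temporary groups: in goconvey, the enclosing Convey body re-runs for every leaf branch, and Reset callbacks run after each of those branches. A minimal sketch of that execution order (package and test names are illustrative):

package example

import (
	"fmt"
	"testing"

	. "github.com/smartystreets/goconvey/convey"
)

func TestResetOrder(t *testing.T) {
	Convey("setup", t, func() {
		fmt.Println("acquire resource") // runs once per leaf branch
		Convey("branch A", func() { fmt.Println("use in A") })
		Convey("branch B", func() { fmt.Println("use in B") })
		Reset(func() { fmt.Println("cleanup") }) // runs after each branch
	})
}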

View file

@@ -16,6 +16,7 @@ type cmdConfig struct {
workingDir, logDir, logFile string
interval time.Duration
retry int
timeout time.Duration
env map[string]string
failOnMatch string
sizePattern string
@@ -41,6 +42,7 @@ func newCmdProvider(c cmdConfig) (*cmdProvider, error) {
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
cmdConfig: c,
}
@@ -86,12 +88,13 @@ func (p *cmdProvider) DataSize() string {
return p.dataSize
}
func (p *cmdProvider) Run() error {
func (p *cmdProvider) Run(started chan empty) error {
p.dataSize = ""
defer p.closeLogFile()
if err := p.Start(); err != nil {
return err
}
started <- empty{}
if err := p.Wait(); err != nil {
return err
}
@@ -139,5 +142,6 @@ func (p *cmdProvider) Start() error {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
return nil
}

View file

@@ -1,6 +1,6 @@
package worker
// put global viables and types here
// put global variables and types here
import (
"gopkg.in/op/go-logging.v1"

View file

@@ -7,6 +7,9 @@ import (
"github.com/BurntSushi/toml"
"github.com/imdario/mergo"
units "github.com/docker/go-units"
cgv1 "github.com/containerd/cgroups"
cgv2 "github.com/containerd/cgroups/v2"
)
type providerEnum uint8
@@ -53,6 +56,7 @@ type globalConfig struct {
Concurrent int `toml:"concurrent"`
Interval int `toml:"interval"`
Retry int `toml:"retry"`
Timeout int `toml:"timeout"`
ExecOnSuccess []string `toml:"exec_on_success"`
ExecOnFailure []string `toml:"exec_on_failure"`
@@ -86,6 +90,9 @@ type cgroupConfig struct {
BasePath string `toml:"base_path"`
Group string `toml:"group"`
Subsystem string `toml:"subsystem"`
isUnified bool
cgMgrV1 cgv1.Cgroup
cgMgrV2 *cgv2.Manager
}
type dockerConfig struct {
@@ -112,12 +119,39 @@ type includedMirrorConfig struct {
Mirrors []mirrorConfig `toml:"mirrors"`
}
type MemBytes int64
// Set sets the value of the MemBytes by passing a string
func (m *MemBytes) Set(value string) error {
val, err := units.RAMInBytes(value)
*m = MemBytes(val)
return err
}
// Type returns the type
func (m *MemBytes) Type() string {
return "bytes"
}
// Value returns the value in int64
func (m *MemBytes) Value() int64 {
return int64(*m)
}
// UnmarshalJSON is the customized unmarshaler for MemBytes
func (m *MemBytes) UnmarshalText(s []byte) error {
val, err := units.RAMInBytes(string(s))
*m = MemBytes(val)
return err
}
type mirrorConfig struct {
Name string `toml:"name"`
Provider providerEnum `toml:"provider"`
Upstream string `toml:"upstream"`
Interval int `toml:"interval"`
Retry int `toml:"retry"`
Timeout int `toml:"timeout"`
MirrorDir string `toml:"mirror_dir"`
MirrorSubDir string `toml:"mirror_subdir"`
LogDir string `toml:"log_dir"`
@@ -140,11 +174,13 @@ type mirrorConfig struct {
ExcludeFile string `toml:"exclude_file"`
Username string `toml:"username"`
Password string `toml:"password"`
RsyncNoTimeo bool `toml:"rsync_no_timeout"`
RsyncTimeout int `toml:"rsync_timeout"`
RsyncOptions []string `toml:"rsync_options"`
RsyncOverride []string `toml:"rsync_override"`
Stage1Profile string `toml:"stage1_profile"`
MemoryLimit string `toml:"memory_limit"`
MemoryLimit MemBytes `toml:"memory_limit"`
DockerImage string `toml:"docker_image"`
DockerVolumes []string `toml:"docker_volumes"`
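
The new MemBytes type parses human-readable sizes with units.RAMInBytes from docker/go-units, which interprets suffixes as binary multiples, so the config values "128M" and "256MiB" in the tests below both come out as multiples of 1024*1024 bytes. A small runnable check:

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"256MiB", "128M", "1g"} {
		n, err := units.RAMInBytes(s)
		fmt.Println(s, "->", n, err)
	}
	// 256MiB -> 268435456, 128M -> 134217728, 1g -> 1073741824
}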

View file

@@ -6,6 +6,8 @@ import (
"os"
"path/filepath"
"testing"
"time"
units "github.com/docker/go-units"
. "github.com/smartystreets/goconvey/convey"
)
@@ -19,6 +21,7 @@ mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
retry = 3
timeout = 86400
[manager]
api_base = "https://127.0.0.1:5000"
@@ -37,6 +40,7 @@ provider = "command"
upstream = "https://aosp.google.com/"
interval = 720
retry = 2
timeout = 3600
mirror_dir = "/data/git/AOSP"
exec_on_success = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@@ -50,12 +54,15 @@ provider = "two-stage-rsync"
stage1_profile = "debian"
upstream = "rsync://ftp.debian.org/debian/"
use_ipv6 = true
memory_limit = "256MiB"
[[mirrors]]
name = "fedora"
provider = "rsync"
upstream = "rsync://ftp.fedoraproject.org/fedora/"
use_ipv6 = true
memory_limit = "128M"
exclude_file = "/etc/tunasync.d/fedora-exclude.txt"
exec_on_failure = [
"bash -c 'echo ${TUNASYNC_JOB_EXIT_STATUS} > ${TUNASYNC_WORKING_DIR}/exit_status'"
@@ -119,6 +126,7 @@ use_ipv6 = true
So(cfg.Global.Name, ShouldEqual, "test_worker")
So(cfg.Global.Interval, ShouldEqual, 240)
So(cfg.Global.Retry, ShouldEqual, 3)
So(cfg.Global.Timeout, ShouldEqual, 86400)
So(cfg.Global.MirrorDir, ShouldEqual, "/data/mirrors")
So(cfg.Manager.APIBase, ShouldEqual, "https://127.0.0.1:5000")
@@ -130,23 +138,27 @@ use_ipv6 = true
So(m.Provider, ShouldEqual, provCommand)
So(m.Interval, ShouldEqual, 720)
So(m.Retry, ShouldEqual, 2)
So(m.Timeout, ShouldEqual, 3600)
So(m.Env["REPO"], ShouldEqual, "/usr/local/bin/aosp-repo")
m = cfg.Mirrors[1]
So(m.Name, ShouldEqual, "debian")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 256 * units.MiB)
m = cfg.Mirrors[2]
So(m.Name, ShouldEqual, "fedora")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provRsync)
So(m.ExcludeFile, ShouldEqual, "/etc/tunasync.d/fedora-exclude.txt")
So(m.MemoryLimit.Value(), ShouldEqual, 128 * units.MiB)
m = cfg.Mirrors[3]
So(m.Name, ShouldEqual, "debian-cd")
So(m.MirrorDir, ShouldEqual, "")
So(m.Provider, ShouldEqual, provTwoStageRsync)
So(m.MemoryLimit.Value(), ShouldEqual, 0)
m = cfg.Mirrors[4]
So(m.Name, ShouldEqual, "debian-security")
@@ -316,6 +328,7 @@ log_dir = "/var/log/tunasync/{{.Name}}"
mirror_dir = "/data/mirrors"
concurrent = 10
interval = 240
timeout = 86400
retry = 3
[manager]
@@ -388,5 +401,6 @@ use_ipv6 = true
rp, ok := p.(*rsyncProvider)
So(ok, ShouldBeTrue)
So(rp.WorkingDir(), ShouldEqual, "/data/mirrors/debian-cd")
So(p.Timeout(), ShouldEqual, 86400*time.Second)
})
}

View file

@@ -3,6 +3,9 @@ package worker
import (
"fmt"
"os"
"time"
"github.com/codeskyblue/go-sh"
)
type dockerHook struct {
@@ -10,12 +13,17 @@ type dockerHook struct {
image string
volumes []string
options []string
memoryLimit MemBytes
}
func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
volumes := []string{}
volumes = append(volumes, gCfg.Volumes...)
volumes = append(volumes, mCfg.DockerVolumes...)
if len(mCfg.ExcludeFile) > 0 {
arg := fmt.Sprintf("%s:%s:ro", mCfg.ExcludeFile, mCfg.ExcludeFile)
volumes = append(volumes, arg)
}
options := []string{}
options = append(options, gCfg.Options...)
@@ -28,6 +36,7 @@ func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dock
image: mCfg.DockerImage,
volumes: volumes,
options: options,
memoryLimit: mCfg.MemoryLimit,
}
}
@@ -60,6 +69,27 @@ func (d *dockerHook) postExec() error {
// sh.Command(
// "docker", "rm", "-f", d.Name(),
// ).Run()
name := d.Name()
retry := 10
for ; retry > 0; retry-- {
out, err := sh.Command(
"docker", "ps", "-a",
"--filter", "name=^"+name+"$",
"--format", "{{.Status}}",
).Output()
if err != nil {
logger.Errorf("docker ps failed: %v", err)
break
}
if len(out) == 0 {
break
}
logger.Debugf("container %s still exists: '%s'", name, string(out))
time.Sleep(1 * time.Second)
}
if retry == 0 {
logger.Warningf("container %s not removed automatically, next sync may fail", name)
}
d.provider.ExitContext()
return nil
}
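
postExec now polls docker ps up to ten times, one second apart, until the container row disappears; removal is asynchronous after the contained process exits, and starting the next sync while the old container name still exists would fail. A generic sketch of the same poll-until-gone shape:

package main

import (
	"fmt"
	"time"
)

// waitGone polls check once per second, up to retry attempts, until the
// resource is reported gone; like the hunk above, it also stops early
// if the check itself fails.
func waitGone(retry int, check func() (gone bool, err error)) bool {
	for ; retry > 0; retry-- {
		gone, err := check()
		if err != nil || gone {
			return gone
		}
		time.Sleep(1 * time.Second)
	}
	return false
}

func main() {
	deadline := time.Now().Add(2 * time.Second)
	gone := waitGone(10, func() (bool, error) {
		// stand-in for `docker ps` returning no output
		return time.Now().After(deadline), nil
	})
	fmt.Println("container gone:", gone)
}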

View file

@@ -8,6 +8,7 @@ import (
"path/filepath"
"testing"
"time"
units "github.com/docker/go-units"
"github.com/codeskyblue/go-sh"
. "github.com/smartystreets/goconvey/convey"
@@ -77,6 +78,7 @@ sleep 20
volumes: []string{
fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
},
memoryLimit: 512 * units.MiB,
}
provider.AddHook(d)
So(provider.Docker(), ShouldNotBeNil)
@@ -87,29 +89,34 @@ sleep 20
cmdRun("docker", []string{"images"})
exitedErr := make(chan error, 1)
go func() {
err = provider.Run()
err = provider.Run(make(chan empty, 1))
logger.Debugf("provider.Run() exited")
if err != nil {
logger.Errorf("provider.Run() failed: %v", err)
}
exitedErr <- err
}()
cmdRun("ps", []string{"aux"})
// Wait for docker running
time.Sleep(8 * time.Second)
cmdRun("ps", []string{"aux"})
for wait := 0; wait < 8; wait++ {
names, err := getDockerByName(d.Name())
So(err, ShouldBeNil)
if names != "" {
break
}
time.Sleep(1 * time.Second)
}
// cmdRun("ps", []string{"aux"})
// assert container running
names, err := getDockerByName(d.Name())
So(err, ShouldBeNil)
// So(names, ShouldEqual, d.Name()+"\n")
So(names, ShouldEqual, d.Name()+"\n")
err = provider.Terminate()
// So(err, ShouldBeNil)
So(err, ShouldBeNil)
cmdRun("ps", []string{"aux"})
// cmdRun("ps", []string{"aux"})
<-exitedErr
// container should be terminated and removed

View file

@@ -155,24 +155,43 @@ func (m *mirrorJob) Run(managerChan chan<- jobMessage, semaphore chan empty) err
var syncErr error
syncDone := make(chan error, 1)
started := make(chan empty, 10) // we may receive "started" more than one time (e.g. two_stage_rsync)
go func() {
err := provider.Run()
err := provider.Run(started)
syncDone <- err
}()
select { // Wait until provider started or error happened
case err := <-syncDone:
logger.Errorf("failed to start provider %s: %s", m.Name(), err.Error())
syncDone <- err // it will be read again later
case <-started:
logger.Debug("provider started")
}
// Now terminating the provider is feasible
var termErr error
timeout := provider.Timeout()
if timeout <= 0 {
timeout = 100000 * time.Hour // never time out
}
select {
case syncErr = <-syncDone:
logger.Debug("syncing done")
case <-time.After(timeout):
logger.Notice("provider timeout")
termErr = provider.Terminate()
syncErr = fmt.Errorf("%s timeout after %v", m.Name(), timeout)
case <-kill:
logger.Debug("received kill")
stopASAP = true
err := provider.Terminate()
if err != nil {
logger.Errorf("failed to terminate provider %s: %s", m.Name(), err.Error())
return err
}
termErr = provider.Terminate()
syncErr = errors.New("killed by manager")
}
if termErr != nil {
logger.Errorf("failed to terminate provider %s: %s", m.Name(), termErr.Error())
return termErr
}
// post-exec hooks
herr := runHooks(rHooks, func(h jobHook) error { return h.postExec() }, "post-exec")
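
Note why syncDone is buffered and the early-error branch re-sends into it ("it will be read again later"): the first select only establishes whether the provider launched, and the second select is the one that actually consumes the result, racing it against the timeout and the kill channel. A compact runnable sketch of the two-phase select:

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	syncDone := make(chan error, 1) // buffered: the result can be parked
	started := make(chan struct{}, 1)

	go func() {
		started <- struct{}{}                  // provider launched
		time.Sleep(50 * time.Millisecond)      // the sync itself
		syncDone <- errors.New("exited early") // the result
	}()

	// Phase 1: did it start at all?
	select {
	case err := <-syncDone:
		syncDone <- err // park it; phase 2 reads it again
	case <-started:
	}

	// Phase 2: consume the result, or time out.
	select {
	case err := <-syncDone:
		fmt.Println("done:", err)
	case <-time.After(1 * time.Second):
		fmt.Println("timeout")
	}
}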

View file

@@ -31,6 +31,7 @@ func TestMirrorJob(t *testing.T) {
logDir: tmpDir,
logFile: tmpFile,
interval: 1 * time.Second,
timeout: 7 * time.Second,
}
provider, err := newCmdProvider(c)
@@ -41,6 +42,7 @@ func TestMirrorJob(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("For a normal mirror job", func(ctx C) {
scriptContent := `#!/bin/bash
@@ -333,6 +335,66 @@ echo $TUNASYNC_WORKING_DIR
})
})
Convey("When a job timed out", func(ctx C) {
scriptContent := `#!/bin/bash
echo $TUNASYNC_WORKING_DIR
sleep 10
echo $TUNASYNC_WORKING_DIR
`
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
managerChan := make(chan jobMessage, 10)
semaphore := make(chan empty, 1)
job := newMirrorJob(provider)
Convey("It should be automatically terminated", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
expectedOutput := fmt.Sprintf("%s\n", provider.WorkingDir())
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, expectedOutput)
job.ctrlChan <- jobDisable
<-job.disabled
})
Convey("It should be retried", func(ctx C) {
go job.Run(managerChan, semaphore)
job.ctrlChan <- jobStart
time.Sleep(1 * time.Second)
msg := <-managerChan
So(msg.status, ShouldEqual, PreSyncing)
for i := 0; i < defaultMaxRetry; i++ {
msg = <-managerChan
So(msg.status, ShouldEqual, Syncing)
job.ctrlChan <- jobStart // should be ignored
msg = <-managerChan
So(msg.status, ShouldEqual, Failed)
So(msg.msg, ShouldContainSubstring, "timeout after")
// re-schedule after last try
So(msg.schedule, ShouldEqual, i == defaultMaxRetry-1)
}
job.ctrlChan <- jobDisable
<-job.disabled
})
})
})
}

View file

@@ -24,9 +24,9 @@ type mirrorProvider interface {
Type() providerEnum
// run mirror job in background
Run() error
// run mirror job in background
// Start then Wait
Run(started chan empty) error
// Start the job
Start() error
// Wait job to finish
Wait() error
@@ -46,6 +46,7 @@ type mirrorProvider interface {
Interval() time.Duration
Retry() int
Timeout() time.Duration
WorkingDir() string
LogDir() string
@@ -91,6 +92,9 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
if mirror.Retry == 0 {
mirror.Retry = cfg.Global.Retry
}
if mirror.Timeout == 0 {
mirror.Timeout = cfg.Global.Timeout
}
logDir = formatLogDir(logDir, mirror)
// IsMaster
@@ -118,6 +122,7 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
logFile: filepath.Join(logDir, "latest.log"),
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
env: mirror.Env,
}
p, err := newCmdProvider(pc)
@@ -135,6 +140,8 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
extraOptions: mirror.RsyncOptions,
rsyncNeverTimeout: mirror.RsyncNoTimeo,
rsyncTimeoutValue: mirror.RsyncTimeout,
overriddenOptions: mirror.RsyncOverride,
rsyncEnv: mirror.Env,
workingDir: mirrorDir,
@@ -144,6 +151,7 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
useIPv4: mirror.UseIPv4,
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
}
p, err := newRsyncProvider(rc)
if err != nil {
@@ -161,13 +169,17 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
password: mirror.Password,
excludeFile: mirror.ExcludeFile,
extraOptions: mirror.RsyncOptions,
rsyncNeverTimeout: mirror.RsyncNoTimeo,
rsyncTimeoutValue: mirror.RsyncTimeout,
rsyncEnv: mirror.Env,
workingDir: mirrorDir,
logDir: logDir,
logFile: filepath.Join(logDir, "latest.log"),
useIPv6: mirror.UseIPv6,
useIPv4: mirror.UseIPv4,
interval: time.Duration(mirror.Interval) * time.Minute,
retry: mirror.Retry,
timeout: time.Duration(mirror.Timeout) * time.Second,
}
p, err := newTwoStageRsyncProvider(rc)
if err != nil {
@@ -200,8 +212,7 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
// Add Cgroup Hook
provider.AddHook(
newCgroupHook(
provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group,
cfg.Cgroup.Subsystem, mirror.MemoryLimit,
provider, cfg.Cgroup, mirror.MemoryLimit,
),
)
}
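
Timeout follows the same convention as Retry: a per-mirror value of zero means "not set here", and the global value is copied in before the provider is built. A tiny sketch of that zero-value-means-inherit rule:

package main

import "fmt"

type global struct{ Retry, Timeout int }
type mirror struct{ Retry, Timeout int }

// applyDefaults mirrors the newMirrorProvider logic: a zero field means
// the mirror section did not set it, so the global value is inherited.
func applyDefaults(m *mirror, g global) {
	if m.Retry == 0 {
		m.Retry = g.Retry
	}
	if m.Timeout == 0 {
		m.Timeout = g.Timeout
	}
}

func main() {
	m := mirror{Retry: 2} // timeout not set
	applyDefaults(&m, global{Retry: 3, Timeout: 86400})
	fmt.Println(m.Retry, m.Timeout) // 2 86400
}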

View file

@@ -28,6 +28,7 @@ func TestRsyncProvider(t *testing.T) {
logDir: tmpDir,
logFile: tmpFile,
useIPv6: true,
timeout: 100 * time.Second,
interval: 600 * time.Second,
}
@@ -40,6 +41,7 @@ func TestRsyncProvider(t *testing.T) {
So(provider.LogDir(), ShouldEqual, c.logDir)
So(provider.LogFile(), ShouldEqual, c.logFile)
So(provider.Interval(), ShouldEqual, c.interval)
So(provider.Timeout(), ShouldEqual, c.timeout)
Convey("When entering a context (auto exit)", func() {
func() {
@@ -89,14 +91,14 @@ exit 0
"Done\n",
targetDir,
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 -6 %s %s",
provider.upstreamURL, provider.WorkingDir(),
),
)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
@@ -127,7 +129,7 @@ exit 0
provider, err := newRsyncProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
@@ -153,6 +155,7 @@ func TestRsyncProviderWithAuthentication(t *testing.T) {
password: "tunasyncpassword",
workingDir: tmpDir,
extraOptions: []string{"--delete-excluded"},
rsyncTimeoutValue: 30,
rsyncEnv: map[string]string{"RSYNC_PROXY": proxyAddr},
logDir: tmpDir,
logFile: tmpFile,
@@ -187,15 +190,15 @@ exit 0
"Done\n",
targetDir,
fmt.Sprintf(
"%s %s %s -aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"%s %s %s -aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 -4 --delete-excluded %s %s",
"--timeout=30 -4 --delete-excluded %s %s",
provider.username, provider.password, proxyAddr,
provider.upstreamURL, provider.WorkingDir(),
),
)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
@@ -219,6 +222,7 @@ func TestRsyncProviderWithOverriddenOptions(t *testing.T) {
upstreamURL: "rsync://rsync.tuna.moe/tuna/",
rsyncCmd: scriptFile,
workingDir: tmpDir,
rsyncNeverTimeout: true,
overriddenOptions: []string{"-aHvh", "--no-o", "--no-g", "--stats"},
extraOptions: []string{"--delete-excluded"},
logDir: tmpDir,
@@ -257,7 +261,7 @@ exit 0
provider.WorkingDir(),
)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
@@ -268,6 +272,78 @@ exit 0
})
}
func TestRsyncProviderWithDocker(t *testing.T) {
Convey("Rsync in Docker should work", t, func() {
tmpDir, err := ioutil.TempDir("", "tunasync")
defer os.RemoveAll(tmpDir)
So(err, ShouldBeNil)
scriptFile := filepath.Join(tmpDir, "myrsync")
excludeFile := filepath.Join(tmpDir, "exclude.txt")
g := &Config{
Global: globalConfig{
Retry: 2,
},
Docker: dockerConfig{
Enable: true,
Volumes: []string{
scriptFile + ":/bin/myrsync",
"/etc/gai.conf:/etc/gai.conf:ro",
},
},
}
c := mirrorConfig{
Name: "tuna",
Provider: provRsync,
Upstream: "rsync://rsync.tuna.moe/tuna/",
Command: "/bin/myrsync",
ExcludeFile: excludeFile,
DockerImage: "alpine:3.8",
LogDir: tmpDir,
MirrorDir: tmpDir,
UseIPv6: true,
Timeout: 100,
Interval: 600,
}
provider := newMirrorProvider(c, g)
So(provider.Type(), ShouldEqual, provRsync)
So(provider.Name(), ShouldEqual, c.Name)
So(provider.WorkingDir(), ShouldEqual, c.MirrorDir)
So(provider.LogDir(), ShouldEqual, c.LogDir)
cmdScriptContent := `#!/bin/sh
#echo "$@"
while [[ $# -gt 0 ]]; do
if [[ "$1" = "--exclude-from" ]]; then
cat "$2"
shift
fi
shift
done
`
err = ioutil.WriteFile(scriptFile, []byte(cmdScriptContent), 0755)
So(err, ShouldBeNil)
err = ioutil.WriteFile(excludeFile, []byte("__some_pattern"), 0755)
So(err, ShouldBeNil)
for _, hook := range provider.Hooks() {
err = hook.preExec()
So(err, ShouldBeNil)
}
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
for _, hook := range provider.Hooks() {
err = hook.postExec()
So(err, ShouldBeNil)
}
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)
So(string(loggedContent), ShouldEqual, "__some_pattern")
})
}
func TestCmdProvider(t *testing.T) {
Convey("Command Provider should work", t, func(ctx C) {
tmpDir, err := ioutil.TempDir("", "tunasync")
@@ -321,7 +397,7 @@ echo $AOSP_REPO_BIN
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
@@ -337,7 +413,7 @@ echo $AOSP_REPO_BIN
So(err, ShouldBeNil)
So(readedScriptContent, ShouldResemble, []byte(scriptContent))
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
})
@@ -349,11 +425,14 @@ sleep 10
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 1)
go func() {
err = provider.Run()
err := provider.Run(started)
ctx.So(err, ShouldNotBeNil)
}()
<-started
So(provider.IsRunning(), ShouldBeTrue)
time.Sleep(1 * time.Second)
err = provider.Terminate()
So(err, ShouldBeNil)
@@ -389,7 +468,7 @@ sleep 10
Convey("Run the command", func() {
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
@@ -417,7 +496,7 @@ sleep 10
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
@@ -427,7 +506,7 @@ sleep 10
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
})
@@ -437,7 +516,7 @@ sleep 10
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
})
@@ -446,7 +525,7 @@ sleep 10
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
So(provider.DataSize(), ShouldNotBeEmpty)
_, err = strconv.ParseFloat(provider.DataSize(), 32)
@@ -458,7 +537,7 @@ sleep 10
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
@@ -469,7 +548,7 @@ sleep 10
provider, err := newCmdProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 1))
So(err, ShouldNotBeNil)
So(provider.DataSize(), ShouldBeEmpty)
})
@@ -494,6 +573,7 @@ func TestTwoStageRsyncProvider(t *testing.T) {
logFile: tmpFile,
useIPv6: true,
excludeFile: tmpFile,
rsyncTimeoutValue: 30,
extraOptions: []string{"--delete-excluded", "--cache"},
username: "hello",
password: "world",
@@ -520,7 +600,7 @@ exit 0
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 2))
So(err, ShouldBeNil)
targetDir, _ := filepath.EvalSymlinks(provider.WorkingDir())
@@ -533,16 +613,16 @@ exit 0
"Done\n",
targetDir,
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
"--timeout=120 --exclude dists/ -6 "+
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
"--include=*.diff/ --exclude=*.diff/Index --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --include=i18n/by-hash --exclude=i18n/* --exclude=ls-lR* --timeout=30 -6 "+
"--exclude-from %s %s %s",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
),
targetDir,
fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ "+
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ "+
"--delete --delete-after --delay-updates --safe-links "+
"--timeout=120 --delete-excluded --cache -6 --exclude-from %s %s %s",
"--delete-excluded --cache --timeout=30 -6 --exclude-from %s %s %s",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
),
)
@@ -562,18 +642,21 @@ exit 0
err = ioutil.WriteFile(scriptFile, []byte(scriptContent), 0755)
So(err, ShouldBeNil)
started := make(chan empty, 2)
go func() {
err = provider.Run()
err := provider.Run(started)
ctx.So(err, ShouldNotBeNil)
}()
<-started
So(provider.IsRunning(), ShouldBeTrue)
time.Sleep(1 * time.Second)
err = provider.Terminate()
So(err, ShouldBeNil)
expectedOutput := fmt.Sprintf(
"-aHvh --no-o --no-g --stats --exclude .~tmp~/ --safe-links "+
"--timeout=120 --exclude dists/ -6 "+
"-aHvh --no-o --no-g --stats --filter risk .~tmp~/ --exclude .~tmp~/ --safe-links "+
"--include=*.diff/ --exclude=*.diff/Index --exclude=Packages* --exclude=Sources* --exclude=Release* --exclude=InRelease --include=i18n/by-hash --exclude=i18n/* --exclude=ls-lR* --timeout=30 -6 "+
"--exclude-from %s %s %s\n",
provider.excludeFile, provider.upstreamURL, provider.WorkingDir(),
)
@@ -606,7 +689,7 @@ exit 0
provider, err := newTwoStageRsyncProvider(c)
So(err, ShouldBeNil)
err = provider.Run()
err = provider.Run(make(chan empty, 2))
So(err, ShouldNotBeNil)
loggedContent, err := ioutil.ReadFile(provider.LogFile())
So(err, ShouldBeNil)

View file

@@ -2,6 +2,7 @@ package worker
import (
"errors"
"fmt"
"strings"
"time"
@@ -14,11 +15,14 @@ type rsyncConfig struct {
upstreamURL, username, password, excludeFile string
extraOptions []string
overriddenOptions []string
rsyncNeverTimeout bool
rsyncTimeoutValue int
rsyncEnv map[string]string
workingDir, logDir, logFile string
useIPv6, useIPv4 bool
interval time.Duration
retry int
timeout time.Duration
}
// An RsyncProvider provides the implementation to rsync-based syncing jobs
@@ -43,6 +47,7 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
rsyncConfig: c,
}
@@ -62,14 +67,22 @@ func newRsyncProvider(c rsyncConfig) (*rsyncProvider, error) {
options := []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--exclude", ".~tmp~/",
"--filter" , "risk .~tmp~/", "--exclude", ".~tmp~/",
"--delete", "--delete-after", "--delay-updates",
"--safe-links", "--timeout=120",
"--safe-links",
}
if c.overriddenOptions != nil {
options = c.overriddenOptions
}
if !c.rsyncNeverTimeout {
timeo := 120
if c.rsyncTimeoutValue > 0 {
timeo = c.rsyncTimeoutValue
}
options = append(options, fmt.Sprintf("--timeout=%d", timeo))
}
if c.useIPv6 {
options = append(options, "-6")
} else if c.useIPv4 {
@@ -103,12 +116,13 @@ func (p *rsyncProvider) DataSize() string {
return p.dataSize
}
func (p *rsyncProvider) Run() error {
func (p *rsyncProvider) Run(started chan empty) error {
p.dataSize = ""
defer p.closeLogFile()
if err := p.Start(); err != nil {
return err
}
started <- empty{}
if err := p.Wait(); err != nil {
code, msg := internal.TranslateRsyncErrorCode(err)
if code != 0 {
@@ -144,5 +158,6 @@ func (p *rsyncProvider) Start() error {
return err
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
return nil
}
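
The --timeout flag is now assembled in one place: omitted entirely when rsync_no_timeout is set, --timeout=<rsync_timeout> when a positive value is configured, and --timeout=120 as the default. The same logic, extracted into a runnable sketch:

package main

import "fmt"

// timeoutOption reproduces the option-building logic from the hunk above.
func timeoutOption(neverTimeout bool, timeoutValue int) []string {
	if neverTimeout {
		return nil
	}
	timeo := 120
	if timeoutValue > 0 {
		timeo = timeoutValue
	}
	return []string{fmt.Sprintf("--timeout=%d", timeo)}
}

func main() {
	fmt.Println(timeoutOption(true, 0))   // []
	fmt.Println(timeoutOption(false, 0))  // [--timeout=120]
	fmt.Println(timeoutOption(false, 30)) // [--timeout=30]
}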

View file

@@ -12,6 +12,8 @@ import (
"github.com/codeskyblue/go-sh"
"golang.org/x/sys/unix"
"github.com/moby/moby/pkg/reexec"
cgv1 "github.com/containerd/cgroups"
)
// runner is to run os commands giving command line, env and log file
@@ -56,6 +58,10 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
kv := fmt.Sprintf("%s=%s", k, v)
args = append(args, "-e", kv)
}
// set memlimit
if d.memoryLimit != 0 {
args = append(args, "-m", fmt.Sprint(d.memoryLimit.Value()))
}
// apply options
args = append(args, d.options...)
// apply image and command
@@ -66,10 +72,7 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
cmd = exec.Command(c, args...)
} else if provider.Cgroup() != nil {
c := "cgexec"
args := []string{"-g", provider.Cgroup().Cgroup()}
args = append(args, cmdAndArgs...)
cmd = exec.Command(c, args...)
cmd = reexec.Command(append([]string{"tunasync-exec"}, cmdAndArgs...)...)
} else {
if len(cmdAndArgs) == 1 {
@@ -104,9 +107,59 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
}
func (c *cmdJob) Start() error {
cg := c.provider.Cgroup()
var (
pipeR *os.File
pipeW *os.File
)
if cg != nil {
logger.Debugf("Preparing cgroup sync pipes for job %s", c.provider.Name())
var err error
pipeR, pipeW, err = os.Pipe();
if err != nil {
return err
}
c.cmd.ExtraFiles = []*os.File{pipeR}
defer pipeR.Close()
defer pipeW.Close()
}
logger.Debugf("Command start: %v", c.cmd.Args)
c.finished = make(chan empty, 1)
return c.cmd.Start()
if err := c.cmd.Start(); err != nil {
return err
}
if cg != nil {
if err := pipeR.Close(); err != nil {
return err
}
if c.cmd == nil || c.cmd.Process == nil {
return errProcessNotStarted
}
pid := c.cmd.Process.Pid
if cg.cgCfg.isUnified {
if err := cg.cgMgrV2.AddProc(uint64(pid)); err != nil{
if errors.Is(err, syscall.ESRCH) {
logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring")
} else {
return err
}
}
} else {
if err := cg.cgMgrV1.Add(cgv1.Process{Pid: pid}); err != nil{
if errors.Is(err, syscall.ESRCH) {
logger.Infof("Write pid %d to cgroup failed: process vanished, ignoring")
} else {
return err
}
}
}
if _, err := pipeW.WriteString(string(cmdCont)); err != nil {
return err
}
}
return nil
}
func (c *cmdJob) Wait() error {
@@ -149,10 +202,10 @@ func (c *cmdJob) Terminate() error {
select {
case <-time.After(2 * time.Second):
unix.Kill(c.cmd.Process.Pid, syscall.SIGKILL)
return errors.New("SIGTERM failed to kill the job")
logger.Warningf("SIGTERM failed to kill the job in 2s. SIGKILL sent")
case <-c.finished:
return nil
}
return nil
}
// Copied from go-sh
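
Terminate's behavior also changed: when SIGTERM does not finish the job within two seconds, the SIGKILL escalation is now logged as a warning instead of being returned as an error, since the job is dead either way. A sketch of that term-then-kill escalation on a POSIX system:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "30")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	finished := make(chan struct{})
	go func() {
		cmd.Wait()
		close(finished)
	}()
	cmd.Process.Signal(syscall.SIGTERM) // polite first
	select {
	case <-time.After(2 * time.Second):
		cmd.Process.Kill() // escalate, but log rather than fail
		fmt.Println("SIGTERM failed to kill the job in 2s. SIGKILL sent")
		<-finished
	case <-finished:
		fmt.Println("terminated cleanly")
	}
}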

View file

@@ -15,11 +15,14 @@ type twoStageRsyncConfig struct {
stage1Profile string
upstreamURL, username, password, excludeFile string
extraOptions []string
rsyncNeverTimeout bool
rsyncTimeoutValue int
rsyncEnv map[string]string
workingDir, logDir, logFile string
useIPv6 bool
useIPv6, useIPv4 bool
interval time.Duration
retry int
timeout time.Duration
}
// An RsyncProvider provides the implementation to rsync-based syncing jobs
@@ -31,11 +34,12 @@ type twoStageRsyncProvider struct {
dataSize string
}
// ref: https://salsa.debian.org/mirror-team/archvsync/-/blob/master/bin/ftpsync#L431
var rsyncStage1Profiles = map[string]([]string){
"debian": []string{"dists/"},
"debian": []string{"--include=*.diff/", "--exclude=*.diff/Index", "--exclude=Packages*", "--exclude=Sources*", "--exclude=Release*", "--exclude=InRelease", "--include=i18n/by-hash", "--exclude=i18n/*", "--exclude=ls-lR*"},
"debian-oldstyle": []string{
"Packages*", "Sources*", "Release*",
"InRelease", "i18n/*", "ls-lR*", "dep11/*",
"--exclude=Packages*", "--exclude=Sources*", "--exclude=Release*",
"--exclude=InRelease", "--exclude=i18n/*", "--exclude=ls-lR*", "--exclude=dep11/*",
},
}
@@ -54,18 +58,19 @@ func newTwoStageRsyncProvider(c twoStageRsyncConfig) (*twoStageRsyncProvider, er
ctx: NewContext(),
interval: c.interval,
retry: c.retry,
timeout: c.timeout,
},
twoStageRsyncConfig: c,
stage1Options: []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--exclude", ".~tmp~/",
"--safe-links", "--timeout=120",
"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
"--safe-links",
},
stage2Options: []string{
"-aHvh", "--no-o", "--no-g", "--stats",
"--exclude", ".~tmp~/",
"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/",
"--delete", "--delete-after", "--delay-updates",
"--safe-links", "--timeout=120",
"--safe-links",
},
}
@@ -105,12 +110,12 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
var options []string
if stage == 1 {
options = append(options, p.stage1Options...)
stage1Excludes, ok := rsyncStage1Profiles[p.stage1Profile]
stage1Profile, ok := rsyncStage1Profiles[p.stage1Profile]
if !ok {
return nil, errors.New("Invalid Stage 1 Profile")
}
for _, exc := range stage1Excludes {
options = append(options, "--exclude", exc)
for _, exc := range stage1Profile {
options = append(options, exc)
}
} else if stage == 2 {
@@ -122,8 +127,18 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
return []string{}, fmt.Errorf("Invalid stage: %d", stage)
}
if !p.rsyncNeverTimeout {
timeo := 120
if p.rsyncTimeoutValue > 0 {
timeo = p.rsyncTimeoutValue
}
options = append(options, fmt.Sprintf("--timeout=%d", timeo))
}
if p.useIPv6 {
options = append(options, "-6")
} else if p.useIPv4 {
options = append(options, "-4")
}
if p.excludeFile != "" {
@@ -133,7 +148,7 @@ func (p *twoStageRsyncProvider) Options(stage int) ([]string, error) {
return options, nil
}
func (p *twoStageRsyncProvider) Run() error {
func (p *twoStageRsyncProvider) Run(started chan empty) error {
p.Lock()
defer p.Unlock()
@@ -163,6 +178,7 @@ func (p *twoStageRsyncProvider) Run() error {
}
p.isRunning.Store(true)
logger.Debugf("set isRunning to true: %s", p.Name())
started <- empty{}
p.Unlock()
err = p.Wait()
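
The stage-1 profiles now carry complete rsync flags (mixing --include and --exclude) instead of bare patterns for Options() to wrap in --exclude; order matters, because rsync applies the first matching filter rule. A runnable sketch of how Options(1) appends the debian profile verbatim after the base options:

package main

import "fmt"

// Stage-1 profile flags are appended as-is (first matching
// --include/--exclude wins in rsync's filter chain).
var debianStage1 = []string{
	"--include=*.diff/", "--exclude=*.diff/Index",
	"--exclude=Packages*", "--exclude=Sources*",
	"--exclude=Release*", "--exclude=InRelease",
	"--include=i18n/by-hash", "--exclude=i18n/*",
	"--exclude=ls-lR*",
}

func main() {
	options := []string{"-aHvh", "--no-o", "--no-g", "--stats",
		"--filter", "risk .~tmp~/", "--exclude", ".~tmp~/", "--safe-links"}
	options = append(options, debianStage1...)
	fmt.Println(options)
}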

View file

@@ -54,6 +54,12 @@ func NewTUNASyncWorker(cfg *Config) *Worker {
w.httpClient = httpClient
}
if cfg.Cgroup.Enable {
if err := initCgroup(&cfg.Cgroup); err != nil {
logger.Errorf("Error initializing Cgroup: %s", err.Error())
return nil
}
}
w.initJobs()
w.makeHTTPServer()
return w
@@ -61,7 +67,7 @@ func NewTUNASyncWorker(cfg *Config) *Worker {
// Run runs worker forever
func (w *Worker) Run() {
w.registorWorker()
w.registerWorker()
go w.runHTTPServer()
w.runSchedule()
}
@@ -393,7 +399,7 @@ func (w *Worker) URL() string {
return fmt.Sprintf("%s://%s:%d/", proto, w.cfg.Server.Hostname, w.cfg.Server.Port)
}
func (w *Worker) registorWorker() {
func (w *Worker) registerWorker() {
msg := WorkerStatus{
ID: w.Name(),
URL: w.URL(),
@@ -402,8 +408,17 @@ func (w *Worker) registorWorker() {
for _, root := range w.cfg.Manager.APIBaseList() {
url := fmt.Sprintf("%s/workers", root)
logger.Debugf("register on manager url: %s", url)
for retry := 10; retry > 0; {
if _, err := PostJSON(url, msg, w.httpClient); err != nil {
logger.Errorf("Failed to register worker")
retry--
if retry > 0 {
time.Sleep(1 * time.Second)
logger.Noticef("Retrying... (%d)", retry)
}
} else {
break
}
}
}
}
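
Worker registration now retries up to ten times with a one-second pause, so a worker that starts before its manager no longer gives up on the first connection refusal. For comparison, an exponential-backoff variant (a common alternative, not what the diff implements) could look like:

package main

import (
	"errors"
	"fmt"
	"time"
)

func registerWithBackoff(post func() error) error {
	delay := 1 * time.Second
	for retry := 10; retry > 0; retry-- {
		if err := post(); err == nil {
			return nil
		}
		if retry > 1 {
			time.Sleep(delay)
			delay *= 2 // back off instead of a fixed 1s pause
		}
	}
	return errors.New("failed to register worker after 10 attempts")
}

func main() {
	n := 0
	err := registerWithBackoff(func() error {
		n++
		if n < 3 {
			return errors.New("manager not up yet")
		}
		return nil
	})
	fmt.Println(err)
}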

View file

@@ -25,6 +25,7 @@ func makeMockManagerServer(recvData chan interface{}) *gin.Engine {
var _worker WorkerStatus
c.BindJSON(&_worker)
_worker.LastOnline = time.Now()
_worker.LastRegister = time.Now()
recvData <- _worker
c.JSON(http.StatusOK, _worker)
})