Mirrored from https://github.com/tuna/tunasync.git
Last synced: 2025-12-06 06:26:46 +00:00

Comparing commits: travis-upl...v0.2.0a2 (18 commits)
| Author | SHA1 | Commit date |
|---|---|---|
|  | 9ac3193d50 |  |
|  | 9ffb101cc7 |  |
|  | fd277388d5 |  |
|  | c5cba66786 |  |
|  | 97e9725774 |  |
|  | 54740388b3 |  |
|  | 7601e5793f |  |
|  | 9645fd44ec |  |
|  | ebd462be36 |  |
|  | 21c832c8fb |  |
|  | 81a15e7dd1 |  |
|  | 3f31e83c14 |  |
|  | a0b8ef08ab |  |
|  | 86153c59e3 |  |
|  | 96f9db8bb8 |  |
|  | 6dd06c954c |  |
|  | 03d22b7683 |  |
|  | e9a7fc2de2 |  |

(Author and date columns were not captured by the mirror.)
.travis.yml — 10 lines changed
@@ -1,3 +1,5 @@
+sudo: required
+
 language: go
 go:
 - 1.6
@@ -11,8 +13,14 @@ before_install:
 os:
 - linux
 
+services:
+- docker
+
 before_script:
-- sudo cgcreate -t travis -a travis -g memory:tunasync
+- lssubsys -am
+- sudo cgcreate -a $USER -t $USER -g cpu:tunasync
+- sudo cgcreate -a $USER -t $USER -g memory:tunasync
+- docker pull alpine
 
 script:
 - ./.testandcover.bash
README — add a Download section:

@@ -10,6 +10,9 @@ tunasync
 
 - [中文文档](https://github.com/tuna/tunasync/blob/master/docs/zh_CN/get_started.md)
 
+## Download
+
+Pre-built binary for Linux x86_64 is available at [Github releases](https://github.com/tuna/tunasync/releases/latest).
 
 ## Design
 
tunasync command — version string now comes from the shared constant:

@@ -134,7 +134,7 @@ func main() {
 	app.Name = "tunasync"
 	app.Usage = "tunasync mirror job management tool"
 	app.EnableBashCompletion = true
-	app.Version = "0.1"
+	app.Version = tunasync.Version
 	app.Commands = []cli.Command{
 		{
 			Name: "manager",
tunasynctl command — fall back to HTTP when no CA certificate is configured, and use the shared version constant:

@@ -99,8 +99,11 @@ func initialize(c *cli.Context) error {
 	}
 
 	// parse base url of the manager server
-	baseURL = fmt.Sprintf("https://%s:%d",
-		cfg.ManagerAddr, cfg.ManagerPort)
+	if cfg.CACert != "" {
+		baseURL = fmt.Sprintf("https://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
+	} else {
+		baseURL = fmt.Sprintf("http://%s:%d", cfg.ManagerAddr, cfg.ManagerPort)
+	}
 
 	logger.Infof("Use manager address: %s", baseURL)
 
@@ -322,7 +325,7 @@ func main() {
 
 	app := cli.NewApp()
 	app.EnableBashCompletion = true
-	app.Version = "0.1"
+	app.Version = tunasync.Version
 	app.Name = "tunasynctl"
 	app.Usage = "control client for tunasync manager"
 
Chinese getting-started guide — download instructions replace a TODO:

@@ -15,7 +15,7 @@ date: 2016-10-31 00:50:00
 
 ### 二进制包
 
-TODO
+到 [Github Releases](https://github.com/tuna/tunasync/releases/latest) 下载 `tunasync-linux-bin.tar.gz` 即可。
 
 ### 自行编译
 
internal/version.go — new file, 3 lines:

@@ -0,0 +1,3 @@
+package internal
+
+const Version string = "0.2-dev"
Manager database adapter — also flush jobs whose names are empty:

@@ -182,7 +182,7 @@ func (b *boltAdapter) FlushDisabledJobs() (err error) {
 			err = fmt.Errorf("%s; %s", err.Error(), jsonErr)
 			continue
 		}
-		if m.Status == Disabled {
+		if m.Status == Disabled || len(m.Name) == 0 {
 			err = c.Delete()
 		}
 	}
Deleted script: Anaconda/Miniconda repository sync (wget, lftp, jq):

@@ -1,96 +0,0 @@
-#!/bin/bash
-# requires: wget, lftp, jq
-#
-
-set -e
-set -o pipefail
-
-CONDA_REPO_BASE=${CONDA_REPO_BASE:-"http://repo.continuum.io"}
-LOCAL_DIR_BASE="${TUNASYNC_WORKING_DIR}/pkgs"
-TMP_DIR=$(mktemp -d)
-
-CONDA_REPOS=("free" "r" "mro" "pro")
-CONDA_ARCHES=("linux-64" "linux-32" "linux-armv6l" "linux-armv7l" "linux-ppc64le" "osx-64" "osx-32" "win-64" "win-32")
-
-function check-and-download() {
-    remote_file=$1
-    local_file=$2
-    wget -q --spider ${remote_file}
-    if [ $? -eq 0 ]; then
-        echo "downloading ${remote_file}"
-        wget -q -N -O ${local_file} ${remote_file}
-        return
-    fi
-    return 1
-}
-
-function cleanup () {
-    echo "cleaning up"
-    [ -d ${TMP_DIR} ] && {
-        [ -f ${TMP_DIR}/repodata.json ] && rm ${TMP_DIR}/repodata.json
-        [ -f ${TMP_DIR}/repodata.json.bz2 ] && rm ${TMP_DIR}/repodata.json.bz2
-        rmdir ${TMP_DIR}
-    }
-}
-
-trap cleanup EXIT
-
-echo ${TMP_DIR}
-
-for repo in ${CONDA_REPOS[@]}; do
-    for arch in ${CONDA_ARCHES[@]}; do
-        PKG_REPO_BASE="${CONDA_REPO_BASE}/pkgs/$repo/$arch"
-        repodata_url="${PKG_REPO_BASE}/repodata.json"
-        bz2_repodata_url="${PKG_REPO_BASE}/repodata.json.bz2"
-        LOCAL_DIR="${LOCAL_DIR_BASE}/$repo/$arch"
-        [ ! -d ${LOCAL_DIR} ] && mkdir -p ${LOCAL_DIR}
-        tmp_repodata="${TMP_DIR}/repodata.json"
-        tmp_bz2_repodata="${TMP_DIR}/repodata.json.bz2"
-
-        check-and-download ${repodata_url} ${tmp_repodata}
-        check-and-download ${bz2_repodata_url} ${tmp_bz2_repodata}
-
-        jq_cmd='.packages | to_entries[] | [.key, .value.size, .value.md5] | map(tostring) | join(" ")'
-        bzip2 -c -d ${tmp_bz2_repodata} | jq -r "${jq_cmd}" | while read line;
-        do
-            read -a tokens <<< $line
-            pkgfile=${tokens[0]}
-            pkgsize=${tokens[1]}
-            pkgmd5=${tokens[2]}
-
-            pkg_url="${PKG_REPO_BASE}/${pkgfile}"
-            dest_file="${LOCAL_DIR}/${pkgfile}"
-
-            declare downloaded=false
-            if [ -f ${dest_file} ]; then
-                rsize=`stat -c "%s" ${dest_file}`
-                if [ ${rsize} -eq ${pkgsize} ]; then
-                    downloaded=true
-                    echo "Skipping ${pkgfile}, size ${pkgsize}"
-                fi
-            fi
-            while [ $downloaded != true ]; do
-                echo "downloading ${pkg_url}"
-                wget -q -O ${dest_file} ${pkg_url} && {
-                    # two space for md5sum check format
-                    echo "${pkgmd5}  ${dest_file}" | md5sum -c - && downloaded=true
-                }
-            done
-        done
-
-        mv -f "${TMP_DIR}/repodata.json" "${LOCAL_DIR}/repodata.json"
-        mv -f "${TMP_DIR}/repodata.json.bz2" "${LOCAL_DIR}/repodata.json.bz2"
-    done
-done
-
-function sync_installer() {
-    repo_url="$1"
-    repo_dir="$2"
-
-    [ ! -d "$repo_dir" ] && mkdir -p "$repo_dir"
-    cd $repo_dir
-    lftp "${repo_url}/" -e "mirror --verbose -P 5; bye"
-}
-
-sync_installer "${CONDA_REPO_BASE}/archive/" "${TUNASYNC_WORKING_DIR}/archive/"
-sync_installer "${CONDA_REPO_BASE}/miniconda/" "${TUNASYNC_WORKING_DIR}/miniconda/"
Deleted script: AOSP mirror via the repo tool:

@@ -1,21 +0,0 @@
-#!/bin/bash
-
-REPO=${REPO:-"/usr/local/bin/repo"}
-
-function repo_init() {
-    mkdir -p $TUNASYNC_WORKING_DIR
-    cd $TUNASYNC_WORKING_DIR
-    $REPO init -u https://android.googlesource.com/mirror/manifest --mirror
-}
-
-function repo_sync() {
-    cd $TUNASYNC_WORKING_DIR
-    $REPO sync -f
-}
-
-if [ ! -d "$TUNASYNC_WORKING_DIR/git-repo.git" ]; then
-    echo "Initializing AOSP mirror"
-    repo_init
-fi
-
-repo_sync
Deleted script: Docker yum/apt repositories (the two createrepo lines are reconstructed on the el6/el7 pattern used by the GitLab scripts below):

@@ -1,59 +0,0 @@
-#!/bin/bash
-# reqires: wget, yum-utils
-
-set -e
-set -o pipefail
-
-_here=`dirname $(realpath $0)`
-. ${_here}/helpers/apt-download
-APT_VERSIONS=("debian-wheezy" "debian-jessie" "ubuntu-precise" "ubuntu-trusty" "ubuntu-xenial")
-
-BASE_PATH="${TUNASYNC_WORKING_DIR}"
-APT_PATH="${BASE_PATH}/apt/repo"
-YUM_PATH="${BASE_PATH}/yum/repo"
-
-mkdir -p ${APT_PATH} ${YUM_PATH}
-
-wget -q -N -O ${BASE_PATH}/yum/gpg https://yum.dockerproject.org/gpg
-wget -q -N -O ${BASE_PATH}/apt/gpg https://apt.dockerproject.org/gpg
-
-# YUM mirror
-cache_dir="/tmp/yum-docker-cache/"
-cfg="/tmp/docker-yum.conf"
-cat <<EOF > ${cfg}
-[main]
-keepcache=0
-
-[centos6]
-name=Docker Repository
-baseurl=https://yum.dockerproject.org/repo/main/centos/6
-enabled=1
-gpgcheck=0
-gpgkey=https://yum.dockerproject.org/gpg
-sslverify=0
-
-[centos7]
-name=Docker Repository
-baseurl=https://yum.dockerproject.org/repo/main/centos/7
-enabled=1
-gpgcheck=0
-gpgkey=https://yum.dockerproject.org/gpg
-sslverify=0
-EOF
-
-[ ! -d ${YUM_PATH}/centos6 ] && mkdir -p ${YUM_PATH}/centos6
-[ ! -d ${YUM_PATH}/centos7 ] && mkdir -p ${YUM_PATH}/centos7
-reposync -c $cfg -d -p ${YUM_PATH} -e $cache_dir
-createrepo --update -v -c $cache_dir -o ${YUM_PATH}/centos6 ${YUM_PATH}/centos6
-createrepo --update -v -c $cache_dir -o ${YUM_PATH}/centos7 ${YUM_PATH}/centos7
-rm $cfg
-
-# APT mirror
-base_url="http://apt.dockerproject.org/repo"
-for version in ${APT_VERSIONS[@]}; do
-    apt-download-binary ${base_url} "$version" "main" "amd64" "${APT_PATH}" || true
-    apt-download-binary ${base_url} "$version" "main" "i386" "${APT_PATH}" || true
-done
-
-# sync_docker "http://apt.dockerproject.org/" "${TUNASYNC_WORKING_DIR}/apt"
-# sync_docker "http://yum.dockerproject.org/" "${TUNASYNC_WORKING_DIR}/yum"
Deleted script: Debian rsync exclude-list generator:

@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-
-ARCH_EXCLUDE = ['armel', 'alpha', 'hurd-i386', 'ia64', 'kfreebsd-amd64', 'kfreebsd-i386', 'mips', 'powerpc', 'ppc64el', 's390', 's390x', 'sparc']
-
-CONTENT_EXCLUDE = ['binary-{arch}', 'installer-{arch}', 'Contents-{arch}.gz', 'Contents-udeb-{arch}.gz', 'Contents-{arch}.diff', 'arch-{arch}.files', 'arch-{arch}.list.gz', '*_{arch}.deb', '*_{arch}.udeb', '*_{arch}.changes']
-
-with open("debian-exclude.txt", 'wb') as f:
-    f.write(".~tmp~/\n")
-    f.write(".*\n")
-    for arch in ARCH_EXCLUDE:
-        for content in CONTENT_EXCLUDE:
-            f.write(content.format(arch=arch))
-            f.write('\n')
Deleted script: Kali rsync exclude-list generator:

@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-
-ARCH_EXCLUDE = ['armel', 'armhf']
-
-CONTENT_EXCLUDE = ['binary-{arch}', 'installer-{arch}', 'Contents-{arch}.gz', 'Contents-udeb-{arch}.gz', 'Contents-{arch}.diff', 'arch-{arch}.files', 'arch-{arch}.list.gz', '*_{arch}.deb', '*_{arch}.udeb', '*_{arch}.changes']
-
-with open("kali-exclude.txt", 'wb') as f:
-    f.write(".~tmp~/\n")
-    f.write(".*\n")
-    for arch in ARCH_EXCLUDE:
-        for content in CONTENT_EXCLUDE:
-            f.write(content.format(arch=arch))
-            f.write('\n')
Deleted script: Ubuntu-ports rsync exclude-list generator:

@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-
-ARCH_EXCLUDE = ['powerpc', 'ppc64el', 'ia64', 'sparc', 'armel']
-
-CONTENT_EXCLUDE = ['binary-{arch}', 'installer-{arch}', 'Contents-{arch}.gz', 'Contents-udeb-{arch}.gz', 'Contents-{arch}.diff', 'arch-{arch}.files', 'arch-{arch}.list.gz', '*_{arch}.deb', '*_{arch}.udeb', '*_{arch}.changes']
-
-with open("ubuntu-ports-exclude.txt", 'wb') as f:
-    f.write(".~tmp~/\n")
-    f.write(".*\n")
-    for arch in ARCH_EXCLUDE:
-        for content in CONTENT_EXCLUDE:
-            f.write(content.format(arch=arch))
-            f.write('\n')
Deleted script: GitLab CE yum/apt repositories:

@@ -1,65 +0,0 @@
-#!/bin/bash
-set -e
-
-_here=`dirname $(realpath $0)`
-. ${_here}/helpers/apt-download
-
-[ -z "${LOADED_APT_DOWNLOAD}" ] && (echo "failed to load apt-download"; exit 1)
-
-BASE_PATH="${TUNASYNC_WORKING_DIR}"
-
-YUM_PATH="${BASE_PATH}/yum"
-
-UBUNTU_VERSIONS=("trusty" "wily")
-DEBIAN_VERSIONS=("wheezy" "jessie" "stretch")
-UBUNTU_PATH="${BASE_PATH}/ubuntu/"
-DEBIAN_PATH="${BASE_PATH}/debian/"
-
-mkdir -p $UBUNTU_PATH $DEBIAN_PATH $YUM_PATH
-
-cache_dir="/tmp/yum-gitlab-ce-cache/"
-cfg="/tmp/gitlab-ce-yum.conf"
-cat <<EOF > ${cfg}
-[main]
-keepcache=0
-
-[el6]
-name=el6
-baseurl=https://packages.gitlab.com/gitlab/gitlab-ce/el/6/x86_64
-repo_gpgcheck=0
-gpgcheck=0
-enabled=1
-gpgkey=https://packages.gitlab.com/gpg.key
-sslverify=0
-
-[el7]
-name=el7
-baseurl=https://packages.gitlab.com/gitlab/gitlab-ce/el/7/x86_64
-repo_gpgcheck=0
-gpgcheck=0
-enabled=1
-gpgkey=https://packages.gitlab.com/gpg.key
-sslverify=0
-EOF
-
-reposync -c $cfg -d -p ${YUM_PATH} -e $cache_dir
-createrepo --update -v -c $cache_dir -o ${YUM_PATH}/el6 ${YUM_PATH}/el6
-createrepo --update -v -c $cache_dir -o ${YUM_PATH}/el7 ${YUM_PATH}/el7
-rm $cfg
-
-base_url="https://packages.gitlab.com/gitlab/gitlab-ce/ubuntu"
-for version in ${UBUNTU_VERSIONS[@]}; do
-    apt-download-binary ${base_url} "$version" "main" "amd64" "${UBUNTU_PATH}" || true
-    apt-download-binary ${base_url} "$version" "main" "i386" "${UBUNTU_PATH}" || true
-done
-echo "Ubuntu finished"
-
-base_url="https://packages.gitlab.com/gitlab/gitlab-ce/debian"
-for version in ${DEBIAN_VERSIONS[@]}; do
-    apt-download-binary ${base_url} "$version" "main" "amd64" "${DEBIAN_PATH}" || true
-    apt-download-binary ${base_url} "$version" "main" "i386" "${DEBIAN_PATH}" || true
-done
-echo "Debian finished"
-
-
-# vim: ts=4 sts=4 sw=4
Deleted script: GitLab CI multi-runner yum/apt repositories:

@@ -1,69 +0,0 @@
-#!/bin/bash
-# reqires: wget, yum-utils
-set -e
-set -o pipefail
-
-_here=`dirname $(realpath $0)`
-. ${_here}/helpers/apt-download
-
-[ -z "${LOADED_APT_DOWNLOAD}" ] && (echo "failed to load apt-download"; exit 1)
-
-BASE_PATH="${TUNASYNC_WORKING_DIR}"
-
-YUM_PATH="${BASE_PATH}/yum"
-
-UBUNTU_VERSIONS=("trusty" "xenial")
-DEBIAN_VERSIONS=("wheezy" "jessie" "stretch")
-UBUNTU_PATH="${BASE_PATH}/ubuntu/"
-DEBIAN_PATH="${BASE_PATH}/debian/"
-
-mkdir -p $UBUNTU_PATH $DEBIAN_PATH $YUM_PATH
-
-cache_dir="/tmp/yum-gitlab-runner-cache/"
-cfg="/tmp/gitlab-runner-yum.conf"
-cat <<EOF > ${cfg}
-[main]
-keepcache=0
-
-[el6]
-name=gitlab-ci-multi-runner-el6
-baseurl=https://packages.gitlab.com/runner/gitlab-ci-multi-runner/el/6/x86_64
-repo_gpgcheck=0
-gpgcheck=0
-enabled=1
-gpgkey=https://packages.gitlab.com/gpg.key
-sslverify=0
-
-[el7]
-name=gitlab-ci-multi-runner-el7
-baseurl=https://packages.gitlab.com/runner/gitlab-ci-multi-runner/el/7/x86_64
-repo_gpgcheck=0
-gpgcheck=0
-enabled=1
-gpgkey=https://packages.gitlab.com/gpg.key
-sslverify=0
-EOF
-
-reposync -c $cfg -d -p ${YUM_PATH} -e $cache_dir
-[ ! -d ${YUM_PATH}/el6 ] && mkdir -p ${YUM_PATH}/el6
-[ ! -d ${YUM_PATH}/el7 ] && mkdir -p ${YUM_PATH}/el7
-createrepo --update -v -c $cache_dir -o ${YUM_PATH}/el6 ${YUM_PATH}/el6
-createrepo --update -v -c $cache_dir -o ${YUM_PATH}/el7 ${YUM_PATH}/el7
-rm $cfg
-
-base_url="https://packages.gitlab.com/runner/gitlab-ci-multi-runner/ubuntu"
-for version in ${UBUNTU_VERSIONS[@]}; do
-    apt-download-binary ${base_url} "$version" "main" "amd64" "${UBUNTU_PATH}" || true
-    apt-download-binary ${base_url} "$version" "main" "i386" "${UBUNTU_PATH}" || true
-done
-echo "Ubuntu finished"
-
-base_url="https://packages.gitlab.com/runner/gitlab-ci-multi-runner/debian"
-for version in ${DEBIAN_VERSIONS[@]}; do
-    apt-download-binary ${base_url} "$version" "main" "amd64" "${DEBIAN_PATH}" || true
-    apt-download-binary ${base_url} "$version" "main" "i386" "${DEBIAN_PATH}" || true
-done
-echo "Debian finished"
-
-
-# vim: ts=4 sts=4 sw=4
Deleted script: Hackage mirror:

@@ -1,92 +0,0 @@
-#!/bin/bash
-set -e
-
-function remove_broken() {
-    interval=$1
-    interval_file="/tmp/hackage_lastcheck"
-    now=`date +%s`
-
-    if [[ -f ${interval_file} ]]; then
-        lastcheck=`cat ${interval_file}`
-        between=$(echo "${now}-${lastcheck}" | bc)
-        [[ $between -lt $interval ]] && echo "skip checking"; return 0
-    fi
-    echo "start checking"
-
-    mkdir -p "${TUNASYNC_WORKING_DIR}/package"
-    cd "${TUNASYNC_WORKING_DIR}/package"
-
-    ls | while read line; do
-        echo -n "$line\t\t"
-        tar -tzf $line >/dev/null || (echo "FAIL"; rm $line) && echo "OK"
-    done
-
-    echo `date +%s` > $interval_file
-}
-
-function must_download() {
-    src=$1
-    dst=$2
-    while true; do
-        echo "downloading: $name"
-        wget "$src" -O "$dst" &>/dev/null || true
-        tar -tzf package/$name >/dev/null || rm package/$name && break
-    done
-}
-
-function hackage_mirror() {
-    local_pklist="/tmp/hackage_local_pklist_$$.list"
-    remote_pklist="/tmp/hackage_remote_pklist_$$.list"
-
-    cd ${TUNASYNC_WORKING_DIR}
-    mkdir -p package
-
-    echo "Downloading index..."
-    rm index.tar.gz || true
-    axel http://hdiff.luite.com/packages/archive/index.tar.gz -o index.tar.gz > /dev/null
-
-    echo "building local package list"
-    ls package | sed "s/\.tar\.gz$//" > $local_pklist
-    echo "preferred-versions" >> $local_pklist # ignore preferred-versions
-
-    echo "building remote package list"
-    tar ztf index.tar.gz | (cut -d/ -f 1,2 2>/dev/null) | sed 's|/|-|' > $remote_pklist
-
-    echo "building download list"
-    # substract local list from remote list
-    comm <(sort $remote_pklist) <(sort $local_pklist) -23 | while read pk; do
-        # limit concurrent level
-        bgcount=`jobs | wc -l`
-        while [[ $bgcount -ge 5 ]]; do
-            sleep 0.5
-            bgcount=`jobs | wc -l`
-        done
-
-        name="$pk.tar.gz"
-        if [ ! -a package/$name ]; then
-            must_download "http://hackage.haskell.org/package/$pk/$name" "package/$name" &
-        else
-            echo "skip existed: $name"
-        fi
-    done
-
-    # delete redundanty files
-    comm <(sort $remote_pklist) <(sort $local_pklist) -13 | while read pk; do
-        name="$pk.tar.gz"
-        echo "deleting ${name}"
-        rm "package/$name"
-    done
-
-    cp index.tar.gz 00-index.tar.gz
-}
-
-function cleanup () {
-    echo "cleaning up"
-    [[ ! -z $local_pklist ]] && (rm $local_pklist $remote_pklist ; true)
-}
-
-trap cleanup EXIT
-remove_broken 86400
-hackage_mirror
-
-# vim: ts=4 sts=4 sw=4
Deleted helper: apt-download (shared APT repository downloader):

@@ -1,132 +0,0 @@
-#!/bin/bash
-set -e
-LOADED_APT_DOWNLOAD="yes"
-
-function check-and-download() {
-    remote_file=$1
-    local_file=$2
-    wget -q --spider ${remote_file}
-    if [ $? -eq 0 ]; then
-        echo "downloading ${remote_file}"
-        wget -q -N -O ${local_file} ${remote_file}
-        return
-    fi
-    return 0
-}
-
-
-function apt-download-binary() {
-    base_url=$1
-    dist=$2
-    repo=$3
-    arch=$4
-    dest_base_dir=$5
-    if [ -z $dest_base_dir ]; then
-        echo "Destination directory is empty, cannot continue"
-        return 1
-    fi
-
-    dest_dir="${dest_base_dir}/dists/${dist}"
-    [ ! -d "$dest_dir" ] && mkdir -p "$dest_dir"
-    check-and-download "${base_url}/dists/${dist}/Contents-${arch}.gz" "${dest_dir}/Contents-${arch}.gz" || true
-    check-and-download "${base_url}/dists/${dist}/InRelease" "${dest_dir}/InRelease" || true
-    check-and-download "${base_url}/dists/${dist}/Release" "${dest_dir}/Release"
-    check-and-download "${base_url}/dists/${dist}/Release.gpg" "${dest_dir}/Release.gpg" || true
-
-    # Load Package Index URLs from Release file
-    release_file="${dest_dir}/Release"
-    dest_dir="${dest_base_dir}/dists/${dist}/${repo}/binary-${arch}"
-    [ ! -d "$dest_dir" ] && mkdir -p "$dest_dir"
-
-    declare pkgidx_content=""
-    declare cnt_start=false
-    declare -i checksum_len
-    if (grep -e '^SHA256:$' ${release_file} &>/dev/null); then
-        checksum_cmd="sha256sum"; checksum_regex="^SHA256:$"; checksum_len=64
-    elif (grep -e '^SHA1:$' ${release_file} &>/dev/null); then
-        checksum_cmd="sha1sum"; checksum_regex="^SHA1:$"; checksum_len=40
-    elif (grep -e '^MD5Sum:$' ${release_file} &>/dev/null); then
-        checksum_cmd="md5sum"; checksum_regex="^MD5sum:$"; checksum_len=32
-    fi
-
-    while read line; do
-        if [[ ${cnt_start} = true ]]; then
-            read -a tokens <<< $line
-            checksum=${tokens[0]}
-            if [[ ${#checksum} != ${checksum_len} ]]; then
-                break
-            fi
-            filesize=${tokens[1]}
-            filename=${tokens[2]}
-            if [[ "$filename" =~ ${repo}/binary-${arch} ]]; then
-                # Load package list from Packages file
-                pkgidx_file="${dest_base_dir}/dists/${dist}/${filename}"
-                dest_dir=`dirname ${pkgidx_file}`
-                [ ! -d "$dest_dir" ] && mkdir -p "$dest_dir"
-                pkglist_url="${base_url}/dists/${dist}/${filename}"
-                check-and-download "${pkglist_url}" ${pkgidx_file} || true
-                echo "${checksum}  ${pkgidx_file}" | ${checksum_cmd} -c -
-                if [ -z "${pkgidx_content}" -a -f ${pkgidx_file} ]; then
-                    echo "getting packages index content"
-                    case $filename in
-                    "*.bz2")
-                        pkgidx_content=`bunzip2 -c ${pkgidx_file}`
-                        ;;
-                    "*.gz")
-                        pkgidx_content=`gunzip -c ${pkgidx_file}`
-                        ;;
-                    *)
-                        pkgidx_content=`cat ${pkgidx_file}`
-                        ;;
-                    esac
-                fi
-            fi
-        else
-            if [[ "$line" =~ ${checksum_regex} ]]; then
-                cnt_start=true
-            fi
-        fi
-    done < ${release_file}
-
-    if [ -z "${pkgidx_content}" ]; then
-        echo "index is empty, failed"
-        return 1
-    fi
-
-    # Set checksum method
-    if (echo -e "${pkgidx_content}" | grep -e '^SHA256' &>/dev/null); then
-        checksum_cmd="sha256sum"; checksum_regex="^SHA256"
-    elif (echo -e "${pkgidx_content}" | grep -e '^SHA1' &>/dev/null); then
-        checksum_cmd="sha1sum"; checksum_regex="^SHA1"
-    elif (echo -e "${pkgidx_content}" | grep -e '^MD5sum' &>/dev/null); then
-        checksum_cmd="md5sum"; checksum_regex="^MD5sum"
-    fi
-
-    # Download packages
-    (echo -e "${pkgidx_content}" | grep -e '^Filename' -e '^Size' -e ${checksum_regex} | cut -d' ' -f 2) | \
-    while read pkg_filename; read pkg_size; read pkg_checksum; do
-        dest_filename="${dest_base_dir}/${pkg_filename}"
-        dest_dir=`dirname ${dest_filename}`
-        [ ! -d "$dest_dir" ] && mkdir -p "$dest_dir"
-        pkg_url="${base_url}/${pkg_filename}"
-        declare downloaded=false
-        if [ -f ${dest_filename} ]; then
-            rsize=`stat -c "%s" ${dest_filename}`
-            if [ ${rsize} -eq ${pkg_size} ]; then
-                downloaded=true
-                echo "Skipping ${pkg_filename}, size ${pkg_size}"
-            fi
-        fi
-        while [ $downloaded != true ]; do
-            echo "downloading ${pkg_url}"
-            wget -q -O ${dest_filename} ${pkg_url} && {
-                echo "${pkg_checksum}  ${dest_filename}" | ${checksum_cmd} -c - && downloaded=true # two space for md5sum/sha1sum/sha256sum check format
-            }
-        done
-    done
-
-    echo "Mirroring ${base_url} ${dist}, ${repo}, ${arch} done!"
-
-}
-
-# vim: ts=4 sts=4 sw=4
Deleted script: Homebrew git mirrors:

@@ -1,17 +0,0 @@
-#!/bin/bash
-if [ ! -d "$TUNASYNC_WORKING_DIR" ]; then
-    echo "Directory not exists, fail"
-    exit 1
-fi
-
-function update_homebrew_git() {
-    repo_dir="$1"
-    cd $repo_dir
-    echo "==== SYNC $repo_dir START ===="
-    /usr/bin/timeout -s INT 3600 git remote -v update
-    echo "==== SYNC $repo_dir DONE ===="
-}
-
-update_homebrew_git "$TUNASYNC_WORKING_DIR/homebrew.git"
-update_homebrew_git "$TUNASYNC_WORKING_DIR/homebrew-python.git"
-update_homebrew_git "$TUNASYNC_WORKING_DIR/homebrew-science.git"
Deleted script: linux.git mirror:

@@ -1,12 +0,0 @@
-#!/bin/bash
-if [ ! -d "$TUNASYNC_WORKING_DIR" ]; then
-    echo "Directory not exists, fail"
-    exit 1
-fi
-
-function update_linux_git() {
-    cd $TUNASYNC_WORKING_DIR
-    /usr/bin/timeout -s INT 3600 git remote -v update
-}
-
-update_linux_git
Deleted script: LXC images mirror:

@@ -1,16 +0,0 @@
-#!/bin/bash
-
-function sync_lxc_images() {
-    repo_url="$1"
-    repo_dir="$2"
-
-    [ ! -d "$repo_dir" ] && mkdir -p "$repo_dir"
-    cd $repo_dir
-
-    # lftp "${repo_url}/" -e "mirror --verbose --log=${TUNASYNC_LOG_FILE} --exclude-glob='*/SRPMS/*' -P 5 --delete --only-newer; bye"
-    lftp "${repo_url}/" -e "mirror --verbose -P 5 --delete --only-newer; bye"
-}
-
-
-sync_lxc_images "http://images.linuxcontainers.org/images" "${TUNASYNC_WORKING_DIR}/images"
-sync_lxc_images "http://images.linuxcontainers.org/meta" "${TUNASYNC_WORKING_DIR}/meta"
Deleted script: MongoDB yum/apt repositories:

@@ -1,88 +0,0 @@
-#!/bin/bash
-set -e
-
-_here=`dirname $(realpath $0)`
-. ${_here}/helpers/apt-download
-
-[ -z "${LOADED_APT_DOWNLOAD}" ] && (echo "failed to load apt-download"; exit 1)
-
-BASE_PATH="${TUNASYNC_WORKING_DIR}"
-
-YUM_PATH="${BASE_PATH}/yum"
-APT_PATH="${BASE_PATH}/apt"
-
-UBUNTU_VERSIONS=("trusty" "precise")
-DEBIAN_VERSIONS=("wheezy")
-MONGO_VERSIONS=("3.2" "3.0")
-STABLE_VERSION="3.2"
-
-UBUNTU_PATH="${APT_PATH}/ubuntu"
-DEBIAN_PATH="${APT_PATH}/debian"
-
-mkdir -p $UBUNTU_PATH $DEBIAN_PATH $YUM_PATH
-
-cache_dir="/tmp/yum-mongodb-cache/"
-cfg="/tmp/mongodb-yum.conf"
-cat <<EOF > ${cfg}
-[main]
-keepcache=0
-
-EOF
-
-for mgver in ${MONGO_VERSIONS[@]}; do
-cat <<EOF >> ${cfg}
-[el6-${mgver}]
-name=el6-${mgver}
-baseurl=https://repo.mongodb.org/yum/redhat/6/mongodb-org/${mgver}/x86_64/
-repo_gpgcheck=0
-gpgcheck=0
-enabled=1
-sslverify=0
-
-[el7-${mgver}]
-name=el7-${mgver}
-baseurl=https://repo.mongodb.org/yum/redhat/7/mongodb-org/${mgver}/x86_64/
-repo_gpgcheck=0
-gpgcheck=0
-enabled=1
-sslverify=0
-EOF
-done
-
-reposync -c $cfg -d -p ${YUM_PATH} -e $cache_dir
-for mgver in ${MONGO_VERSIONS[@]}; do
-    createrepo --update -v -c $cache_dir -o ${YUM_PATH}/el6-$mgver/ ${YUM_PATH}/el6-$mgver/
-    createrepo --update -v -c $cache_dir -o ${YUM_PATH}/el7-$mgver/ ${YUM_PATH}/el7-$mgver/
-done
-
-[ -e ${YUM_PATH}/el6 ] || (cd ${YUM_PATH}; ln -s el6-${STABLE_VERSION} el6)
-[ -e ${YUM_PATH}/el7 ] || (cd ${YUM_PATH}; ln -s el7-${STABLE_VERSION} el7)
-
-rm $cfg
-
-base_url="http://repo.mongodb.org/apt/ubuntu"
-for ubver in ${UBUNTU_VERSIONS[@]}; do
-    for mgver in ${MONGO_VERSIONS[@]}; do
-        version="$ubver/mongodb-org/$mgver"
-        apt-download-binary ${base_url} "$version" "multiverse" "amd64" "${UBUNTU_PATH}" || true
-        apt-download-binary ${base_url} "$version" "multiverse" "i386" "${UBUNTU_PATH}" || true
-    done
-    mg_basepath="${UBUNTU_PATH}/dists/$ubver/mongodb-org"
-    [ -e ${mg_basepath}/stable ] || (cd ${mg_basepath}; ln -s ${STABLE_VERSION} stable)
-done
-echo "Ubuntu finished"
-
-base_url="http://repo.mongodb.org/apt/debian"
-for dbver in ${DEBIAN_VERSIONS[@]}; do
-    for mgver in ${MONGO_VERSIONS[@]}; do
-        version="$dbver/mongodb-org/$mgver"
-        apt-download-binary ${base_url} "$version" "main" "amd64" "${DEBIAN_PATH}" || true
-        apt-download-binary ${base_url} "$version" "main" "i386" "${DEBIAN_PATH}" || true
-    done
-    mg_basepath="${DEBIAN_PATH}/dists/$dbver/mongodb-org"
-    [ -e ${mg_basepath}/stable ] || (cd ${mg_basepath}; ln -s ${STABLE_VERSION} stable)
-done
-echo "Debian finished"
-
-
-# vim: ts=4 sts=4 sw=4
Deleted script: NodeSource deb/rpm repositories:

@@ -1,18 +0,0 @@
-#!/bin/bash
-
-function sync_nodesource() {
-    repo_url="$1"
-    repo_dir="$2"
-
-    [ ! -d "$repo_dir" ] && mkdir -p "$repo_dir"
-    cd $repo_dir
-    # lftp "${repo_url}/" -e "mirror --verbose --exclude-glob='*/SRPMS/*' -P 5 --delete --only-newer; bye"
-    lftp "${repo_url}/" -e "mirror --verbose -P 5 --delete --only-newer; bye"
-}
-
-sync_nodesource "https://deb.nodesource.com/node" "${TUNASYNC_WORKING_DIR}/deb"
-sync_nodesource "https://deb.nodesource.com/node_0.12" "${TUNASYNC_WORKING_DIR}/deb_0.12"
-sync_nodesource "https://deb.nodesource.com/node_4.x" "${TUNASYNC_WORKING_DIR}/deb_4.x"
-sync_nodesource "https://rpm.nodesource.com/pub" "${TUNASYNC_WORKING_DIR}/rpm"
-sync_nodesource "https://rpm.nodesource.com/pub_0.12" "${TUNASYNC_WORKING_DIR}/rpm_0.12"
-sync_nodesource "https://rpm.nodesource.com/pub_4.x" "${TUNASYNC_WORKING_DIR}/rpm_4.x"
Deleted script: OpenWrt downloads mirror:

@@ -1,13 +0,0 @@
-#!/bin/bash
-
-function sync_openwrt() {
-    repo_url="$1"
-    repo_dir="$2"
-
-    [ ! -d "$repo_dir" ] && mkdir -p "$repo_dir"
-    cd $repo_dir
-    lftp "${repo_url}/" -e "mirror --verbose -P 5 --delete --only-newer; bye"
-}
-
-sync_openwrt "http://downloads.openwrt.org/chaos_calmer/15.05/" "${TUNASYNC_WORKING_DIR}/chaos_calmer/15.05"
-sync_openwrt "http://downloads.openwrt.org/snapshots/trunk/" "${TUNASYNC_WORKING_DIR}/snapshots/trunk"
Deleted script: PyPI via bandersnatch:

@@ -1,9 +0,0 @@
-#!/bin/bash
-if [ ! -d "$TUNASYNC_WORKING_DIR" ]; then
-    echo "Directory not exists, fail"
-    exit 1
-fi
-
-echo "Syncing to $TUNASYNC_WORKING_DIR"
-
-/usr/bin/timeout -s INT 3600 /home/tuna/.virtualenvs/bandersnatch/bin/bandersnatch -c /etc/bandersnatch.conf mirror || exit 1
Deleted script: repo-ck Arch repository:

@@ -1,17 +0,0 @@
-#!/bin/bash
-
-function sync_repo_ck() {
-    repo_url="$1"
-    repo_dir="$2"
-
-    [ ! -d "$repo_dir" ] && mkdir -p "$repo_dir"
-    cd $repo_dir
-    lftp "${repo_url}/" -e 'mirror -v -P 5 --delete --only-missing --only-newer --no-recursion; bye'
-    wget "${repo_url}/repo-ck.db" -O "repo-ck.db"
-    wget "${repo_url}/repo-ck.files" -O "repo-ck.files"
-}
-
-UPSTREAM="http://repo-ck.com"
-
-sync_repo_ck "${UPSTREAM}/x86_64" "${TUNASYNC_WORKING_DIR}/x86_64"
-sync_repo_ck "${UPSTREAM}/i686" "${TUNASYNC_WORKING_DIR}/i686"
Deleted script: Termux APT repository:

@@ -1,16 +0,0 @@
-#!/bin/bash
-set -e
-
-_here=`dirname $(realpath $0)`
-. ${_here}/helpers/apt-download
-[ -z "${LOADED_APT_DOWNLOAD}" ] && (echo "failed to load apt-download"; exit 1)
-
-BASE_PATH="${TUNASYNC_WORKING_DIR}"
-
-base_url="http://apt.termux.com"
-ARCHES=("aarch64" "all" "arm" "i686")
-for arch in ${ARCHES[@]}; do
-    echo "start syncing: ${arch}"
-    apt-download-binary "${base_url}" "stable" "main" "${arch}" "${BASE_PATH}" || true
-done
-echo "finished"
Deleted script: Ubuntu via debmirror:

@@ -1,27 +0,0 @@
-#!/bin/bash
-SYNC_FILES="$TUNASYNC_WORKING_DIR"
-# SYNC_FILES="/srv/mirror_disk/ubuntu/_working/"
-#LOG_FILE="$TUNASYNC_LOG_FILE"
-
-# [ -f $SYNC_LOCK ] && exit 1
-# touch $SYNC_LOCK
-
-
-echo ">> Starting sync on $(date --rfc-3339=seconds)"
-
-arch="i386,amd64"
-sections="main,main/debian-installer,multiverse,multiverse/debian-installer,restricted,restricted/debian-installer,universe,universe/debian-installer"
-dists="precise,precise-backports,precise-proposed,precise-updates,precise-security,trusty,trusty-backports,trusty-proposed,trusty-updates,trusty-security"
-server="$1"
-inPath="/ubuntu"
-proto="rsync"
-outpath="$SYNC_FILES"
-rsyncOpt='-6 -aIL --partial'
-
-debmirror -h $server --no-check-gpg -a $arch -s $sections -d $dists -r $inPath -e $proto --rsync-options "$rsyncOpt" --verbose $outpath
-
-date --rfc-3339=seconds > "$SYNC_FILES/lastsync"
-echo ">> Finished sync on $(date --rfc-3339=seconds)"
-
-# rm -f "$SYNC_LOCK"
-exit 0
Worker base provider — track ZFS and Docker hooks alongside cgroup:

@@ -23,7 +23,10 @@ type baseProvider struct {
 	logFile *os.File
 
 	cgroup *cgroupHook
-	hooks  []jobHook
+	zfs    *zfsHook
+	docker *dockerHook
+
+	hooks []jobHook
 }
 
 func (p *baseProvider) Name() string {
@@ -77,12 +80,17 @@ func (p *baseProvider) LogFile() string {
 			return s
 		}
 	}
-	panic("log dir is impossible to be unavailable")
+	panic("log file is impossible to be unavailable")
 }
 
 func (p *baseProvider) AddHook(hook jobHook) {
-	if cg, ok := hook.(*cgroupHook); ok {
-		p.cgroup = cg
+	switch v := hook.(type) {
+	case *cgroupHook:
+		p.cgroup = v
+	case *zfsHook:
+		p.zfs = v
+	case *dockerHook:
+		p.docker = v
 	}
 	p.hooks = append(p.hooks, hook)
 }
@@ -95,6 +103,14 @@ func (p *baseProvider) Cgroup() *cgroupHook {
 	return p.cgroup
 }
 
+func (p *baseProvider) ZFS() *zfsHook {
+	return p.zfs
+}
+
+func (p *baseProvider) Docker() *dockerHook {
+	return p.docker
+}
+
 func (p *baseProvider) prepareLogFile() error {
 	if p.LogFile() == "/dev/null" {
 		p.cmd.SetLogFile(nil)
Worker cgroup hook — the subsystem and memory limit now come from configuration instead of the removed `initCgroup` probe and the hardcoded 128M:

@@ -15,35 +15,31 @@ import (
 	"github.com/codeskyblue/go-sh"
 )
 
-var cgSubsystem = "cpu"
-
 type cgroupHook struct {
 	emptyHook
 	provider  mirrorProvider
 	basePath  string
 	baseGroup string
 	created   bool
+	subsystem string
+	memLimit  string
 }
 
-func initCgroup(basePath string) {
-	if _, err := os.Stat(filepath.Join(basePath, "memory")); err == nil {
-		cgSubsystem = "memory"
-		return
-	}
-	logger.Warning("Memory subsystem of cgroup not enabled, fallback to cpu")
-}
-
-func newCgroupHook(p mirrorProvider, basePath, baseGroup string) *cgroupHook {
+func newCgroupHook(p mirrorProvider, basePath, baseGroup, subsystem, memLimit string) *cgroupHook {
 	if basePath == "" {
 		basePath = "/sys/fs/cgroup"
 	}
 	if baseGroup == "" {
 		baseGroup = "tunasync"
 	}
+	if subsystem == "" {
+		subsystem = "cpu"
+	}
 	return &cgroupHook{
 		provider:  p,
 		basePath:  basePath,
 		baseGroup: baseGroup,
+		subsystem: subsystem,
 	}
 }
@@ -52,13 +48,15 @@ func (c *cgroupHook) preExec() error {
 	if err := sh.Command("cgcreate", "-g", c.Cgroup()).Run(); err != nil {
 		return err
 	}
-	if cgSubsystem != "memory" {
+	if c.subsystem != "memory" {
 		return nil
 	}
-	if c.provider.Type() == provRsync || c.provider.Type() == provTwoStageRsync {
+	if c.memLimit != "" {
 		gname := fmt.Sprintf("%s/%s", c.baseGroup, c.provider.Name())
 		return sh.Command(
-			"cgset", "-r", "memory.limit_in_bytes=128M", gname,
+			"cgset", "-r",
+			fmt.Sprintf("memory.limit_in_bytes=%s", c.memLimit),
+			gname,
 		).Run()
 	}
 	return nil
@@ -76,7 +74,7 @@ func (c *cgroupHook) postExec() error {
 
 func (c *cgroupHook) Cgroup() string {
 	name := c.provider.Name()
-	return fmt.Sprintf("%s:%s/%s", cgSubsystem, c.baseGroup, name)
+	return fmt.Sprintf("%s:%s/%s", c.subsystem, c.baseGroup, name)
 }
 
 func (c *cgroupHook) killAll() error {
@@ -87,7 +85,7 @@ func (c *cgroupHook) killAll() error {
 
 	readTaskList := func() ([]int, error) {
 		taskList := []int{}
-		taskFile, err := os.Open(filepath.Join(c.basePath, cgSubsystem, c.baseGroup, name, "tasks"))
+		taskFile, err := os.Open(filepath.Join(c.basePath, c.subsystem, c.baseGroup, name, "tasks"))
 		if err != nil {
 			return taskList, err
 		}
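Taken together with the config changes later in this changeset, the cgroup subsystem and per-mirror memory limit become declarative. A minimal sketch of how this might look in a worker config file, assuming the TOML key names from the struct tags in this changeset (`subsystem`, `memory_limit`); the group name, mirror name, and limit value are illustrative:

```toml
[cgroup]
enable = true
base_path = "/sys/fs/cgroup"
group = "tunasync"
subsystem = "memory"      # falls back to "cpu" when omitted

[[mirrors]]
name = "example-mirror"   # hypothetical mirror name
memory_limit = "512M"     # applied via `cgset -r memory.limit_in_bytes=512M`
```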
Worker cgroup tests — updated for the new constructor signature and explicit error handling:

@@ -72,11 +72,14 @@ sleep 30
 	provider, err := newCmdProvider(c)
 	So(err, ShouldBeNil)
 
-	initCgroup("/sys/fs/cgroup")
-	cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
+	cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "")
 	provider.AddHook(cg)
 
 	err = cg.preExec()
+	if err != nil {
+		logger.Errorf("Failed to create cgroup")
+		return
+	}
 	So(err, ShouldBeNil)
 
 	go func() {
@@ -129,15 +132,18 @@ sleep 30
 	provider, err := newRsyncProvider(c)
 	So(err, ShouldBeNil)
 
-	initCgroup("/sys/fs/cgroup")
-	cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync")
+	cg := newCgroupHook(provider, "/sys/fs/cgroup", "tunasync", "cpu", "512M")
 	provider.AddHook(cg)
 
-	cg.preExec()
-	if cgSubsystem == "memory" {
+	err = cg.preExec()
+	if err != nil {
+		logger.Errorf("Failed to create cgroup")
+		return
+	}
+	if cg.subsystem == "memory" {
 		memoLimit, err := ioutil.ReadFile(filepath.Join(cg.basePath, "memory", cg.baseGroup, provider.Name(), "memory.limit_in_bytes"))
 		So(err, ShouldBeNil)
-		So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(128*1024*1024))
+		So(strings.Trim(string(memoLimit), "\n"), ShouldEqual, strconv.Itoa(512*1024*1024))
 	}
 	cg.postExec()
 })
Worker configuration — new ZFS, Docker, and multi-manager options:

@@ -37,6 +37,8 @@ type Config struct {
 	Manager managerConfig `toml:"manager"`
 	Server  serverConfig  `toml:"server"`
 	Cgroup  cgroupConfig  `toml:"cgroup"`
+	ZFS     zfsConfig     `toml:"zfs"`
+	Docker  dockerConfig  `toml:"docker"`
 	Include includeConfig `toml:"include"`
 	Mirrors []mirrorConfig `toml:"mirrors"`
 }
@@ -54,8 +56,17 @@ type globalConfig struct {
 
 type managerConfig struct {
 	APIBase string `toml:"api_base"`
-	CACert  string `toml:"ca_cert"`
-	Token   string `toml:"token"`
+	// this option overrides the APIBase
+	APIList []string `toml:"api_base_list"`
+	CACert  string   `toml:"ca_cert"`
+	// Token string `toml:"token"`
+}
+
+func (mc managerConfig) APIBaseList() []string {
+	if len(mc.APIList) > 0 {
+		return mc.APIList
+	}
+	return []string{mc.APIBase}
 }
 
 type serverConfig struct {
@@ -67,9 +78,21 @@ type serverConfig struct {
 }
 
 type cgroupConfig struct {
-	Enable   bool   `toml:"enable"`
-	BasePath string `toml:"base_path"`
-	Group    string `toml:"group"`
+	Enable    bool   `toml:"enable"`
+	BasePath  string `toml:"base_path"`
+	Group     string `toml:"group"`
+	Subsystem string `toml:"subsystem"`
+}
+
+type dockerConfig struct {
+	Enable  bool     `toml:"enable"`
+	Volumes []string `toml:"volumes"`
+	Options []string `toml:"options"`
+}
+
+type zfsConfig struct {
+	Enable bool   `toml:"enable"`
+	Zpool  string `toml:"zpool"`
 }
 
 type includeConfig struct {
@@ -104,6 +127,12 @@ type mirrorConfig struct {
 	Username      string `toml:"username"`
 	Password      string `toml:"password"`
 	Stage1Profile string `toml:"stage1_profile"`
+
+	MemoryLimit string `toml:"memory_limit"`
+
+	DockerImage   string   `toml:"docker_image"`
+	DockerVolumes []string `toml:"docker_volumes"`
+	DockerOptions []string `toml:"docker_options"`
 }
 
 // LoadConfig loads configuration
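The new `api_base_list` option lets one worker register with and report to several managers; `APIBaseList()` falls back to the single `api_base` when the list is empty. A hedged sketch of the manager section under these assumptions (the host names and certificate path are made up):

```toml
[manager]
# single manager (previous behaviour)
api_base = "https://manager1.example.com:14242"
# multiple managers; when present, this overrides api_base
api_base_list = [
    "https://manager1.example.com:14242",
    "https://manager2.example.com:14242",
]
ca_cert = "/etc/tunasync/manager-ca.pem"
```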
worker/docker.go — new file, 98 lines:
@@ -0,0 +1,98 @@
+package worker
+
+import (
+	"fmt"
+	"os"
+)
+
+type dockerHook struct {
+	emptyHook
+	provider mirrorProvider
+	image    string
+	volumes  []string
+	options  []string
+}
+
+func newDockerHook(p mirrorProvider, gCfg dockerConfig, mCfg mirrorConfig) *dockerHook {
+	volumes := []string{}
+	volumes = append(volumes, gCfg.Volumes...)
+	volumes = append(volumes, mCfg.DockerVolumes...)
+
+	options := []string{}
+	options = append(options, gCfg.Options...)
+	options = append(options, mCfg.DockerOptions...)
+
+	return &dockerHook{
+		provider: p,
+		image:    mCfg.DockerImage,
+		volumes:  volumes,
+		options:  options,
+	}
+}
+
+func (d *dockerHook) preExec() error {
+	p := d.provider
+	logFile := p.LogFile()
+	workingDir := p.WorkingDir()
+
+	if _, err := os.Stat(workingDir); os.IsNotExist(err) {
+		logger.Debugf("Making dir %s", workingDir)
+		if err = os.MkdirAll(workingDir, 0755); err != nil {
+			return fmt.Errorf("Error making dir %s: %s", workingDir, err.Error())
+		}
+	}
+
+	logFileNew := "/log_latest"
+	workingDirNew := "/data"
+
+	// Override workingDir
+	ctx := p.EnterContext()
+	ctx.Set(_WorkingDirKey, workingDirNew)
+	ctx.Set(_LogFileKey+":docker", logFileNew)
+	ctx.Set(
+		"volumes", []string{
+			fmt.Sprintf("%s:%s", logFile, logFileNew),
+			fmt.Sprintf("%s:%s", workingDir, workingDirNew),
+		},
+	)
+	return nil
+}
+
+func (d *dockerHook) postExec() error {
+	// sh.Command(
+	// 	"docker", "rm", "-f", d.Name(),
+	// ).Run()
+	d.provider.ExitContext()
+	return nil
+}
+
+// Volumes returns the configured volumes and
+// runtime-needed volumes, including mirror dirs
+// and log files
+func (d *dockerHook) Volumes() []string {
+	vols := make([]string, len(d.volumes))
+	copy(vols, d.volumes)
+
+	p := d.provider
+	ctx := p.Context()
+	if ivs, ok := ctx.Get("volumes"); ok {
+		vs := ivs.([]string)
+		vols = append(vols, vs...)
+	}
+	return vols
+}
+
+func (d *dockerHook) LogFile() string {
+	p := d.provider
+	ctx := p.Context()
+	if iv, ok := ctx.Get(_LogFileKey + ":docker"); ok {
+		v := iv.(string)
+		return v
+	}
+	return p.LogFile()
+}
+
+func (d *dockerHook) Name() string {
+	p := d.provider
+	return "tunasync-job-" + p.Name()
+}
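A hedged sketch of how a docker-backed mirror job might be configured, based on the `dockerConfig` and `mirrorConfig` fields introduced above. The `provider`/`command` keys, image name, and volume paths are illustrative assumptions, not taken from this changeset:

```toml
[docker]
enable = true
volumes = ["/etc/localtime:/etc/localtime:ro"]      # hypothetical, applied to every docker-backed job
options = ["--cpus=2"]                              # hypothetical extra `docker run` flags

[[mirrors]]
name = "example-docker-mirror"                      # hypothetical mirror
provider = "command"                                # assumed key names
command = "/bin/sync.sh"
docker_image = "alpine"                             # setting an image enables the docker hook
docker_volumes = ["/opt/scripts/sync.sh:/bin/sync.sh:ro"]
docker_options = ["--memory=512m"]
```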
worker/docker_test.go — new file, 97 lines:
@@ -0,0 +1,97 @@
+package worker
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/codeskyblue/go-sh"
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func getDockerByName(name string) (string, error) {
+	// docker ps -f 'name=$name' --format '{{.Names}}'
+	out, err := sh.Command(
+		"docker", "ps",
+		"--filter", "name="+name,
+		"--format", "{{.Names}}",
+	).Output()
+	return string(out), err
+}
+
+func TestDocker(t *testing.T) {
+	Convey("Docker Should Work", t, func(ctx C) {
+		tmpDir, err := ioutil.TempDir("", "tunasync")
+		defer os.RemoveAll(tmpDir)
+		So(err, ShouldBeNil)
+		cmdScript := filepath.Join(tmpDir, "cmd.sh")
+		tmpFile := filepath.Join(tmpDir, "log_file")
+		expectedOutput := "HELLO_WORLD"
+
+		c := cmdConfig{
+			name:        "tuna-docker",
+			upstreamURL: "http://mirrors.tuna.moe/",
+			command:     "/bin/cmd.sh",
+			workingDir:  tmpDir,
+			logDir:      tmpDir,
+			logFile:     tmpFile,
+			interval:    600 * time.Second,
+			env: map[string]string{
+				"TEST_CONTENT": expectedOutput,
+			},
+		}
+
+		cmdScriptContent := `#!/bin/sh
+echo ${TEST_CONTENT}
+sleep 10
+`
+		err = ioutil.WriteFile(cmdScript, []byte(cmdScriptContent), 0755)
+		So(err, ShouldBeNil)
+
+		provider, err := newCmdProvider(c)
+		So(err, ShouldBeNil)
+
+		d := &dockerHook{
+			provider: provider,
+			image:    "alpine",
+			volumes: []string{
+				fmt.Sprintf("%s:%s", cmdScript, "/bin/cmd.sh"),
+			},
+		}
+		provider.AddHook(d)
+		So(provider.Docker(), ShouldNotBeNil)
+
+		err = d.preExec()
+		So(err, ShouldBeNil)
+
+		go func() {
+			err = provider.Run()
+			ctx.So(err, ShouldNotBeNil)
+		}()
+
+		time.Sleep(1 * time.Second)
+
+		// assert container running
+		names, err := getDockerByName(d.Name())
+		So(err, ShouldBeNil)
+		So(names, ShouldEqual, d.Name()+"\n")
+
+		err = provider.Terminate()
+		So(err, ShouldBeNil)
+
+		// container should be terminated and removed
+		names, err = getDockerByName(d.Name())
+		So(err, ShouldBeNil)
+		So(names, ShouldEqual, "")
+
+		// check log content
+		loggedContent, err := ioutil.ReadFile(provider.LogFile())
+		So(err, ShouldBeNil)
+		So(string(loggedContent), ShouldEqual, expectedOutput+"\n")
+
+		d.postExec()
+	})
+}
Worker provider interface and hook wiring — a Docker hook takes precedence over the cgroup hook:

@@ -36,6 +36,10 @@ type mirrorProvider interface {
 	IsRunning() bool
 	// Cgroup
 	Cgroup() *cgroupHook
+	// ZFS
+	ZFS() *zfsHook
+	// Docker
+	Docker() *dockerHook
 
 	AddHook(hook jobHook)
 	Hooks() []jobHook
@@ -162,10 +166,22 @@ func newMirrorProvider(mirror mirrorConfig, cfg *Config) mirrorProvider {
 	// Add Logging Hook
 	provider.AddHook(newLogLimiter(provider))
 
-	// Add Cgroup Hook
-	if cfg.Cgroup.Enable {
+	// Add ZFS Hook
+	if cfg.ZFS.Enable {
+		provider.AddHook(newZfsHook(provider, cfg.ZFS.Zpool))
+	}
+
+	// Add Docker Hook
+	if cfg.Docker.Enable && len(mirror.DockerImage) > 0 {
+		provider.AddHook(newDockerHook(provider, cfg.Docker, mirror))
+
+	} else if cfg.Cgroup.Enable {
+		// Add Cgroup Hook
 		provider.AddHook(
-			newCgroupHook(provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group),
+			newCgroupHook(
+				provider, cfg.Cgroup.BasePath, cfg.Cgroup.Group,
+				cfg.Cgroup.Subsystem, mirror.MemoryLimit,
+			),
 		)
 	}
 
Worker command runner — jobs can now run inside a Docker container:

@@ -2,6 +2,7 @@ package worker
 
 import (
 	"errors"
+	"fmt"
 	"os"
 	"os/exec"
 	"strings"
@@ -9,6 +10,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/codeskyblue/go-sh"
 	"golang.org/x/sys/unix"
 )
 
@@ -31,11 +33,40 @@ type cmdJob struct {
 func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string, env map[string]string) *cmdJob {
 	var cmd *exec.Cmd
 
-	if provider.Cgroup() != nil {
+	if d := provider.Docker(); d != nil {
+		c := "docker"
+		args := []string{
+			"run", "--rm",
+			"-a", "STDOUT", "-a", "STDERR",
+			"--name", d.Name(),
+			"-w", workingDir,
+		}
+		// add volumes
+		for _, vol := range d.Volumes() {
+			logger.Debugf("volume: %s", vol)
+			args = append(args, "-v", vol)
+		}
+		// set env
+		env["TUNASYNC_LOG_FILE"] = d.LogFile()
+		for k, v := range env {
+			kv := fmt.Sprintf("%s=%s", k, v)
+			args = append(args, "-e", kv)
+		}
+		// apply options
+		args = append(args, d.options...)
+		// apply image and command
+		args = append(args, d.image)
+		// apply command
+		args = append(args, cmdAndArgs...)
+
+		cmd = exec.Command(c, args...)
+
+	} else if provider.Cgroup() != nil {
 		c := "cgexec"
 		args := []string{"-g", provider.Cgroup().Cgroup()}
 		args = append(args, cmdAndArgs...)
 		cmd = exec.Command(c, args...)
+
 	} else {
 		if len(cmdAndArgs) == 1 {
 			cmd = exec.Command(cmdAndArgs[0])
@@ -48,25 +79,28 @@ func newCmdJob(provider mirrorProvider, cmdAndArgs []string, workingDir string,
 		}
 	}
 
-	logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
-	if _, err := os.Stat(workingDir); os.IsNotExist(err) {
-		logger.Debugf("Making dir %s", workingDir)
-		if err = os.MkdirAll(workingDir, 0755); err != nil {
-			logger.Errorf("Error making dir %s", workingDir)
+	if provider.Docker() == nil {
+		logger.Debugf("Executing command %s at %s", cmdAndArgs[0], workingDir)
+		if _, err := os.Stat(workingDir); os.IsNotExist(err) {
+			logger.Debugf("Making dir %s", workingDir)
+			if err = os.MkdirAll(workingDir, 0755); err != nil {
+				logger.Errorf("Error making dir %s: %s", workingDir, err.Error())
+			}
 		}
+		cmd.Dir = workingDir
+		cmd.Env = newEnviron(env, true)
 	}
 
-	cmd.Dir = workingDir
-	cmd.Env = newEnviron(env, true)
-
 	return &cmdJob{
 		cmd:        cmd,
 		workingDir: workingDir,
 		env:        env,
 		provider:   provider,
 	}
 }
 
 func (c *cmdJob) Start() error {
 	// logger.Debugf("Command start: %v", c.cmd.Args)
 	c.finished = make(chan empty, 1)
 	return c.cmd.Start()
 }
@@ -95,6 +129,14 @@ func (c *cmdJob) Terminate() error {
 	if c.cmd == nil || c.cmd.Process == nil {
 		return errProcessNotStarted
 	}
 
+	if d := c.provider.Docker(); d != nil {
+		sh.Command(
+			"docker", "stop", "-t", "2", d.Name(),
+		).Run()
+		return nil
+	}
+
 	err := unix.Kill(c.cmd.Process.Pid, syscall.SIGTERM)
 	if err != nil {
 		return err
Worker main — drop the cgroup probe, and register with and report to every configured manager:

@@ -55,9 +55,6 @@ func GetTUNASyncWorker(cfg *Config) *Worker {
 		w.httpClient = httpClient
 	}
 
-	if cfg.Cgroup.Enable {
-		initCgroup(cfg.Cgroup.BasePath)
-	}
 	w.initJobs()
 	w.makeHTTPServer()
 	tunasyncWorker = w
@@ -389,28 +386,21 @@ func (w *Worker) URL() string {
 }
 
 func (w *Worker) registorWorker() {
-	url := fmt.Sprintf(
-		"%s/workers",
-		w.cfg.Manager.APIBase,
-	)
-
 	msg := WorkerStatus{
 		ID:  w.Name(),
 		URL: w.URL(),
 	}
 
-	if _, err := PostJSON(url, msg, w.httpClient); err != nil {
-		logger.Errorf("Failed to register worker")
+	for _, root := range w.cfg.Manager.APIBaseList() {
+		url := fmt.Sprintf("%s/workers", root)
+		logger.Debugf("register on manager url: %s", url)
+		if _, err := PostJSON(url, msg, w.httpClient); err != nil {
+			logger.Errorf("Failed to register worker")
+		}
 	}
 }
 
 func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
-	url := fmt.Sprintf(
-		"%s/workers/%s/jobs/%s",
-		w.cfg.Manager.APIBase,
-		w.Name(),
-		jobMsg.name,
-	)
 	p := job.provider
 	smsg := MirrorStatus{
 		Name: jobMsg.name,
@@ -422,19 +412,22 @@ func (w *Worker) updateStatus(job *mirrorJob, jobMsg jobMessage) {
 		ErrorMsg: jobMsg.msg,
 	}
 
-	if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
-		logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
+	for _, root := range w.cfg.Manager.APIBaseList() {
+		url := fmt.Sprintf(
+			"%s/workers/%s/jobs/%s", root, w.Name(), jobMsg.name,
+		)
+		logger.Debugf("reporting on manager url: %s", url)
+		if _, err := PostJSON(url, smsg, w.httpClient); err != nil {
+			logger.Errorf("Failed to update mirror(%s) status: %s", jobMsg.name, err.Error())
+		}
 	}
 }
 
 func (w *Worker) fetchJobStatus() []MirrorStatus {
 	var mirrorList []MirrorStatus
+	apiBase := w.cfg.Manager.APIBaseList()[0]
 
-	url := fmt.Sprintf(
-		"%s/workers/%s/jobs",
-		w.cfg.Manager.APIBase,
-		w.Name(),
-	)
+	url := fmt.Sprintf("%s/workers/%s/jobs", apiBase, w.Name())
 
 	if _, err := GetJSON(url, &mirrorList, w.httpClient); err != nil {
 		logger.Errorf("Failed to fetch job status: %s", err.Error())
worker/zfs_hook.go — new file, 45 lines:
@@ -0,0 +1,45 @@
+package worker
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/codeskyblue/go-sh"
+)
+
+type zfsHook struct {
+	emptyHook
+	provider mirrorProvider
+	zpool    string
+}
+
+func newZfsHook(provider mirrorProvider, zpool string) *zfsHook {
+	return &zfsHook{
+		provider: provider,
+		zpool:    zpool,
+	}
+}
+
+// create zfs dataset for a new mirror
+func (z *zfsHook) preJob() error {
+	workingDir := z.provider.WorkingDir()
+	if _, err := os.Stat(workingDir); os.IsNotExist(err) {
+		// sudo zfs create $zfsDataset
+		// sudo zfs set mountpoint=${absPath} ${zfsDataset}
+
+		zfsDataset := fmt.Sprintf("%s/%s", z.zpool, z.provider.Name())
+		// Unknown issue of ZFS:
+		// dataset name should not contain upper case letters
+		zfsDataset = strings.ToLower(zfsDataset)
+		logger.Infof("Creating ZFS dataset %s", zfsDataset)
+		if err := sh.Command("sudo", "zfs", "create", zfsDataset).Run(); err != nil {
+			return err
+		}
+		logger.Infof("Mount ZFS dataset %s to %s", zfsDataset, workingDir)
+		if err := sh.Command("sudo", "zfs", "set", "mountpoint="+workingDir, zfsDataset).Run(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
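The hook derives the dataset name from the configured zpool plus the lowercased mirror name and mounts it at the mirror's working directory. A minimal sketch of the corresponding config section, assuming the `zfsConfig` struct tags above; the pool name is illustrative:

```toml
[zfs]
enable = true
zpool = "tank"   # a mirror named "Foo" would get dataset "tank/foo"
```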