Mirror of https://github.com/syncthing/syncthing.git, synced 2026-01-05 04:19:10 -05:00
Compare commits
176 Commits
| SHA1 |
|---|
| ab20c16982 |
| 0c1df81ee9 |
| d324b2ac86 |
| 0231089b99 |
| 74ffb85467 |
| dcd280e6e2 |
| 4e56dbd883 |
| 13d7881b80 |
| 1d2c53bf3c |
| 9c449c966b |
| ec2d4638e3 |
| 0ce92befc8 |
| 2167ce9656 |
| 0dc85d74aa |
| b6e3f8037b |
| 00e7161a8f |
| b5a7879eca |
| 4355dc69ea |
| 371ba69447 |
| 79ef57d0ea |
| 8bd6bdd397 |
| ce3248cea7 |
| 99a6f3a5b6 |
| c2e10dc156 |
| fc914f3237 |
| 811d3752d0 |
| 00827dd5c1 |
| a981c21d27 |
| 163dc122f3 |
| 83727e0824 |
| 529d3ef764 |
| fefbf4dcc8 |
| b9c6d3ae09 |
| 7bea8c758a |
| 479c0d3f16 |
| d9ce7c3166 |
| 69979996d9 |
| da58e5c50c |
| 44e259142f |
| 77970d5113 |
| 2b8ee4c7a5 |
| be952e5f2d |
| 43ebac4242 |
| f08a0ed01c |
| 612fdff377 |
| 8ccb7f1924 |
| 65d0ca8aa9 |
| e82ed6e3d3 |
| 4b815fc086 |
| 7eaf843de2 |
| 110e1ae6f9 |
| 896f9725ec |
| 1a529e9d5d |
| 36ef17df8f |
| 955ac7775e |
| 8f69e874c4 |
| ac06fd97e9 |
| 3726b7d112 |
| 377200591e |
| 4afc898c2f |
| ff7e4fef55 |
| 9ffddb1923 |
| 896b857fc4 |
| acc5d2675b |
| 6ece4c1fd2 |
| cc09f0170d |
| bb234d6c0e |
| e6acc64758 |
| f18cf545b9 |
| 6d64daaba3 |
| 47f48faed7 |
| cfa834177b |
| c454fc8baa |
| dbe7fa9155 |
| 4d842f7d3b |
| 0e68221c91 |
| 19f63c7ea3 |
| fb939ec496 |
| 39df3173d4 |
| 429672e0b4 |
| 605fd6d726 |
| 3c476542d2 |
| 31874f3ebb |
| 77942747db |
| fe01b396ba |
| 3583949706 |
| 23fc22ebc5 |
| cba163a1fd |
| a8e2c8edb6 |
| 3e501d9036 |
| 9ca101756d |
| a873d12c65 |
| 8ff670c564 |
| b1ed2802fb |
| b70cb580c8 |
| 28be3ba788 |
| d4770ddc77 |
| cbe1220680 |
| 0b95c5fa76 |
| 0343bca257 |
| 878016db39 |
| 1f4fde9525 |
| 5b9d8a838f |
| 8b19cb1e11 |
| ce1e259bb4 |
| 2238a288d9 |
| c8ee2a5cf6 |
| 1704827d04 |
| a156e88eef |
| 94d0195b63 |
| 1616edcee3 |
| 6505e123bb |
| 63e4659282 |
| f3f5557c8e |
| b794726e1f |
| 3d59740a0a |
| 66fb65b01f |
| 5c2fcbfd19 |
| f9b72330a8 |
| 822b6ac36b |
| 77f7778292 |
| aed2c66e52 |
| 68a1fd010f |
| ac8b3342ac |
| 0ea90dd932 |
| 718b1ce2b7 |
| 29f7510f5a |
| a7f9ed4a80 |
| 1baefea410 |
| 563cec8923 |
| a3c340ece9 |
| cb24638ec9 |
| 2fb24dc2cc |
| 9aa2d2c92f |
| d1c5100c98 |
| 42e677c055 |
| 27bba2c0c2 |
| feff334547 |
| 713cf357ce |
| 5342bec1b7 |
| 7df75e681d |
| 8dc826b234 |
| 9ef37e1485 |
| 7517d18fbb |
| 42d0fee536 |
| 2ca9d3b5c5 |
| 9cde068f2a |
| 1243083831 |
| 356c5055ad |
| 19693734a3 |
| 17e60b9e0c |
| ac22b2d00a |
| de0b4270df |
| e738af7c56 |
| a28441a9bf |
| 2e313716e5 |
| 0b5ff1f5f7 |
| 0fe6d97d3d |
| 0756e42a85 |
| 13ebe1c87f |
| aea7fa5f22 |
| 403ce7e597 |
| 4704d3bc48 |
| 2794b04243 |
| 1ce64971fd |
| eb6d80eac4 |
| a8db3351ae |
| 23a900e096 |
| 5a304cf295 |
| 136b3742bf |
| 21e0f98fe2 |
| 2bb5b2244b |
| 2f281799c1 |
| 18a58a2ddc |
| f283215fce |
| 495809ac9e |
.github/CODEOWNERS (vendored), 2 lines changed

@@ -1,2 +0,0 @@
-/AUTHORS @calmh
-/*.md @calmh
.github/dependabot.yml (vendored), 4 lines changed

@@ -3,11 +3,11 @@ updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
-      interval: weekly
+      interval: monthly
     open-pull-requests-limit: 10

   - package-ecosystem: "gomod"
     directory: "/"
     schedule:
-      interval: weekly
+      interval: monthly
     open-pull-requests-limit: 10
.github/workflows/build-infra-dockers.yaml (vendored), 19 lines changed

@@ -7,17 +7,21 @@ on:
       - infra-*

 env:
-  GO_VERSION: "~1.22.3"
+  GO_VERSION: "~1.23.0"
   CGO_ENABLED: "0"
   BUILD_USER: docker
   BUILD_HOST: github.syncthing.net

+permissions:
+  contents: read
+  packages: write
+
 jobs:
   docker-syncthing:
     name: Build and push Docker images
     if: github.repository == 'syncthing/syncthing'
     runs-on: ubuntu-latest
-    environment: docker
+    environment: release
     strategy:
       matrix:
         pkg:
@@ -41,6 +45,13 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

+      - name: Login to GHCR
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
       - name: Build binaries
         run: |
           for arch in arm64 amd64; do
@@ -53,13 +64,13 @@ jobs:

       - name: Set Docker tags (all branches)
         run: |
-          tags=syncthing/${{ matrix.pkg }}:${{ github.sha }}
+          tags=docker.io/syncthing/${{ matrix.pkg }}:${{ github.sha }},ghcr.io/syncthing/infra/${{ matrix.pkg }}:${{ github.sha }}
           echo "TAGS=$tags" >> $GITHUB_ENV

       - name: Set Docker tags (latest)
         if: github.ref == 'refs/heads/infrastructure'
         run: |
-          tags=syncthing/${{ matrix.pkg }}:latest,${{ env.TAGS }}
+          tags=docker.io/syncthing/${{ matrix.pkg }}:latest,ghcr.io/syncthing/infra/${{ matrix.pkg }}:latest,${{ env.TAGS }}
           echo "TAGS=$tags" >> $GITHUB_ENV

       - name: Build and push
.github/workflows/build-syncthing.yaml (vendored), 233 lines changed
@@ -12,7 +12,7 @@ env:
|
||||
# The go version to use for builds. We set check-latest to true when
|
||||
# installing, so we get the latest patch version that matches the
|
||||
# expression.
|
||||
GO_VERSION: "~1.22.3"
|
||||
GO_VERSION: "~1.23.0"
|
||||
|
||||
# Optimize compatibility on the slow architectures.
|
||||
GO386: softfloat
|
||||
@@ -48,7 +48,7 @@ jobs:
|
||||
runner: ["windows-latest", "ubuntu-latest", "macos-latest"]
|
||||
# The oldest version in this list should match what we have in our go.mod.
|
||||
# Variables don't seem to be supported here, or we could have done something nice.
|
||||
go: ["~1.21.7", "~1.22.3"]
|
||||
go: ["~1.22.6", "~1.23.0"]
|
||||
runs-on: ${{ matrix.runner }}
|
||||
steps:
|
||||
- name: Set git to use LF
|
||||
@@ -83,8 +83,8 @@ jobs:
|
||||
go run build.go test | go-test-json-to-loki
|
||||
env:
|
||||
GOFLAGS: "-json"
|
||||
LOKI_URL: ${{ vars.LOKI_URL }}
|
||||
LOKI_USER: ${{ vars.LOKI_USER }}
|
||||
LOKI_URL: ${{ secrets.LOKI_URL }}
|
||||
LOKI_USER: ${{ secrets.LOKI_USER }}
|
||||
LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }}
|
||||
LOKI_LABELS: "go=${{ matrix.go }},runner=${{ matrix.runner }},repo=${{ github.repository }},ref=${{ github.ref }}"
|
||||
|
||||
@@ -137,8 +137,8 @@ jobs:
|
||||
|
||||
package-windows:
|
||||
name: Package for Windows
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
|
||||
environment: signing
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
|
||||
environment: release
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- name: Set git to use LF
|
||||
@@ -153,6 +153,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
@@ -178,10 +179,13 @@ jobs:
|
||||
|
||||
- name: Create packages
|
||||
run: |
|
||||
go run build.go -goarch amd64 zip
|
||||
go run build.go -goarch arm zip
|
||||
go run build.go -goarch arm64 zip
|
||||
go run build.go -goarch 386 zip
|
||||
$targets = 'syncthing', 'stdiscosrv', 'strelaysrv'
|
||||
$archs = 'amd64', 'arm', 'arm64', '386'
|
||||
foreach ($arch in $archs) {
|
||||
foreach ($tgt in $targets) {
|
||||
go run build.go -goarch $arch zip $tgt
|
||||
}
|
||||
}
|
||||
env:
|
||||
CGO_ENABLED: "0"
|
||||
CODESIGN_SIGNTOOL: ${{ secrets.CODESIGN_SIGNTOOL }}
|
||||
@@ -193,7 +197,7 @@ jobs:
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: packages-windows
|
||||
path: syncthing-windows-*.zip
|
||||
path: "*.zip"
|
||||
|
||||
#
|
||||
# Linux
|
||||
@@ -206,6 +210,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
@@ -229,7 +234,9 @@ jobs:
|
||||
run: |
|
||||
archs=$(go tool dist list | grep linux | sed 's#linux/##')
|
||||
for goarch in $archs ; do
|
||||
go run build.go -goarch "$goarch" tar
|
||||
for tgt in syncthing stdiscosrv strelaysrv ; do
|
||||
go run build.go -goarch "$goarch" tar "$tgt"
|
||||
done
|
||||
done
|
||||
env:
|
||||
CGO_ENABLED: "0"
|
||||
@@ -238,7 +245,9 @@ jobs:
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: packages-linux
|
||||
path: syncthing-linux-*.tar.gz
|
||||
path: |
|
||||
*.tar.gz
|
||||
compat.json
|
||||
|
||||
#
|
||||
# macOS
|
||||
@@ -246,13 +255,14 @@ jobs:
|
||||
|
||||
package-macos:
|
||||
name: Package for macOS
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
|
||||
environment: signing
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
|
||||
environment: release
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
@@ -299,7 +309,9 @@ jobs:
|
||||
|
||||
- name: Create package (amd64)
|
||||
run: |
|
||||
go run build.go -goarch amd64 zip
|
||||
for tgt in syncthing stdiscosrv strelaysrv ; do
|
||||
go run build.go -goarch amd64 zip "$tgt"
|
||||
done
|
||||
env:
|
||||
CGO_ENABLED: "1"
|
||||
|
||||
@@ -313,7 +325,9 @@ jobs:
|
||||
go "\$@"
|
||||
EOT
|
||||
chmod 755 xgo.sh
|
||||
go run build.go -gocmd ./xgo.sh -goarch arm64 zip
|
||||
for tgt in syncthing stdiscosrv strelaysrv ; do
|
||||
go run build.go -gocmd ./xgo.sh -goarch arm64 zip "$tgt"
|
||||
done
|
||||
env:
|
||||
CGO_ENABLED: "1"
|
||||
|
||||
@@ -337,15 +351,14 @@ jobs:
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: packages-macos
|
||||
path: syncthing-*.zip
|
||||
path: "*.zip"
|
||||
|
||||
notarize-macos:
|
||||
name: Notarize for macOS
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
|
||||
environment: signing
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
|
||||
environment: release
|
||||
needs:
|
||||
- package-macos
|
||||
- basics
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Download artifacts
|
||||
@@ -357,7 +370,7 @@ jobs:
|
||||
run: |
|
||||
APPSTORECONNECT_API_KEY_PATH="$RUNNER_TEMP/apikey.p8"
|
||||
echo "$APPSTORECONNECT_API_KEY" | base64 -d -o "$APPSTORECONNECT_API_KEY_PATH"
|
||||
for file in syncthing-macos-*.zip ; do
|
||||
for file in *-macos-*.zip ; do
|
||||
xcrun notarytool submit \
|
||||
-k "$APPSTORECONNECT_API_KEY_PATH" \
|
||||
-d "$APPSTORECONNECT_API_KEY_ID" \
|
||||
@@ -380,6 +393,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
@@ -422,9 +436,11 @@ jobs:
|
||||
goos="${plat%/*}"
|
||||
goarch="${plat#*/}"
|
||||
echo "::group ::$plat"
|
||||
if ! go run build.go -goos "$goos" -goarch "$goarch" tar 2>/dev/null; then
|
||||
echo "::warning ::Failed to build for $plat"
|
||||
fi
|
||||
for tgt in syncthing stdiscosrv strelaysrv ; do
|
||||
if ! go run build.go -goos "$goos" -goarch "$goarch" tar "$tgt" 2>/dev/null; then
|
||||
echo "::warning ::Failed to build $tgt for $plat"
|
||||
fi
|
||||
done
|
||||
echo "::endgroup::"
|
||||
done
|
||||
env:
|
||||
@@ -434,7 +450,7 @@ jobs:
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: packages-other
|
||||
path: syncthing-*.tar.gz
|
||||
path: "*.tar.gz"
|
||||
|
||||
#
|
||||
# Source
|
||||
@@ -447,6 +463,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
@@ -482,10 +499,9 @@ jobs:
|
||||
|
||||
sign-for-upgrade:
|
||||
name: Sign for upgrade
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
|
||||
environment: signing
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
|
||||
environment: release
|
||||
needs:
|
||||
- basics
|
||||
- package-windows
|
||||
- package-linux
|
||||
- package-macos
|
||||
@@ -496,6 +512,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -514,7 +531,7 @@ jobs:
|
||||
|
||||
- name: Install signing tool
|
||||
run: |
|
||||
go install ./cmd/stsigtool
|
||||
go install ./cmd/dev/stsigtool
|
||||
|
||||
- name: Sign archives
|
||||
run: |
|
||||
@@ -529,30 +546,44 @@ jobs:
|
||||
env:
|
||||
STSIGTOOL_PRIVATE_KEY: ${{ secrets.STSIGTOOL_PRIVATE_KEY }}
|
||||
|
||||
- name: Create and sign .asc files
|
||||
- name: Create shasum files
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt -y install gnupg
|
||||
|
||||
export SIGNING_KEY="$RUNNER_TEMP/gpg-secret.asc"
|
||||
echo "$GNUPG_SIGNING_KEY_BASE64" | base64 -d > "$SIGNING_KEY"
|
||||
gpg --import < "$SIGNING_KEY"
|
||||
|
||||
pushd packages
|
||||
files=(*.tar.gz *.zip)
|
||||
sha1sum "${files[@]}" | gpg --clearsign > sha1sum.txt.asc
|
||||
sha256sum "${files[@]}" | gpg --clearsign > sha256sum.txt.asc
|
||||
gpg --sign --armour --detach syncthing-source-*.tar.gz
|
||||
sha1sum "${files[@]}" > sha1sum.txt
|
||||
sha256sum "${files[@]}" > sha256sum.txt
|
||||
popd
|
||||
rm -f "$SIGNING_KEY" .gnupg
|
||||
|
||||
version=$(go run build.go version)
|
||||
echo "VERSION=$version" >> $GITHUB_ENV
|
||||
|
||||
- name: Sign shasum files
|
||||
uses: docker://ghcr.io/kastelo/ezapt:latest
|
||||
with:
|
||||
args:
|
||||
sign
|
||||
packages/sha1sum.txt packages/sha256sum.txt
|
||||
env:
|
||||
GNUPG_SIGNING_KEY_BASE64: ${{ secrets.GNUPG_SIGNING_KEY_BASE64 }}
|
||||
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}
|
||||
|
||||
- name: Sign source
|
||||
uses: docker://ghcr.io/kastelo/ezapt:latest
|
||||
with:
|
||||
args:
|
||||
sign --detach --ascii
|
||||
packages/syncthing-source-${{ env.VERSION }}.tar.gz
|
||||
env:
|
||||
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}
|
||||
|
||||
- name: Archive artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: packages-signed
|
||||
path: packages/*
|
||||
path: |
|
||||
packages/*.tar.gz
|
||||
packages/*.zip
|
||||
packages/*.asc
|
||||
packages/*.json
|
||||
|
||||
#
|
||||
# Debian
|
||||
@@ -565,6 +596,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
@@ -595,7 +627,9 @@ jobs:
|
||||
- name: Package for Debian
|
||||
run: |
|
||||
for arch in amd64 i386 armhf armel arm64 ; do
|
||||
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb
|
||||
for tgt in syncthing stdiscosrv strelaysrv ; do
|
||||
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb "$tgt"
|
||||
done
|
||||
done
|
||||
env:
|
||||
BUILD_USER: debian
|
||||
@@ -613,10 +647,9 @@ jobs:
|
||||
publish-nightly:
|
||||
name: Publish nightly build
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && startsWith(github.ref, 'refs/heads/release-nightly')
|
||||
environment: signing
|
||||
environment: release
|
||||
needs:
|
||||
- sign-for-upgrade
|
||||
- notarize-macos
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -663,8 +696,8 @@ jobs:
|
||||
|
||||
publish-release-files:
|
||||
name: Publish release files
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/release'
|
||||
environment: signing
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/tags/v'))
|
||||
environment: release
|
||||
needs:
|
||||
- sign-for-upgrade
|
||||
- package-debian
|
||||
@@ -673,6 +706,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- name: Download signed packages
|
||||
uses: actions/download-artifact@v4
|
||||
@@ -723,6 +757,84 @@ jobs:
|
||||
with:
|
||||
args: sync objstore:${{ secrets.S3_BUCKET }}/release/${{ env.VERSION }} objstore:${{ secrets.S3_BUCKET }}/release/latest
|
||||
|
||||
#
|
||||
# Push Debian/APT archive
|
||||
#
|
||||
|
||||
publish-apt:
|
||||
name: Publish APT
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
|
||||
environment: release
|
||||
needs:
|
||||
- package-debian
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- name: Download packages
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: debian-packages
|
||||
path: packages
|
||||
|
||||
- name: Set version
|
||||
run: |
|
||||
version=$(go run build.go version)
|
||||
echo "Version: $version"
|
||||
echo "VERSION=$version" >> $GITHUB_ENV
|
||||
|
||||
# Decide whether packages should go to stable, candidate or nightly
|
||||
- name: Prepare packages
|
||||
run: |
|
||||
kind=stable
|
||||
if [[ $VERSION == *-rc.[0-9] ]] ; then
|
||||
kind=candidate
|
||||
elif [[ $VERSION == *-* ]] ; then
|
||||
kind=nightly
|
||||
fi
|
||||
echo "Kind: $kind"
|
||||
mkdir -p packages/syncthing/$kind
|
||||
mv packages/*.deb packages/syncthing/$kind
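For illustration, the step above routes .deb packages to stable, candidate or nightly purely from the version string. A minimal Go sketch of the same classification, mirroring the bash globs shown above; the example version strings are made up:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Mirrors the shell above: a version ending in "-rc.<digit>" is a release
// candidate, any other pre-release suffix is a nightly, everything else is
// a stable release.
var rcSuffix = regexp.MustCompile(`-rc\.[0-9]$`)

func channel(version string) string {
	switch {
	case rcSuffix.MatchString(version):
		return "candidate"
	case strings.Contains(version, "-"):
		return "nightly"
	default:
		return "stable"
	}
}

func main() {
	// Made-up example versions.
	for _, v := range []string{"v1.28.0", "v1.28.0-rc.2", "v1.28.0-dev.33"} {
		fmt.Println(v, "->", channel(v)) // stable, candidate, nightly
	}
}
```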
|
||||
|
||||
- name: Pull archive
|
||||
uses: docker://docker.io/rclone/rclone:latest
|
||||
env:
|
||||
RCLONE_CONFIG_OBJSTORE_TYPE: s3
|
||||
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
|
||||
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
|
||||
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
|
||||
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
|
||||
RCLONE_CONFIG_OBJSTORE_ACL: public-read
|
||||
with:
|
||||
args: sync objstore:syncthing-apt/dists dists
|
||||
|
||||
- name: Update archive
|
||||
uses: docker://ghcr.io/kastelo/ezapt:latest
|
||||
with:
|
||||
args:
|
||||
publish
|
||||
--add packages
|
||||
--dists dists
|
||||
env:
|
||||
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}
|
||||
|
||||
- name: Push archive
|
||||
uses: docker://docker.io/rclone/rclone:latest
|
||||
env:
|
||||
RCLONE_CONFIG_OBJSTORE_TYPE: s3
|
||||
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
|
||||
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
|
||||
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
|
||||
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
|
||||
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
|
||||
RCLONE_CONFIG_OBJSTORE_ACL: public-read
|
||||
with:
|
||||
args: sync dists -v objstore:syncthing-apt/dists
|
||||
|
||||
#
|
||||
# Build and push to Docker Hub
|
||||
#
|
||||
@@ -730,8 +842,11 @@ jobs:
|
||||
docker-syncthing:
|
||||
name: Build and push Docker images
|
||||
runs-on: ubuntu-latest
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || github.ref == 'refs/heads/main' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/heads/release-'))
|
||||
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
|
||||
environment: docker
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
strategy:
|
||||
matrix:
|
||||
pkg:
|
||||
@@ -752,6 +867,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
@@ -791,9 +907,18 @@ jobs:
|
||||
uses: docker/login-action@v3
|
||||
if: env.DOCKER_PUSH == 'true'
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
if: env.DOCKER_PUSH == 'true'
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
@@ -805,13 +930,13 @@ jobs:
|
||||
echo Release version, pushing to :latest and version tags
|
||||
major=${version%.*.*}
|
||||
minor=${version%.*}
|
||||
tags=${{ matrix.image }}:$version,${{ matrix.image }}:$major,${{ matrix.image }}:$minor,${{ matrix.image }}:latest
|
||||
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:$major,ghcr.io/${{ matrix.image }}:$major,docker.io/${{ matrix.image }}:$minor,ghcr.io/${{ matrix.image }}:$minor,docker.io/${{ matrix.image }}:latest,ghcr.io/${{ matrix.image }}:latest
|
||||
elif [[ $version == *-rc.@([0-9]|[0-9][0-9]) ]] ; then
|
||||
echo Release candidate, pushing to :rc
|
||||
tags=${{ matrix.image }}:rc
|
||||
echo Release candidate, pushing to :rc and version tags
|
||||
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:rc,ghcr.io/${{ matrix.image }}:rc
|
||||
else
|
||||
echo Development version, pushing to :edge
|
||||
tags=${{ matrix.image }}:edge
|
||||
tags=docker.io/${{ matrix.image }}:edge,ghcr.io/${{ matrix.image }}:edge
|
||||
fi
|
||||
echo "DOCKER_TAGS=$tags" >> $GITHUB_ENV
|
||||
echo "VERSION=$version" >> $GITHUB_ENV
|
||||
|
||||
.gitignore (vendored), 1 line changed

@@ -18,3 +18,4 @@ deb
 /repos
 /proto/scripts/protoc-gen-gosyncthing
 /gui/next-gen-gui
+/compat.json
@@ -1,26 +1,39 @@
|
||||
linters-settings:
|
||||
maligned:
|
||||
suggest-new: true
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- goimports
|
||||
- cyclop
|
||||
- depguard
|
||||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- gofmt
|
||||
- scopelint
|
||||
- gocyclo
|
||||
- exhaustive
|
||||
- exhaustruct
|
||||
- funlen
|
||||
- wsl
|
||||
- gci
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gocognit
|
||||
- goconst
|
||||
- gocyclo
|
||||
- godox
|
||||
- gofmt
|
||||
- goimports
|
||||
- gomoddirectives
|
||||
- inamedparam
|
||||
- interfacebloat
|
||||
- ireturn
|
||||
- lll
|
||||
- maintidx
|
||||
- nestif
|
||||
- nonamedreturns
|
||||
- paralleltest
|
||||
- protogetter
|
||||
- scopelint
|
||||
- tagalign
|
||||
- tagliatelle
|
||||
- testpackage
|
||||
- varnamelen
|
||||
- wsl
|
||||
|
||||
service:
|
||||
golangci-lint-version: 1.21.x
|
||||
prepare:
|
||||
- rm -f go.sum # 1.12 -> 1.13 issues with QUIC-go
|
||||
- GO111MODULE=on go mod vendor
|
||||
- go run build.go assets
|
||||
issues:
|
||||
exclude-dirs:
|
||||
- internal/gen
|
||||
- cmd/dev
|
||||
- repos
|
||||
.policy.yml (new file, 99 lines)

@@ -0,0 +1,99 @@
# This is the policy-bot configuration for this repository. It controls
# which approvals are required for any given pull request. The format is
# described at https://github.com/palantir/policy-bot. The syntax of the
# policy can be verified by the bot:
# curl https://pb.syncthing.net/api/validate -X PUT -T .policy.yml

# The policy below is what is required for any pull request.
policy:
  approval:
    - subject is conventional commit
    - project metadata requires maintainer approval
    - or:
        - is approved by a syncthing contributor
        - is a translation or dependency update by a contributor
        - is a trivial change by a contributor

  # Additionally, contributors can disapprove of a PR
  disapproval:
    requires:
      teams:
        - syncthing/contributors

# The rules for the policy are described below.

approval_rules:

  # All commits (PRs before squashing) should have a valid conventional
  # commit type subject.
  - name: subject is conventional commit
    requires:
      conditions:
        title:
          matches:
            - '^(feat|fix|docs|chore|refactor|build): [a-z].+'
            - '^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+'

  # Changes to important project metadata and documentation, including this
  # policy, require signoff by a maintainer
  - name: project metadata requires maintainer approval
    if:
      changed_files:
        paths:
          - ^[^/]+\.md
          - ^\.policy\.yml
          - ^\.github/
          - ^LICENSE
    requires:
      count: 1
      teams:
        - syncthing/maintainers
    options:
      ignore_update_merges: true
      allow_contributor: true

  # Regular pull requests require approval by an active contributor
  - name: is approved by a syncthing contributor
    requires:
      count: 1
      teams:
        - syncthing/contributors
    options:
      ignore_update_merges: true
      allow_contributor: true

  # Changes to some files (translations, dependencies, compatibility) do not
  # require approval if they were proposed by a contributor and have a
  # matching commit subject
  - name: is a translation or dependency update by a contributor
    if:
      only_changed_files:
        paths:
          - ^gui/default/assets/lang/
          - ^go\.mod$
          - ^go\.sum$
          - ^compat\.yaml$
      title:
        matches:
          - '^chore\(gui\):'
          - '^build\(deps\):'
          - '^build\(compat\):'
      has_author_in:
        teams:
          - syncthing/contributors

  # If the change is small and the label "trivial" is added, we accept that
  # on trust. These PRs can be audited after the fact as appropriate.
  # Features are not trivial.
  - name: is a trivial change by a contributor
    if:
      modified_lines:
        total: "< 25"
      title:
        not_matches:
          - '^feat'
      has_labels:
        - trivial
      has_author_in:
        teams:
          - syncthing/contributors
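For illustration, the two title patterns in the "subject is conventional commit" rule above can be exercised directly. A minimal Go sketch using the exact regular expressions from the policy; the example titles are made up:

```go
package main

import (
	"fmt"
	"regexp"
)

// The two patterns from the "subject is conventional commit" rule above:
// a bare type, or a type with a parenthesised scope list, followed by a
// lower-case description.
var titlePatterns = []*regexp.Regexp{
	regexp.MustCompile(`^(feat|fix|docs|chore|refactor|build): [a-z].+`),
	regexp.MustCompile(`^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+`),
}

func acceptable(title string) bool {
	for _, re := range titlePatterns {
		if re.MatchString(title) {
			return true
		}
	}
	return false
}

func main() {
	// Made-up examples: the first two pass, the last two do not.
	for _, t := range []string{
		"fix(gui): correct folder status tooltip",
		"chore: update dependencies",
		"Fix GUI tooltip",       // no conventional commit type
		"feat(gui): Add button", // description must start lower-case
	} {
		fmt.Printf("%-42q %v\n", t, acceptable(t))
	}
}
```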
AUTHORS, 10 lines changed
@@ -20,6 +20,7 @@ Alan Pope <alan@popey.com>
|
||||
Alberto Donato <albertodonato@users.noreply.github.com>
|
||||
Aleksey Vasenev <margtu-fivt@ya.ru>
|
||||
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
|
||||
Alex Ionescu <github@ionescu.sh>
|
||||
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
|
||||
Alex Xu <alex.hello71@gmail.com>
|
||||
Alexander Graf (alex2108) <register-github@alex-graf.de>
|
||||
@@ -141,10 +142,12 @@ greatroar <61184462+greatroar@users.noreply.github.com>
|
||||
Greg <gco@jazzhaiku.com>
|
||||
guangwu <guoguangwu@magic-shield.com>
|
||||
gudvinr <gudvinr@gmail.com>
|
||||
Gusted <postmaster@gusted.xyz> <williamzijl7@hotmail.com>
|
||||
Han Boetes <han@boetes.org>
|
||||
HansK-p <42314815+HansK-p@users.noreply.github.com>
|
||||
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
|
||||
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
|
||||
Hireworks <129852174+hireworksltd@users.noreply.github.com>
|
||||
Hugo Locurcio <hugo.locurcio@hugo.pro>
|
||||
Iain Barnett <iainspeed@gmail.com>
|
||||
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
|
||||
@@ -188,6 +191,7 @@ Jörg Thalheim <Mic92@users.noreply.github.com>
|
||||
Jędrzej Kula <kula.jedrek@gmail.com>
|
||||
K.B.Dharun Krishna <kbdharunkrishna@gmail.com>
|
||||
Kalle Laine <pahakalle@protonmail.com>
|
||||
Kapil Sareen <kapilsareen584@gmail.com>
|
||||
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
|
||||
Kebin Liu <lkebin@gmail.com>
|
||||
Keith Harrison <keithh@protonmail.com>
|
||||
@@ -232,6 +236,7 @@ Matteo Ruina <matteo.ruina@gmail.com>
|
||||
Maurizio Tomasi <ziotom78@gmail.com>
|
||||
Max <github@germancoding.com>
|
||||
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
maxice8 <30738253+maxice8@users.noreply.github.com>
|
||||
MaximAL <almaximal@ya.ru>
|
||||
Maxime Thirouin <m@moox.io>
|
||||
Maximilian <maxi.rostock@outlook.de> <public@complexvector.space>
|
||||
@@ -305,7 +310,9 @@ Severin von Wnuck-Lipinski <ss7@live.de>
|
||||
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
|
||||
Simon Frei (imsodin) <freisim93@gmail.com>
|
||||
Simon Mwepu <simonmwepu@gmail.com>
|
||||
Simon Pickup <simon@pickupinfinity.com>
|
||||
Sly_tom_cat <slytomcat@mail.ru>
|
||||
Sonu Kumar Saw <31889738+dev-saw99@users.noreply.github.com>
|
||||
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
|
||||
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
|
||||
@@ -314,17 +321,20 @@ Sven Bachmann <dev@mcbachmann.de>
|
||||
Syncthing Automation <automation@syncthing.net>
|
||||
Syncthing Release Automation <release@syncthing.net>
|
||||
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
|
||||
Terrance <git@terrance.allofti.me>
|
||||
Thomas <9749173+uhthomas@users.noreply.github.com>
|
||||
Thomas Hipp <thomashipp@gmail.com>
|
||||
Tim Abell (timabell) <tim@timwise.co.uk>
|
||||
Tim Howes (timhowes) <timhowes@berkeley.edu>
|
||||
Tim Nordenfur <tim@gurka.se>
|
||||
Tobias Frölich <40638719+tobifroe@users.noreply.github.com>
|
||||
Tobias Klauser <tobias.klauser@gmail.com>
|
||||
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
|
||||
Tobias Tom (tobiastom) <t.tom@succont.de>
|
||||
Tom Jakubowski <tom@crystae.net>
|
||||
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
|
||||
Tommy Thorn <tommy-github-email@thorn.ws>
|
||||
Tommy van der Vorst <tommy-github@pixelspark.nl> <tommy@pixelspark.nl>
|
||||
Tully Robinson (tojrobinson) <tully@tojr.org>
|
||||
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
|
||||
Tyler Kropp <kropptyler@gmail.com>
|
||||
|
||||
@@ -49,6 +49,11 @@ services:
       - 22000:22000/udp # QUIC file transfers
       - 21027:21027/udp # Receive local discovery broadcasts
     restart: unless-stopped
+    healthcheck:
+      test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
+      interval: 1m
+      timeout: 10s
+      retries: 3
 ```

 ## Discovery
@@ -84,6 +89,11 @@ services:
       - /wherever/st-sync:/var/syncthing
     network_mode: host
     restart: unless-stopped
+    healthcheck:
+      test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
+      interval: 1m
+      timeout: 10s
+      retries: 3
 ```

 Be aware that syncthing alone is now in control of what interfaces and ports it
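For illustration, the healthcheck added in both examples above polls Syncthing's unauthenticated health endpoint on the GUI/API port. A minimal Go sketch of the same probe, assuming the default 127.0.0.1:8384 address used in the compose files:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

// Same probe as the compose healthcheck: GET /rest/noauth/health with a
// short timeout; a 200 response whose body mentions "OK" counts as healthy.
func main() {
	client := &http.Client{Timeout: 2 * time.Second}
	resp, err := client.Get("http://127.0.0.1:8384/rest/noauth/health")
	if err != nil {
		fmt.Println("unhealthy:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```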
README.md, 12 lines changed

@@ -82,13 +82,11 @@ build process.

 ## Signed Releases

-As of v0.10.15 and onwards, release binaries are GPG signed with the key
-D26E6ED000654A3E, available from https://syncthing.net/security/ and
-most key servers.
-
-There is also a built-in automatic upgrade mechanism (disabled in some
-distribution channels) which uses a compiled in ECDSA signature. macOS
-binaries are also properly code signed.
+Release binaries are GPG signed with the key available from
+https://syncthing.net/security/. There is also a built-in automatic
+upgrade mechanism (disabled in some distribution channels) which uses a
+compiled in ECDSA signature. macOS and Windows binaries are also
+code-signed.

 ## Documentation

buf.gen.yaml (new file, 12 lines)

@@ -0,0 +1,12 @@
version: v2
managed:
  enabled: true
  override:
    - file_option: go_package_prefix
      value: github.com/syncthing/syncthing/internal/gen
plugins:
  - remote: buf.build/protocolbuffers/go:v1.35.1
    out: .
    opt: module=github.com/syncthing/syncthing
inputs:
  - directory: proto
buf.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
version: v2
modules:
  - path: proto
    name: github.com/syncthing/syncthing
lint:
  use:
    - STANDARD
breaking:
  use:
    - WIRE_JSON
build.go, 232 lines changed
@@ -4,8 +4,8 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
//go:build tools
|
||||
// +build tools
|
||||
|
||||
package main
|
||||
|
||||
@@ -33,8 +33,9 @@ import (
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
buildpkg "github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -85,7 +86,6 @@ var targets = map[string]target{
|
||||
"all": {
|
||||
// Only valid for the "build" and "install" commands as it lacks all
|
||||
// the archive creation stuff. buildPkgs gets filled out in init()
|
||||
tags: []string{"purego"},
|
||||
},
|
||||
"syncthing": {
|
||||
// The default target for "build", "install", "tar", "zip", "deb", etc.
|
||||
@@ -96,41 +96,40 @@ var targets = map[string]target{
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/syncthing"},
|
||||
binaryName: "syncthing", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
// All files from etc/ and extra/ added automatically in init().
|
||||
},
|
||||
systemdService: "syncthing@*.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
|
||||
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
|
||||
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
|
||||
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
|
||||
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
|
||||
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
|
||||
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
|
||||
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
|
||||
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
|
||||
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
|
||||
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0644},
|
||||
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0644},
|
||||
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0644},
|
||||
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0644},
|
||||
{src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0644},
|
||||
{src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0644},
|
||||
{src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0644},
|
||||
{src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0644},
|
||||
{src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0644},
|
||||
{src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0o644},
|
||||
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0o644},
|
||||
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0o644},
|
||||
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0o644},
|
||||
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0o644},
|
||||
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0o644},
|
||||
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0o644},
|
||||
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0o644},
|
||||
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0o644},
|
||||
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0o644},
|
||||
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0o644},
|
||||
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0o644},
|
||||
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0o644},
|
||||
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0o644},
|
||||
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0o644},
|
||||
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0o644},
|
||||
{src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0o644},
|
||||
{src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0o644},
|
||||
{src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0o644},
|
||||
{src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0o644},
|
||||
{src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0o644},
|
||||
{src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0o644},
|
||||
},
|
||||
},
|
||||
"stdiscosrv": {
|
||||
@@ -142,23 +141,22 @@ var targets = map[string]target{
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stdiscosrv"},
|
||||
binaryName: "stdiscosrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
},
|
||||
systemdService: "stdiscosrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0o644},
|
||||
},
|
||||
tags: []string{"purego"},
|
||||
},
|
||||
"strelaysrv": {
|
||||
name: "strelaysrv",
|
||||
@@ -169,61 +167,47 @@ var targets = map[string]target{
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaysrv"},
|
||||
binaryName: "strelaysrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
},
|
||||
systemdService: "strelaysrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0o644},
|
||||
},
|
||||
},
|
||||
"strelaypoolsrv": {
|
||||
name: "strelaypoolsrv",
|
||||
debname: "syncthing-relaypoolsrv",
|
||||
debdeps: []string{"libc6"},
|
||||
description: "Syncthing Relay Pool Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv",
|
||||
},
|
||||
"stupgrades": {
|
||||
name: "stupgrades",
|
||||
description: "Syncthing Upgrade Check Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stupgrades"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stupgrades"},
|
||||
binaryName: "stupgrades",
|
||||
},
|
||||
"stcrashreceiver": {
|
||||
name: "stcrashreceiver",
|
||||
description: "Syncthing Crash Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stcrashreceiver"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stcrashreceiver"},
|
||||
binaryName: "stcrashreceiver",
|
||||
},
|
||||
"ursrv": {
|
||||
name: "ursrv",
|
||||
description: "Syncthing Usage Reporting Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/ursrv"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/ursrv"},
|
||||
binaryName: "ursrv",
|
||||
},
|
||||
}
|
||||
@@ -232,15 +216,11 @@ func initTargets() {
|
||||
all := targets["all"]
|
||||
pkgs, _ := filepath.Glob("cmd/*")
|
||||
for _, pkg := range pkgs {
|
||||
pkg = filepath.Base(pkg)
|
||||
if strings.HasPrefix(pkg, ".") {
|
||||
// ignore dotfiles
|
||||
if files, err := filepath.Glob(pkg + "/*.go"); err != nil || len(files) == 0 {
|
||||
// No go files in the directory
|
||||
continue
|
||||
}
|
||||
if noupgrade && pkg == "stupgrades" {
|
||||
continue
|
||||
}
|
||||
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/cmd/%s", pkg))
|
||||
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/%s", pkg))
|
||||
}
|
||||
targets["all"] = all
|
||||
|
||||
@@ -248,13 +228,13 @@ func initTargets() {
|
||||
// and "extra" dirs.
|
||||
syncthingPkg := targets["syncthing"]
|
||||
for _, file := range listFiles("etc") {
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
|
||||
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0o644})
|
||||
}
|
||||
targets["syncthing"] = syncthingPkg
|
||||
}
|
||||
@@ -344,9 +324,11 @@ func runCommand(cmd string, target target) {
|
||||
|
||||
case "tar":
|
||||
buildTar(target, tags)
|
||||
writeCompatJSON()
|
||||
|
||||
case "zip":
|
||||
buildZip(target, tags)
|
||||
writeCompatJSON()
|
||||
|
||||
case "deb":
|
||||
buildDeb(target)
|
||||
@@ -407,7 +389,6 @@ func parseFlags() {
|
||||
func test(tags []string, pkgs ...string) {
|
||||
lazyRebuildAssets()
|
||||
|
||||
tags = append(tags, "purego")
|
||||
args := []string{"test", "-tags", strings.Join(tags, " ")}
|
||||
if long {
|
||||
timeout = longTimeout
|
||||
@@ -441,7 +422,7 @@ func bench(tags []string, pkgs ...string) {
|
||||
func integration(bench bool) {
|
||||
lazyRebuildAssets()
|
||||
args := []string{"test", "-v", "-timeout", "60m", "-tags"}
|
||||
tags := "purego,integration"
|
||||
tags := "integration"
|
||||
if bench {
|
||||
tags += ",benchmark"
|
||||
}
|
||||
@@ -752,7 +733,7 @@ func shouldBuildSyso(dir string) (string, error) {
|
||||
}
|
||||
|
||||
jsonPath := filepath.Join(dir, "versioninfo.json")
|
||||
err = os.WriteFile(jsonPath, bs, 0644)
|
||||
err = os.WriteFile(jsonPath, bs, 0o644)
if err != nil {
return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
}
@@ -811,7 +792,7 @@ func copyFile(src, dst string, perm os.FileMode) error {
}

copy:
os.MkdirAll(filepath.Dir(dst), 0777)
os.MkdirAll(filepath.Dir(dst), 0o777)
if err := os.WriteFile(dst, in, perm); err != nil {
return err
}
@@ -836,12 +817,12 @@ func listFiles(dir string) []string {

func rebuildAssets() {
os.Setenv("SOURCE_DATE_EPOCH", fmt.Sprint(buildStamp()))
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto")
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto")
}

func lazyRebuildAssets() {
shouldRebuild := shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") ||
shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui")
shouldRebuildAssets("cmd/infra/strelaypoolsrv/auto/gui.files.go", "cmd/infra/strelaypoolsrv/gui")

if withNextGenGUI {
shouldRebuild = buildNextGenGUI() || shouldRebuild
@@ -871,7 +852,7 @@ func buildNextGenGUI() bool {
for _, src := range listFiles("next-gen-gui/dist") {
rel, _ := filepath.Rel("next-gen-gui/dist", src)
dst := filepath.Join("gui", rel)
if err := copyFile(src, dst, 0644); err != nil {
if err := copyFile(src, dst, 0o644); err != nil {
fmt.Println("copy:", err)
os.Exit(1)
}
@@ -927,22 +908,9 @@ func updateDependencies() {
}

func proto() {
pv := protobufVersion()
repo := "https://github.com/gogo/protobuf.git"
path := filepath.Join("repos", "protobuf")

runPrint(goCmd, "install", fmt.Sprintf("github.com/gogo/protobuf/protoc-gen-gogofast@%v", pv))
os.MkdirAll("repos", 0755)

if _, err := os.Stat(path); err != nil {
runPrint("git", "clone", repo, path)
} else {
runPrintInDir(path, "git", "fetch")
}
runPrintInDir(path, "git", "checkout", pv)

runPrint(goCmd, "generate", "github.com/syncthing/syncthing/cmd/stdiscosrv")
runPrint(goCmd, "generate", "proto/generate.go")
// buf needs to be installed
// https://buf.build/docs/installation/
runPrint("buf", "generate")
}

func testmocks() {
@@ -1429,7 +1397,7 @@ func windowsCodesign(file string) {
log.Println("Codesign: signing failed: creating temp file:", err)
return
}
_ = f.Chmod(0600) // best effort remove other users' access
_ = f.Chmod(0o600) // best effort remove other users' access
defer os.Remove(f.Name())
if _, err := f.Write(bs); err != nil {
log.Println("Codesign: signing failed: writing temp file:", err)
@@ -1485,14 +1453,6 @@ func (t target) BinaryName() string {
return t.binaryName
}

func protobufVersion() string {
bs, err := runError(goCmd, "list", "-f", "{{.Version}}", "-m", "github.com/gogo/protobuf")
if err != nil {
log.Fatal("Getting protobuf version:", err)
}
return string(bs)
}

func currentAndLatestVersions(n int) ([]string, error) {
bs, err := runError("git", "tag", "--sort", "taggerdate")
if err != nil {
@@ -1559,3 +1519,29 @@ func nextPatchVersion(ver string) string {
digits[len(digits)-1] = strconv.Itoa(n + 1)
return strings.Join(digits, ".")
}

func writeCompatJSON() {
bs, err := os.ReadFile("compat.yaml")
if err != nil {
log.Fatal("Reading compat.yaml:", err)
}

var entries []upgrade.ReleaseCompatibility
if err := yaml.Unmarshal(bs, &entries); err != nil {
log.Fatal("Parsing compat.yaml:", err)
}

rt := runtime.Version()
for _, e := range entries {
if !strings.HasPrefix(rt, e.Runtime) {
continue
}
bs, _ := json.MarshalIndent(e, "", " ")
if err := os.WriteFile("compat.json", bs, 0o644); err != nil {
log.Fatal("Writing compat.json:", err)
}
return
}

log.Fatalf("runtime %v not found in compat.yaml", rt)
}
2
build.sh
@@ -26,7 +26,7 @@ case "${1:-default}" in
build weblate
pushd man ; ./refresh.sh ; popd
git add -A gui man AUTHORS
git commit -m 'gui, man, authors: Update docs, translations, and contributors'
git commit -m 'chore(gui, man, authors): update docs, translations, and contributors'
;;

*)
@@ -7,6 +7,7 @@
package main

import (
"crypto/sha256"
"errors"
"flag"
"fmt"
@@ -16,7 +17,6 @@ import (
"path/filepath"

_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256"
)

func main() {
@@ -15,6 +15,9 @@ import (
"strings"
"time"

"google.golang.org/protobuf/proto"

"github.com/syncthing/syncthing/internal/gen/discoproto"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/beacon"
"github.com/syncthing/syncthing/lib/discover"
@@ -75,20 +78,21 @@ func recv(bc beacon.Interface) {
continue
}

var ann discover.Announce
ann.Unmarshal(data[4:])
var ann discoproto.Announce
proto.Unmarshal(data[4:], &ann)

if ann.ID == myID {
id, _ := protocol.DeviceIDFromBytes(ann.Id)
if id == myID {
// This is one of our own fake packets, don't print it.
continue
}

// Print announcement details for the first packet from a given
// device ID and source address, or if -all was given.
key := ann.ID.String() + src.String()
key := id.String() + src.String()
if all || !seen[key] {
log.Printf("Announcement from %v\n", src)
log.Printf(" %v at %s\n", ann.ID, strings.Join(ann.Addresses, ", "))
log.Printf(" %v at %s\n", id, strings.Join(ann.Addresses, ", "))
seen[key] = true
}
}
@@ -96,11 +100,11 @@ func recv(bc beacon.Interface) {

// sends fake discovery announcements once every second
func send(bc beacon.Interface) {
ann := discover.Announce{
ID: myID,
ann := &discoproto.Announce{
Id: myID[:],
Addresses: []string{"tcp://fake.example.com:12345"},
}
bs, _ := ann.Marshal()
bs, _ := proto.Marshal(ann)

for {
bc.Send(bs)
@@ -7,6 +7,7 @@
package main

import (
"crypto/sha256"
"flag"
"fmt"
"io"
@@ -14,7 +15,6 @@ import (
"time"

_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256"
)

func main() {
@@ -14,6 +14,7 @@ package main

import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
@@ -21,25 +22,29 @@ import (
"net/http"
"os"
"path/filepath"
"regexp"
"strings"

"github.com/alecthomas/kong"
raven "github.com/getsentry/raven-go"
"github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/ur"
)

const maxRequestSize = 1 << 20 // 1 MiB

type cli struct {
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
MetricsListen string `help:"HTTP listen address for metrics" default:":8081" env:"METRICS_LISTEN_ADDRESS"`
IngorePatterns string `help:"File containing ignore patterns (regexp)" env:"IGNORE_PATTERNS" type:"existingfile"`
}

func main() {
@@ -62,19 +67,38 @@ func main() {
}
go ss.Serve(context.Background())

var ip *ignorePatterns
if params.IngorePatterns != "" {
var err error
ip, err = loadIgnorePatterns(params.IngorePatterns)
if err != nil {
log.Fatalf("Failed to load ignore patterns: %v", err)
}
}

cr := &crashReceiver{
store: ds,
sentry: ss,
ignore: ip,
}

mux.Handle("/", cr)
mux.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte("OK"))
})
mux.Handle("/metrics", promhttp.Handler())

if params.MetricsListen != "" {
mmux := http.NewServeMux()
mmux.Handle("/metrics", promhttp.Handler())
go func() {
if err := http.ListenAndServe(params.MetricsListen, mmux); err != nil {
log.Fatalln("HTTP serve metrics:", err)
}
}()
}

if params.DSN != "" {
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports")))
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports"), ip))
}

log.SetOutput(os.Stdout)
@@ -83,7 +107,7 @@ func main() {
}
}

func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) {
func handleFailureFn(dsn, failureDir string, ignore *ignorePatterns) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) {
result := "failure"
defer func() {
@@ -98,6 +122,11 @@ func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *ht
return
}

if ignore.match(bs) {
result = "ignored"
return
}

var reports []ur.FailureReport
err = json.Unmarshal(bs, &reports)
if err != nil {
@@ -110,7 +139,7 @@ func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *ht
return
}

version, err := parseVersion(reports[0].Version)
version, err := build.ParseVersion(reports[0].Version)
if err != nil {
http.Error(w, err.Error(), 400)
return
@@ -158,3 +187,42 @@ func saveFailureWithGoroutines(data ur.FailureData, failureDir string) (string,
}
return reportServer + path, nil
}

type ignorePatterns struct {
patterns []*regexp.Regexp
}

func loadIgnorePatterns(path string) (*ignorePatterns, error) {
bs, err := os.ReadFile(path)
if err != nil {
return nil, err
}

var patterns []*regexp.Regexp
for _, line := range strings.Split(string(bs), "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
re, err := regexp.Compile(line)
if err != nil {
return nil, err
}
patterns = append(patterns, re)
}

log.Printf("Loaded %d ignore patterns", len(patterns))
return &ignorePatterns{patterns: patterns}, nil
}

func (i *ignorePatterns) match(report []byte) bool {
if i == nil {
return false
}
for _, re := range i.patterns {
if re.Match(report) {
return true
}
}
return false
}
@@ -18,6 +18,7 @@ import (
|
||||
|
||||
raven "github.com/getsentry/raven-go"
|
||||
"github.com/maruel/panicparse/v2/stack"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
)
|
||||
|
||||
const reportServer = "https://crash.syncthing.net/report/"
|
||||
@@ -105,7 +106,7 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
|
||||
return nil, errors.New("no first line")
|
||||
}
|
||||
|
||||
version, err := parseVersion(string(parts[0]))
|
||||
version, err := build.ParseVersion(string(parts[0]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -143,12 +144,12 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
|
||||
}
|
||||
|
||||
// Lock the source code loader to the version we are processing here.
|
||||
if version.commit != "" {
|
||||
if version.Commit != "" {
|
||||
// We have a commit hash, so we know exactly which source to use
|
||||
loader.LockWithVersion(version.commit)
|
||||
} else if strings.HasPrefix(version.tag, "v") {
|
||||
loader.LockWithVersion(version.Commit)
|
||||
} else if strings.HasPrefix(version.Tag, "v") {
|
||||
// Lets hope the tag is close enough
|
||||
loader.LockWithVersion(version.tag)
|
||||
loader.LockWithVersion(version.Tag)
|
||||
} else {
|
||||
// Last resort
|
||||
loader.LockWithVersion("main")
|
||||
@@ -215,106 +216,26 @@ func crashReportFingerprint(message string) []string {
|
||||
return []string{"{{ default }}", message}
|
||||
}
|
||||
|
||||
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
|
||||
// or, somewhere along the way the "+" in the version tag disappeared:
|
||||
// syncthing v1.23.7-dev.26.gdf7b56ae.dirty-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]
|
||||
var (
|
||||
longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
|
||||
gitExtraRE = regexp.MustCompile(`\.\d+\.g[0-9a-f]+`) // ".1.g6aaae618"
|
||||
gitExtraSepRE = regexp.MustCompile(`[.-]`) // dot or dash
|
||||
)
|
||||
|
||||
type version struct {
|
||||
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
|
||||
tag string // "v1.1.4-rc.1"
|
||||
commit string // "6aaae618", blank when absent
|
||||
codename string // "Erbium Earthworm"
|
||||
runtime string // "go1.12.5"
|
||||
goos string // "darwin"
|
||||
goarch string // "amd64"
|
||||
builder string // "jb@kvin.kastelo.net"
|
||||
extra []string // "foo", "bar"
|
||||
}
|
||||
|
||||
func (v version) environment() string {
|
||||
if v.commit != "" {
|
||||
return "Development"
|
||||
}
|
||||
if strings.Contains(v.tag, "-rc.") {
|
||||
return "Candidate"
|
||||
}
|
||||
if strings.Contains(v.tag, "-") {
|
||||
return "Beta"
|
||||
}
|
||||
return "Stable"
|
||||
}
|
||||
|
||||
func parseVersion(line string) (version, error) {
|
||||
m := longVersionRE.FindStringSubmatch(line)
|
||||
if len(m) == 0 {
|
||||
return version{}, errors.New("unintelligeble version string")
|
||||
}
|
||||
|
||||
v := version{
|
||||
version: m[1],
|
||||
codename: m[2],
|
||||
runtime: m[3],
|
||||
goos: m[4],
|
||||
goarch: m[5],
|
||||
builder: m[6],
|
||||
}
|
||||
|
||||
// Split the version tag into tag and commit. This is old style
|
||||
// v1.2.3-something.4+11-g12345678 or newer with just dots
|
||||
// v1.2.3-something.4.11.g12345678 or v1.2.3-dev.11.g12345678.
|
||||
parts := []string{v.version}
|
||||
if strings.Contains(v.version, "+") {
|
||||
parts = strings.Split(v.version, "+")
|
||||
} else {
|
||||
idxs := gitExtraRE.FindStringIndex(v.version)
|
||||
if len(idxs) > 0 {
|
||||
parts = []string{v.version[:idxs[0]], v.version[idxs[0]+1:]}
|
||||
}
|
||||
}
|
||||
v.tag = parts[0]
|
||||
if len(parts) > 1 {
|
||||
fields := gitExtraSepRE.Split(parts[1], -1)
|
||||
if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
|
||||
v.commit = fields[1][1:]
|
||||
}
|
||||
}
|
||||
|
||||
if len(m) >= 8 && m[7] != "" {
|
||||
tags := strings.Split(m[7], ",")
|
||||
for i := range tags {
|
||||
tags[i] = strings.TrimSpace(tags[i])
|
||||
}
|
||||
v.extra = tags
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func packet(version version, reportType string) *raven.Packet {
|
||||
func packet(version build.VersionParts, reportType string) *raven.Packet {
|
||||
pkt := &raven.Packet{
|
||||
Platform: "go",
|
||||
Release: version.tag,
|
||||
Environment: version.environment(),
|
||||
Release: version.Tag,
|
||||
Environment: version.Environment(),
|
||||
Tags: raven.Tags{
|
||||
raven.Tag{Key: "version", Value: version.version},
|
||||
raven.Tag{Key: "tag", Value: version.tag},
|
||||
raven.Tag{Key: "codename", Value: version.codename},
|
||||
raven.Tag{Key: "runtime", Value: version.runtime},
|
||||
raven.Tag{Key: "goos", Value: version.goos},
|
||||
raven.Tag{Key: "goarch", Value: version.goarch},
|
||||
raven.Tag{Key: "builder", Value: version.builder},
|
||||
raven.Tag{Key: "version", Value: version.Version},
|
||||
raven.Tag{Key: "tag", Value: version.Tag},
|
||||
raven.Tag{Key: "codename", Value: version.Codename},
|
||||
raven.Tag{Key: "runtime", Value: version.Runtime},
|
||||
raven.Tag{Key: "goos", Value: version.GOOS},
|
||||
raven.Tag{Key: "goarch", Value: version.GOARCH},
|
||||
raven.Tag{Key: "builder", Value: version.Builder},
|
||||
raven.Tag{Key: "report_type", Value: reportType},
|
||||
},
|
||||
}
|
||||
if version.commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
|
||||
if version.Commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.Commit})
|
||||
}
|
||||
for _, tag := range version.extra {
|
||||
for _, tag := range version.Extra {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
|
||||
}
|
||||
return pkt
|
||||
@@ -12,66 +12,6 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseVersion(t *testing.T) {
|
||||
cases := []struct {
|
||||
longVersion string
|
||||
parsed version
|
||||
}{
|
||||
{
|
||||
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC`,
|
||||
parsed: version{
|
||||
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
|
||||
tag: "v1.1.4-rc.1",
|
||||
commit: "6aaae618",
|
||||
codename: "Erbium Earthworm",
|
||||
runtime: "go1.12.5",
|
||||
goos: "darwin",
|
||||
goarch: "amd64",
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
},
|
||||
},
|
||||
{
|
||||
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
|
||||
parsed: version{
|
||||
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
|
||||
tag: "v1.1.4-rc.1",
|
||||
commit: "6aaae618",
|
||||
codename: "Erbium Earthworm",
|
||||
runtime: "go1.12.5",
|
||||
goos: "darwin",
|
||||
goarch: "amd64",
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
extra: []string{"foo", "bar"},
|
||||
},
|
||||
},
|
||||
{
|
||||
longVersion: `syncthing v1.23.7-dev.26.gdf7b56ae-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]`,
|
||||
parsed: version{
|
||||
version: "v1.23.7-dev.26.gdf7b56ae-stversionextra",
|
||||
tag: "v1.23.7-dev",
|
||||
commit: "df7b56ae",
|
||||
codename: "Fermium Flea",
|
||||
runtime: "go1.20.5",
|
||||
goos: "darwin",
|
||||
goarch: "arm64",
|
||||
builder: "jb@ok.kastelo.net",
|
||||
extra: []string{"Some Wrapper", "purego", "stnoupgrade"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
v, err := parseVersion(tc.longVersion)
|
||||
if err != nil {
|
||||
t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
|
||||
continue
|
||||
}
|
||||
if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
|
||||
t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseReport(t *testing.T) {
|
||||
bs, err := os.ReadFile("_testdata/panic.log")
|
||||
if err != nil {
|
||||
@@ -71,7 +71,6 @@ func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]b
|
||||
|
||||
url := urlPrefix + l.version + filename[idx:]
|
||||
resp, err := l.client.Get(url)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Loading source:", err)
|
||||
return nil, 0
|
||||
@@ -12,11 +12,16 @@ import (
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type crashReceiver struct {
|
||||
store *diskStore
|
||||
sentry *sentryService
|
||||
ignore *ignorePatterns
|
||||
|
||||
ignoredMut sync.RWMutex
|
||||
ignored map[string]struct{}
|
||||
}
|
||||
|
||||
func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
@@ -64,6 +69,12 @@ func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http
|
||||
// serveHead responds to HEAD requests by checking if the named report
|
||||
// already exists in the system.
|
||||
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) {
|
||||
r.ignoredMut.RLock()
|
||||
_, ignored := r.ignored[reportID]
|
||||
r.ignoredMut.RUnlock()
|
||||
if ignored {
|
||||
return // found
|
||||
}
|
||||
if !r.store.Exists(reportID) {
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
}
|
||||
@@ -76,6 +87,15 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
|
||||
metricCrashReportsTotal.WithLabelValues(result).Inc()
|
||||
}()
|
||||
|
||||
r.ignoredMut.RLock()
|
||||
_, ignored := r.ignored[reportID]
|
||||
r.ignoredMut.RUnlock()
|
||||
if ignored {
|
||||
result = "ignored_cached"
|
||||
io.Copy(io.Discard, req.Body)
|
||||
return // found
|
||||
}
|
||||
|
||||
// Read at most maxRequestSize of report data.
|
||||
log.Println("Receiving report", reportID)
|
||||
lr := io.LimitReader(req.Body, maxRequestSize)
|
||||
@@ -86,6 +106,17 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
|
||||
return
|
||||
}
|
||||
|
||||
if r.ignore.match(bs) {
|
||||
r.ignoredMut.Lock()
|
||||
if r.ignored == nil {
|
||||
r.ignored = make(map[string]struct{})
|
||||
}
|
||||
r.ignored[reportID] = struct{}{}
|
||||
r.ignoredMut.Unlock()
|
||||
result = "ignored"
|
||||
return
|
||||
}
|
||||
|
||||
result = "success"
|
||||
|
||||
// Store the report
|
||||
@@ -9,14 +9,13 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
// userIDFor returns a string we can use as the user ID for the purpose of
|
||||
@@ -53,5 +52,5 @@ func compressAndWrite(bs []byte, fullPath string) error {
|
||||
gw.Close()
|
||||
|
||||
// Create an output file with the compressed report
|
||||
return os.WriteFile(fullPath, buf.Bytes(), 0644)
|
||||
return os.WriteFile(fullPath, buf.Bytes(), 0o644)
|
||||
}
|
||||
@@ -21,4 +21,3 @@ See `relaypoolsrv -help` for configuration options.
|
||||
|
||||
[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).
|
||||
|
||||
[lib/pq](https://github.com/lib/pq), Copyright (C) 2011-2013 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany.
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../../script/genassets.go -o gui.files.go ../gui
|
||||
//go:generate go run ../../../../script/genassets.go -o gui.files.go ../gui
|
||||
|
||||
// Package auto contains auto generated files for web assets.
|
||||
package auto
|
||||
@@ -259,7 +259,7 @@
|
||||
return a.value > b.value ? 1 : -1;
|
||||
}
|
||||
|
||||
$http.get("/endpoint").then(function(response) {
|
||||
$http.get("/endpoint/full").then(function(response) {
|
||||
$scope.relays = response.data.relays;
|
||||
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
@@ -338,7 +338,7 @@
|
||||
relay.showMarker = function() {
|
||||
relay.marker.openPopup();
|
||||
}
|
||||
|
||||
|
||||
relay.hideMarker = function() {
|
||||
relay.marker.closePopup();
|
||||
}
|
||||
@@ -347,7 +347,7 @@
|
||||
|
||||
function addCircleToMap(relay) {
|
||||
console.log(relay.location.latitude)
|
||||
L.circle([relay.location.latitude, relay.location.longitude],
|
||||
L.circle([relay.location.latitude, relay.location.longitude],
|
||||
{
|
||||
radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000,
|
||||
color: "FF0000",
|
||||
@@ -23,13 +23,12 @@ import (
|
||||
lru "github.com/hashicorp/golang-lru/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/lib/assets"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/geoip"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
@@ -51,6 +50,10 @@ type relay struct {
|
||||
StatsRetrieved time.Time `json:"statsRetrieved"`
|
||||
}
|
||||
|
||||
type relayShort struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
type stats struct {
|
||||
StartTime time.Time `json:"startTime"`
|
||||
UptimeSeconds int `json:"uptimeSeconds"`
|
||||
@@ -95,6 +98,7 @@ var (
|
||||
testCert tls.Certificate
|
||||
knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
|
||||
listen = ":80"
|
||||
metricsListen = ":8081"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
@@ -125,6 +129,7 @@ func main() {
|
||||
log.SetFlags(log.Lshortfile)
|
||||
|
||||
flag.StringVar(&listen, "listen", listen, "Listen address")
|
||||
flag.StringVar(&metricsListen, "metrics-listen", metricsListen, "Metrics listen address")
|
||||
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
|
||||
@@ -218,15 +223,40 @@ func main() {
|
||||
log.Fatalln("listen:", err)
|
||||
}
|
||||
|
||||
handler := http.NewServeMux()
|
||||
handler.HandleFunc("/", handleAssets)
|
||||
handler.Handle("/endpoint", httpcache.SinglePath(http.HandlerFunc(handleRequest), 15*time.Second))
|
||||
handler.HandleFunc("/metrics", handleMetrics)
|
||||
if metricsListen != "" {
|
||||
mmux := http.NewServeMux()
|
||||
mmux.HandleFunc("/metrics", handleMetrics)
|
||||
go func() {
|
||||
if err := http.ListenAndServe(metricsListen, mmux); err != nil {
|
||||
log.Fatalln("HTTP serve metrics:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
getMux := http.NewServeMux()
|
||||
getMux.HandleFunc("/", handleAssets)
|
||||
getMux.HandleFunc("/endpoint", withAPIMetrics(handleEndpointShort))
|
||||
getMux.HandleFunc("/endpoint/full", withAPIMetrics(handleEndpointFull))
|
||||
|
||||
postMux := http.NewServeMux()
|
||||
postMux.HandleFunc("/endpoint", withAPIMetrics(handleRegister))
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case http.MethodGet, http.MethodHead, http.MethodOptions:
|
||||
getMux.ServeHTTP(w, r)
|
||||
case http.MethodPost:
|
||||
postMux.ServeHTTP(w, r)
|
||||
default:
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
})
|
||||
|
||||
srv := http.Server{
|
||||
Handler: handler,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
}
|
||||
srv.SetKeepAlivesEnabled(false)
|
||||
|
||||
err = srv.Serve(listener)
|
||||
if err != nil {
|
||||
@@ -260,39 +290,24 @@ func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
assets.Serve(w, r, as)
|
||||
}
|
||||
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
||||
|
||||
w = NewLoggingResponseWriter(w)
|
||||
defer func() {
|
||||
timer.ObserveDuration()
|
||||
lw := w.(*loggingResponseWriter)
|
||||
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
|
||||
if ipHeader != "" {
|
||||
hdr := r.Header.Get(ipHeader)
|
||||
fields := strings.Split(hdr, ",")
|
||||
if len(fields) > 0 {
|
||||
r.RemoteAddr = strings.TrimSpace(fields[len(fields)-1])
|
||||
}
|
||||
}
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
handlePostRequest(w, r)
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Unhandled HTTP method", r.Method)
|
||||
}
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
func withAPIMetrics(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
||||
w = NewLoggingResponseWriter(w)
|
||||
defer func() {
|
||||
timer.ObserveDuration()
|
||||
lw := w.(*loggingResponseWriter)
|
||||
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
next(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
|
||||
// handleEndpointFull returns the relay list with full metadata and
|
||||
// statistics. Large, and expensive.
|
||||
func handleEndpointFull(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
rw.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
|
||||
mut.RLock()
|
||||
relays := make([]*relay, len(permanentRelays)+len(knownRelays))
|
||||
@@ -300,17 +315,38 @@ func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
|
||||
copy(relays[n:], knownRelays)
|
||||
mut.RUnlock()
|
||||
|
||||
// Shuffle
|
||||
rand.Shuffle(relays)
|
||||
|
||||
_ = json.NewEncoder(rw).Encode(map[string][]*relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
// handleEndpointShort returns the relay list with only the URL.
|
||||
func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
rw.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
|
||||
mut.RLock()
|
||||
relays := make([]relayShort, 0, len(permanentRelays)+len(knownRelays))
|
||||
for _, r := range append(permanentRelays, knownRelays...) {
|
||||
relays = append(relays, relayShort{URL: slimURL(r.URL)})
|
||||
}
|
||||
mut.RUnlock()
|
||||
|
||||
_ = json.NewEncoder(rw).Encode(map[string][]relayShort{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
func handleRegister(w http.ResponseWriter, r *http.Request) {
|
||||
// Get the IP address of the client
|
||||
rhost := r.RemoteAddr
|
||||
if ipHeader != "" {
|
||||
hdr := r.Header.Get(ipHeader)
|
||||
fields := strings.Split(hdr, ",")
|
||||
if len(fields) > 0 {
|
||||
rhost = strings.TrimSpace(fields[len(fields)-1])
|
||||
}
|
||||
}
|
||||
if host, _, err := net.SplitHostPort(rhost); err == nil {
|
||||
rhost = host
|
||||
}
|
||||
@@ -660,3 +696,16 @@ func (b *errorTracker) IsBlocked(host string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func slimURL(u string) string {
|
||||
p, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return u
|
||||
}
|
||||
newQuery := url.Values{}
|
||||
if id := p.Query().Get("id"); id != "" {
|
||||
newQuery.Set("id", id)
|
||||
}
|
||||
p.RawQuery = newQuery.Encode()
|
||||
return p.String()
|
||||
}
|
||||
@@ -42,7 +42,7 @@ func TestHandleGetRequest(t *testing.T) {
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
w.Body = new(bytes.Buffer)
|
||||
handleGetRequest(w, httptest.NewRequest("GET", "/", nil))
|
||||
handleEndpointFull(w, httptest.NewRequest("GET", "/", nil))
|
||||
|
||||
result := make(map[string][]*relay)
|
||||
err := json.NewDecoder(w.Body).Decode(&result)
|
||||
@@ -92,3 +92,18 @@ func TestCanonicalizeQueryValues(t *testing.T) {
|
||||
t.Errorf("expected %q, got %q", exp, str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlimURL(t *testing.T) {
|
||||
cases := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{"http://example.com/", "http://example.com/"},
|
||||
{"relay://192.0.2.42:22067/?globalLimitBps=0&id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M&networkTimeout=2m0s&pingInterval=1m0s&providedBy=Test&sessionLimitBps=0&statusAddr=%3A22070", "relay://192.0.2.42:22067/?id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M"},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if got := slimURL(c.in); got != c.out {
|
||||
t.Errorf("expected %q, got %q", c.out, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,27 +6,12 @@ import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
|
||||
func init() {
|
||||
processCollectorOpts := collectors.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_relaypoolsrv",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
},
|
||||
}
|
||||
|
||||
prometheus.MustRegister(
|
||||
collectors.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
statusClient = http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
351
cmd/infra/stupgrades/main.go
Normal file
@@ -0,0 +1,351 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
type cli struct {
|
||||
Listen string `default:":8080" help:"Listen address"`
|
||||
MetricsListen string `default:":8082" help:"Listen address for metrics"`
|
||||
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
|
||||
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
|
||||
CacheTime time.Duration `default:"15m" help:"Cache time"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
|
||||
Level: slog.LevelInfo,
|
||||
})))
|
||||
|
||||
if err := server(¶ms); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func server(params *cli) error {
|
||||
if params.MetricsListen != "" {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
metricsListen, err := net.Listen("tcp", params.MetricsListen)
|
||||
if err != nil {
|
||||
return fmt.Errorf("metrics: %w", err)
|
||||
}
|
||||
slog.Info("Metrics listener started", "addr", params.MetricsListen)
|
||||
go func() {
|
||||
if err := http.Serve(metricsListen, mux); err != nil {
|
||||
slog.Warn("Metrics server returned", "error", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
cache := &cachedReleases{url: params.URL}
|
||||
if err := cache.Update(context.Background()); err != nil {
|
||||
return fmt.Errorf("initial cache update: %w", err)
|
||||
} else {
|
||||
slog.Info("Initial cache update done")
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range time.NewTicker(params.CacheTime).C {
|
||||
slog.Info("Refreshing cached releases", "url", params.URL)
|
||||
if err := cache.Update(context.Background()); err != nil {
|
||||
slog.Error("Failed to refresh cached releases", "url", params.URL, "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ghRels := &githubReleases{cache: cache}
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/ping", ghRels.servePing)
|
||||
mux.HandleFunc("/meta.json", ghRels.serveReleases)
|
||||
|
||||
for _, fwd := range params.Forward {
|
||||
path, url, ok := strings.Cut(fwd, "->")
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid forward: %q", fwd)
|
||||
}
|
||||
slog.Info("Forwarding", "from", path, "to", url)
|
||||
name := strings.ReplaceAll(path, "/", "_")
|
||||
mux.Handle(path, httpcache.SinglePath(&proxy{name: name, url: url}, params.CacheTime))
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: params.Listen,
|
||||
Handler: mux,
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
srv.SetKeepAlivesEnabled(false)
|
||||
|
||||
srvListener, err := net.Listen("tcp", params.Listen)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listen: %w", err)
|
||||
}
|
||||
slog.Info("Main listener started", "addr", params.Listen)
|
||||
|
||||
return srv.Serve(srvListener)
|
||||
}
|
||||
|
||||
type githubReleases struct {
|
||||
cache *cachedReleases
|
||||
}
|
||||
|
||||
func (p *githubReleases) servePing(w http.ResponseWriter, req *http.Request) {
|
||||
rels := p.cache.Releases()
|
||||
|
||||
if len(rels) == 0 {
|
||||
http.Error(w, "No releases available", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Syncthing-Num-Releases", strconv.Itoa(len(rels)))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (p *githubReleases) serveReleases(w http.ResponseWriter, req *http.Request) {
|
||||
rels := p.cache.Releases()
|
||||
|
||||
ua := req.Header.Get("User-Agent")
|
||||
osv := req.Header.Get("Syncthing-Os-Version")
|
||||
if ua != "" && osv != "" {
|
||||
// We should determine the compatibility of the releases.
|
||||
rels = filterForCompabitility(rels, ua, osv)
|
||||
} else {
|
||||
metricFilterCalls.WithLabelValues("no-ua-or-osversion").Inc()
|
||||
}
|
||||
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Vary", "User-Agent, Syncthing-Os-Version")
|
||||
_ = json.NewEncoder(w).Encode(rels)
|
||||
|
||||
metricUpgradeChecks.Inc()
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
name string
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
metricHTTPRequests.WithLabelValues(p.name, "success").Inc()
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
w.Header().Set("Content-Type", ct)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
}
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
if strings.HasPrefix(ct, "application/json") {
|
||||
// Special JSON handling; clean it up a bit.
|
||||
var v interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
} else {
|
||||
_, _ = io.Copy(w, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// filterForLatest returns the latest stable and prerelease only. If the
|
||||
// stable version is newer (comes first in the list) there is no need to go
|
||||
// looking for a prerelease at all.
|
||||
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
|
||||
var filtered []upgrade.Release
|
||||
var havePre bool
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease {
|
||||
// We found a stable version, we're good now.
|
||||
filtered = append(filtered, rel)
|
||||
break
|
||||
}
|
||||
if rel.Prerelease && !havePre {
|
||||
// We remember the first prerelease we find.
|
||||
filtered = append(filtered, rel)
|
||||
havePre = true
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
var userAgentOSArchExp = regexp.MustCompile(`^syncthing.*\(.+ (\w+)-(\w+)\)$`)
|
||||
|
||||
func filterForCompabitility(rels []upgrade.Release, ua, osv string) []upgrade.Release {
|
||||
osArch := userAgentOSArchExp.FindStringSubmatch(ua)
|
||||
if len(osArch) != 3 {
|
||||
metricFilterCalls.WithLabelValues("bad-os-arch").Inc()
|
||||
return rels
|
||||
}
|
||||
os := osArch[1]
|
||||
|
||||
var filtered []upgrade.Release
|
||||
for _, rel := range rels {
|
||||
if rel.Compatibility == nil {
|
||||
// No requirements means it's compatible with everything.
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
|
||||
req, ok := rel.Compatibility.Requirements[os]
|
||||
if !ok {
|
||||
// No entry for the current OS means it's compatible.
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
|
||||
if upgrade.CompareVersions(osv, req) >= 0 {
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(filtered) != len(rels) {
|
||||
metricFilterCalls.WithLabelValues("filtered").Inc()
|
||||
} else {
|
||||
metricFilterCalls.WithLabelValues("unchanged").Inc()
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
type cachedReleases struct {
|
||||
url string
|
||||
mut sync.RWMutex
|
||||
current []upgrade.Release
|
||||
}
|
||||
|
||||
func (c *cachedReleases) Releases() []upgrade.Release {
|
||||
c.mut.RLock()
|
||||
defer c.mut.RUnlock()
|
||||
return c.current
|
||||
}
|
||||
|
||||
func (c *cachedReleases) Update(ctx context.Context) error {
|
||||
rels, err := fetchGithubReleases(ctx, c.url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.mut.Lock()
|
||||
c.current = rels
|
||||
c.mut.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchGithubReleases(ctx context.Context, url string) ([]upgrade.Release, error) {
|
||||
req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var rels []upgrade.Release
|
||||
if err := json.NewDecoder(resp.Body).Decode(&rels); err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "success").Inc()
|
||||
|
||||
// Move the URL used for browser downloads to the URL field, and remove
|
||||
// the browser URL field. This avoids going via the GitHub API for
|
||||
// downloads, since Syncthing uses the URL field.
|
||||
for _, rel := range rels {
|
||||
for j, asset := range rel.Assets {
|
||||
rel.Assets[j].URL = asset.BrowserURL
|
||||
rel.Assets[j].BrowserURL = ""
|
||||
}
|
||||
}
|
||||
|
||||
addReleaseCompatibility(ctx, rels)
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
return rels, nil
|
||||
}
|
||||
|
||||
func addReleaseCompatibility(ctx context.Context, rels []upgrade.Release) {
|
||||
for i := range rels {
|
||||
rel := &rels[i]
|
||||
for i, asset := range rel.Assets {
|
||||
if asset.Name != "compat.json" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Load compat.json into the Compatibility field
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, asset.URL, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
break
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
break
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
resp.Body.Close()
|
||||
break
|
||||
}
|
||||
_ = json.NewDecoder(io.LimitReader(resp.Body, 10<<10)).Decode(&rel.Compatibility)
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "success").Inc()
|
||||
resp.Body.Close()
|
||||
|
||||
// Remove compat.json from the asset list since it's been processed
|
||||
rel.Assets = append(rel.Assets[:i], rel.Assets[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
30
cmd/infra/stupgrades/metrics.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
metricUpgradeChecks = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "metadata_requests",
|
||||
})
|
||||
metricFilterCalls = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "filter_calls",
|
||||
}, []string{"result"})
|
||||
metricHTTPRequests = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "http_requests",
|
||||
}, []string{"target", "result"})
|
||||
)
|
||||
@@ -8,22 +8,22 @@ package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/syncthing/syncthing/cmd/ursrv/aggregate"
|
||||
"github.com/syncthing/syncthing/cmd/ursrv/serve"
|
||||
"github.com/syncthing/syncthing/cmd/infra/ursrv/serve"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
Serve serve.CLI `cmd:"" default:""`
|
||||
Aggregate aggregate.CLI `cmd:""`
|
||||
Serve serve.CLI `cmd:"" default:""`
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.Ltime | log.Ldate | log.Lshortfile)
|
||||
log.SetOutput(os.Stdout)
|
||||
slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
|
||||
Level: slog.LevelInfo,
|
||||
})))
|
||||
|
||||
var cli CLI
|
||||
ctx := kong.Parse(&cli)
|
||||
46
cmd/infra/ursrv/serve/metrics.go
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright (C) 2023 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "incoming_reports_total",
|
||||
}, []string{"result"})
|
||||
metricsCollectsTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collects_total",
|
||||
})
|
||||
metricsCollectSecondsTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collect_seconds_total",
|
||||
})
|
||||
metricsCollectSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collect_seconds_last",
|
||||
})
|
||||
metricsWriteSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "write_seconds_last",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
metricReportsTotal.WithLabelValues("fail")
|
||||
metricReportsTotal.WithLabelValues("replace")
|
||||
metricReportsTotal.WithLabelValues("accept")
|
||||
}
|
||||
314
cmd/infra/ursrv/serve/prometheus.go
Normal file
@@ -0,0 +1,314 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/syncthing/syncthing/lib/ur/contract"
|
||||
)
|
||||
|
||||
const namePrefix = "syncthing_usage_"
|
||||
|
||||
type metricsSet struct {
|
||||
srv *server
|
||||
|
||||
gauges map[string]prometheus.Gauge
|
||||
gaugeVecs map[string]*prometheus.GaugeVec
|
||||
gaugeVecLabels map[string][]string
|
||||
summaries map[string]*metricSummary
|
||||
|
||||
collectMut sync.Mutex
|
||||
collectCutoff time.Duration
|
||||
}
|
||||
|
||||
func newMetricsSet(srv *server) *metricsSet {
|
||||
s := &metricsSet{
|
||||
srv: srv,
|
||||
gauges: make(map[string]prometheus.Gauge),
|
||||
gaugeVecs: make(map[string]*prometheus.GaugeVec),
|
||||
gaugeVecLabels: make(map[string][]string),
|
||||
summaries: make(map[string]*metricSummary),
|
||||
collectCutoff: -24 * time.Hour,
|
||||
}
|
||||
|
||||
var initForType func(reflect.Type)
|
||||
initForType = func(t reflect.Type) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if field.Type.Kind() == reflect.Struct {
|
||||
initForType(field.Type)
|
||||
continue
|
||||
}
|
||||
name, typ, label := fieldNameTypeLabel(field)
|
||||
sname, labels := nameConstLabels(name)
|
||||
switch typ {
|
||||
case "gauge":
|
||||
s.gauges[name] = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: namePrefix + sname,
|
||||
ConstLabels: labels,
|
||||
})
|
||||
case "summary":
|
||||
s.summaries[name] = newMetricSummary(namePrefix+sname, nil, labels)
|
||||
case "gaugeVec":
|
||||
s.gaugeVecLabels[name] = append(s.gaugeVecLabels[name], label)
|
||||
case "summaryVec":
|
||||
s.summaries[name] = newMetricSummary(namePrefix+sname, []string{label}, labels)
|
||||
}
|
||||
}
|
||||
}
|
||||
initForType(reflect.ValueOf(contract.Report{}).Type())
|
||||
|
||||
for name, labels := range s.gaugeVecLabels {
|
||||
s.gaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: namePrefix + name,
|
||||
}, labels)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func fieldNameTypeLabel(rf reflect.StructField) (string, string, string) {
|
||||
metric := rf.Tag.Get("metric")
|
||||
name, typ, ok := strings.Cut(metric, ",")
|
||||
if !ok {
|
||||
return "", "", ""
|
||||
}
|
||||
gv, label, ok := strings.Cut(typ, ":")
|
||||
if ok {
|
||||
typ = gv
|
||||
}
|
||||
return name, typ, label
|
||||
}
|
||||
|
||||
func nameConstLabels(name string) (string, prometheus.Labels) {
|
||||
if name == "-" {
|
||||
return "", nil
|
||||
}
|
||||
name, labels, ok := strings.Cut(name, "{")
|
||||
if !ok {
|
||||
return name, nil
|
||||
}
|
||||
lls := strings.Split(labels[:len(labels)-1], ",")
|
||||
m := make(map[string]string)
|
||||
for _, l := range lls {
|
||||
k, v, _ := strings.Cut(l, "=")
|
||||
m[k] = v
|
||||
}
|
||||
return name, m
|
||||
}
|
||||
|
||||
func (s *metricsSet) addReport(r *contract.Report) {
|
||||
gaugeVecs := make(map[string][]string)
|
||||
s.addReportStruct(reflect.ValueOf(r).Elem(), gaugeVecs)
|
||||
for name, lv := range gaugeVecs {
|
||||
s.gaugeVecs[name].WithLabelValues(lv...).Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) addReportStruct(v reflect.Value, gaugeVecs map[string][]string) {
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if field.Kind() == reflect.Struct {
|
||||
s.addReportStruct(field, gaugeVecs)
|
||||
continue
|
||||
}
|
||||
|
||||
name, typ, label := fieldNameTypeLabel(t.Field(i))
|
||||
switch typ {
|
||||
case "gauge":
|
||||
switch v := field.Interface().(type) {
|
||||
case int:
|
||||
s.gauges[name].Add(float64(v))
|
||||
case string:
|
||||
s.gaugeVecs[name].WithLabelValues(v).Add(1)
|
||||
case bool:
|
||||
if v {
|
||||
s.gauges[name].Add(1)
|
||||
}
|
||||
}
|
||||
case "gaugeVec":
|
||||
var labelValue string
|
||||
switch v := field.Interface().(type) {
|
||||
case string:
|
||||
labelValue = v
|
||||
case int:
|
||||
labelValue = strconv.Itoa(v)
|
||||
case map[string]int:
|
||||
for k, v := range v {
|
||||
labelValue = k
|
||||
field.SetInt(int64(v))
|
||||
break
|
||||
}
|
||||
}
|
||||
if _, ok := gaugeVecs[name]; !ok {
|
||||
gaugeVecs[name] = make([]string, len(s.gaugeVecLabels[name]))
|
||||
}
|
||||
for i, l := range s.gaugeVecLabels[name] {
|
||||
if l == label {
|
||||
gaugeVecs[name][i] = labelValue
|
||||
break
|
||||
}
|
||||
}
|
||||
case "summary", "summaryVec":
|
||||
switch v := field.Interface().(type) {
|
||||
case int:
|
||||
s.summaries[name].Observe("", float64(v))
|
||||
case float64:
|
||||
s.summaries[name].Observe("", v)
|
||||
case []int:
|
||||
for _, v := range v {
|
||||
s.summaries[name].Observe("", float64(v))
|
||||
}
|
||||
case map[string]int:
|
||||
for k, v := range v {
|
||||
if k == "" {
|
||||
// avoid empty string labels as those are the sign
|
||||
// of a non-vec summary
|
||||
k = "unknown"
|
||||
}
|
||||
s.summaries[name].Observe(k, float64(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) Describe(c chan<- *prometheus.Desc) {
|
||||
for _, g := range s.gauges {
|
||||
g.Describe(c)
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Describe(c)
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Describe(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) Collect(c chan<- prometheus.Metric) {
|
||||
s.collectMut.Lock()
|
||||
defer s.collectMut.Unlock()
|
||||
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
dur := time.Since(t0).Seconds()
|
||||
metricsCollectSecondsLast.Set(dur)
|
||||
metricsCollectSecondsTotal.Add(dur)
|
||||
metricsCollectsTotal.Inc()
|
||||
}()
|
||||
|
||||
for _, g := range s.gauges {
|
||||
g.Set(0)
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Reset()
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Reset()
|
||||
}
|
||||
|
||||
cutoff := time.Now().Add(s.collectCutoff)
|
||||
s.srv.reports.Range(func(key string, r *contract.Report) bool {
|
||||
if s.collectCutoff < 0 && r.Received.Before(cutoff) {
|
||||
s.srv.reports.Delete(key)
|
||||
return true
|
||||
}
|
||||
s.addReport(r)
|
||||
return true
|
||||
})
|
||||
|
||||
for _, g := range s.gauges {
|
||||
c <- g
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Collect(c)
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Collect(c)
|
||||
}
|
||||
}
|
||||
|
||||
type metricSummary struct {
|
||||
name string
|
||||
values map[string][]float64
|
||||
zeroes map[string]int
|
||||
|
||||
qDesc *prometheus.Desc
|
||||
countDesc *prometheus.Desc
|
||||
sumDesc *prometheus.Desc
|
||||
zDesc *prometheus.Desc
|
||||
}
|
||||
|
||||
func newMetricSummary(name string, labels []string, constLabels prometheus.Labels) *metricSummary {
|
||||
return &metricSummary{
|
||||
name: name,
|
||||
values: make(map[string][]float64),
|
||||
zeroes: make(map[string]int),
|
||||
qDesc: prometheus.NewDesc(name, "", append(labels, "quantile"), constLabels),
|
||||
countDesc: prometheus.NewDesc(name+"_nonzero_count", "", labels, constLabels),
|
||||
sumDesc: prometheus.NewDesc(name+"_sum", "", labels, constLabels),
|
||||
zDesc: prometheus.NewDesc(name+"_zero_count", "", labels, constLabels),
|
||||
}
|
||||
}
|
||||
|
||||
func (q *metricSummary) Observe(labelValue string, v float64) {
|
||||
if v == 0 {
|
||||
q.zeroes[labelValue]++
|
||||
return
|
||||
}
|
||||
q.values[labelValue] = append(q.values[labelValue], v)
|
||||
}
|
||||
|
||||
func (q *metricSummary) Describe(c chan<- *prometheus.Desc) {
|
||||
c <- q.qDesc
|
||||
c <- q.countDesc
|
||||
c <- q.sumDesc
|
||||
c <- q.zDesc
|
||||
}
|
||||
|
||||
func (q *metricSummary) Collect(c chan<- prometheus.Metric) {
|
||||
for lv, vs := range q.values {
|
||||
var labelVals []string
|
||||
if lv != "" {
|
||||
labelVals = []string{lv}
|
||||
}
|
||||
|
||||
c <- prometheus.MustNewConstMetric(q.countDesc, prometheus.GaugeValue, float64(len(vs)), labelVals...)
|
||||
c <- prometheus.MustNewConstMetric(q.zDesc, prometheus.GaugeValue, float64(q.zeroes[lv]), labelVals...)
|
||||
|
||||
var sum float64
|
||||
for _, v := range vs {
|
||||
sum += v
|
||||
}
|
||||
c <- prometheus.MustNewConstMetric(q.sumDesc, prometheus.GaugeValue, sum, labelVals...)
|
||||
|
||||
if len(vs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
slices.Sort(vs)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[0], append(labelVals, "0")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*5/100], append(labelVals, "0.05")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)/2], append(labelVals, "0.5")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*9/10], append(labelVals, "0.9")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*95/100], append(labelVals, "0.95")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)-1], append(labelVals, "1")...)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *metricSummary) Reset() {
|
||||
clear(q.values)
|
||||
clear(q.zeroes)
|
||||
}
|
||||
cmd/infra/ursrv/serve/serve.go (new file, 406 lines)
@@ -0,0 +1,406 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/geoip"
|
||||
"github.com/syncthing/syncthing/lib/s3"
|
||||
"github.com/syncthing/syncthing/lib/ur/contract"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
Listen string `env:"UR_LISTEN" help:"Usage reporting & metrics endpoint listen address" default:"0.0.0.0:8080"`
|
||||
ListenInternal string `env:"UR_LISTEN_INTERNAL" help:"Internal metrics endpoint listen address" default:"0.0.0.0:8082"`
|
||||
GeoIPLicenseKey string `env:"UR_GEOIP_LICENSE_KEY"`
|
||||
GeoIPAccountID int `env:"UR_GEOIP_ACCOUNT_ID"`
|
||||
DumpFile string `env:"UR_DUMP_FILE" default:"reports.jsons.gz"`
|
||||
DumpInterval time.Duration `env:"UR_DUMP_INTERVAL" default:"5m"`
|
||||
|
||||
S3Endpoint string `name:"s3-endpoint" hidden:"true" env:"UR_S3_ENDPOINT"`
|
||||
S3Region string `name:"s3-region" hidden:"true" env:"UR_S3_REGION"`
|
||||
S3Bucket string `name:"s3-bucket" hidden:"true" env:"UR_S3_BUCKET"`
|
||||
S3AccessKeyID string `name:"s3-access-key-id" hidden:"true" env:"UR_S3_ACCESS_KEY_ID"`
|
||||
S3SecretKey string `name:"s3-secret-key" hidden:"true" env:"UR_S3_SECRET_KEY"`
|
||||
}
|
||||
|
||||
var (
|
||||
compilerRe = regexp.MustCompile(`\(([A-Za-z0-9()., -]+) \w+-\w+(?:| android| default)\) ([\w@.-]+)`)
|
||||
knownDistributions = []distributionMatch{
|
||||
// Maps well known builders to the official distribution method that
|
||||
// they represent
|
||||
|
||||
{regexp.MustCompile(`\steamcity@build\.syncthing\.net`), "GitHub"},
|
||||
{regexp.MustCompile(`\sjenkins@build\.syncthing\.net`), "GitHub"},
|
||||
{regexp.MustCompile(`\sbuilder@github\.syncthing\.net`), "GitHub"},
|
||||
|
||||
{regexp.MustCompile(`\sdeb@build\.syncthing\.net`), "APT"},
|
||||
{regexp.MustCompile(`\sdebian@github\.syncthing\.net`), "APT"},
|
||||
|
||||
{regexp.MustCompile(`\sdocker@syncthing\.net`), "Docker Hub"},
|
||||
{regexp.MustCompile(`\sdocker@build.syncthing\.net`), "Docker Hub"},
|
||||
{regexp.MustCompile(`\sdocker@github.syncthing\.net`), "Docker Hub"},
|
||||
|
||||
{regexp.MustCompile(`\sandroid-builder@github\.syncthing\.net`), "Google Play"},
|
||||
{regexp.MustCompile(`\sandroid-.*teamcity@build\.syncthing\.net`), "Google Play"},
|
||||
|
||||
{regexp.MustCompile(`\sandroid-.*vagrant@basebox-stretch64`), "F-Droid"},
|
||||
{regexp.MustCompile(`\svagrant@bullseye`), "F-Droid"},
|
||||
{regexp.MustCompile(`\svagrant@bookworm`), "F-Droid"},
|
||||
|
||||
{regexp.MustCompile(`Anwender@NET2017`), "Syncthing-Fork (3rd party)"},
|
||||
|
||||
{regexp.MustCompile(`\sbuilduser@(archlinux|svetlemodry)`), "Arch (3rd party)"},
|
||||
{regexp.MustCompile(`\ssyncthing@archlinux`), "Arch (3rd party)"},
|
||||
{regexp.MustCompile(`@debian`), "Debian (3rd party)"},
|
||||
{regexp.MustCompile(`@fedora`), "Fedora (3rd party)"},
|
||||
{regexp.MustCompile(`\sbrew@`), "Homebrew (3rd party)"},
|
||||
{regexp.MustCompile(`\sroot@buildkitsandbox`), "LinuxServer.io (3rd party)"},
|
||||
{regexp.MustCompile(`\sports@freebsd`), "FreeBSD (3rd party)"},
|
||||
{regexp.MustCompile(`\snix@nix`), "Nix (3rd party)"},
|
||||
{regexp.MustCompile(`.`), "Others"},
|
||||
}
|
||||
)
|
||||
|
||||
type distributionMatch struct {
|
||||
matcher *regexp.Regexp
|
||||
distribution string
|
||||
}
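
Classification against knownDistributions stops at the first matching pattern, so order matters; the final `.` pattern acts as a catch-all that maps anything unrecognized to "Others". A small sketch of that first-match lookup, using a shortened, illustrative subset of the table above and hypothetical LongVersion strings:

```go
package main

import (
	"fmt"
	"regexp"
)

type distributionMatch struct {
	matcher      *regexp.Regexp
	distribution string
}

// Illustrative subset of the real table; the trailing `.` pattern must
// come last because classification picks the first match.
var matches = []distributionMatch{
	{regexp.MustCompile(`\sdeb@build\.syncthing\.net`), "APT"},
	{regexp.MustCompile(`\sdocker@syncthing\.net`), "Docker Hub"},
	{regexp.MustCompile(`.`), "Others"},
}

func classify(longVersion string) string {
	for _, d := range matches {
		if d.matcher.MatchString(longVersion) {
			return d.distribution
		}
	}
	return "Others"
}

func main() {
	// Hypothetical LongVersion strings, for illustration only.
	fmt.Println(classify(`syncthing v1.0.0 (go1.22 linux-amd64) deb@build.syncthing.net`))  // APT
	fmt.Println(classify(`syncthing v1.0.0 (go1.22 linux-amd64) someone@example.com`))      // Others
}
```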
|
||||
|
||||
func (cli *CLI) Run() error {
|
||||
slog.Info("Starting", "version", build.Version)
|
||||
|
||||
// Listening
|
||||
|
||||
urListener, err := net.Listen("tcp", cli.Listen)
|
||||
if err != nil {
|
||||
slog.Error("Failed to listen (usage reports)", "error", err)
|
||||
return err
|
||||
}
|
||||
slog.Info("Listening (usage reports)", "address", urListener.Addr())
|
||||
|
||||
internalListener, err := net.Listen("tcp", cli.ListenInternal)
|
||||
if err != nil {
|
||||
slog.Error("Failed to listen (internal)", "error", err)
|
||||
return err
|
||||
}
|
||||
slog.Info("Listening (internal)", "address", internalListener.Addr())
|
||||
|
||||
var geo *geoip.Provider
|
||||
if cli.GeoIPAccountID != 0 && cli.GeoIPLicenseKey != "" {
|
||||
geo, err = geoip.NewGeoLite2CityProvider(context.Background(), cli.GeoIPAccountID, cli.GeoIPLicenseKey, os.TempDir())
|
||||
if err != nil {
|
||||
slog.Error("Failed to load GeoIP", "error", err)
|
||||
return err
|
||||
}
|
||||
go geo.Serve(context.TODO())
|
||||
}
|
||||
|
||||
// s3
|
||||
|
||||
var s3sess *s3.Session
|
||||
if cli.S3Endpoint != "" {
|
||||
s3sess, err = s3.NewSession(cli.S3Endpoint, cli.S3Region, cli.S3Bucket, cli.S3AccessKeyID, cli.S3SecretKey)
|
||||
if err != nil {
|
||||
slog.Error("Failed to create S3 session", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(cli.DumpFile); err != nil && s3sess != nil {
|
||||
if err := cli.downloadDumpFile(s3sess); err != nil {
|
||||
slog.Error("Failed to download dump file", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// server
|
||||
|
||||
srv := &server{
|
||||
geo: geo,
|
||||
reports: xsync.NewMapOf[string, *contract.Report](),
|
||||
}
|
||||
|
||||
if fd, err := os.Open(cli.DumpFile); err == nil {
|
||||
gr, err := gzip.NewReader(fd)
|
||||
if err == nil {
|
||||
srv.load(gr)
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range time.Tick(cli.DumpInterval) {
|
||||
if err := cli.saveDumpFile(srv, s3sess); err != nil {
|
||||
slog.Error("Failed to write dump file", "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// The internal metrics endpoint just serves metrics about what the
|
||||
// server is doing.
|
||||
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
internalSrv := http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
}
|
||||
go internalSrv.Serve(internalListener)
|
||||
|
||||
// New external metrics endpoint accepts reports from clients and serves
|
||||
// aggregated usage reporting metrics.
|
||||
|
||||
ms := newMetricsSet(srv)
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(ms)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
|
||||
mux.HandleFunc("/newdata", srv.handleNewData)
|
||||
mux.HandleFunc("/ping", srv.handlePing)
|
||||
|
||||
metricsSrv := http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
Handler: mux,
|
||||
}
|
||||
|
||||
slog.Info("Ready to serve")
|
||||
return metricsSrv.Serve(urListener)
|
||||
}
|
||||
|
||||
func (cli *CLI) downloadDumpFile(s3sess *s3.Session) error {
|
||||
latestKey, err := s3sess.LatestKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("list latest S3 key: %w", err)
|
||||
}
|
||||
fd, err := os.Create(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create dump file: %w", err)
|
||||
}
|
||||
if err := s3sess.Download(fd, latestKey); err != nil {
|
||||
_ = fd.Close()
|
||||
return fmt.Errorf("download dump file: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("close dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file downloaded", "key", latestKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *CLI) saveDumpFile(srv *server, s3sess *s3.Session) error {
|
||||
fd, err := os.Create(cli.DumpFile + ".tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating dump file: %w", err)
|
||||
}
|
||||
gw := gzip.NewWriter(fd)
|
||||
if err := srv.save(gw); err != nil {
|
||||
return fmt.Errorf("saving dump file: %w", err)
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
fd.Close()
|
||||
return fmt.Errorf("closing gzip writer: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("closing dump file: %w", err)
|
||||
}
|
||||
if err := os.Rename(cli.DumpFile+".tmp", cli.DumpFile); err != nil {
|
||||
return fmt.Errorf("renaming dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file saved")
|
||||
|
||||
if s3sess != nil {
|
||||
key := fmt.Sprintf("reports-%s.jsons.gz", time.Now().UTC().Format("2006-01-02"))
|
||||
fd, err := os.Open(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening dump file: %w", err)
|
||||
}
|
||||
if err := s3sess.Upload(fd, key); err != nil {
|
||||
return fmt.Errorf("uploading dump file: %w", err)
|
||||
}
|
||||
_ = fd.Close()
|
||||
slog.Info("Dump file uploaded")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
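
saveDumpFile writes to a `.tmp` file, closes it, and only then renames it over the real dump file, so a crash mid-write never leaves a truncated `reports.jsons.gz` behind. A minimal sketch of that write-to-temp-then-rename pattern with gzip, under assumed file names:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"os"
)

// atomicWriteGzip writes data to path via a temporary file and a rename,
// mirroring the save-then-rename flow in saveDumpFile above.
func atomicWriteGzip(path string, data []byte) error {
	tmp := path + ".tmp"
	fd, err := os.Create(tmp)
	if err != nil {
		return fmt.Errorf("creating temp file: %w", err)
	}
	gw := gzip.NewWriter(fd)
	if _, err := gw.Write(data); err != nil {
		fd.Close()
		return fmt.Errorf("writing: %w", err)
	}
	if err := gw.Close(); err != nil {
		fd.Close()
		return fmt.Errorf("closing gzip writer: %w", err)
	}
	if err := fd.Close(); err != nil {
		return fmt.Errorf("closing temp file: %w", err)
	}
	// The rename is the commit point; readers see either the old or the
	// new file, never a partial one.
	return os.Rename(tmp, path)
}

func main() {
	if err := atomicWriteGzip("reports.jsons.gz", []byte(`{"uniqueID":"example"}`+"\n")); err != nil {
		fmt.Println("error:", err)
	}
}
```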
|
||||
|
||||
type server struct {
|
||||
geo *geoip.Provider
|
||||
reports *xsync.MapOf[string, *contract.Report]
|
||||
}
|
||||
|
||||
func (s *server) handlePing(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (s *server) handleNewData(w http.ResponseWriter, r *http.Request) {
|
||||
result := "fail"
|
||||
defer func() {
|
||||
// result is "accept" (new report), "replace" (existing report) or
|
||||
// "fail"
|
||||
metricReportsTotal.WithLabelValues(result).Inc()
|
||||
}()
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
addr := r.Header.Get("X-Forwarded-For")
|
||||
if addr != "" {
|
||||
addr = strings.Split(addr, ", ")[0]
|
||||
} else {
|
||||
addr = r.RemoteAddr
|
||||
}
|
||||
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
|
||||
log := slog.With("addr", addr)
|
||||
|
||||
if net.ParseIP(addr) == nil {
|
||||
addr = ""
|
||||
}
|
||||
|
||||
var rep contract.Report
|
||||
|
||||
lr := &io.LimitedReader{R: r.Body, N: 40 * 1024}
|
||||
bs, _ := io.ReadAll(lr)
|
||||
if err := json.Unmarshal(bs, &rep); err != nil {
|
||||
log.Error("Failed to decode JSON", "error", err)
|
||||
http.Error(w, "JSON Decode Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
rep.Received = time.Now()
|
||||
rep.Date = rep.Received.UTC().Format("20060102")
|
||||
rep.Address = addr
|
||||
|
||||
if err := rep.Validate(); err != nil {
|
||||
log.Error("Failed to validate report", "error", err)
|
||||
http.Error(w, "Validation Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if s.addReport(&rep) {
|
||||
result = "replace"
|
||||
} else {
|
||||
result = "accept"
|
||||
}
|
||||
}
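
handleNewData prefers the first entry of X-Forwarded-For over RemoteAddr, strips any port, and blanks the address when it does not parse as an IP. A standalone sketch of just that address extraction (the header name and fallback order are taken from the handler above; the sample addresses are invented):

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

// clientAddr mirrors the address handling in handleNewData: first
// X-Forwarded-For entry if present, otherwise RemoteAddr, port stripped,
// and "" for anything that is not a valid IP.
func clientAddr(r *http.Request) string {
	addr := r.Header.Get("X-Forwarded-For")
	if addr != "" {
		addr = strings.Split(addr, ", ")[0]
	} else {
		addr = r.RemoteAddr
	}
	if host, _, err := net.SplitHostPort(addr); err == nil {
		addr = host
	}
	if net.ParseIP(addr) == nil {
		return ""
	}
	return addr
}

func main() {
	req, _ := http.NewRequest(http.MethodPost, "/newdata", nil)
	req.RemoteAddr = "192.0.2.10:54321"
	req.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	fmt.Println(clientAddr(req)) // 203.0.113.7
}
```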
|
||||
|
||||
func (s *server) addReport(rep *contract.Report) bool {
|
||||
if s.geo != nil {
|
||||
if ip := net.ParseIP(rep.Address); ip != nil {
|
||||
if city, err := s.geo.City(ip); err == nil {
|
||||
rep.Country = city.Country.Names["en"]
|
||||
rep.CountryCode = city.Country.IsoCode
|
||||
}
|
||||
}
|
||||
}
|
||||
if rep.Country == "" {
|
||||
rep.Country = "Unknown"
|
||||
}
|
||||
if rep.CountryCode == "" {
|
||||
rep.CountryCode = "ZZ"
|
||||
}
|
||||
|
||||
rep.Version = transformVersion(rep.Version)
|
||||
if strings.Contains(rep.Version, ".") {
|
||||
split := strings.SplitN(rep.Version, ".", 3)
|
||||
if len(split) == 3 {
|
||||
rep.MajorVersion = strings.Join(split[:2], ".")
|
||||
}
|
||||
}
|
||||
rep.OS, rep.Arch, _ = strings.Cut(rep.Platform, "-")
|
||||
|
||||
if m := compilerRe.FindStringSubmatch(rep.LongVersion); len(m) == 3 {
|
||||
rep.Compiler = m[1]
|
||||
rep.Builder = m[2]
|
||||
}
|
||||
for _, d := range knownDistributions {
|
||||
if d.matcher.MatchString(rep.LongVersion) {
|
||||
rep.Distribution = d.distribution
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
_, loaded := s.reports.LoadAndStore(rep.UniqueID, rep)
|
||||
return loaded
|
||||
}
|
||||
|
||||
func (s *server) save(w io.Writer) error {
|
||||
bw := bufio.NewWriter(w)
|
||||
enc := json.NewEncoder(bw)
|
||||
var err error
|
||||
s.reports.Range(func(k string, v *contract.Report) bool {
|
||||
err = enc.Encode(v)
|
||||
return err == nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bw.Flush()
|
||||
}
|
||||
|
||||
func (s *server) load(r io.Reader) {
|
||||
dec := json.NewDecoder(r)
|
||||
s.reports.Clear()
|
||||
for {
|
||||
var rep contract.Report
|
||||
if err := dec.Decode(&rep); errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if err != nil {
|
||||
slog.Error("Failed to load record", "error", err)
|
||||
break
|
||||
}
|
||||
s.addReport(&rep)
|
||||
}
|
||||
slog.Info("Loaded reports", "count", s.reports.Size())
|
||||
}
|
||||
|
||||
var (
|
||||
plusRe = regexp.MustCompile(`(\+.*|[.-]dev\..*)$`)
|
||||
plusStr = "-dev"
|
||||
)
|
||||
|
||||
// transformVersion returns a version number formatted correctly, with all
|
||||
// development versions aggregated into one.
|
||||
func transformVersion(v string) string {
|
||||
if v == "unknown-dev" {
|
||||
return v
|
||||
}
|
||||
if !strings.HasPrefix(v, "v") {
|
||||
v = "v" + v
|
||||
}
|
||||
v = plusRe.ReplaceAllString(v, plusStr)
|
||||
|
||||
return v
|
||||
}
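
For reference, the regexp above folds any `+...` build metadata or `.dev.`/`-dev.` suffix into a single `-dev` marker and ensures a leading `v`. A small self-contained check with illustrative version strings (not taken from real reports; the function is repeated here only so the example compiles on its own):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var plusRe = regexp.MustCompile(`(\+.*|[.-]dev\..*)$`)

// transformVersion repeats the normalization above for self-containment.
func transformVersion(v string) string {
	if v == "unknown-dev" {
		return v
	}
	if !strings.HasPrefix(v, "v") {
		v = "v" + v
	}
	return plusRe.ReplaceAllString(v, "-dev")
}

func main() {
	for _, v := range []string{"1.27.0", "v1.27.1+14-gabcdef0", "v1.28.0-rc.1.dev.20.gfff", "unknown-dev"} {
		fmt.Printf("%-28s -> %s\n", v, transformVersion(v))
	}
}
```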
|
||||
cmd/stdiscosrv/amqp.go (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/thejerf/suture/v4"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/internal/protoutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type amqpReplicator struct {
|
||||
suture.Service
|
||||
broker string
|
||||
sender *amqpSender
|
||||
receiver *amqpReceiver
|
||||
outbox chan *discosrv.ReplicationRecord
|
||||
}
|
||||
|
||||
func newAMQPReplicator(broker, clientID string, db database) *amqpReplicator {
|
||||
svc := suture.New("amqpReplicator", suture.Spec{PassThroughPanics: true})
|
||||
|
||||
sender := &amqpSender{
|
||||
broker: broker,
|
||||
clientID: clientID,
|
||||
outbox: make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
svc.Add(sender)
|
||||
|
||||
receiver := &amqpReceiver{
|
||||
broker: broker,
|
||||
clientID: clientID,
|
||||
db: db,
|
||||
}
|
||||
svc.Add(receiver)
|
||||
|
||||
return &amqpReplicator{
|
||||
Service: svc,
|
||||
broker: broker,
|
||||
sender: sender,
|
||||
receiver: receiver,
|
||||
outbox: make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *amqpReplicator) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
|
||||
s.sender.send(key, ps, seen)
|
||||
}
|
||||
|
||||
type amqpSender struct {
|
||||
broker string
|
||||
clientID string
|
||||
outbox chan *discosrv.ReplicationRecord
|
||||
}
|
||||
|
||||
func (s *amqpSender) Serve(ctx context.Context) error {
|
||||
conn, ch, err := amqpChannel(s.broker)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ch.Close()
|
||||
defer conn.Close()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
select {
|
||||
case rec := <-s.outbox:
|
||||
size := proto.Size(rec)
|
||||
if len(buf) < size {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
|
||||
n, err := protoutil.MarshalTo(buf, rec)
|
||||
if err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication marshal: %w", err)
|
||||
}
|
||||
|
||||
err = ch.PublishWithContext(ctx,
|
||||
"discovery", // exchange
|
||||
"", // routing key
|
||||
false, // mandatory
|
||||
false, // immediate
|
||||
amqp.Publishing{
|
||||
ContentType: "application/protobuf",
|
||||
Body: buf[:n],
|
||||
AppId: s.clientID,
|
||||
})
|
||||
if err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication publish: %w", err)
|
||||
}
|
||||
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *amqpSender) String() string {
|
||||
return fmt.Sprintf("amqpSender(%q)", s.broker)
|
||||
}
|
||||
|
||||
func (s *amqpSender) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
|
||||
item := &discosrv.ReplicationRecord{
|
||||
Key: key[:],
|
||||
Addresses: ps,
|
||||
Seen: seen,
|
||||
}
|
||||
|
||||
// The send should never block. The inbox is suitably buffered for at
|
||||
// least a few seconds of stalls, which shouldn't happen in practice.
|
||||
select {
|
||||
case s.outbox <- item:
|
||||
default:
|
||||
replicationSendsTotal.WithLabelValues("drop").Inc()
|
||||
}
|
||||
}
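
The comment above describes the drop-rather-than-block policy: the outbox is a buffered channel, and when it is full the record is counted as dropped instead of stalling the caller. A minimal sketch of that pattern in isolation (the buffer size and counter are placeholders, not the real replicationOutboxSize or Prometheus counter):

```go
package main

import "fmt"

type record struct{ key string }

type sender struct {
	outbox  chan record
	dropped int
}

// send never blocks: if the buffered outbox is full, the record is
// dropped and counted, mirroring the select/default in amqpSender.send.
func (s *sender) send(r record) {
	select {
	case s.outbox <- r:
	default:
		s.dropped++
	}
}

func main() {
	s := &sender{outbox: make(chan record, 2)} // placeholder buffer size
	for i := 0; i < 5; i++ {
		s.send(record{key: fmt.Sprint(i)})
	}
	fmt.Println("queued:", len(s.outbox), "dropped:", s.dropped) // queued: 2 dropped: 3
}
```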
|
||||
|
||||
type amqpReceiver struct {
|
||||
broker string
|
||||
clientID string
|
||||
db database
|
||||
}
|
||||
|
||||
func (s *amqpReceiver) Serve(ctx context.Context) error {
|
||||
conn, ch, err := amqpChannel(s.broker)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ch.Close()
|
||||
defer conn.Close()
|
||||
|
||||
msgs, err := amqpConsume(ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg, ok := <-msgs:
|
||||
if !ok {
|
||||
return fmt.Errorf("subscription closed: %w", io.EOF)
|
||||
}
|
||||
|
||||
			// ignore messages from ourselves
|
||||
if msg.AppId == s.clientID {
|
||||
continue
|
||||
}
|
||||
|
||||
var rec discosrv.ReplicationRecord
|
||||
if err := proto.Unmarshal(msg.Body, &rec); err != nil {
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication unmarshal: %w", err)
|
||||
}
|
||||
id, err := protocol.DeviceIDFromBytes(rec.Key)
|
||||
if err != nil {
|
||||
id, err = protocol.DeviceIDFromString(string(rec.Key))
|
||||
}
|
||||
if err != nil {
|
||||
log.Println("Replication device ID:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.db.merge(&id, rec.Addresses, rec.Seen); err != nil {
|
||||
return fmt.Errorf("replication database merge: %w", err)
|
||||
}
|
||||
|
||||
replicationRecvsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *amqpReceiver) String() string {
|
||||
return fmt.Sprintf("amqpReceiver(%q)", s.broker)
|
||||
}
|
||||
|
||||
func amqpChannel(dst string) (*amqp.Connection, *amqp.Channel, error) {
|
||||
conn, err := amqp.Dial(dst)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("AMQP dial: %w", err)
|
||||
}
|
||||
|
||||
ch, err := conn.Channel()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("AMQP channel: %w", err)
|
||||
}
|
||||
|
||||
err = ch.ExchangeDeclare(
|
||||
"discovery", // name
|
||||
"fanout", // type
|
||||
false, // durable
|
||||
false, // auto-deleted
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
nil, // arguments
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("AMQP declare exchange: %w", err)
|
||||
}
|
||||
|
||||
return conn, ch, nil
|
||||
}
|
||||
|
||||
func amqpConsume(ch *amqp.Channel) (<-chan amqp.Delivery, error) {
|
||||
q, err := ch.QueueDeclare(
|
||||
"", // name
|
||||
false, // durable
|
||||
false, // delete when unused
|
||||
true, // exclusive
|
||||
false, // no-wait
|
||||
nil, // arguments
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AMQP declare queue: %w", err)
|
||||
}
|
||||
|
||||
err = ch.QueueBind(
|
||||
q.Name, // queue name
|
||||
"", // routing key
|
||||
"discovery", // exchange
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AMQP bind queue: %w", err)
|
||||
}
|
||||
|
||||
msgs, err := ch.Consume(
|
||||
q.Name, // queue
|
||||
"", // consumer
|
||||
true, // auto-ack
|
||||
false, // exclusive
|
||||
false, // no-local
|
||||
false, // no-wait
|
||||
nil, // args
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AMQP consume: %w", err)
|
||||
}
|
||||
|
||||
return msgs, nil
|
||||
}
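
Taken together, amqpChannel and amqpConsume set up a classic fanout topology: every instance publishes replication records to the `discovery` exchange, consumes from its own exclusive auto-generated queue, and skips messages carrying its own AppId. A hedged usage sketch of the publishing side, needing a running broker; the broker URL and client ID here are placeholders:

```go
package main

import (
	"context"
	"log"

	amqp "github.com/rabbitmq/amqp091-go"
)

func main() {
	// Placeholder broker URL and client ID; in the server these come from
	// configuration.
	const broker = "amqp://guest:guest@localhost:5672/"
	const clientID = "example-instance"

	conn, err := amqp.Dial(broker)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	defer ch.Close()

	// Same fanout exchange as declared by amqpChannel above.
	if err := ch.ExchangeDeclare("discovery", "fanout", false, false, false, false, nil); err != nil {
		log.Fatal(err)
	}

	// Publish one message; every consumer bound to the exchange receives it.
	err = ch.PublishWithContext(context.Background(), "discovery", "", false, false, amqp.Publishing{
		ContentType: "application/protobuf",
		Body:        []byte("payload"),
		AppId:       clientID, // lets receivers skip their own messages
	})
	if err != nil {
		log.Fatal(err)
	}
}
```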
|
||||
@@ -22,12 +22,13 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/stringutil"
|
||||
)
|
||||
@@ -39,15 +40,20 @@ type announcement struct {
|
||||
}
|
||||
|
||||
 type apiSrv struct {
-	addr     string
-	cert     tls.Certificate
-	db       database
-	listener net.Listener
-	repl     replicator // optional
-	useHTTP  bool
-	mapsMut  sync.Mutex
-	misses   map[string]int32
+	addr           string
+	cert           tls.Certificate
+	db             database
+	listener       net.Listener
+	repl           replicator // optional
+	useHTTP        bool
+	compression    bool
+	gzipWriters    sync.Pool
+	seenTracker    *retryAfterTracker
+	notSeenTracker *retryAfterTracker
 }
|
||||
type replicator interface {
|
||||
send(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64)
|
||||
}
|
||||
|
||||
type requestID int64
|
||||
@@ -60,18 +66,30 @@ type contextKey int
|
||||
|
||||
const idKey contextKey = iota
|
||||
|
||||
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool) *apiSrv {
|
||||
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP, compression bool) *apiSrv {
|
||||
return &apiSrv{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
db: db,
|
||||
repl: repl,
|
||||
useHTTP: useHTTP,
|
||||
misses: make(map[string]int32),
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
db: db,
|
||||
repl: repl,
|
||||
useHTTP: useHTTP,
|
||||
compression: compression,
|
||||
seenTracker: &retryAfterTracker{
|
||||
name: "seenTracker",
|
||||
bucketStarts: time.Now(),
|
||||
desiredRate: 250,
|
||||
currentDelay: notFoundRetryUnknownMinSeconds,
|
||||
},
|
||||
notSeenTracker: &retryAfterTracker{
|
||||
name: "notSeenTracker",
|
||||
bucketStarts: time.Now(),
|
||||
desiredRate: 250,
|
||||
currentDelay: notFoundRetryUnknownMaxSeconds / 2,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Serve(_ context.Context) error {
|
||||
func (s *apiSrv) Serve(ctx context.Context) error {
|
||||
if s.useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
@@ -102,8 +120,15 @@ func (s *apiSrv) Serve(_ context.Context) error {
|
||||
ReadTimeout: httpReadTimeout,
|
||||
WriteTimeout: httpWriteTimeout,
|
||||
MaxHeaderBytes: httpMaxHeaderBytes,
|
||||
ErrorLog: log.New(io.Discard, "", 0),
|
||||
}
|
||||
if !debug {
|
||||
srv.ErrorLog = log.New(io.Discard, "", 0)
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
srv.Shutdown(context.Background())
|
||||
}()
|
||||
|
||||
err := srv.Serve(s.listener)
|
||||
if err != nil {
|
||||
@@ -173,7 +198,7 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "bad device param")
|
||||
log.Println(reqID, "bad device param:", err)
|
||||
}
|
||||
lookupRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
@@ -181,8 +206,7 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
key := deviceID.String()
|
||||
rec, err := s.db.get(key)
|
||||
rec, err := s.db.get(&deviceID)
|
||||
if err != nil {
|
||||
// some sort of internal error
|
||||
lookupRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
@@ -192,28 +216,14 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
}
|
||||
|
||||
if len(rec.Addresses) == 0 {
|
||||
lookupRequestsTotal.WithLabelValues("not_found").Inc()
|
||||
|
||||
s.mapsMut.Lock()
|
||||
misses := s.misses[key]
|
||||
if misses < rec.Misses {
|
||||
misses = rec.Misses + 1
|
||||
var afterS int
|
||||
if rec.Seen == 0 {
|
||||
afterS = s.notSeenTracker.retryAfterS()
|
||||
lookupRequestsTotal.WithLabelValues("not_found_ever").Inc()
|
||||
} else {
|
||||
misses++
|
||||
afterS = s.seenTracker.retryAfterS()
|
||||
lookupRequestsTotal.WithLabelValues("not_found_recent").Inc()
|
||||
}
|
||||
s.misses[key] = misses
|
||||
s.mapsMut.Unlock()
|
||||
|
||||
if misses%notFoundMissesWriteInterval == 0 {
|
||||
rec.Misses = misses
|
||||
rec.Missed = time.Now().UnixNano()
|
||||
rec.Addresses = nil
|
||||
// rec.Seen retained from get
|
||||
s.db.put(key, rec)
|
||||
}
|
||||
|
||||
afterS := notFoundRetryAfterSeconds(int(misses))
|
||||
retryAfterHistogram.Observe(float64(afterS))
|
||||
w.Header().Set("Retry-After", strconv.Itoa(afterS))
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
@@ -225,10 +235,16 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
var bw io.Writer = w
|
||||
|
||||
 	// Use compression if the client asks for it
-	if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
+	if s.compression && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
+		gw, ok := s.gzipWriters.Get().(*gzip.Writer)
+		if ok {
+			gw.Reset(w)
+		} else {
+			gw = gzip.NewWriter(w)
+		}
 		w.Header().Set("Content-Encoding", "gzip")
-		gw := gzip.NewWriter(bw)
 		defer gw.Close()
+		defer s.gzipWriters.Put(gw)
 		bw = gw
 	}
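
This hunk swaps a per-request gzip.NewWriter for a sync.Pool of reusable writers, which avoids reallocating the fairly large gzip state on every compressed response. A standalone sketch of that pooling pattern in an HTTP handler, assuming nothing beyond the standard library:

```go
package main

import (
	"compress/gzip"
	"log"
	"net/http"
	"strings"
	"sync"
)

var gzipWriters sync.Pool

func handler(w http.ResponseWriter, r *http.Request) {
	if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
		_, _ = w.Write([]byte("hello\n"))
		return
	}
	// Reuse a pooled writer when one is available, otherwise allocate.
	gw, ok := gzipWriters.Get().(*gzip.Writer)
	if ok {
		gw.Reset(w)
	} else {
		gw = gzip.NewWriter(w)
	}
	w.Header().Set("Content-Encoding", "gzip")
	defer gzipWriters.Put(gw)
	defer gw.Close() // runs before Put, flushing the compressed data
	_, _ = gw.Write([]byte("hello\n"))
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```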
|
||||
|
||||
@@ -267,6 +283,9 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
|
||||
addresses := fixupAddresses(remoteAddr, ann.Addresses)
|
||||
if len(addresses) == 0 {
|
||||
if debug {
|
||||
log.Println(reqID, "no addresses")
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
@@ -274,6 +293,9 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
}
|
||||
|
||||
if err := s.handleAnnounce(deviceID, addresses); err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "handle:", err)
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
@@ -284,6 +306,9 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
|
||||
w.Header().Set("Reannounce-After", reannounceAfterString())
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
if debug {
|
||||
log.Println(reqID, "announced", deviceID, addresses)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Stop() {
|
||||
@@ -291,29 +316,31 @@ func (s *apiSrv) Stop() {
|
||||
}
|
||||
|
||||
func (s *apiSrv) handleAnnounce(deviceID protocol.DeviceID, addresses []string) error {
|
||||
key := deviceID.String()
|
||||
now := time.Now()
|
||||
expire := now.Add(addressExpiryTime).UnixNano()
|
||||
|
||||
dbAddrs := make([]DatabaseAddress, len(addresses))
|
||||
for i := range addresses {
|
||||
dbAddrs[i].Address = addresses[i]
|
||||
dbAddrs[i].Expires = expire
|
||||
}
|
||||
|
||||
// The address slice must always be sorted for database merges to work
|
||||
// properly.
|
||||
sort.Sort(databaseAddressOrder(dbAddrs))
|
||||
slices.Sort(addresses)
|
||||
addresses = slices.Compact(addresses)
|
||||
|
||||
dbAddrs := make([]*discosrv.DatabaseAddress, len(addresses))
|
||||
for i := range addresses {
|
||||
dbAddrs[i] = &discosrv.DatabaseAddress{
|
||||
Address: addresses[i],
|
||||
Expires: expire,
|
||||
}
|
||||
}
|
||||
|
||||
seen := now.UnixNano()
|
||||
if s.repl != nil {
|
||||
s.repl.send(key, dbAddrs, seen)
|
||||
s.repl.send(&deviceID, dbAddrs, seen)
|
||||
}
|
||||
return s.db.merge(key, dbAddrs, seen)
|
||||
return s.db.merge(&deviceID, dbAddrs, seen)
|
||||
}
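
The new handleAnnounce sorts and de-duplicates the announced address strings up front (slices.Sort plus slices.Compact) so that later database merges can rely on sorted, unique input, as noted in the comment above. A small sketch of that normalization step with made-up addresses:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Illustrative announced addresses, including a duplicate.
	addresses := []string{
		"tcp://192.0.2.1:22000",
		"quic://192.0.2.1:22000",
		"tcp://192.0.2.1:22000",
	}

	// Sort and compact so the list is ordered and unique, mirroring
	// handleAnnounce above.
	slices.Sort(addresses)
	addresses = slices.Compact(addresses)

	fmt.Println(addresses) // [quic://192.0.2.1:22000 tcp://192.0.2.1:22000]
}
```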
|
||||
|
||||
func handlePing(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(204)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
@@ -359,7 +386,7 @@ func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
}
|
||||
|
||||
bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr})
|
||||
} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
|
||||
} else if cert := req.Header.Get("X-Forwarded-Tls-Client-Cert"); cert != "" {
|
||||
// Traefik 2 passtlsclientcert
|
||||
//
|
||||
// The certificate is in PEM format, maybe with URL encoding
|
||||
@@ -367,19 +394,36 @@ func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
// statements. We need to decode, reinstate the newlines every 64
|
||||
// characters and add statements for the PEM decoder
|
||||
|
||||
if strings.Contains(hdr, "%") {
|
||||
if unesc, err := url.QueryUnescape(hdr); err == nil {
|
||||
hdr = unesc
|
||||
if strings.Contains(cert, "%") {
|
||||
if unesc, err := url.QueryUnescape(cert); err == nil {
|
||||
cert = unesc
|
||||
}
|
||||
}
|
||||
|
||||
for i := 64; i < len(hdr); i += 65 {
|
||||
hdr = hdr[:i] + "\n" + hdr[i:]
|
||||
const (
|
||||
header = "-----BEGIN CERTIFICATE-----"
|
||||
footer = "-----END CERTIFICATE-----"
|
||||
)
|
||||
|
||||
var b bytes.Buffer
|
||||
b.Grow(len(header) + 1 + len(cert) + len(cert)/64 + 1 + len(footer) + 1)
|
||||
|
||||
b.WriteString(header)
|
||||
b.WriteByte('\n')
|
||||
|
||||
for i := 0; i < len(cert); i += 64 {
|
||||
end := i + 64
|
||||
if end > len(cert) {
|
||||
end = len(cert)
|
||||
}
|
||||
b.WriteString(cert[i:end])
|
||||
b.WriteByte('\n')
|
||||
}
|
||||
|
||||
hdr = "-----BEGIN CERTIFICATE-----\n" + hdr
|
||||
hdr += "\n-----END CERTIFICATE-----\n"
|
||||
bs = []byte(hdr)
|
||||
b.WriteString(footer)
|
||||
b.WriteByte('\n')
|
||||
|
||||
bs = b.Bytes()
|
||||
}
|
||||
|
||||
if bs == nil {
|
||||
@@ -444,7 +488,6 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
|
||||
// remote is nil, unable to determine host IP
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If zero port was specified, use remote port.
|
||||
@@ -482,7 +525,7 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func addressStrs(dbAddrs []DatabaseAddress) []string {
|
||||
func addressStrs(dbAddrs []*discosrv.DatabaseAddress) []string {
|
||||
res := make([]string, len(dbAddrs))
|
||||
for i, a := range dbAddrs {
|
||||
res[i] = a.Address
|
||||
@@ -494,15 +537,44 @@ func errorRetryAfterString() string {
|
||||
return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds))
|
||||
}
|
||||
|
||||
func notFoundRetryAfterSeconds(misses int) int {
|
||||
retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses
|
||||
if retryAfterS > notFoundRetryMaxSeconds {
|
||||
retryAfterS = notFoundRetryMaxSeconds
|
||||
}
|
||||
retryAfterS += rand.Intn(notFoundRetryFuzzSeconds)
|
||||
return retryAfterS
|
||||
}
|
||||
|
||||
func reannounceAfterString() string {
|
||||
return strconv.Itoa(reannounceAfterSeconds + rand.Intn(reannounzeFuzzSeconds))
|
||||
}
|
||||
|
||||
type retryAfterTracker struct {
|
||||
name string
|
||||
desiredRate float64 // requests per second
|
||||
|
||||
mut sync.Mutex
|
||||
lastCount int // requests in the last bucket
|
||||
curCount int // requests in the current bucket
|
||||
bucketStarts time.Time // start of the current bucket
|
||||
currentDelay int // current delay in seconds
|
||||
}
|
||||
|
||||
func (t *retryAfterTracker) retryAfterS() int {
|
||||
now := time.Now()
|
||||
t.mut.Lock()
|
||||
if durS := now.Sub(t.bucketStarts).Seconds(); durS > float64(t.currentDelay) {
|
||||
t.bucketStarts = now
|
||||
t.lastCount = t.curCount
|
||||
lastRate := float64(t.lastCount) / durS
|
||||
|
||||
switch {
|
||||
case t.currentDelay > notFoundRetryUnknownMinSeconds &&
|
||||
lastRate < 0.75*t.desiredRate:
|
||||
t.currentDelay = max(8*t.currentDelay/10, notFoundRetryUnknownMinSeconds)
|
||||
case t.currentDelay < notFoundRetryUnknownMaxSeconds &&
|
||||
lastRate > 1.25*t.desiredRate:
|
||||
t.currentDelay = min(3*t.currentDelay/2, notFoundRetryUnknownMaxSeconds)
|
||||
}
|
||||
|
||||
t.curCount = 0
|
||||
}
|
||||
if t.curCount == 0 {
|
||||
retryAfterLevel.WithLabelValues(t.name).Set(float64(t.currentDelay))
|
||||
}
|
||||
t.curCount++
|
||||
t.mut.Unlock()
|
||||
return t.currentDelay + rand.Intn(t.currentDelay/4)
|
||||
}
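
retryAfterS adapts the advertised Retry-After value to the observed request rate: roughly, when the last bucket ran under 75% of the desired rate the delay shrinks by 20%, and when it ran over 125% it grows by 50%, clamped between the min and max constants. A toy simulation of just that adjustment rule; the bounds here are placeholders standing in for notFoundRetryUnknownMinSeconds and notFoundRetryUnknownMaxSeconds, which are defined elsewhere in the source:

```go
package main

import "fmt"

// Placeholder bounds, not the real constants.
const (
	minDelayS = 60
	maxDelayS = 3600
)

// adjust applies the same rule as retryAfterTracker: back off toward the
// minimum when traffic is well below the desired rate, stretch toward the
// maximum when it is well above.
func adjust(delayS int, lastRate, desiredRate float64) int {
	switch {
	case delayS > minDelayS && lastRate < 0.75*desiredRate:
		return max(8*delayS/10, minDelayS)
	case delayS < maxDelayS && lastRate > 1.25*desiredRate:
		return min(3*delayS/2, maxDelayS)
	}
	return delayS
}

func main() {
	delay := minDelayS
	for _, rate := range []float64{400, 400, 400, 100, 100} { // requests/s per bucket
		delay = adjust(delay, rate, 250)
		fmt.Println("delay now", delay, "s")
	}
}
```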
|
||||
|
||||
@@ -7,9 +7,20 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
)
|
||||
|
||||
func TestFixupAddresses(t *testing.T) {
|
||||
@@ -94,3 +105,79 @@ func addr(host string, port int) *net.TCPAddr {
|
||||
Port: port,
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAPIRequests(b *testing.B) {
|
||||
db := newInMemoryStore(b.TempDir(), 0, nil)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
go db.Serve(ctx)
|
||||
api := newAPISrv("127.0.0.1:0", tls.Certificate{}, db, nil, true, true)
|
||||
srv := httptest.NewServer(http.HandlerFunc(api.handler))
|
||||
|
||||
kf := b.TempDir() + "/cert"
|
||||
crt, err := tlsutil.NewCertificate(kf+".crt", kf+".key", "localhost", 7)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
certBs, err := os.ReadFile(kf + ".crt")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
certBs = regexp.MustCompile(`---[^\n]+---\n`).ReplaceAll(certBs, nil)
|
||||
certString := string(strings.ReplaceAll(string(certBs), "\n", " "))
|
||||
|
||||
devID := protocol.NewDeviceID(crt.Certificate[0])
|
||||
devIDString := devID.String()
|
||||
|
||||
b.Run("Announce", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(`{"addresses":["tcp://10.10.10.10:42000"]}`))
|
||||
req.Header.Set("X-Forwarded-Tls-Client-Cert", certString)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("Lookup", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("LookupNoCompression", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
req.Header.Set("Accept-Encoding", "identity") // disable compression
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,23 +4,31 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../proto/scripts/protofmt.go database.proto
|
||||
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"cmp"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"sort"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sliceutil"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/internal/protoutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/s3"
|
||||
)
|
||||
|
||||
type clock interface {
|
||||
@@ -34,380 +42,401 @@ func (defaultClock) Now() time.Time {
|
||||
}
|
||||
|
||||
type database interface {
|
||||
put(key string, rec DatabaseRecord) error
|
||||
merge(key string, addrs []DatabaseAddress, seen int64) error
|
||||
get(key string) (DatabaseRecord, error)
|
||||
put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error
|
||||
merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error
|
||||
get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error)
|
||||
}
|
||||
|
||||
type levelDBStore struct {
|
||||
db *leveldb.DB
|
||||
inbox chan func()
|
||||
clock clock
|
||||
marshalBuf []byte
|
||||
type inMemoryStore struct {
|
||||
m *xsync.MapOf[protocol.DeviceID, *discosrv.DatabaseRecord]
|
||||
dir string
|
||||
flushInterval time.Duration
|
||||
s3 *s3.Session
|
||||
objKey string
|
||||
clock clock
|
||||
}
|
||||
|
||||
func newLevelDBStore(dir string) (*levelDBStore, error) {
|
||||
db, err := leveldb.OpenFile(dir, levelDBOptions)
|
||||
func newInMemoryStore(dir string, flushInterval time.Duration, s3sess *s3.Session) *inMemoryStore {
|
||||
hn, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
hn = rand.String(8)
|
||||
}
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newMemoryLevelDBStore() (*levelDBStore, error) {
|
||||
db, err := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
s := &inMemoryStore{
|
||||
m: xsync.NewMapOf[protocol.DeviceID, *discosrv.DatabaseRecord](),
|
||||
dir: dir,
|
||||
flushInterval: flushInterval,
|
||||
s3: s3sess,
|
||||
objKey: hn + ".db",
|
||||
clock: defaultClock{},
|
||||
}
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
rc := make(chan error)
|
||||
|
||||
s.inbox <- func() {
|
||||
size := rec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
nr, err := s.read()
|
||||
if os.IsNotExist(err) && s3sess != nil {
|
||||
// Try to read from AWS
|
||||
latestKey, cerr := s3sess.LatestKey()
|
||||
if cerr != nil {
|
||||
log.Println("Error reading database from S3:", err)
|
||||
return s
|
||||
}
|
||||
n, _ := rec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
fd, cerr := os.Create(path.Join(s.dir, "records.db"))
|
||||
if cerr != nil {
|
||||
log.Println("Error creating database file:", err)
|
||||
return s
|
||||
}
|
||||
if cerr := s3sess.Download(fd, latestKey); cerr != nil {
|
||||
log.Printf("Error reading database from S3: %v", err)
|
||||
}
|
||||
_ = fd.Close()
|
||||
nr, err = s.read()
|
||||
}
|
||||
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
|
||||
log.Println("Error reading database:", err)
|
||||
}
|
||||
|
||||
return err
|
||||
log.Printf("Read %d records from database", nr)
|
||||
s.expireAndCalculateStatistics()
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *levelDBStore) merge(key string, addrs []DatabaseAddress, seen int64) error {
|
||||
func (s *inMemoryStore) put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
s.m.Store(*key, rec)
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
|
||||
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
|
||||
return nil
|
||||
}
|
||||
|
||||
rc := make(chan error)
|
||||
newRec := DatabaseRecord{
|
||||
func (s *inMemoryStore) merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error {
|
||||
t0 := time.Now()
|
||||
|
||||
newRec := &discosrv.DatabaseRecord{
|
||||
Addresses: addrs,
|
||||
Seen: seen,
|
||||
}
|
||||
|
||||
s.inbox <- func() {
|
||||
// grab the existing record
|
||||
oldRec, err := s.get(key)
|
||||
if err != nil {
|
||||
// "not found" is not an error from get, so this is serious
|
||||
// stuff only
|
||||
rc <- err
|
||||
return
|
||||
}
|
||||
newRec = merge(newRec, oldRec)
|
||||
|
||||
// We replicate s.put() functionality here ourselves instead of
|
||||
// calling it because we want to serialize our get above together
|
||||
// with the put in the same function.
|
||||
size := newRec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
}
|
||||
n, _ := newRec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
if oldRec, ok := s.m.Load(*key); ok {
|
||||
newRec = merge(oldRec, newRec)
|
||||
}
|
||||
s.m.Store(*key, newRec)
|
||||
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
|
||||
}
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
|
||||
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
|
||||
func (s *inMemoryStore) get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
keyBs := []byte(key)
|
||||
val, err := s.db.Get(keyBs, nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
rec, ok := s.m.Load(*key)
|
||||
if !ok {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
|
||||
return DatabaseRecord{}, err
|
||||
return &discosrv.DatabaseRecord{}, nil
|
||||
}
|
||||
|
||||
var rec DatabaseRecord
|
||||
|
||||
if err := rec.Unmarshal(val); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResUnmarshalError).Inc()
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
|
||||
rec.Addresses = expire(rec.Addresses, s.clock.Now().UnixNano())
|
||||
rec.Addresses = expire(rec.Addresses, s.clock.Now())
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) Serve(ctx context.Context) error {
|
||||
t := time.NewTimer(0)
|
||||
defer t.Stop()
|
||||
defer s.db.Close()
|
||||
func (s *inMemoryStore) Serve(ctx context.Context) error {
|
||||
if s.flushInterval <= 0 {
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start the statistics serve routine. It will exit with us when
|
||||
// statisticsTrigger is closed.
|
||||
statisticsTrigger := make(chan struct{})
|
||||
statisticsDone := make(chan struct{})
|
||||
go s.statisticsServe(statisticsTrigger, statisticsDone)
|
||||
t := time.NewTimer(s.flushInterval)
|
||||
defer t.Stop()
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case fn := <-s.inbox:
|
||||
// Run function in serialized order.
|
||||
fn()
|
||||
|
||||
case <-t.C:
|
||||
// Trigger the statistics routine to do its thing in the
|
||||
// background.
|
||||
statisticsTrigger <- struct{}{}
|
||||
|
||||
case <-statisticsDone:
|
||||
// The statistics routine is done with one iteration, schedule
|
||||
// the next.
|
||||
t.Reset(databaseStatisticsInterval)
|
||||
log.Println("Calculating statistics")
|
||||
s.expireAndCalculateStatistics()
|
||||
log.Println("Flushing database")
|
||||
if err := s.write(); err != nil {
|
||||
log.Println("Error writing database:", err)
|
||||
}
|
||||
log.Println("Finished flushing database")
|
||||
t.Reset(s.flushInterval)
|
||||
|
||||
case <-ctx.Done():
|
||||
// We're done.
|
||||
close(statisticsTrigger)
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
// Also wait for statisticsServe to return
|
||||
<-statisticsDone
|
||||
return s.write()
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) expireAndCalculateStatistics() {
|
||||
now := s.clock.Now()
|
||||
cutoff24h := now.Add(-24 * time.Hour).UnixNano()
|
||||
cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
current, currentIPv4, currentIPv6, currentIPv6GUA, last24h, last1w := 0, 0, 0, 0, 0, 0
|
||||
|
||||
n := 0
|
||||
s.m.Range(func(key protocol.DeviceID, rec *discosrv.DatabaseRecord) bool {
|
||||
if n%1000 == 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
n++
|
||||
|
||||
addresses := expire(rec.Addresses, now)
|
||||
if len(addresses) == 0 {
|
||||
rec.Addresses = nil
|
||||
s.m.Store(key, rec)
|
||||
} else if len(addresses) != len(rec.Addresses) {
|
||||
rec.Addresses = addresses
|
||||
s.m.Store(key, rec)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(rec.Addresses) > 0:
|
||||
current++
|
||||
seenIPv4, seenIPv6, seenIPv6GUA := false, false, false
|
||||
for _, addr := range rec.Addresses {
|
||||
// We do fast and loose matching on strings here instead of
|
||||
// parsing the address and the IP and doing "proper" checks,
|
||||
// to keep things fast and generate less garbage.
|
||||
if strings.Contains(addr.Address, "[") {
|
||||
seenIPv6 = true
|
||||
if strings.Contains(addr.Address, "[2") {
|
||||
seenIPv6GUA = true
|
||||
}
|
||||
} else {
|
||||
seenIPv4 = true
|
||||
}
|
||||
if seenIPv4 && seenIPv6 && seenIPv6GUA {
|
||||
break
|
||||
}
|
||||
}
|
||||
if seenIPv4 {
|
||||
currentIPv4++
|
||||
}
|
||||
if seenIPv6 {
|
||||
currentIPv6++
|
||||
}
|
||||
if seenIPv6GUA {
|
||||
currentIPv6GUA++
|
||||
}
|
||||
case rec.Seen > cutoff24h:
|
||||
last24h++
|
||||
case rec.Seen > cutoff1w:
|
||||
last1w++
|
||||
default:
|
||||
// drop the record if it's older than a week
|
||||
s.m.Delete(key)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
databaseKeys.WithLabelValues("current").Set(float64(current))
|
||||
databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
|
||||
databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
|
||||
databaseKeys.WithLabelValues("currentIPv6GUA").Set(float64(currentIPv6GUA))
|
||||
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
|
||||
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
|
||||
databaseStatisticsSeconds.Set(time.Since(now).Seconds())
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) write() (err error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
if err == nil {
|
||||
databaseWriteSeconds.Set(time.Since(t0).Seconds())
|
||||
databaseLastWritten.Set(float64(t0.Unix()))
|
||||
}
|
||||
}()
|
||||
|
||||
dbf := path.Join(s.dir, "records.db")
|
||||
fd, err := os.Create(dbf + ".tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bw := bufio.NewWriter(fd)
|
||||
|
||||
var buf []byte
|
||||
var rangeErr error
|
||||
now := s.clock.Now()
|
||||
cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
n := 0
|
||||
s.m.Range(func(key protocol.DeviceID, value *discosrv.DatabaseRecord) bool {
|
||||
if n%1000 == 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
n++
|
||||
|
||||
if value.Seen < cutoff1w {
|
||||
// drop the record if it's older than a week
|
||||
return true
|
||||
}
|
||||
rec := &discosrv.ReplicationRecord{
|
||||
Key: key[:],
|
||||
Addresses: value.Addresses,
|
||||
Seen: value.Seen,
|
||||
}
|
||||
s := proto.Size(rec)
|
||||
if s+4 > len(buf) {
|
||||
buf = make([]byte, s+4)
|
||||
}
|
||||
n, err := protoutil.MarshalTo(buf[4:], rec)
|
||||
if err != nil {
|
||||
rangeErr = err
|
||||
return false
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
if _, err := bw.Write(buf[:n+4]); err != nil {
|
||||
rangeErr = err
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if rangeErr != nil {
|
||||
_ = fd.Close()
|
||||
return rangeErr
|
||||
}
|
||||
|
||||
if err := bw.Flush(); err != nil {
|
||||
_ = fd.Close()
|
||||
return err
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Rename(dbf+".tmp", dbf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Upload to S3
|
||||
if s.s3 != nil {
|
||||
fd, err = os.Open(dbf)
|
||||
if err != nil {
|
||||
log.Printf("Error uploading database to S3: %v", err)
|
||||
return nil
|
||||
}
|
||||
defer fd.Close()
|
||||
if err := s.s3.Upload(fd, s.objKey); err != nil {
|
||||
log.Printf("Error uploading database to S3: %v", err)
|
||||
}
|
||||
log.Println("Finished uploading database")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
|
||||
defer close(done)
|
||||
func (s *inMemoryStore) read() (int, error) {
|
||||
fd, err := os.Open(path.Join(s.dir, "records.db"))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
for range trigger {
|
||||
t0 := time.Now()
|
||||
nowNanos := t0.UnixNano()
|
||||
cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
|
||||
cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
|
||||
current, currentIPv4, currentIPv6, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0, 0, 0
|
||||
|
||||
iter := s.db.NewIterator(&util.Range{}, nil)
|
||||
for iter.Next() {
|
||||
// Attempt to unmarshal the record and count the
|
||||
// failure if there's something wrong with it.
|
||||
var rec DatabaseRecord
|
||||
if err := rec.Unmarshal(iter.Value()); err != nil {
|
||||
errors++
|
||||
continue
|
||||
}
|
||||
|
||||
// If there are addresses that have not expired it's a current
|
||||
// record, otherwise account it based on when it was last seen
|
||||
// (last 24 hours or last week) or finally as inactive.
|
||||
addrs := expire(rec.Addresses, nowNanos)
|
||||
switch {
|
||||
case len(addrs) > 0:
|
||||
current++
|
||||
seenIPv4, seenIPv6 := false, false
|
||||
for _, addr := range addrs {
|
||||
uri, err := url.Parse(addr.Address)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
host, _, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil && ip.To4() != nil {
|
||||
seenIPv4 = true
|
||||
} else if ip != nil {
|
||||
seenIPv6 = true
|
||||
}
|
||||
if seenIPv4 && seenIPv6 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if seenIPv4 {
|
||||
currentIPv4++
|
||||
}
|
||||
if seenIPv6 {
|
||||
currentIPv6++
|
||||
}
|
||||
case rec.Seen > cutoff24h:
|
||||
last24h++
|
||||
case rec.Seen > cutoff1w:
|
||||
last1w++
|
||||
case rec.Seen > cutoff2Mon:
|
||||
inactive++
|
||||
case rec.Missed < cutoff2Mon:
|
||||
// It hasn't been seen lately and we haven't recorded
|
||||
// someone asking for this device in a long time either;
|
||||
// delete the record.
|
||||
if err := s.db.Delete(iter.Key(), nil); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResSuccess).Inc()
|
||||
}
|
||||
default:
|
||||
inactive++
|
||||
br := bufio.NewReader(fd)
|
||||
var buf []byte
|
||||
nr := 0
|
||||
for {
|
||||
var n uint32
|
||||
if err := binary.Read(br, binary.BigEndian, &n); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
return nr, err
|
||||
}
|
||||
if int(n) > len(buf) {
|
||||
buf = make([]byte, n)
|
||||
}
|
||||
if _, err := io.ReadFull(br, buf[:n]); err != nil {
|
||||
return nr, err
|
||||
}
|
||||
rec := &discosrv.ReplicationRecord{}
|
||||
if err := proto.Unmarshal(buf[:n], rec); err != nil {
|
||||
return nr, err
|
||||
}
|
||||
key, err := protocol.DeviceIDFromBytes(rec.Key)
|
||||
if err != nil {
|
||||
key, err = protocol.DeviceIDFromString(string(rec.Key))
|
||||
}
|
||||
if err != nil {
|
||||
log.Println("Bad device ID:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
iter.Release()
|
||||
|
||||
databaseKeys.WithLabelValues("current").Set(float64(current))
|
||||
databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
|
||||
databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
|
||||
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
|
||||
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
|
||||
databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
|
||||
databaseKeys.WithLabelValues("error").Set(float64(errors))
|
||||
databaseStatisticsSeconds.Set(time.Since(t0).Seconds())
|
||||
|
||||
// Signal that we are done and can be scheduled again.
|
||||
done <- struct{}{}
|
||||
slices.SortFunc(rec.Addresses, Cmp)
|
||||
rec.Addresses = slices.CompactFunc(rec.Addresses, Equal)
|
||||
s.m.Store(key, &discosrv.DatabaseRecord{
|
||||
Addresses: expire(rec.Addresses, s.clock.Now()),
|
||||
Seen: rec.Seen,
|
||||
})
|
||||
nr++
|
||||
}
|
||||
return nr, nil
|
||||
}
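
write and read above persist the map as a flat stream of length-prefixed protobuf records: a big-endian uint32 length followed by the marshalled record, repeated until EOF. A minimal round-trip sketch of that framing with plain byte payloads instead of protobuf messages (the original writes the prefix with binary.BigEndian.PutUint32 into a reused buffer; this sketch uses binary.Write for brevity):

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// writeFrame writes one length-prefixed record: 4 bytes of big-endian
// length, then the payload.
func writeFrame(w *bufio.Writer, payload []byte) error {
	if err := binary.Write(w, binary.BigEndian, uint32(len(payload))); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrames reads records back until EOF, as inMemoryStore.read does.
func readFrames(r *bufio.Reader) ([][]byte, error) {
	var out [][]byte
	for {
		var n uint32
		if err := binary.Read(r, binary.BigEndian, &n); err != nil {
			if errors.Is(err, io.EOF) {
				return out, nil
			}
			return out, err
		}
		buf := make([]byte, n)
		if _, err := io.ReadFull(r, buf); err != nil {
			return out, err
		}
		out = append(out, buf)
	}
}

func main() {
	var db bytes.Buffer
	bw := bufio.NewWriter(&db)
	for _, rec := range []string{"record one", "record two"} {
		if err := writeFrame(bw, []byte(rec)); err != nil {
			panic(err)
		}
	}
	bw.Flush()

	recs, err := readFrames(bufio.NewReader(&db))
	fmt.Println(len(recs), err) // 2 <nil>
}
```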

// merge returns the merged result of the two database records a and b. The
// result is the union of the two address sets, with the newer expiry time
// chosen for any duplicates.
func merge(a, b DatabaseRecord) DatabaseRecord {
// chosen for any duplicates. The address list in a is overwritten and
// reused for the result.
func merge(a, b *discosrv.DatabaseRecord) *discosrv.DatabaseRecord {
// Both lists must be sorted for this to work.
if !sort.IsSorted(databaseAddressOrder(a.Addresses)) {
log.Println("Warning: bug: addresses not correctly sorted in merge")
a.Addresses = sortedAddressCopy(a.Addresses)
}
if !sort.IsSorted(databaseAddressOrder(b.Addresses)) {
// no warning because this is the side we read from disk and it may
// legitimately predate correct sorting.
b.Addresses = sortedAddressCopy(b.Addresses)
}

res := DatabaseRecord{
Addresses: make([]DatabaseAddress, 0, len(a.Addresses)+len(b.Addresses)),
Seen: a.Seen,
}
if b.Seen > a.Seen {
res.Seen = b.Seen
}
a.Seen = max(a.Seen, b.Seen)

aIdx := 0
bIdx := 0
aAddrs := a.Addresses
bAddrs := b.Addresses
loop:
for {
switch {
case aIdx == len(aAddrs) && bIdx == len(bAddrs):
// both lists are exhausted, we are done
break loop

case aIdx == len(aAddrs):
// a is exhausted, pick from b and continue
res.Addresses = append(res.Addresses, bAddrs[bIdx])
bIdx++
continue

case bIdx == len(bAddrs):
// b is exhausted, pick from a and continue
res.Addresses = append(res.Addresses, aAddrs[aIdx])
aIdx++
continue
}

// We have values left on both sides.
aVal := aAddrs[aIdx]
bVal := bAddrs[bIdx]

switch {
case aVal.Address == bVal.Address:
// update for same address, pick newer
if aVal.Expires > bVal.Expires {
res.Addresses = append(res.Addresses, aVal)
} else {
res.Addresses = append(res.Addresses, bVal)
}
for aIdx < len(a.Addresses) && bIdx < len(b.Addresses) {
switch cmp.Compare(a.Addresses[aIdx].Address, b.Addresses[bIdx].Address) {
case 0:
// a == b, choose the newer expiry time
a.Addresses[aIdx].Expires = max(a.Addresses[aIdx].Expires, b.Addresses[bIdx].Expires)
aIdx++
bIdx++

case aVal.Address < bVal.Address:
// a is smallest, pick it and continue
res.Addresses = append(res.Addresses, aVal)
case -1:
// a < b, keep a and move on
aIdx++

default:
// b is smallest, pick it and continue
res.Addresses = append(res.Addresses, bVal)
case 1:
// a > b, insert b before a
a.Addresses = append(a.Addresses[:aIdx], append([]*discosrv.DatabaseAddress{b.Addresses[bIdx]}, a.Addresses[aIdx:]...)...)
bIdx++
}
}
return res
if bIdx < len(b.Addresses) {
a.Addresses = append(a.Addresses, b.Addresses[bIdx:]...)
}

return a
}
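A short usage sketch of the merge contract described in the doc comment above (union of the address sets, newer expiry for duplicates, a reused for the result). The concrete addresses are invented for illustration and mirror the TestMerge cases further down.

a := &discosrv.DatabaseRecord{Addresses: []*discosrv.DatabaseAddress{
	{Address: "tcp://1.2.3.4:5", Expires: 10},
}}
b := &discosrv.DatabaseRecord{Addresses: []*discosrv.DatabaseAddress{
	{Address: "tcp://1.2.3.4:5", Expires: 15}, // same address, newer expiry
	{Address: "tcp://6.7.8.9:0", Expires: 12}, // only present in b
}}
res := merge(a, b)
// res.Addresses == [{tcp://1.2.3.4:5 15} {tcp://6.7.8.9:0 12}]: the union,
// with the later expiry kept; res aliases a, whose address slice is reused.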

// expire returns the list of addresses after removing expired entries.
// Expiration happens in place, so the slice given as the parameter is
// destroyed. Internal order is not preserved.
func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
i := 0
for i < len(addrs) {
if addrs[i].Expires < now {
addrs = sliceutil.RemoveAndZero(addrs, i)
// destroyed. Internal order is preserved.
func expire(addrs []*discosrv.DatabaseAddress, now time.Time) []*discosrv.DatabaseAddress {
cutoff := now.UnixNano()
naddrs := addrs[:0]
for i := range addrs {
if i > 0 && addrs[i].Address == addrs[i-1].Address {
// Skip duplicates
continue
}
i++
if addrs[i].Expires >= cutoff {
naddrs = append(naddrs, addrs[i])
}
}
return addrs
if len(naddrs) == 0 {
return nil
}
return naddrs
}
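Both variants of expire shown above filter on the Expires timestamp; the newer one also drops adjacent duplicate addresses and reuses the input's backing array. A small sketch of the newer signature, with invented values consistent with the TestFilter cases below:

addrs := []*discosrv.DatabaseAddress{
	{Address: "a", Expires: 5},  // already expired at cutoff 10
	{Address: "b", Expires: 15}, // still valid
	{Address: "b", Expires: 15}, // adjacent duplicate, skipped
}
kept := expire(addrs, time.Unix(0, 10)) // cutoff is now.UnixNano() == 10
// kept == [{b 15}]; filtering is done in place, so addrs must not be
// reused after the call.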

func sortedAddressCopy(addrs []DatabaseAddress) []DatabaseAddress {
sorted := make([]DatabaseAddress, len(addrs))
copy(sorted, addrs)
sort.Sort(databaseAddressOrder(sorted))
return sorted
func Cmp(d, other *discosrv.DatabaseAddress) (n int) {
if c := cmp.Compare(d.Address, other.Address); c != 0 {
return c
}
return cmp.Compare(d.Expires, other.Expires)
}

type databaseAddressOrder []DatabaseAddress

func (s databaseAddressOrder) Less(a, b int) bool {
return s[a].Address < s[b].Address
}

func (s databaseAddressOrder) Swap(a, b int) {
s[a], s[b] = s[b], s[a]
}

func (s databaseAddressOrder) Len() int {
return len(s)
func Equal(d, other *discosrv.DatabaseAddress) bool {
return d.Address == other.Address
}
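Cmp and Equal are the comparison helpers that the read path above passes to slices.SortFunc and slices.CompactFunc to normalize an address list before storing it. A minimal sketch of that step, with invented values:

addrs := []*discosrv.DatabaseAddress{
	{Address: "b", Expires: 10},
	{Address: "a", Expires: 15},
	{Address: "a", Expires: 5},
}
slices.SortFunc(addrs, Cmp) // now [{a 5} {a 15} {b 10}]: by address, then expiry
addrs = slices.CompactFunc(addrs, Equal)
// CompactFunc keeps the first element of each run with equal addresses,
// so addrs is now [{a 5} {b 10}].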
@@ -1,847 +0,0 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type DatabaseRecord struct {
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"`
|
||||
Misses int32 `protobuf:"varint,2,opt,name=misses,proto3" json:"misses,omitempty"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
Missed int64 `protobuf:"varint,4,opt,name=missed,proto3" json:"missed,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Reset() { *m = DatabaseRecord{} }
|
||||
func (m *DatabaseRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseRecord) ProtoMessage() {}
|
||||
func (*DatabaseRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{0}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseRecord.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseRecord proto.InternalMessageInfo
|
||||
|
||||
type ReplicationRecord struct {
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Reset() { *m = ReplicationRecord{} }
|
||||
func (m *ReplicationRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReplicationRecord) ProtoMessage() {}
|
||||
func (*ReplicationRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{1}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ReplicationRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReplicationRecord.Merge(m, src)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReplicationRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReplicationRecord proto.InternalMessageInfo
|
||||
|
||||
type DatabaseAddress struct {
|
||||
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
|
||||
Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Reset() { *m = DatabaseAddress{} }
|
||||
func (m *DatabaseAddress) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseAddress) ProtoMessage() {}
|
||||
func (*DatabaseAddress) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{2}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseAddress.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseAddress.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseAddress.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseAddress proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*DatabaseRecord)(nil), "main.DatabaseRecord")
|
||||
proto.RegisterType((*ReplicationRecord)(nil), "main.ReplicationRecord")
|
||||
proto.RegisterType((*DatabaseAddress)(nil), "main.DatabaseAddress")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("database.proto", fileDescriptor_b90fe3356ea5df07) }
|
||||
|
||||
var fileDescriptor_b90fe3356ea5df07 = []byte{
|
||||
// 270 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4a, 0xc4, 0x30,
|
||||
0x18, 0x85, 0x9b, 0x49, 0x1d, 0x99, 0x08, 0xa3, 0x06, 0x94, 0x20, 0x12, 0x4b, 0xdd, 0x74, 0xd5,
|
||||
0x01, 0x5d, 0xb9, 0x74, 0xd0, 0x0b, 0xe4, 0x06, 0xe9, 0xe4, 0x77, 0x08, 0x3a, 0x4d, 0x49, 0x2a,
|
||||
0xe8, 0x29, 0xf4, 0x58, 0x5d, 0xce, 0xd2, 0x95, 0x68, 0x7b, 0x11, 0x69, 0x26, 0x55, 0x14, 0x37,
|
||||
0xb3, 0x7b, 0xdf, 0xff, 0xbf, 0x97, 0xbc, 0x84, 0x4c, 0x95, 0xac, 0x65, 0x21, 0x1d, 0xe4, 0x95,
|
||||
0x35, 0xb5, 0xa1, 0xf1, 0x4a, 0xea, 0xf2, 0xe4, 0xdc, 0x42, 0x65, 0xdc, 0xcc, 0x8f, 0x8a, 0xc7,
|
||||
0xbb, 0xd9, 0xd2, 0x2c, 0x8d, 0x07, 0xaf, 0x36, 0xd6, 0xf4, 0x05, 0x91, 0xe9, 0x4d, 0x48, 0x0b,
|
||||
0x58, 0x18, 0xab, 0xe8, 0x15, 0x99, 0x48, 0xa5, 0x2c, 0x38, 0x07, 0x8e, 0xa1, 0x04, 0x67, 0x7b,
|
||||
0x17, 0x47, 0x79, 0x7f, 0x62, 0x3e, 0x18, 0xaf, 0x37, 0xeb, 0x79, 0xdc, 0xbc, 0x9f, 0x45, 0xe2,
|
||||
0xc7, 0x4d, 0x8f, 0xc9, 0x78, 0xa5, 0x7d, 0x6e, 0x94, 0xa0, 0x6c, 0x47, 0x04, 0xa2, 0x94, 0xc4,
|
||||
0x0e, 0xa0, 0x64, 0x38, 0x41, 0x19, 0x16, 0x5e, 0x7f, 0x7b, 0x15, 0x8b, 0xfd, 0x34, 0x50, 0x5a,
|
||||
0x93, 0x43, 0x01, 0xd5, 0x83, 0x5e, 0xc8, 0x5a, 0x9b, 0x32, 0x74, 0x3a, 0x20, 0xf8, 0x1e, 0x9e,
|
||||
0x19, 0x4a, 0x50, 0x36, 0x11, 0xbd, 0xfc, 0xdd, 0x72, 0xb4, 0x55, 0xcb, 0x7f, 0xda, 0xa4, 0xb7,
|
||||
0x64, 0xff, 0x4f, 0x8e, 0x32, 0xb2, 0x1b, 0x32, 0xe1, 0xde, 0x01, 0xfb, 0x0d, 0x3c, 0x55, 0xda,
|
||||
0x86, 0x77, 0x62, 0x31, 0xe0, 0xfc, 0xb4, 0xf9, 0xe4, 0x51, 0xd3, 0x72, 0xb4, 0x6e, 0x39, 0xfa,
|
||||
0x68, 0x39, 0x7a, 0xed, 0x78, 0xb4, 0xee, 0x78, 0xf4, 0xd6, 0xf1, 0xa8, 0x18, 0xfb, 0x3f, 0xbf,
|
||||
0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xa2, 0xf6, 0x1e, 0xb0, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Missed != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Missed))
|
||||
i--
|
||||
dAtA[i] = 0x20
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Misses))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
i -= len(m.Key)
|
||||
copy(dAtA[i:], m.Key)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Key)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Expires != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Expires))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Address) > 0 {
|
||||
i -= len(m.Address)
|
||||
copy(dAtA[i:], m.Address)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Address)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintDatabase(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovDatabase(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *DatabaseRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Misses))
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
if m.Missed != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Missed))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Address)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if m.Expires != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Expires))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDatabase(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozDatabase(x uint64) (n int) {
|
||||
return sovDatabase(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Misses", wireType)
|
||||
}
|
||||
m.Misses = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Misses |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Missed", wireType)
|
||||
}
|
||||
m.Missed = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Missed |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Address = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType)
|
||||
}
|
||||
m.Expires = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Expires |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDatabase(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupDatabase
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDatabase = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDatabase = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupDatabase = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
||||
@@ -1,36 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package main;
|
||||
|
||||
import "repos/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
option (gogoproto.goproto_getters_all) = false;
|
||||
option (gogoproto.goproto_unkeyed_all) = false;
|
||||
option (gogoproto.goproto_unrecognized_all) = false;
|
||||
option (gogoproto.goproto_sizecache_all) = false;
|
||||
|
||||
message DatabaseRecord {
|
||||
repeated DatabaseAddress addresses = 1 [(gogoproto.nullable) = false];
|
||||
int32 misses = 2; // Number of lookups* without hits
|
||||
int64 seen = 3; // Unix nanos, last device announce
|
||||
int64 missed = 4; // Unix nanos, last* failed lookup
|
||||
}
|
||||
|
||||
// *) Not every lookup results in a write, so may not be completely accurate
|
||||
|
||||
message ReplicationRecord {
|
||||
string key = 1;
|
||||
repeated DatabaseAddress addresses = 2 [(gogoproto.nullable) = false];
|
||||
int64 seen = 3; // Unix nanos, last device announce
|
||||
}
|
||||
|
||||
message DatabaseAddress {
|
||||
string address = 1;
|
||||
int64 expires = 2; // Unix nanos
|
||||
}
|
||||
@@ -11,29 +11,26 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func TestDatabaseGetSet(t *testing.T) {
|
||||
db, err := newMemoryLevelDBStore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db := newInMemoryStore(t.TempDir(), 0, nil)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go db.Serve(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Check missing record
|
||||
|
||||
rec, err := db.get("abcd")
|
||||
rec, err := db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Error("not found should not be an error")
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Error("addresses should be empty")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Error("missing should be zero")
|
||||
}
|
||||
|
||||
// Set up a clock
|
||||
|
||||
@@ -43,16 +40,16 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
// Put a record
|
||||
|
||||
rec.Addresses = []DatabaseAddress{
|
||||
rec.Addresses = []*discosrv.DatabaseAddress{
|
||||
{Address: "tcp://1.2.3.4:5", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.put("abcd", rec); err != nil {
|
||||
if err := db.put(&protocol.EmptyDeviceID, rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -69,16 +66,16 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
tc.wind(30 * time.Second)
|
||||
|
||||
addrs := []DatabaseAddress{
|
||||
addrs := []*discosrv.DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge("abcd", addrs, tc.Now().UnixNano()); err != nil {
|
||||
if err := db.merge(&protocol.EmptyDeviceID, addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -101,7 +98,7 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -114,40 +111,18 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
t.Error("incorrect address")
|
||||
}
|
||||
|
||||
// Put a record with misses
|
||||
|
||||
rec = DatabaseRecord{Misses: 42, Missed: tc.Now().UnixNano()}
|
||||
if err := db.put("efgh", rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("efgh")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have no addresses")
|
||||
}
|
||||
if rec.Misses != 42 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("incorrect misses")
|
||||
}
|
||||
|
||||
// Set an address
|
||||
|
||||
addrs = []DatabaseAddress{
|
||||
addrs = []*discosrv.DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge("efgh", addrs, tc.Now().UnixNano()); err != nil {
|
||||
if err := db.merge(&protocol.GlobalDeviceID, addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("efgh")
|
||||
rec, err = db.get(&protocol.GlobalDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -155,48 +130,126 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have one address")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("should have no misses")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter(t *testing.T) {
|
||||
// all cases are expired with t=10
|
||||
cases := []struct {
|
||||
a []DatabaseAddress
|
||||
b []DatabaseAddress
|
||||
a []*discosrv.DatabaseAddress
|
||||
b []*discosrv.DatabaseAddress
|
||||
}{
|
||||
{
|
||||
a: nil,
|
||||
b: nil,
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
|
||||
b: []DatabaseAddress{},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
|
||||
b: []*discosrv.DatabaseAddress{},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
|
||||
b: []DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
res := expire(tc.a, 10)
|
||||
res := expire(tc.a, time.Unix(0, 10))
|
||||
if fmt.Sprint(res) != fmt.Sprint(tc.b) {
|
||||
t.Errorf("Incorrect result %v, expected %v", res, tc.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
cases := []struct {
|
||||
a, b, res []*discosrv.DatabaseAddress
|
||||
}{
|
||||
{nil, nil, nil},
|
||||
{
|
||||
nil,
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
},
|
||||
{
|
||||
nil,
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "d", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}, {Address: "d", Expires: 10}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
rec := merge(&discosrv.DatabaseRecord{Addresses: tc.a}, &discosrv.DatabaseRecord{Addresses: tc.b})
|
||||
if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
|
||||
t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
|
||||
}
|
||||
rec = merge(&discosrv.DatabaseRecord{Addresses: tc.b}, &discosrv.DatabaseRecord{Addresses: tc.a})
|
||||
if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
|
||||
t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMergeEqual(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
ar := []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}}
|
||||
br := []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 10}}
|
||||
res := merge(&discosrv.DatabaseRecord{Addresses: ar}, &discosrv.DatabaseRecord{Addresses: br})
|
||||
if len(res.Addresses) != 2 {
|
||||
b.Fatal("wrong length")
|
||||
}
|
||||
if res.Addresses[0].Address != "a" || res.Addresses[1].Address != "b" {
|
||||
b.Fatal("wrong address")
|
||||
}
|
||||
if res.Addresses[0].Expires != 15 || res.Addresses[1].Expires != 15 {
|
||||
b.Fatal("wrong expiry")
|
||||
}
|
||||
}
|
||||
b.ReportAllocs() // should be zero per operation
|
||||
}
|
||||
|
||||
type testClock struct {
|
||||
now time.Time
|
||||
}
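The wind and Now methods of testClock are not part of this hunk; a minimal sketch of what the tests above assume (wind advances the fake time, Now returns it). The nanosecond bump in Now is an assumption, made only to keep successive timestamps distinct.

func (t *testClock) wind(d time.Duration) {
	t.now = t.now.Add(d)
}

func (t *testClock) Now() time.Time {
	t.now = t.now.Add(time.Nanosecond) // assumed: keep timestamps strictly increasing
	return t.now
}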
|
||||
|
||||
@@ -9,22 +9,24 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/thejerf/suture/v4"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/s3"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/thejerf/suture/v4"
|
||||
)
|
||||
|
||||
const (
@@ -38,17 +40,12 @@ const (
errorRetryAfterSeconds = 1500
errorRetryFuzzSeconds = 300

// Retry for not found is minSeconds + failures * incSeconds +
// random(fuzz), where failures is the number of consecutive lookups
// with no answer, up to maxSeconds. The fuzz is applied after capping
// to maxSeconds.
notFoundRetryMinSeconds = 60
notFoundRetryMaxSeconds = 3540
notFoundRetryIncSeconds = 10
notFoundRetryFuzzSeconds = 60

// How often (in requests) we serialize the missed counter to database.
notFoundMissesWriteInterval = 10
// Retry for not found is notFoundRetrySeenSeconds for records we have
// seen an announcement for (but it's not active right now) and
// notFoundRetryUnknownSeconds for records we have never seen (or not
// seen within the last week).
notFoundRetryUnknownMinSeconds = 60
notFoundRetryUnknownMaxSeconds = 3600

httpReadTimeout = 5 * time.Second
httpWriteTimeout = 5 * time.Second
@@ -58,164 +55,116 @@ const (
replicationOutboxSize = 10000
)
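The retry comment above translates into simple arithmetic: a base value plus a per-failure increment, capped, and then fuzzed. A hedged sketch using the constants from the older variant of this block; notFoundRetrySeconds is an invented helper name and the fuzz uses math/rand's Intn.

func notFoundRetrySeconds(failures int) int {
	secs := notFoundRetryMinSeconds + failures*notFoundRetryIncSeconds
	if secs > notFoundRetryMaxSeconds {
		secs = notFoundRetryMaxSeconds // cap before applying the fuzz
	}
	return secs + rand.Intn(notFoundRetryFuzzSeconds) // math/rand
}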
|
||||
|
||||
// These options make the database a little more optimized for writes, at
|
||||
// the expense of some memory usage and risk of losing writes in a (system)
|
||||
// crash.
|
||||
var levelDBOptions = &opt.Options{
|
||||
NoSync: true,
|
||||
WriteBuffer: 32 << 20, // default 4<<20
|
||||
}
|
||||
|
||||
var debug = false
|
||||
|
||||
type CLI struct {
|
||||
Cert string `group:"Listen" help:"Certificate file" default:"./cert.pem" env:"DISCOVERY_CERT_FILE"`
|
||||
Key string `group:"Listen" help:"Key file" default:"./key.pem" env:"DISCOVERY_KEY_FILE"`
|
||||
HTTP bool `group:"Listen" help:"Listen on HTTP (behind an HTTPS proxy)" env:"DISCOVERY_HTTP"`
|
||||
Compression bool `group:"Listen" help:"Enable GZIP compression of responses" env:"DISCOVERY_COMPRESSION"`
|
||||
Listen string `group:"Listen" help:"Listen address" default:":8443" env:"DISCOVERY_LISTEN"`
|
||||
MetricsListen string `group:"Listen" help:"Metrics listen address" env:"DISCOVERY_METRICS_LISTEN"`
|
||||
|
||||
DBDir string `group:"Database" help:"Database directory" default:"." env:"DISCOVERY_DB_DIR"`
|
||||
DBFlushInterval time.Duration `group:"Database" help:"Interval between database flushes" default:"5m" env:"DISCOVERY_DB_FLUSH_INTERVAL"`
|
||||
|
||||
DBS3Endpoint string `name:"db-s3-endpoint" group:"Database (S3 backup)" hidden:"true" help:"S3 endpoint for database" env:"DISCOVERY_DB_S3_ENDPOINT"`
|
||||
DBS3Region string `name:"db-s3-region" group:"Database (S3 backup)" hidden:"true" help:"S3 region for database" env:"DISCOVERY_DB_S3_REGION"`
|
||||
DBS3Bucket string `name:"db-s3-bucket" group:"Database (S3 backup)" hidden:"true" help:"S3 bucket for database" env:"DISCOVERY_DB_S3_BUCKET"`
|
||||
DBS3AccessKeyID string `name:"db-s3-access-key-id" group:"Database (S3 backup)" hidden:"true" help:"S3 access key ID for database" env:"DISCOVERY_DB_S3_ACCESS_KEY_ID"`
|
||||
DBS3SecretKey string `name:"db-s3-secret-key" group:"Database (S3 backup)" hidden:"true" help:"S3 secret key for database" env:"DISCOVERY_DB_S3_SECRET_KEY"`
|
||||
|
||||
AMQPAddress string `group:"AMQP replication" hidden:"true" help:"Address to AMQP broker" env:"DISCOVERY_AMQP_ADDRESS"`
|
||||
|
||||
Debug bool `short:"d" help:"Print debug output" env:"DISCOVERY_DEBUG"`
|
||||
Version bool `short:"v" help:"Print version and exit"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
var listen string
|
||||
var dir string
|
||||
var metricsListen string
|
||||
var replicationListen string
|
||||
var replicationPeers string
|
||||
var certFile string
|
||||
var keyFile string
|
||||
var replCertFile string
|
||||
var replKeyFile string
|
||||
var useHTTP bool
|
||||
var largeDB bool
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.StringVar(&certFile, "cert", "./cert.pem", "Certificate file")
|
||||
flag.StringVar(&keyFile, "key", "./key.pem", "Key file")
|
||||
flag.StringVar(&dir, "db-dir", "./discovery.db", "Database directory")
|
||||
flag.BoolVar(&debug, "debug", false, "Print debug output")
|
||||
flag.BoolVar(&useHTTP, "http", false, "Listen on HTTP (behind an HTTPS proxy)")
|
||||
flag.StringVar(&listen, "listen", ":8443", "Listen address")
|
||||
flag.StringVar(&metricsListen, "metrics-listen", "", "Metrics listen address")
|
||||
flag.StringVar(&replicationPeers, "replicate", "", "Replication peers, id@address, comma separated")
|
||||
flag.StringVar(&replicationListen, "replication-listen", ":19200", "Replication listen address")
|
||||
flag.StringVar(&replCertFile, "replication-cert", "", "Certificate file for replication")
|
||||
flag.StringVar(&replKeyFile, "replication-key", "", "Key file for replication")
|
||||
flag.BoolVar(&largeDB, "large-db", false, "Use larger database settings")
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
var cli CLI
|
||||
kong.Parse(&cli)
|
||||
debug = cli.Debug
|
||||
|
||||
log.Println(build.LongVersionFor("stdiscosrv"))
|
||||
if *showVersion {
|
||||
if cli.Version {
|
||||
return
|
||||
}
|
||||
|
||||
buildInfo.WithLabelValues(build.Version, runtime.Version(), build.User, build.Date.UTC().Format("2006-01-02T15:04:05Z")).Set(1)
|
||||
|
||||
if largeDB {
|
||||
levelDBOptions.BlockCacheCapacity = 64 << 20
|
||||
levelDBOptions.BlockSize = 64 << 10
|
||||
levelDBOptions.CompactionTableSize = 16 << 20
|
||||
levelDBOptions.CompactionTableSizeMultiplier = 2.0
|
||||
levelDBOptions.WriteBuffer = 64 << 20
|
||||
levelDBOptions.CompactionL0Trigger = 8
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if os.IsNotExist(err) {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Fatalln("Failed to load keypair:", err)
|
||||
}
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
|
||||
replCert := cert
|
||||
if replCertFile != "" && replKeyFile != "" {
|
||||
replCert, err = tls.LoadX509KeyPair(replCertFile, replKeyFile)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to load replication keypair:", err)
|
||||
}
|
||||
}
|
||||
replDevID := protocol.NewDeviceID(replCert.Certificate[0])
|
||||
log.Println("Replication device ID is", replDevID)
|
||||
|
||||
// Parse the replication specs, if any.
|
||||
var allowedReplicationPeers []protocol.DeviceID
|
||||
var replicationDestinations []string
|
||||
parts := strings.Split(replicationPeers, ",")
|
||||
for _, part := range parts {
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
fields := strings.Split(part, "@")
|
||||
switch len(fields) {
|
||||
case 2:
|
||||
// This is an id@address specification. Grab the address for the
|
||||
// destination list. Try to resolve it once to catch obvious
|
||||
// syntax errors here rather than having the sender service fail
|
||||
// repeatedly later.
|
||||
_, err := net.ResolveTCPAddr("tcp", fields[1])
|
||||
var cert tls.Certificate
|
||||
if !cli.HTTP {
|
||||
var err error
|
||||
cert, err = tls.LoadX509KeyPair(cli.Cert, cli.Key)
|
||||
if os.IsNotExist(err) {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(cli.Cert, cli.Key, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Resolving address:", err)
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
replicationDestinations = append(replicationDestinations, fields[1])
|
||||
fallthrough // N.B.
|
||||
|
||||
case 1:
|
||||
// The first part is always a device ID.
|
||||
id, err := protocol.DeviceIDFromString(fields[0])
|
||||
if err != nil {
|
||||
log.Fatalln("Parsing device ID:", err)
|
||||
}
|
||||
if id == protocol.EmptyDeviceID {
|
||||
log.Fatalf("Missing device ID for peer in %q", part)
|
||||
}
|
||||
allowedReplicationPeers = append(allowedReplicationPeers, id)
|
||||
|
||||
default:
|
||||
log.Fatalln("Unrecognized replication spec:", part)
|
||||
} else if err != nil {
|
||||
log.Fatalln("Failed to load keypair:", err)
|
||||
}
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
}
|
||||
|
||||
// Root of the service tree.
|
||||
main := suture.New("main", suture.Spec{
|
||||
PassThroughPanics: true,
|
||||
Timeout: 2 * time.Minute,
|
||||
})
|
||||
|
||||
// Start the database.
|
||||
db, err := newLevelDBStore(dir)
|
||||
if err != nil {
|
||||
log.Fatalln("Open database:", err)
|
||||
// If configured, use S3 for database backups.
|
||||
var s3c *s3.Session
|
||||
if cli.DBS3Endpoint != "" {
|
||||
var err error
|
||||
s3c, err = s3.NewSession(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create S3 session: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the database.
|
||||
db := newInMemoryStore(cli.DBDir, cli.DBFlushInterval, s3c)
|
||||
main.Add(db)
|
||||
|
||||
// Start any replication senders.
|
||||
var repl replicationMultiplexer
|
||||
for _, dst := range replicationDestinations {
|
||||
rs := newReplicationSender(dst, replCert, allowedReplicationPeers)
|
||||
main.Add(rs)
|
||||
repl = append(repl, rs)
|
||||
}
|
||||
|
||||
// If we have replication configured, start the replication listener.
|
||||
if len(allowedReplicationPeers) > 0 {
|
||||
rl := newReplicationListener(replicationListen, replCert, allowedReplicationPeers, db)
|
||||
main.Add(rl)
|
||||
// If we have an AMQP broker for replication, start that
|
||||
var repl replicator
|
||||
if cli.AMQPAddress != "" {
|
||||
clientID := rand.String(10)
|
||||
kr := newAMQPReplicator(cli.AMQPAddress, clientID, db)
|
||||
main.Add(kr)
|
||||
repl = kr
|
||||
}
|
||||
|
||||
// Start the main API server.
|
||||
qs := newAPISrv(listen, cert, db, repl, useHTTP)
|
||||
qs := newAPISrv(cli.Listen, cert, db, repl, cli.HTTP, cli.Compression)
|
||||
main.Add(qs)
|
||||
|
||||
// If we have a metrics port configured, start a metrics handler.
|
||||
if metricsListen != "" {
|
||||
if cli.MetricsListen != "" {
|
||||
go func() {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(metricsListen, mux))
|
||||
log.Fatal(http.ListenAndServe(cli.MetricsListen, mux))
|
||||
}()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Cancel on signal
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, os.Interrupt)
|
||||
go func() {
|
||||
sig := <-signalChan
|
||||
log.Printf("Received signal %s; shutting down", sig)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
// Engage!
|
||||
main.Serve(context.Background())
|
||||
main.Serve(ctx)
|
||||
}
|
||||
|
||||
@@ -1,324 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
io "io"
|
||||
"log"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
replicationReadTimeout = time.Minute
|
||||
replicationWriteTimeout = 30 * time.Second
|
||||
replicationHeartbeatInterval = time.Second * 30
|
||||
)
|
||||
|
||||
type replicator interface {
|
||||
send(key string, addrs []DatabaseAddress, seen int64)
|
||||
}
|
||||
|
||||
// a replicationSender tries to connect to the remote address and provides
// it with a feed of replication updates.
|
||||
type replicationSender struct {
|
||||
dst string
|
||||
cert tls.Certificate // our certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
outbox chan ReplicationRecord
|
||||
}
|
||||
|
||||
func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protocol.DeviceID) *replicationSender {
|
||||
return &replicationSender{
|
||||
dst: dst,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
outbox: make(chan ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *replicationSender) Serve(ctx context.Context) error {
|
||||
// Sleep a little at startup. Peers often restart at the same time, and
|
||||
// this avoids the service failing and entering backoff state
|
||||
// unnecessarily, while also reducing the reconnect rate to something
|
||||
// reasonable by default.
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{s.cert},
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
// Dial the TLS connection.
|
||||
conn, err := tls.Dial("tcp", s.dst, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
// The replication stream is not especially latency sensitive, but it is
|
||||
// quite a lot of data in small writes. Make it more efficient.
|
||||
if tcpc, ok := conn.NetConn().(*net.TCPConn); ok {
|
||||
_ = tcpc.SetNoDelay(false)
|
||||
}
|
||||
|
||||
// Get the other side device ID.
|
||||
remoteID, err := deviceID(conn)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify it's in the set of allowed device IDs.
|
||||
if !deviceIDIn(remoteID, s.allowedIDs) {
|
||||
log.Println("Replication connect: unexpected device ID:", remoteID)
|
||||
return err
|
||||
}
|
||||
|
||||
heartBeatTicker := time.NewTicker(replicationHeartbeatInterval)
|
||||
defer heartBeatTicker.Stop()
|
||||
|
||||
// Send records.
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
select {
|
||||
case <-heartBeatTicker.C:
|
||||
if len(s.outbox) > 0 {
|
||||
// No need to send heartbeats if there are events/previous
|
||||
// heartbeats to send, they will keep the connection alive.
|
||||
continue
|
||||
}
|
||||
// Empty replication message is the heartbeat:
|
||||
s.outbox <- ReplicationRecord{}
|
||||
|
||||
case rec := <-s.outbox:
|
||||
// Buffer must hold record plus four bytes for size
|
||||
size := rec.Size()
|
||||
if len(buf) < size+4 {
|
||||
buf = make([]byte, size+4)
|
||||
}
|
||||
|
||||
// Record comes after the four bytes size
|
||||
n, err := rec.MarshalTo(buf[4:])
|
||||
if err != nil {
|
||||
// odd to get an error here, but we haven't sent anything
|
||||
// yet so it's not fatal
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication marshal:", err)
|
||||
continue
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
|
||||
// Send
|
||||
conn.SetWriteDeadline(time.Now().Add(replicationWriteTimeout))
|
||||
if _, err := conn.Write(buf[:4+n]); err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication write:", err)
|
||||
// Yes, we are losing the replication event here.
|
||||
return err
|
||||
}
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
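For orientation, here is a minimal sketch (not part of the diff) of the wire format used by the sender loop above and by the listener further down: each record is written as a 4-byte big-endian length prefix followed by the marshalled record, and a zero-length frame is a heartbeat. The frame/readFrame helpers are purely illustrative and rely only on encoding/binary and io.

func frame(payload []byte) []byte {
    buf := make([]byte, 4+len(payload))
    binary.BigEndian.PutUint32(buf, uint32(len(payload)))
    copy(buf[4:], payload)
    return buf
}

// readFrame reads one length-prefixed payload; (nil, nil) means a heartbeat.
func readFrame(r io.Reader) ([]byte, error) {
    var hdr [4]byte
    if _, err := io.ReadFull(r, hdr[:]); err != nil {
        return nil, err
    }
    size := binary.BigEndian.Uint32(hdr[:])
    if size == 0 {
        return nil, nil // heartbeat, nothing to unmarshal
    }
    payload := make([]byte, size)
    if _, err := io.ReadFull(r, payload); err != nil {
        return nil, err
    }
    return payload, nil
}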
|
||||
|
||||
func (s *replicationSender) String() string {
|
||||
return fmt.Sprintf("replicationSender(%q)", s.dst)
|
||||
}
|
||||
|
||||
func (s *replicationSender) send(key string, ps []DatabaseAddress, _ int64) {
|
||||
item := ReplicationRecord{
|
||||
Key: key,
|
||||
Addresses: ps,
|
||||
}
|
||||
|
||||
// The send should never block. The outbox is suitably buffered for at
|
||||
// least a few seconds of stalls, which shouldn't happen in practice.
|
||||
select {
|
||||
case s.outbox <- item:
|
||||
default:
|
||||
replicationSendsTotal.WithLabelValues("drop").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// a replicationMultiplexer sends to multiple replicators
|
||||
type replicationMultiplexer []replicator
|
||||
|
||||
func (m replicationMultiplexer) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
for _, s := range m {
|
||||
// each send is nonblocking
|
||||
s.send(key, ps, seen)
|
||||
}
|
||||
}
|
||||
|
||||
// replicationListener accepts incoming connections and reads replication
|
||||
// items from them. Incoming items are applied to the KV store.
|
||||
type replicationListener struct {
|
||||
addr string
|
||||
cert tls.Certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
db database
|
||||
}
|
||||
|
||||
func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []protocol.DeviceID, db database) *replicationListener {
|
||||
return &replicationListener{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) Serve(ctx context.Context) error {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{l.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
lst, err := tls.Listen("tcp", l.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication listen:", err)
|
||||
return err
|
||||
}
|
||||
defer lst.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// Accept a connection
|
||||
conn, err := lst.Accept()
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Figure out the other side's device ID
|
||||
remoteID, err := deviceID(conn.(*tls.Conn))
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify it is in the set of allowed device IDs
|
||||
if !deviceIDIn(remoteID, l.allowedIDs) {
|
||||
log.Println("Replication accept: unexpected device ID:", remoteID)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
go l.handle(ctx, conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) String() string {
|
||||
return fmt.Sprintf("replicationListener(%q)", l.addr)
|
||||
}
|
||||
|
||||
func (l *replicationListener) handle(ctx context.Context, conn net.Conn) {
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
conn.SetReadDeadline(time.Now().Add(replicationReadTimeout))
|
||||
|
||||
// First four bytes are the size
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
log.Println("Replication read size:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Read the rest of the record
|
||||
size := int(binary.BigEndian.Uint32(buf[:4]))
|
||||
if len(buf) < size {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
|
||||
if size == 0 {
|
||||
// Heartbeat, ignore
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:size]); err != nil {
|
||||
log.Println("Replication read record:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Unmarshal
|
||||
var rec ReplicationRecord
|
||||
if err := rec.Unmarshal(buf[:size]); err != nil {
|
||||
log.Println("Replication unmarshal:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
// Store
|
||||
l.db.merge(rec.Key, rec.Addresses, rec.Seen)
|
||||
replicationRecvsTotal.WithLabelValues("success").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func deviceID(conn *tls.Conn) (protocol.DeviceID, error) {
|
||||
// The handshake may not be complete on the server side yet, and we need it
|
||||
// to get the client certificate.
|
||||
if !conn.ConnectionState().HandshakeComplete {
|
||||
if err := conn.Handshake(); err != nil {
|
||||
return protocol.DeviceID{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// We expect exactly one certificate.
|
||||
certs := conn.ConnectionState().PeerCertificates
|
||||
if len(certs) != 1 {
|
||||
return protocol.DeviceID{}, fmt.Errorf("unexpected number of certificates (%d != 1)", len(certs))
|
||||
}
|
||||
|
||||
return protocol.NewDeviceID(certs[0].Raw), nil
|
||||
}
|
||||
|
||||
func deviceIDIn(id protocol.DeviceID, ids []protocol.DeviceID) bool {
|
||||
for _, candidate := range ids {
|
||||
if id == candidate {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
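As a side note, on Go 1.21 or later the same check could be expressed with the standard library's slices package, since protocol.DeviceID is a comparable array type; this is kept as a comment-style sketch rather than a proposed change:

// Equivalent using the standard library (Go 1.21+):
//
//	import "slices"
//
//	func deviceIDIn(id protocol.DeviceID, ids []protocol.DeviceID) bool {
//		return slices.Contains(ids, id)
//	}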
|
||||
@@ -7,10 +7,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -99,13 +96,28 @@ var (
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
}, []string{"operation"})
|
||||
|
||||
retryAfterHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "retry_after_seconds",
|
||||
Help: "Retry-After header value in seconds.",
|
||||
Buckets: prometheus.ExponentialBuckets(60, 2, 7), // 60, 120, 240, 480, 960, 1920, 3840
|
||||
})
|
||||
databaseWriteSeconds = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_write_seconds",
|
||||
Help: "Time spent writing the database.",
|
||||
})
|
||||
databaseLastWritten = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_last_written",
|
||||
Help: "Timestamp of the last successful database write.",
|
||||
})
|
||||
|
||||
retryAfterLevel = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "retry_after_seconds",
|
||||
Help: "Retry-After header value in seconds.",
|
||||
}, []string{"name"})
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -126,16 +138,6 @@ func init() {
|
||||
replicationSendsTotal, replicationRecvsTotal,
|
||||
databaseKeys, databaseStatisticsSeconds,
|
||||
databaseOperations, databaseOperationSeconds,
|
||||
retryAfterHistogram)
|
||||
|
||||
processCollectorOpts := collectors.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_discovery",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
},
|
||||
}
|
||||
|
||||
prometheus.MustRegister(
|
||||
collectors.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
databaseWriteSeconds, databaseLastWritten,
|
||||
retryAfterLevel)
|
||||
}
|
||||
|
||||
@@ -12,9 +12,8 @@ import (
|
||||
"time"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -19,6 +19,8 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -30,7 +32,6 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
_ "github.com/syncthing/syncthing/lib/upnp"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -68,7 +68,6 @@ func findSession(key string) *session {
|
||||
ses, ok := pendingSessions[key]
|
||||
if !ok {
|
||||
return nil
|
||||
|
||||
}
|
||||
delete(pendingSessions, key)
|
||||
return ses
|
||||
|
||||
@@ -1,149 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
type cli struct {
|
||||
Listen string `default:":8080" help:"Listen address"`
|
||||
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
|
||||
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example.com/url"`
|
||||
CacheTime time.Duration `default:"15m" help:"Cache time"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
if err := server(¶ms); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func server(params *cli) error {
|
||||
http.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime))
|
||||
|
||||
for _, fwd := range params.Forward {
|
||||
path, url, ok := strings.Cut(fwd, "->")
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid forward: %q", fwd)
|
||||
}
|
||||
http.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
|
||||
}
|
||||
|
||||
return http.ListenAndServe(params.Listen, nil)
|
||||
}
|
||||
|
||||
type githubReleases struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *githubReleases) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
rels := upgrade.FetchLatestReleases(p.url, "")
|
||||
if rels == nil {
|
||||
http.Error(w, "no releases", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
// Move the URL used for browser downloads to the URL field, and remove
|
||||
// the browser URL field. This avoids going via the GitHub API for
|
||||
// downloads, since Syncthing uses the URL field.
|
||||
for _, rel := range rels {
|
||||
for j, asset := range rel.Assets {
|
||||
rel.Assets[j].URL = asset.BrowserURL
|
||||
rel.Assets[j].BrowserURL = ""
|
||||
}
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_ = json.NewEncoder(buf).Encode(rels)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
w.Header().Set("Content-Type", ct)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
}
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
if strings.HasPrefix(ct, "application/json") {
|
||||
// Special JSON handling; clean it up a bit.
|
||||
var v interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
} else {
|
||||
_, _ = io.Copy(w, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// filterForLatest returns the latest stable and prerelease only. If the
|
||||
// stable version is newer (comes first in the list) there is no need to go
|
||||
// looking for a prerelease at all.
|
||||
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
|
||||
var filtered []upgrade.Release
|
||||
var havePre bool
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease {
|
||||
// We found a stable version, we're good now.
|
||||
filtered = append(filtered, rel)
|
||||
break
|
||||
}
|
||||
if rel.Prerelease && !havePre {
|
||||
// We remember the first prerelease we find.
|
||||
filtered = append(filtered, rel)
|
||||
havePre = true
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
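An illustrative, hypothetical call, using only the Prerelease field that the function itself inspects: given a newest-first list, it keeps at most one prerelease and one stable release.

rels := []upgrade.Release{
    {Prerelease: true},  // newest entry: a release candidate
    {Prerelease: false}, // latest stable
    {Prerelease: false}, // older stable, never reached
}
latest := filterForLatest(rels)
// len(latest) == 2: the release candidate plus the first stable release.
// If the newest entry had been stable, only that single release would remain.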
|
||||
@@ -11,6 +11,10 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/internal/gen/dbproto"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
@@ -33,19 +37,19 @@ func indexDump() error {
|
||||
name := nulString(key[1+4+4:])
|
||||
fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
var f bep.FileInfo
|
||||
err := proto.Unmarshal(it.Value(), &f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
fmt.Printf(" V:%v\n", &f)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
flv.Unmarshal(it.Value())
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)
|
||||
var flv dbproto.VersionList
|
||||
proto.Unmarshal(it.Value(), &flv)
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, &flv)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
@@ -94,11 +98,11 @@ func indexDump() error {
|
||||
case db.KeyTypeFolderMeta:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
fmt.Printf("[foldermeta] F:%d", folder)
|
||||
var cs db.CountsSet
|
||||
if err := cs.Unmarshal(it.Value()); err != nil {
|
||||
var cs dbproto.CountsSet
|
||||
if err := proto.Unmarshal(it.Value(), &cs); err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", cs)
|
||||
fmt.Printf(" V:%v\n", &cs)
|
||||
}
|
||||
|
||||
case db.KeyTypeMiscData:
|
||||
@@ -125,20 +129,20 @@ func indexDump() error {
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
fmt.Printf("[version] H:%x", key[1:])
|
||||
var v protocol.Vector
|
||||
err := v.Unmarshal(it.Value())
|
||||
var v bep.Vector
|
||||
err := proto.Unmarshal(it.Value(), &v)
|
||||
if err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", v)
|
||||
fmt.Printf(" V:%v\n", &v)
|
||||
}
|
||||
|
||||
case db.KeyTypePendingFolder:
|
||||
device := binary.BigEndian.Uint32(key[1:])
|
||||
folder := string(key[5:])
|
||||
var of db.ObservedFolder
|
||||
of.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, of)
|
||||
var of dbproto.ObservedFolder
|
||||
proto.Unmarshal(it.Value(), &of)
|
||||
fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, &of)
|
||||
|
||||
case db.KeyTypePendingDevice:
|
||||
device := "<invalid>"
|
||||
@@ -146,9 +150,9 @@ func indexDump() error {
|
||||
if err == nil {
|
||||
device = dev.String()
|
||||
}
|
||||
var od db.ObservedDevice
|
||||
od.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingDevice] D:%v V:%v\n", device, od)
|
||||
var od dbproto.ObservedDevice
|
||||
proto.Unmarshal(it.Value(), &od)
|
||||
fmt.Printf("[pendingDevice] D:%v V:%v\n", device, &od)
|
||||
|
||||
default:
|
||||
fmt.Printf("[??? %d]\n %x\n %x\n", key[0], key, it.Value())
|
||||
|
||||
@@ -13,6 +13,10 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/internal/gen/dbproto"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
@@ -42,12 +46,12 @@ func indexCheck() (err error) {
|
||||
folders := make(map[uint32]string)
|
||||
devices := make(map[uint32]string)
|
||||
deviceToIDs := make(map[string]uint32)
|
||||
fileInfos := make(map[fileInfoKey]protocol.FileInfo)
|
||||
globals := make(map[globalKey]db.VersionList)
|
||||
fileInfos := make(map[fileInfoKey]*bep.FileInfo)
|
||||
globals := make(map[globalKey]*dbproto.VersionList)
|
||||
sequences := make(map[sequenceKey]string)
|
||||
needs := make(map[globalKey]struct{})
|
||||
blocklists := make(map[string]struct{})
|
||||
versions := make(map[string]protocol.Vector)
|
||||
versions := make(map[string]*bep.Vector)
|
||||
usedBlocklists := make(map[string]struct{})
|
||||
usedVersions := make(map[string]struct{})
|
||||
var localDeviceKey uint32
|
||||
@@ -74,26 +78,26 @@ func indexCheck() (err error) {
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
var f bep.FileInfo
|
||||
err := proto.Unmarshal(it.Value(), &f)
|
||||
if err != nil {
|
||||
fmt.Println("Unable to unmarshal FileInfo:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
fileInfos[fileInfoKey{folder, device, name}] = f
|
||||
fileInfos[fileInfoKey{folder, device, name}] = &f
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
if err := flv.Unmarshal(it.Value()); err != nil {
|
||||
var flv dbproto.VersionList
|
||||
if err := proto.Unmarshal(it.Value(), &flv); err != nil {
|
||||
fmt.Println("Unable to unmarshal VersionList:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
globals[globalKey{folder, name}] = flv
|
||||
globals[globalKey{folder, name}] = &flv
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
@@ -124,13 +128,13 @@ func indexCheck() (err error) {
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
hash := string(key[1:])
|
||||
var v protocol.Vector
|
||||
if err := v.Unmarshal(it.Value()); err != nil {
|
||||
var v bep.Vector
|
||||
if err := proto.Unmarshal(it.Value(), &v); err != nil {
|
||||
fmt.Println("Unable to unmarshal Vector:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
versions[hash] = v
|
||||
versions[hash] = &v
|
||||
}
|
||||
}
|
||||
|
||||
@@ -248,25 +252,27 @@ func indexCheck() (err error) {
|
||||
if fi.VersionHash != nil {
|
||||
fiv = versions[string(fi.VersionHash)]
|
||||
}
|
||||
if !fiv.Equal(version) {
|
||||
if !protocol.VectorFromWire(fiv).Equal(version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, version, fi.Version)
|
||||
success = false
|
||||
}
|
||||
if fi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, fi.IsInvalid())
|
||||
ffi := protocol.FileInfoFromDB(fi)
|
||||
if ffi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, ffi.IsInvalid())
|
||||
success = false
|
||||
}
|
||||
if fi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, fi.IsDeleted())
|
||||
if ffi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, ffi.IsDeleted())
|
||||
success = false
|
||||
}
|
||||
}
|
||||
for i, fv := range vl.RawVersions {
|
||||
for i, fv := range vl.Versions {
|
||||
ver := protocol.VectorFromWire(fv.Version)
|
||||
for _, device := range fv.Devices {
|
||||
checkGlobal(i, device, fv.Version, false, fv.Deleted)
|
||||
checkGlobal(i, device, ver, false, fv.Deleted)
|
||||
}
|
||||
for _, device := range fv.InvalidDevices {
|
||||
checkGlobal(i, device, fv.Version, true, fv.Deleted)
|
||||
checkGlobal(i, device, ver, true, fv.Deleted)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -276,10 +282,10 @@ func indexCheck() (err error) {
|
||||
if needsLocally(vl) {
|
||||
_, ok := needs[gk]
|
||||
if !ok {
|
||||
fv, _ := vl.GetGlobal()
|
||||
devB, _ := fv.FirstDevice()
|
||||
fv, _ := vlGetGlobal(vl)
|
||||
devB, _ := fvFirstDevice(fv)
|
||||
dev := deviceToIDs[string(devB)]
|
||||
fi := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
fi := protocol.FileInfoFromDB(fileInfos[fileInfoKey{gk.folder, dev, gk.name}])
|
||||
if !fi.IsDeleted() && !fi.IsIgnored() {
|
||||
fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
|
||||
}
|
||||
@@ -345,11 +351,84 @@ func indexCheck() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func needsLocally(vl db.VersionList) bool {
|
||||
gfv, gok := vl.GetGlobal()
|
||||
func needsLocally(vl *dbproto.VersionList) bool {
|
||||
gfv, gok := vlGetGlobal(vl)
|
||||
if !gok { // That's weird, but we hardly need something non-existent
|
||||
return false
|
||||
}
|
||||
fv, ok := vl.Get(protocol.LocalDeviceID[:])
|
||||
return db.Need(gfv, ok, fv.Version)
|
||||
fv, ok := vlGet(vl, protocol.LocalDeviceID[:])
|
||||
return db.Need(gfv, ok, protocol.VectorFromWire(fv.Version))
|
||||
}
|
||||
|
||||
// vlGet returns a FileVersion that contains the given device and whether it has
|
||||
// been found at all.
|
||||
func vlGet(vl *dbproto.VersionList, device []byte) (*dbproto.FileVersion, bool) {
|
||||
_, i, _, ok := vlFindDevice(vl, device)
|
||||
if !ok {
|
||||
return &dbproto.FileVersion{}, false
|
||||
}
|
||||
return vl.Versions[i], true
|
||||
}
|
||||
|
||||
// vlGetGlobal returns the current global FileVersion. The returned FileVersion
|
||||
// may be invalid, if all FileVersions are invalid. Returns false only if
|
||||
// VersionList is empty.
|
||||
func vlGetGlobal(vl *dbproto.VersionList) (*dbproto.FileVersion, bool) {
|
||||
i := vlFindGlobal(vl)
|
||||
if i == -1 {
|
||||
return nil, false
|
||||
}
|
||||
return vl.Versions[i], true
|
||||
}
|
||||
|
||||
// vlFindGlobal returns the index of the first version that isn't invalid, or if
// all versions are invalid just the first version (i.e. 0), or -1 if there are
// no versions at all.
|
||||
func vlFindGlobal(vl *dbproto.VersionList) int {
|
||||
for i := range vl.Versions {
|
||||
if !fvIsInvalid(vl.Versions[i]) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
if len(vl.Versions) == 0 {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// vlFindDevice returns whether the device is in InvalidDevices (true) or
// Devices (false), the positions in the version and device slices, and
// whether it has been found at all.
|
||||
func vlFindDevice(vl *dbproto.VersionList, device []byte) (bool, int, int, bool) {
|
||||
for i, v := range vl.Versions {
|
||||
if j := deviceIndex(v.Devices, device); j != -1 {
|
||||
return false, i, j, true
|
||||
}
|
||||
if j := deviceIndex(v.InvalidDevices, device); j != -1 {
|
||||
return true, i, j, true
|
||||
}
|
||||
}
|
||||
return false, -1, -1, false
|
||||
}
|
||||
|
||||
func deviceIndex(devices [][]byte, device []byte) int {
|
||||
for i, dev := range devices {
|
||||
if bytes.Equal(device, dev) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func fvFirstDevice(fv *dbproto.FileVersion) ([]byte, bool) {
|
||||
if len(fv.Devices) != 0 {
|
||||
return fv.Devices[0], true
|
||||
}
|
||||
if len(fv.InvalidDevices) != 0 {
|
||||
return fv.InvalidDevices[0], true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func fvIsInvalid(fv *dbproto.FileVersion) bool {
|
||||
return fv == nil || len(fv.Devices) == 0
|
||||
}
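A small hypothetical example of how these helpers behave, mirroring the old db.VersionList methods they replace (devA and devB are arbitrary device ID bytes, and fmt is already imported in this file):

devA, devB := []byte{0xaa}, []byte{0xbb}
vl := &dbproto.VersionList{Versions: []*dbproto.FileVersion{
    {InvalidDevices: [][]byte{devA}}, // no valid devices: treated as invalid
    {Devices: [][]byte{devB}},        // valid
}}
fmt.Println(vlFindGlobal(vl))            // 1: the first valid version wins
fmt.Println(vlFindDevice(vl, devA))      // true 0 0 true: found, in InvalidDevices
fmt.Println(fvIsInvalid(vl.Versions[0])) // true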
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
"github.com/kballard/go-shellquote"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -67,7 +67,7 @@ func (*stdinCommand) Run() error {
|
||||
fmt.Println("Reading commands from stdin...", args)
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
input, err := shellquote.Split(scanner.Text())
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing input: %w", err)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -16,8 +17,6 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -10,6 +10,4 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
)
|
||||
var l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
|
||||
@@ -17,6 +17,9 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
@@ -280,10 +283,11 @@ func loadEncryptedFileInfo(fd fs.File) (*protocol.FileInfo, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encFi protocol.FileInfo
|
||||
if err := encFi.Unmarshal(trailer); err != nil {
|
||||
var encFi bep.FileInfo
|
||||
if err := proto.Unmarshal(trailer, &encFi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi := protocol.FileInfoFromWire(&encFi)
|
||||
|
||||
return &encFi, nil
|
||||
return &fi, nil
|
||||
}
|
||||
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/thejerf/suture/v4"
|
||||
"github.com/willabides/kongplete"
|
||||
|
||||
@@ -38,6 +37,7 @@ import (
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/decrypt"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/generate"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
@@ -92,11 +92,6 @@ above.
|
||||
STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STHASHING Select the SHA256 hashing package to use. Possible values
|
||||
are "standard" for the Go standard library implementation,
|
||||
"minio" for the github.com/minio/sha256-simd implementation,
|
||||
and blank (the default) for auto detection.
|
||||
|
||||
STVERSIONEXTRA Add extra information to the version string in logs and the
|
||||
version line in the GUI. Can be set to the name of a wrapper
|
||||
or tool controlling syncthing to communicate this to the end
|
||||
|
||||
@@ -241,22 +241,6 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
|
||||
if panicFd == nil {
|
||||
dst.Write([]byte(line))
|
||||
|
||||
if strings.Contains(line, "SIGILL") {
|
||||
l.Warnln(`
|
||||
*******************************************************************************
|
||||
* Crash due to illegal instruction detected. This is most likely due to a CPU *
|
||||
* incompatibility with the high performance hashing package. Switching to the *
|
||||
* standard hashing package instead. Please report this issue at: *
|
||||
* *
|
||||
* https://github.com/syncthing/syncthing/issues *
|
||||
* *
|
||||
* Include the details of your CPU. *
|
||||
*******************************************************************************
|
||||
`)
|
||||
os.Setenv("STHASHING", "standard")
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
|
||||
panicFd, err = os.Create(locations.GetTimestamped(locations.PanicLog))
|
||||
if err != nil {
|
||||
|
||||
@@ -140,7 +140,7 @@ func checkNotExist(t *testing.T, name string) {
|
||||
func TestAutoClosedFile(t *testing.T) {
|
||||
os.RemoveAll("_autoclose")
|
||||
defer os.RemoveAll("_autoclose")
|
||||
os.Mkdir("_autoclose", 0755)
|
||||
os.Mkdir("_autoclose", 0o755)
|
||||
file := filepath.FromSlash("_autoclose/tmp")
|
||||
data := []byte("hello, world\n")
|
||||
|
||||
|
||||
@@ -1,226 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package aggregate
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
DBConn string `env:"UR_DB_URL" default:"postgres://user:password@localhost/ur?sslmode=disable"`
|
||||
}
|
||||
|
||||
func (cli *CLI) Run() error {
|
||||
log.SetFlags(log.Ltime | log.Ldate)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
db, err := sql.Open("postgres", cli.DBConn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("database: %w", err)
|
||||
}
|
||||
err = setupDB(db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("database: %w", err)
|
||||
}
|
||||
|
||||
for {
|
||||
runAggregation(db)
|
||||
// Sleep until one minute past next midnight
|
||||
sleepUntilNext(24*time.Hour, 1*time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func runAggregation(db *sql.DB) {
|
||||
since := maxIndexedDay(db, "VersionSummary")
|
||||
log.Println("Aggregating VersionSummary data since", since)
|
||||
rows, err := aggregateVersionSummary(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
since = maxIndexedDay(db, "Performance")
|
||||
log.Println("Aggregating Performance data since", since)
|
||||
rows, err = aggregatePerformance(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
since = maxIndexedDay(db, "BlockStats")
|
||||
log.Println("Aggregating BlockStats data since", since)
|
||||
rows, err = aggregateBlockStats(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
}
|
||||
|
||||
func sleepUntilNext(intv, margin time.Duration) {
|
||||
now := time.Now().UTC()
|
||||
next := now.Truncate(intv).Add(intv).Add(margin)
|
||||
log.Println("Sleeping until", next)
|
||||
time.Sleep(next.Sub(now))
|
||||
}
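For example, a call sleepUntilNext(24*time.Hour, time.Minute) made at 15:42 UTC truncates the current time to 00:00 UTC that day, adds 24 hours plus the one-minute margin, and therefore sleeps until 00:01 UTC the next day, roughly 8 hours and 19 minutes later.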
|
||||
|
||||
func setupDB(db *sql.DB) error {
|
||||
_, err := db.Exec(`CREATE TABLE IF NOT EXISTS VersionSummary (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
Version VARCHAR(8) NOT NULL,
|
||||
Count INTEGER NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Performance (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
TotFiles INTEGER NOT NULL,
|
||||
TotMiB INTEGER NOT NULL,
|
||||
SHA256Perf DOUBLE PRECISION NOT NULL,
|
||||
MemorySize INTEGER NOT NULL,
|
||||
MemoryUsageMiB INTEGER NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS BlockStats (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
Reports INTEGER NOT NULL,
|
||||
Total BIGINT NOT NULL,
|
||||
Renamed BIGINT NOT NULL,
|
||||
Reused BIGINT NOT NULL,
|
||||
Pulled BIGINT NOT NULL,
|
||||
CopyOrigin BIGINT NOT NULL,
|
||||
CopyOriginShifted BIGINT NOT NULL,
|
||||
CopyElsewhere BIGINT NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var t string
|
||||
|
||||
row := db.QueryRow(`SELECT 'UniqueDayVersionIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'VersionDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'BlockStatsDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func maxIndexedDay(db *sql.DB, table string) time.Time {
|
||||
var t time.Time
|
||||
row := db.QueryRow("SELECT MAX(DATE_TRUNC('day', Day)) FROM " + table)
|
||||
err := row.Scan(&t)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO VersionSummary (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
SUBSTRING(Report->>'version' FROM '^v\d.\d+') AS Ver,
|
||||
COUNT(*) AS Count
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND Report->>'version' like 'v_.%'
|
||||
GROUP BY Day, Ver
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
|
||||
func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO Performance (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
AVG((Report->>'totFiles')::numeric) As TotFiles,
|
||||
AVG((Report->>'totMiB')::numeric) As TotMiB,
|
||||
AVG((Report->>'sha256Perf')::numeric) As SHA256Perf,
|
||||
AVG((Report->>'memorySize')::numeric) As MemorySize,
|
||||
AVG((Report->>'memoryUsageMiB')::numeric) As MemoryUsageMiB
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND Report->>'version' like 'v_.%'
|
||||
/* Some custom implementations reported bytes where we expect megabytes; cap at a petabyte */
|
||||
AND (Report->>'memorySize')::numeric < 1073741824
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
|
||||
func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
|
||||
// Filter out anything prior to 0.14.41 as that has sum aggregations which
|
||||
// made no sense.
|
||||
res, err := db.Exec(`INSERT INTO BlockStats (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
COUNT(1) As Reports,
|
||||
SUM((Report->'blockStats'->>'total')::numeric)::bigint AS Total,
|
||||
SUM((Report->'blockStats'->>'renamed')::numeric)::bigint AS Renamed,
|
||||
SUM((Report->'blockStats'->>'reused')::numeric)::bigint AS Reused,
|
||||
SUM((Report->'blockStats'->>'pulled')::numeric)::bigint AS Pulled,
|
||||
SUM((Report->'blockStats'->>'copyOrigin')::numeric)::bigint AS CopyOrigin,
|
||||
SUM((Report->'blockStats'->>'copyOriginShifted')::numeric)::bigint AS CopyOriginShifted,
|
||||
SUM((Report->'blockStats'->>'copyElsewhere')::numeric)::bigint AS CopyElsewhere
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND (Report->>'urVersion')::numeric >= 3
|
||||
AND Report->>'version' like 'v_.%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.40%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.39%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.38%'
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
@@ -1,276 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type analytic struct {
|
||||
Key string
|
||||
Count int
|
||||
Percentage float64
|
||||
Items []analytic `json:",omitempty"`
|
||||
}
|
||||
|
||||
type analyticList []analytic
|
||||
|
||||
func (l analyticList) Less(a, b int) bool {
|
||||
if l[a].Key == "Others" {
|
||||
return false
|
||||
}
|
||||
if l[b].Key == "Others" {
|
||||
return true
|
||||
}
|
||||
return l[b].Count < l[a].Count // inverse
|
||||
}
|
||||
|
||||
func (l analyticList) Swap(a, b int) {
|
||||
l[a], l[b] = l[b], l[a]
|
||||
}
|
||||
|
||||
func (l analyticList) Len() int {
|
||||
return len(l)
|
||||
}
|
||||
|
||||
// Returns a list of frequency analytics for a given list of strings.
|
||||
func analyticsFor(ss []string, cutoff int) []analytic {
|
||||
m := make(map[string]int)
|
||||
t := 0
|
||||
for _, s := range ss {
|
||||
m[s]++
|
||||
t++
|
||||
}
|
||||
|
||||
l := make([]analytic, 0, len(m))
|
||||
for k, c := range m {
|
||||
l = append(l, analytic{
|
||||
Key: k,
|
||||
Count: c,
|
||||
Percentage: 100 * float64(c) / float64(t),
|
||||
})
|
||||
}
|
||||
|
||||
sort.Sort(analyticList(l))
|
||||
|
||||
if cutoff > 0 && len(l) > cutoff {
|
||||
c := 0
|
||||
for _, i := range l[cutoff:] {
|
||||
c += i.Count
|
||||
}
|
||||
l = append(l[:cutoff], analytic{
|
||||
Key: "Others",
|
||||
Count: c,
|
||||
Percentage: 100 * float64(c) / float64(t),
|
||||
})
|
||||
}
|
||||
|
||||
return l
|
||||
}
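For example, analyticsFor([]string{"a", "a", "b"}, 0) produces two entries, {Key: "a", Count: 2, Percentage: 66.7} and {Key: "b", Count: 1, Percentage: 33.3}, sorted by descending count with any "Others" entry forced to the end.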
|
||||
|
||||
// Find the points at which certain penetration levels are met
|
||||
func penetrationLevels(as []analytic, points []float64) []analytic {
|
||||
sort.Slice(as, func(a, b int) bool {
|
||||
return versionLess(as[b].Key, as[a].Key)
|
||||
})
|
||||
|
||||
var res []analytic
|
||||
|
||||
idx := 0
|
||||
sum := 0.0
|
||||
for _, a := range as {
|
||||
sum += a.Percentage
|
||||
if sum >= points[idx] {
|
||||
a.Count = int(points[idx])
|
||||
a.Percentage = sum
|
||||
res = append(res, a)
|
||||
idx++
|
||||
if idx == len(points) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForInts(data []int) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Ints(data)
|
||||
res[0] = float64(data[int(float64(len(data))*0.05)])
|
||||
res[1] = float64(data[len(data)/2])
|
||||
res[2] = float64(data[int(float64(len(data))*0.95)])
|
||||
res[3] = float64(data[len(data)-1])
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForInt64s(data []int64) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Slice(data, func(a, b int) bool {
|
||||
return data[a] < data[b]
|
||||
})
|
||||
|
||||
res[0] = float64(data[int(float64(len(data))*0.05)])
|
||||
res[1] = float64(data[len(data)/2])
|
||||
res[2] = float64(data[int(float64(len(data))*0.95)])
|
||||
res[3] = float64(data[len(data)-1])
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForFloats(data []float64) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Float64s(data)
|
||||
res[0] = data[int(float64(len(data))*0.05)]
|
||||
res[1] = data[len(data)/2]
|
||||
res[2] = data[int(float64(len(data))*0.95)]
|
||||
res[3] = data[len(data)-1]
|
||||
return res
|
||||
}
|
||||
|
||||
func group(by func(string) string, as []analytic, perGroup int, otherPct float64) []analytic {
|
||||
var res []analytic
|
||||
|
||||
next:
|
||||
for _, a := range as {
|
||||
group := by(a.Key)
|
||||
for i := range res {
|
||||
if res[i].Key == group {
|
||||
res[i].Count += a.Count
|
||||
res[i].Percentage += a.Percentage
|
||||
if len(res[i].Items) < perGroup {
|
||||
res[i].Items = append(res[i].Items, a)
|
||||
}
|
||||
continue next
|
||||
}
|
||||
}
|
||||
res = append(res, analytic{
|
||||
Key: group,
|
||||
Count: a.Count,
|
||||
Percentage: a.Percentage,
|
||||
Items: []analytic{a},
|
||||
})
|
||||
}
|
||||
|
||||
sort.Sort(analyticList(res))
|
||||
|
||||
if otherPct > 0 {
|
||||
// Groups with less than otherPct go into "Other"
|
||||
other := analytic{
|
||||
Key: "Other",
|
||||
}
|
||||
for i := 0; i < len(res); i++ {
|
||||
if res[i].Percentage < otherPct || res[i].Key == "Other" {
|
||||
other.Count += res[i].Count
|
||||
other.Percentage += res[i].Percentage
|
||||
res = append(res[:i], res[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if other.Count > 0 {
|
||||
res = append(res, other)
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func byVersion(s string) string {
|
||||
parts := strings.Split(s, ".")
|
||||
if len(parts) >= 2 {
|
||||
return strings.Join(parts[:2], ".")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func byPlatform(s string) string {
|
||||
parts := strings.Split(s, "-")
|
||||
if len(parts) >= 2 {
|
||||
return parts[0]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var numericGoVersion = regexp.MustCompile(`^go[0-9]\.[0-9]+`)
|
||||
|
||||
func byCompiler(s string) string {
|
||||
if m := numericGoVersion.FindString(s); m != "" {
|
||||
return m
|
||||
}
|
||||
return "Other"
|
||||
}
|
||||
|
||||
func versionLess(a, b string) bool {
|
||||
arel, apre := versionParts(a)
|
||||
brel, bpre := versionParts(b)
|
||||
|
||||
minlen := len(arel)
|
||||
if l := len(brel); l < minlen {
|
||||
minlen = l
|
||||
}
|
||||
|
||||
for i := 0; i < minlen; i++ {
|
||||
if arel[i] != brel[i] {
|
||||
return arel[i] < brel[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Longer version is newer, when the preceding parts are equal
|
||||
if len(arel) != len(brel) {
|
||||
return len(arel) < len(brel)
|
||||
}
|
||||
|
||||
if apre != bpre {
|
||||
// "(+dev)" versions are ahead
|
||||
if apre == plusStr {
|
||||
return false
|
||||
}
|
||||
if bpre == plusStr {
|
||||
return true
|
||||
}
|
||||
return apre < bpre
|
||||
}
|
||||
|
||||
// don't actually care how the prerelease stuff compares for our purposes
|
||||
return false
|
||||
}
|
||||
|
||||
// Split a version as returned from transformVersion into parts.
|
||||
// "1.2.3-beta.2" -> []int{1, 2, 3}, "beta.2"}
|
||||
func versionParts(v string) ([]int, string) {
|
||||
parts := strings.SplitN(v[1:], " ", 2) // " (+dev)" versions
|
||||
if len(parts) == 1 {
|
||||
parts = strings.SplitN(parts[0], "-", 2) // "-rc.1" type versions
|
||||
}
|
||||
fields := strings.Split(parts[0], ".")
|
||||
|
||||
release := make([]int, len(fields))
|
||||
for i, s := range fields {
|
||||
v, _ := strconv.Atoi(s)
|
||||
release[i] = v
|
||||
}
|
||||
|
||||
var prerelease string
|
||||
if len(parts) > 1 {
|
||||
prerelease = parts[1]
|
||||
}
|
||||
|
||||
return release, prerelease
|
||||
}
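For example, versionLess("v1.2.3", "v1.10.0") is true: the dotted parts are compared numerically, so 2 < 10, whereas a plain string comparison would have sorted "v1.10.0" first.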
|
||||
@@ -1,131 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type NumberType int
|
||||
|
||||
const (
|
||||
NumberMetric NumberType = iota
|
||||
NumberBinary
|
||||
NumberDuration
|
||||
)
|
||||
|
||||
func number(ntype NumberType, v float64) string {
|
||||
switch ntype {
|
||||
case NumberMetric:
|
||||
return metric(v)
|
||||
case NumberDuration:
|
||||
return duration(v)
|
||||
case NumberBinary:
|
||||
return binary(v)
|
||||
default:
|
||||
return metric(v)
|
||||
}
|
||||
}
|
||||
|
||||
type suffix struct {
|
||||
Suffix string
|
||||
Multiplier float64
|
||||
}
|
||||
|
||||
var metricSuffixes = []suffix{
|
||||
{"G", 1e9},
|
||||
{"M", 1e6},
|
||||
{"k", 1e3},
|
||||
}
|
||||
|
||||
var binarySuffixes = []suffix{
|
||||
{"Gi", 1 << 30},
|
||||
{"Mi", 1 << 20},
|
||||
{"Ki", 1 << 10},
|
||||
}
|
||||
|
||||
var durationSuffix = []suffix{
|
||||
{"year", 365 * 24 * 60 * 60},
|
||||
{"month", 30 * 24 * 60 * 60},
|
||||
{"day", 24 * 60 * 60},
|
||||
{"hour", 60 * 60},
|
||||
{"minute", 60},
|
||||
{"second", 1},
|
||||
}
|
||||
|
||||
func metric(v float64) string {
|
||||
return withSuffix(v, metricSuffixes, false)
|
||||
}
|
||||
|
||||
func binary(v float64) string {
|
||||
return withSuffix(v, binarySuffixes, false)
|
||||
}
|
||||
|
||||
func duration(v float64) string {
|
||||
return withSuffix(v, durationSuffix, true)
|
||||
}
|
||||
|
||||
func withSuffix(v float64, ps []suffix, pluralize bool) string {
|
||||
for _, p := range ps {
|
||||
if v >= p.Multiplier {
|
||||
suffix := p.Suffix
|
||||
if pluralize && v/p.Multiplier != 1.0 {
|
||||
suffix += "s"
|
||||
}
|
||||
// If the number only has decimal zeroes, strip them off.
|
||||
num := strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v/p.Multiplier), "0"), ".")
|
||||
return fmt.Sprintf("%s %s", num, suffix)
|
||||
}
|
||||
}
|
||||
return strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v), "0"), ".")
|
||||
}
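For example, metric(1500) yields "1.5 k", binary(3<<20) yields "3 Mi", and duration(18000) yields "5 hours"; the plural "s" is added only when the scaled value is not exactly 1.0, so duration(60) is "1 minute".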
|
||||
|
||||
// commatize returns a number with sep as thousands separators. Handles
|
||||
// plain floats; input without a decimal point is returned unchanged.
|
||||
func commatize(sep, s string) string {
|
||||
// If no dot, don't do anything.
|
||||
if !strings.ContainsRune(s, '.') {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
fs := strings.SplitN(s, ".", 2)
|
||||
|
||||
l := len(fs[0])
|
||||
for i := range fs[0] {
|
||||
b.Write([]byte{s[i]})
|
||||
if i < l-1 && (l-i)%3 == 1 {
|
||||
b.WriteString(sep)
|
||||
}
|
||||
}
|
||||
|
||||
if len(fs) > 1 && len(fs[1]) > 0 {
|
||||
b.WriteString(".")
|
||||
b.WriteString(fs[1])
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
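For example, commatize(",", "1234567.89") returns "1,234,567.89", while a value without a decimal point is passed through unchanged.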
|
||||
|
||||
func proportion(m map[string]int, count int) float64 {
|
||||
total := 0
|
||||
isMax := true
|
||||
for _, n := range m {
|
||||
total += n
|
||||
if n > count {
|
||||
isMax = false
|
||||
}
|
||||
}
|
||||
pct := (100 * float64(count)) / float64(total)
|
||||
// To avoid rounding errors in the template surpassing 100% and breaking
|
||||
// the progress bars.
|
||||
if isMax && len(m) > 1 && count != total {
|
||||
pct -= 0.01
|
||||
}
|
||||
return pct
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
// Copyright (C) 2023 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv",
|
||||
Name: "reports_total",
|
||||
}, []string{"version"})
|
||||
|
||||
func init() {
|
||||
metricReportsTotal.WithLabelValues("fail")
|
||||
metricReportsTotal.WithLabelValues("duplicate")
|
||||
metricReportsTotal.WithLabelValues("v1")
|
||||
metricReportsTotal.WithLabelValues("v2")
|
||||
metricReportsTotal.WithLabelValues("v3")
|
||||
}
|
||||
(Several file diffs are omitted at this point: some were suppressed as too large or containing overlong lines, and several binary image assets are not shown.)
@@ -1,623 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<!--
|
||||
Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
Use of this source code is governed by an MIT-style license that can be
|
||||
found in the LICENSE file.
|
||||
-->
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta name="description" content="">
|
||||
<meta name="author" content="">
|
||||
<link rel="shortcut icon" href="static/assets/img/favicon.png">
|
||||
|
||||
<title>Syncthing Usage Reports</title>
|
||||
<link href="static/bootstrap/css/bootstrap.min.css" rel="stylesheet">
|
||||
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
|
||||
<script type="text/javascript" src="static/bootstrap/js/bootstrap.min.js"></script>
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/heatmapjs@2.0.2/heatmap.min.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/leaflet-heatmap@1.0.0/leaflet-heatmap.js"></script>
|
||||
|
||||
<style type="text/css">
|
||||
body {
|
||||
margin: 40px;
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
||||
}
|
||||
tr.main td {
|
||||
font-weight: bold;
|
||||
}
|
||||
tr.child td.first {
|
||||
padding-left: 2em;
|
||||
}
|
||||
.progress-bar {
|
||||
overflow:hidden;
|
||||
white-space:nowrap;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
</style>
|
||||
<script type="text/javascript"
|
||||
src='https://www.google.com/jsapi?autoload={
|
||||
"modules":[{
|
||||
"name":"visualization",
|
||||
"version":"1",
|
||||
"packages":["corechart"]
|
||||
}]
|
||||
}'></script>
|
||||
|
||||
<script type="text/javascript">
|
||||
google.setOnLoadCallback(drawVersionChart);
|
||||
google.setOnLoadCallback(drawBlockStatsChart);
|
||||
google.setOnLoadCallback(drawPerformanceCharts);
|
||||
|
||||
function drawVersionChart() {
|
||||
// Summary version chart for versions that at some point in the chart
|
||||
// reach 250 devices. This filters out versions that are old and
|
||||
// uninteresting yet linger forever with like four users.
|
||||
var jsonData = $.ajax({url: "summary.json?min=250", dataType:"json", async: false}).responseText;
|
||||
var rows = JSON.parse(jsonData);
|
||||
|
||||
var data = new google.visualization.DataTable();
|
||||
data.addColumn('date', 'Day');
|
||||
for (var i = 1; i < rows[0].length; i++){
|
||||
data.addColumn('number', rows[0][i]);
|
||||
}
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
rows[i][0] = new Date(rows[i][0]);
|
||||
data.addRow(rows[i]);
|
||||
};
|
||||
|
||||
var options = {
|
||||
legend: { position: 'bottom', alignment: 'center' },
|
||||
isStacked: true,
|
||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
||||
};
|
||||
|
||||
var chart = new google.visualization.AreaChart(document.getElementById('versionChart'));
|
||||
chart.draw(data, options);
|
||||
}
|
||||
|
||||
function formatGibibytes(gibibytes, decimals) {
|
||||
if(gibibytes == 0) return '0 GiB';
|
||||
var k = 1024,
|
||||
dm = decimals || 2,
|
||||
sizes = ['GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'],
|
||||
i = Math.floor(Math.log(gibibytes) / Math.log(k));
|
||||
if (i < 0) {
|
||||
sizes = 'MiB';
|
||||
} else {
|
||||
sizes = sizes[i];
|
||||
}
|
||||
return parseFloat((gibibytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes;
|
||||
}
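For example, formatGibibytes(2048) renders as "2 TiB", and values below one GiB fall back to MiB, so formatGibibytes(0.5) renders as "512 MiB".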
|
||||
|
||||
|
||||
function drawBlockStatsChart() {
|
||||
var jsonData = $.ajax({url: "blockstats.json", dataType:"json", async: false}).responseText;
|
||||
var rows = JSON.parse(jsonData);
|
||||
|
||||
var data = new google.visualization.DataTable();
|
||||
data.addColumn('date', 'Day');
|
||||
for (var i = 1; i < rows[0].length; i++){
|
||||
data.addColumn('number', rows[0][i]);
|
||||
}
|
||||
|
||||
var totals = [0, 0, 0, 0, 0, 0];
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
rows[i][0] = new Date(rows[i][0]);
|
||||
for (var j = 2; j < rows[i].length; j++) {
|
||||
totals[j-2] += rows[i][j];
|
||||
}
|
||||
data.addRow(rows[i]);
|
||||
};
|
||||
|
||||
var totalTotals = totals.reduce(function(a, b) { return a + b; }, 0);
|
||||
|
||||
if (totalTotals > 0) {
|
||||
var content = "<table class='table'>\n"
|
||||
for (var j = 2; j < rows[0].length; j++) {
|
||||
content += "<tr><td><b>" + rows[0][j].replace(' (GiB)', '') + "</b></td><td>" + formatGibibytes(totals[j-2].toFixed(2)) + " (" + ((100*totals[j-2])/totalTotals).toFixed(2) +"%)</td></tr>\n";
|
||||
}
|
||||
content += "</table>";
|
||||
document.getElementById("data-to-date").innerHTML = content;
|
||||
} else {
|
||||
// No data, hide it.
|
||||
document.getElementById("block-stats").outerHTML = "";
|
||||
return;
|
||||
}
|
||||
|
||||
var options = {
|
||||
focusTarget: 'category',
|
||||
vAxes: {0: {}, 1: {}},
|
||||
series: {0: {type: 'line', targetAxisIndex:1}},
|
||||
isStacked: true,
|
||||
legend: {position: 'none'},
|
||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
||||
};
|
||||
|
||||
var chart = new google.visualization.AreaChart(document.getElementById('blockStatsChart'));
|
||||
chart.draw(data, options);
|
||||
}
|
||||
|
||||
function drawPerformanceCharts() {
|
||||
var jsonData = $.ajax({url: "/performance.json", dataType:"json", async: false}).responseText;
|
||||
var rows = JSON.parse(jsonData);
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
rows[i][0] = new Date(rows[i][0]);
|
||||
}
|
||||
|
||||
drawChart(rows, 1, 'Total Number of Files', 'totFilesChart', 1e6, 1);
|
||||
drawChart(rows, 2, 'Total Folder Size (GiB)', 'totMiBChart', 1e6, 1024);
|
||||
drawChart(rows, 3, 'Hash Performance (MiB/s)', 'hashPerfChart', 1000, 1);
|
||||
drawChart(rows, 4, 'System RAM Size (GiB)', 'memSizeChart', 1e6, 1024);
|
||||
drawChart(rows, 5, 'Memory Usage (MiB)', 'memUsageChart', 250, 1);
|
||||
}
|
||||
|
||||
function drawChart(rows, index, title, id, cutoff, divisor) {
|
||||
var data = new google.visualization.DataTable();
|
||||
data.addColumn('date', 'Day');
|
||||
data.addColumn('number', title);
|
||||
|
||||
var row;
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
row = [rows[i][0], rows[i][index] / divisor];
|
||||
if (row[1] > cutoff) {
|
||||
row[1] = null;
|
||||
}
|
||||
data.addRow(row);
|
||||
}
|
||||
|
||||
var options = {
|
||||
legend: { position: 'bottom', alignment: 'center' },
|
||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
||||
vAxes: {0: {minValue: 0}},
|
||||
};
|
||||
|
||||
var chart = new google.visualization.LineChart(document.getElementById(id));
|
||||
chart.draw(data, options);
|
||||
}
|
||||
|
||||
var locations = [];
|
||||
{{range $location, $weight := .locations}}
|
||||
locations.push({lat:{{- $location.Latitude -}},lng:{{- $location.Longitude -}},count:Math.min(100, {{- $weight -}})});
|
||||
{{- end}}
|
||||
|
||||
function drawHeatMap() {
|
||||
if (locations.length == 0) {
|
||||
return;
|
||||
}
|
||||
var testData = {
|
||||
data: locations
|
||||
};
|
||||
|
||||
var baseLayer = L.tileLayer(
|
||||
'https://tile.openstreetmap.org/{z}/{x}/{y}.png',{
|
||||
attribution: '...',
|
||||
maxZoom: 18
|
||||
}
|
||||
);
|
||||
var cfg = {
|
||||
"radius": 1,
|
||||
"minOpacity": .25,
|
||||
"maxOpacity": .8,
|
||||
"scaleRadius": true,
|
||||
"useLocalExtrema": true,
|
||||
latField: 'lat',
|
||||
lngField: 'lng',
|
||||
valueField: 'count',
|
||||
gradient: {
|
||||
'.1': 'cyan',
|
||||
'.8': 'blue',
|
||||
'.95': 'red'
|
||||
}
|
||||
};
|
||||
var heatmapLayer = new HeatmapOverlay(cfg);
|
||||
|
||||
var map = new L.Map('map', {
|
||||
center: new L.LatLng(25, 0),
|
||||
zoom: 1,
|
||||
layers: [baseLayer, heatmapLayer]
|
||||
});
|
||||
heatmapLayer.setData(testData);
|
||||
}
|
||||
</script>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="container">
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h1>Syncthing Usage Data</h1>
|
||||
|
||||
<h4 id="active-users">Active Users per Day and Version</h4>
|
||||
<p>
|
||||
This is the total number of unique users with reporting enabled, per day. Area color represents the major version.
|
||||
</p>
|
||||
<div class="img-thumbnail" id="versionChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<div id="block-stats">
|
||||
<h4>Data Transfers per Day</h4>
|
||||
<p>
|
||||
This is the total data transferred per day. It also shows how much data was saved (not transferred) by each of the methods syncthing uses.
|
||||
</p>
|
||||
<div class="img-thumbnail" id="blockStatsChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
<h4 id="totals-to-date">Totals to date</h4>
|
||||
<p id="data-to-date">
|
||||
No data
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<h4 id="metrics">Usage Metrics</h4>
|
||||
<p>
|
||||
This is the aggregated usage report data for the last 24 hours. Data based on <b>{{.nodes}}</b> devices that have reported in.
|
||||
</p>
|
||||
|
||||
{{if .locations}}
|
||||
<div class="img-thumbnail" id="map" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
<p class="text-muted">
|
||||
Heatmap max intensity is capped at 100 reports within a location.
|
||||
</p>
|
||||
<div class="panel panel-default">
|
||||
<div class="panel-heading">
|
||||
<h4 class="panel-title">
|
||||
<a data-toggle="collapse" href="#collapseTwo">Breakdown per country</a>
|
||||
</h4>
|
||||
</div>
|
||||
<div id="collapseTwo" class="panel-collapse collapse">
|
||||
<div class="panel-body">
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<tbody>
|
||||
{{range .countries | slice 2 1}}
|
||||
<tr>
|
||||
<td style="width: 45%">{{.Key}}</td>
|
||||
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
||||
<td style="width: 5%" class="text-right">{{.Count}}</td>
|
||||
<td>
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<tbody>
|
||||
{{range .countries | slice 2 2}}
|
||||
<tr>
|
||||
<td style="width: 45%">{{.Key}}</td>
|
||||
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
||||
<td style="width: 5%" class="text-right">{{.Count}}</td>
|
||||
<td>
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{end}}
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th colspan="4" class="text-center">
|
||||
<a href="https://en.wikipedia.org/wiki/Percentile">Percentile</a>
|
||||
</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th class="text-right">5%</th>
|
||||
<th class="text-right">50%</th>
|
||||
<th class="text-right">95%</th>
|
||||
<th class="text-right">100%</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .categories}}
|
||||
<tr>
|
||||
<td>{{.Descr}}</td>
|
||||
<td class="text-right">{{index .Values 0 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
<td class="text-right">{{index .Values 1 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
<td class="text-right">{{index .Values 2 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
<td class="text-right">{{index .Values 3 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Version</th><th class="text-right">Devices</th><th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .versions}}
|
||||
{{if gt .Percentage 0.1}}
|
||||
<tr class="main">
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{range .Items}}
|
||||
{{if gt .Percentage 0.1}}
|
||||
<tr class="child">
|
||||
<td class="first">{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Penetration Level</th>
|
||||
<th>Version</th>
|
||||
<th class="text-right">Actual</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .versionPenetrations}}
|
||||
<tr>
|
||||
<td>{{.Count}}%</td>
|
||||
<td>≥ {{.Key}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Platform</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .platforms}}
|
||||
<tr class="main">
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{range .Items}}
|
||||
<tr class="child">
|
||||
<td class="first">{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="row">
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Compiler</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .compilers}}
|
||||
<tr class="main">
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{range .Items}}
|
||||
{{if or (gt .Percentage 0.1) (eq .Key "Others")}}
|
||||
<tr class="child">
|
||||
<td class="first">{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Distribution Channel</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .distributions}}
|
||||
<tr>
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Builder</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .builders}}
|
||||
<tr>
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h4 id="features">Feature Usage</h4>
|
||||
<p>
|
||||
The following lists feature usage. Some features are counted once per report, others as a sum of units within each report (e.g. devices with static addresses among all known devices in a report); a sketch of the two counting modes follows below.
|
||||
Currently there are <b>{{.versionNodes.v2}}</b> devices reporting for version 2 and <b>{{.versionNodes.v3}}</b> for version 3.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
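A minimal sketch, in Go with illustrative names only (this is not the actual ursrv code), of the two counting modes described above:

package report

// featureTally accumulates one feature's usage (hypothetical structure).
type featureTally struct {
	reports int // reports in which the feature appeared at least once
	units   int // units (e.g. devices) using the feature, summed over all reports
	total   int // all units seen, summed over all reports
}

// perReportPct: share of reports that use the feature at all.
func perReportPct(t featureTally, totalReports int) float64 {
	if totalReports == 0 {
		return 0
	}
	return 100 * float64(t.reports) / float64(totalReports)
}

// perUnitPct: share of units across all reports that use the feature,
// e.g. devices with static addresses among all known devices.
func perUnitPct(t featureTally) float64 {
	if t.total == 0 {
		return 0
	}
	return 100 * float64(t.units) / float64(t.total)
}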
||||
|
||||
|
||||
<div class="row">
|
||||
{{$i := counter}}
|
||||
{{range $featureName := .featureOrder}}
|
||||
{{$featureValues := index $.features $featureName }}
|
||||
{{if $i.DrawTwoDivider}}
|
||||
</div>
|
||||
<div class="row">
|
||||
{{end}}
|
||||
{{ $i.Increment }}
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead><tr>
|
||||
<th>{{$featureName}} Features</th><th colspan="2" class="text-center">Usage</th>
|
||||
</tr></thead>
|
||||
<tbody>
|
||||
{{range $featureValues}}
|
||||
<tr>
|
||||
<td style="width: 50%">{{.Key}} ({{.Version}})</td>
|
||||
<td style="width: 10%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
||||
<td style="width: 40%" {{if lt .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}>
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px" {{if ge .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}></div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h4 id="features">Feature Group Usage</h4>
|
||||
<p>
|
||||
The following lists feature usage groups, which might include multiple occurrences of a feature per report.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
{{$i := counter}}
|
||||
{{range $featureName := .featureOrder}}
|
||||
{{$featureValues := index $.featureGroups $featureName }}
|
||||
{{if $i.DrawTwoDivider}}
|
||||
</div>
|
||||
<div class="row">
|
||||
{{end}}
|
||||
{{ $i.Increment }}
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead><tr>
|
||||
<th>{{$featureName}} Group Features</th><th colspan="2" class="text-center">Usage</th>
|
||||
</tr></thead>
|
||||
<tbody>
|
||||
{{range $featureValues}}
|
||||
{{$counts := .Counts}}
|
||||
<tr>
|
||||
<td style="width: 50%">
|
||||
<div data-toggle="tooltip" title='{{range $key, $value := .Counts}}{{$key}} ({{$value | proportion $counts | printf "%.02f"}}% - {{$value}})</br>{{end}}'>
|
||||
{{.Key}} ({{.Version}})
|
||||
</div>
|
||||
</td>
|
||||
<td style="width: 50%">
|
||||
<div class="progress" role="progressbar" style="width: 100%">
|
||||
{{$j := counter}}
|
||||
{{range $key, $value := .Counts}}
|
||||
{{with $valuePct := $value | proportion $counts}}
|
||||
<div class="progress-bar {{ $j.Current | progressBarClassByIndex }}" style='width: {{$valuePct | printf "%.02f"}}%' data-toggle="tooltip" title='{{$key}} ({{$valuePct | printf "%.02f"}}% - {{$value}})'>
|
||||
{{if ge $valuePct 30.0}}{{$key}}{{end}}
|
||||
</div>
|
||||
{{end}}
|
||||
{{ $j.Increment }}
|
||||
{{end}}
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h1 id="performance-charts">Historical Performance Data</h1>
|
||||
<p>These charts are all the average of the corresponding metric, for the entire population of a given day.</p>
|
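A minimal sketch (Go, hypothetical field names; not the actual aggregation code) of the per-day population averaging these charts rely on:

package report

// dailyReport is one device's daily usage report (illustrative fields only).
type dailyReport struct {
	Day      string  // e.g. "2024-01-02"
	HashPerf float64 // MiB/s measured on that device
}

// dailyAverage averages the metric over all devices that reported on each day.
func dailyAverage(reports []dailyReport) map[string]float64 {
	sums := map[string]float64{}
	counts := map[string]int{}
	for _, r := range reports {
		sums[r.Day] += r.HashPerf
		counts[r.Day]++
	}
	avg := make(map[string]float64, len(sums))
	for day, sum := range sums {
		avg[day] = sum / float64(counts[day])
	}
	return avg
}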
||||
|
||||
<h4 id="hash-performance">Hash Performance (MiB/s)</h4>
|
||||
<div class="img-thumbnail" id="hashPerfChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="memory-usage">Memory Usage (MiB)</h4>
|
||||
<div class="img-thumbnail" id="memUsageChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="total-files">Total Number of Files</h4>
|
||||
<div class="img-thumbnail" id="totFilesChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="total-size">Total Folder Size (GiB)</h4>
|
||||
<div class="img-thumbnail" id="totMiBChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="system-ram">System RAM Size (GiB)</h4>
|
||||
<div class="img-thumbnail" id="memSizeChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<hr>
|
||||
<p>
|
||||
<a href="https://github.com/syncthing/syncthing/tree/main/cmd/ursrv">Source code</a>.
|
||||
This product includes GeoLite2 data created by MaxMind, available from
|
||||
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
|
||||
</p>
|
||||
<script type="text/javascript">
|
||||
$('[data-toggle="tooltip"]').tooltip({html:true});
|
||||
drawHeatMap();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
34
compat.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
- runtime: go1.21
|
||||
requirements:
|
||||
# See https://en.wikipedia.org/wiki/MacOS_version_history#Releases
|
||||
#
|
||||
# macOS 10.15 (Catalina) per https://go.dev/doc/go1.22#darwin
|
||||
darwin: "19"
|
||||
# Per https://go.dev/doc/go1.23#linux
|
||||
linux: "2.6.32"
|
||||
# Windows 10's initial release was 10.0.10240.16405, per
|
||||
# https://learn.microsoft.com/en-us/windows/release-health/release-information
|
||||
# and Windows 11's initial release was 10.0.22000.194 per
|
||||
# https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information
|
||||
#
|
||||
# Windows 10/Windows Server 2016 per https://go.dev/doc/go1.21#windows
|
||||
windows: "10.0"
|
||||
|
||||
- runtime: go1.22
|
||||
requirements:
|
||||
darwin: "19"
|
||||
linux: "2.6.32"
|
||||
windows: "10.0"
|
||||
|
||||
- runtime: go1.23
|
||||
requirements:
|
||||
# macOS 11 (Big Sur) per https://tip.golang.org/doc/go1.23#darwin
|
||||
darwin: "20"
|
||||
linux: "2.6.32"
|
||||
windows: "10.0"
|
||||
|
||||
- runtime: go1.24
|
||||
requirements:
|
||||
darwin: "20"
|
||||
linux: "3.2"
|
||||
windows: "10.0"
|
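The new compat.yaml maps each Go runtime to the minimum OS/kernel versions its binaries can run on. A minimal sketch of decoding it with sigs.k8s.io/yaml (already listed in go.mod further down); the struct and field names here are assumptions, not the actual Syncthing code:

package main

import (
	"fmt"
	"os"

	"sigs.k8s.io/yaml" // converts YAML to JSON before unmarshalling, hence the json tags
)

// compatEntry mirrors one list item in compat.yaml (names are assumptions).
type compatEntry struct {
	Runtime      string            `json:"runtime"`
	Requirements map[string]string `json:"requirements"` // GOOS -> minimum version string
}

func main() {
	raw, err := os.ReadFile("compat.yaml")
	if err != nil {
		panic(err)
	}
	var entries []compatEntry
	if err := yaml.Unmarshal(raw, &entries); err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Printf("%-8s linux>=%s windows>=%s darwin>=%s\n",
			e.Runtime, e.Requirements["linux"], e.Requirements["windows"], e.Requirements["darwin"])
	}
}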
||||
@@ -1,11 +0,0 @@
|
||||
[Unit]
|
||||
Description=Restart Syncthing after resume
|
||||
Documentation=man:syncthing(1)
|
||||
After=sleep.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=-/usr/bin/pkill -HUP -x syncthing
|
||||
|
||||
[Install]
|
||||
WantedBy=sleep.target
|
||||
84
go.mod
@@ -1,52 +1,52 @@
|
||||
module github.com/syncthing/syncthing
|
||||
|
||||
go 1.21.0
|
||||
go 1.22.0
|
||||
|
||||
require (
|
||||
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f
|
||||
github.com/alecthomas/kong v0.9.0
|
||||
github.com/alecthomas/kong v1.6.0
|
||||
github.com/aws/aws-sdk-go v1.55.5
|
||||
github.com/calmh/incontainer v1.0.0
|
||||
github.com/calmh/xdr v1.1.0
|
||||
github.com/ccding/go-stun v0.1.4
|
||||
github.com/calmh/xdr v1.2.0
|
||||
github.com/ccding/go-stun v0.1.5
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible
|
||||
github.com/d4l3k/messagediff v1.2.1
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
|
||||
github.com/getsentry/raven-go v0.2.0
|
||||
github.com/go-ldap/ldap/v3 v3.4.8
|
||||
github.com/go-ldap/ldap/v3 v3.4.10
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/greatroar/blobloom v0.7.2
|
||||
github.com/greatroar/blobloom v0.8.0
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7
|
||||
github.com/jackpal/gateway v1.0.15
|
||||
github.com/jackpal/gateway v1.0.16
|
||||
github.com/jackpal/go-nat-pmp v1.0.2
|
||||
github.com/julienschmidt/httprouter v1.3.0
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/maruel/panicparse/v2 v2.3.1
|
||||
github.com/maruel/panicparse/v2 v2.4.0
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1
|
||||
github.com/maxmind/geoipupdate/v6 v6.1.0
|
||||
github.com/minio/sha256-simd v1.0.1
|
||||
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75
|
||||
github.com/oschwald/geoip2-golang v1.9.0
|
||||
github.com/pierrec/lz4/v4 v4.1.21
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/quic-go/quic-go v0.44.0
|
||||
github.com/oschwald/geoip2-golang v1.11.0
|
||||
github.com/pierrec/lz4/v4 v4.1.22
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0
|
||||
github.com/quic-go/quic-go v0.48.2
|
||||
github.com/rabbitmq/amqp091-go v1.10.0
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
|
||||
github.com/shirou/gopsutil/v3 v3.24.4
|
||||
github.com/shirou/gopsutil/v4 v4.24.12
|
||||
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
|
||||
github.com/thejerf/suture/v4 v4.0.5
|
||||
github.com/urfave/cli v1.22.15
|
||||
github.com/thejerf/suture/v4 v4.0.6
|
||||
github.com/urfave/cli v1.22.16
|
||||
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0
|
||||
github.com/willabides/kongplete v0.4.0
|
||||
go.uber.org/automaxprocs v1.5.3
|
||||
golang.org/x/crypto v0.23.0
|
||||
golang.org/x/net v0.25.0
|
||||
golang.org/x/sys v0.20.0
|
||||
golang.org/x/text v0.15.0
|
||||
golang.org/x/time v0.5.0
|
||||
golang.org/x/tools v0.21.0
|
||||
google.golang.org/protobuf v1.34.1
|
||||
go.uber.org/automaxprocs v1.6.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/net v0.33.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/text v0.21.0
|
||||
golang.org/x/time v0.8.0
|
||||
golang.org/x/tools v0.28.0
|
||||
google.golang.org/protobuf v1.36.1
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -55,38 +55,44 @@ require (
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.1 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/pprof v0.0.0-20240521024322-9665fa269a30 // indirect
|
||||
github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nxadm/tail v1.4.11 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.17.3 // indirect
|
||||
github.com/oschwald/maxminddb-golang v1.12.0 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.20.2 // indirect
|
||||
github.com/oschwald/maxminddb-golang v1.13.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/posener/complete v1.2.3 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.53.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.0 // indirect
|
||||
github.com/prometheus/common v0.60.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/stretchr/testify v1.9.0 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.14 // indirect
|
||||
github.com/tklauser/numcpus v0.9.0 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
|
||||
222
go.sum
@@ -2,25 +2,27 @@ github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f h1:GmH5
|
||||
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f/go.mod h1:Nhfib1j/VFnLrXL9cHgA+/n2O6P5THuWelOnbfPNd78=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU=
|
||||
github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
||||
github.com/alecthomas/kong v0.9.0 h1:G5diXxc85KvoV2f0ZRVuMsi45IrBgx9zDNGNj165aPA=
|
||||
github.com/alecthomas/kong v0.9.0/go.mod h1:Y47y5gKfHp1hDc7CH7OeXgLIpp+Q2m1Ni0L5s3bI8Os=
|
||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
|
||||
github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
||||
github.com/alecthomas/kong v1.6.0 h1:mwOzbdMR7uv2vul9J0FU3GYxE7ls/iX1ieMg5WIM6gE=
|
||||
github.com/alecthomas/kong v1.6.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
|
||||
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
|
||||
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/calmh/glob v0.0.0-20220615080505-1d823af5017b h1:Fjm4GuJ+TGMgqfGHN42IQArJb77CfD/mAwLbDUoJe6g=
|
||||
github.com/calmh/glob v0.0.0-20220615080505-1d823af5017b/go.mod h1:91K7jfEsgJSyfSrX+gmrRfZMtntx6JsHolWubGXDopg=
|
||||
github.com/calmh/incontainer v1.0.0 h1:g2cTUtZuFGmMGX8GoykPkN1Judj2uw8/3/aEtq4Z/rg=
|
||||
github.com/calmh/incontainer v1.0.0/go.mod h1:eOhqnw15c9X+4RNBe0W3HlUZFfX16O0EDsCOInTndHY=
|
||||
github.com/calmh/xdr v1.1.0 h1:U/Dd4CXNLoo8EiQ4ulJUXkgO1/EyQLgDKLgpY1SOoJE=
|
||||
github.com/calmh/xdr v1.1.0/go.mod h1:E8sz2ByAdXC8MbANf1LCRYzedSnnc+/sXXJs/PVqoeg=
|
||||
github.com/ccding/go-stun v0.1.4 h1:lC0co3Q3vjAuu2Jz098WivVPBPbemYFqbwE1syoka4M=
|
||||
github.com/ccding/go-stun v0.1.4/go.mod h1:cCZjJ1J3WFSJV6Wj8Y9Di8JMTsEXh6uv2eNmLzKaUeM=
|
||||
github.com/calmh/xdr v1.2.0 h1:GaGSNH4ZDw9kNdYqle6+RcAENiaQ8/611Ok+jQbBEeU=
|
||||
github.com/calmh/xdr v1.2.0/go.mod h1:vO5+lCx/8xz7Ekd/ZLf+xuy7c1x6FMO1pBJyjDebwyg=
|
||||
github.com/ccding/go-stun v0.1.5 h1:qEM367nnezmj7dv+SdT52prv5x6HUTG3nlrjX5aitlo=
|
||||
github.com/ccding/go-stun v0.1.5/go.mod h1:cCZjJ1J3WFSJV6Wj8Y9Di8JMTsEXh6uv2eNmLzKaUeM=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d h1:S2NE3iHSwP0XV47EEXL8mWmRdEfGscSJ+7EgePNgt0s=
|
||||
@@ -32,15 +34,15 @@ github.com/chmduquesne/rollinghash v4.0.0+incompatible/go.mod h1:Uc2I36RRfTAf7Dg
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U=
|
||||
github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI=
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
|
||||
github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE=
|
||||
github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
@@ -49,23 +51,20 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
|
||||
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-ldap/ldap/v3 v3.4.8 h1:loKJyspcRezt2Q3ZRMq2p/0v8iOurlmeXDPw6fikSvQ=
|
||||
github.com/go-ldap/ldap/v3 v3.4.8/go.mod h1:qS3Sjlu76eHfHGpUdWkAXQTw4beih+cHsco2jXlIXrk=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
|
||||
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
@@ -81,20 +80,18 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20240521024322-9665fa269a30 h1:r6YdmbD41tGHeCWDyHF691LWtL7D1iSTyJaKejTWwVU=
|
||||
github.com/google/pprof v0.0.0-20240521024322-9665fa269a30/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/pprof v0.0.0-20241009165004-a3522334989c h1:NDovD0SMpBYXlE1zJmS1q55vWB/fUQBcPAqAboZSccA=
|
||||
github.com/google/pprof v0.0.0-20241009165004-a3522334989c/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/greatroar/blobloom v0.7.2 h1:F30MGLHOcb4zr0pwCPTcKdlTM70rEgkf+LzdUPc5ss8=
|
||||
github.com/greatroar/blobloom v0.7.2/go.mod h1:mjMJ1hh1wjGVfr93QIHJ6FfDNVrA0IELv8OvMHJxHKs=
|
||||
github.com/greatroar/blobloom v0.8.0 h1:I9RlEkfqK9/6f1v9mFmDYegDQ/x0mISCpiNpAm23Pt4=
|
||||
github.com/greatroar/blobloom v0.8.0/go.mod h1:mjMJ1hh1wjGVfr93QIHJ6FfDNVrA0IELv8OvMHJxHKs=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@@ -110,8 +107,8 @@ github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUq
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jackpal/gateway v1.0.15 h1:yb4Gltgr8ApHWWnSyybnDL1vURbqw7ooo7IIL5VZSeg=
|
||||
github.com/jackpal/gateway v1.0.15/go.mod h1:dbyEDcDhHUh9EmjB9ung81elMUZfG0SoNc2TfTbcj4c=
|
||||
github.com/jackpal/gateway v1.0.16 h1:mTBRuHSW8qviVqX7kXnxKevqlfS/OA01ys6k6fxSX7w=
|
||||
github.com/jackpal/gateway v1.0.16/go.mod h1:IOn1OUbso/cGYmnCBZbCEqhNCLSz0xxdtIpUpri5/nA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
|
||||
@@ -126,34 +123,34 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/maruel/panicparse/v2 v2.3.1 h1:NtJavmbMn0DyzmmSStE8yUsmPZrZmudPH7kplxBinOA=
|
||||
github.com/maruel/panicparse/v2 v2.3.1/go.mod h1:s3UmQB9Fm/n7n/prcD2xBGDkwXD6y2LeZnhbEXvs9Dg=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/maruel/panicparse/v2 v2.4.0 h1:yQKMIbQ0DKfinzVkTkcUzQyQ60UCiNnYfR7PWwTs2VI=
|
||||
github.com/maruel/panicparse/v2 v2.4.0/go.mod h1:nOY2OKe8csO3F3SA5+hsxot05JLgukrF54B9x88fVp4=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1 h1:NicmruxkeqHjDv03SfSxqmaLuisddudfP3h5wdXFbhM=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1/go.mod h1:eyp4DdUJAKkr9tvxR3jWhw2mDK7CWABMG5r9uyaKC7I=
|
||||
github.com/maxmind/geoipupdate/v6 v6.1.0 h1:sdtTHzzQNJlXF5+fd/EoPTucRHyMonYt/Cok8xzzfqA=
|
||||
github.com/maxmind/geoipupdate/v6 v6.1.0/go.mod h1:cZYCDzfMzTY4v6dKRdV7KTB6SStxtn3yFkiJ1btTGGc=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 h1:cUVxyR+UfmdEAZGJ8IiKld1O0dbGotEnkMolG5hfMSY=
|
||||
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75/go.mod h1:pBbZyGwC5i16IBkjVKoy/sznA8jPD/K9iedwe1ESE6w=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
|
||||
@@ -164,20 +161,20 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
|
||||
github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
|
||||
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
|
||||
github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc=
|
||||
github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y=
|
||||
github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs=
|
||||
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII6r8krA3w=
|
||||
github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
|
||||
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
|
||||
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
|
||||
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
|
||||
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -185,21 +182,24 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
|
||||
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
|
||||
github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek=
|
||||
github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
|
||||
github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0=
|
||||
github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek=
|
||||
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
|
||||
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
|
||||
github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab h1:ZjX6I48eZSFetPb41dHudEyVr5v953N15TsNZXlkcWY=
|
||||
@@ -210,10 +210,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
|
||||
github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
|
||||
github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
|
||||
github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
|
||||
github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4=
|
||||
github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -226,30 +224,34 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 h1:F4snRP//nIuTTW9LYEzVH4HVwDG9T3M4t8y/2nqMbiY=
|
||||
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2/go.mod h1:J0q59IWjLtpRIJulohwqEZvjzwOfTEPp8SVhDJl+y0Y=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
|
||||
github.com/thejerf/suture/v4 v4.0.5 h1:F1E/4FZwXWqvlWDKEUo6/ndLtxGAUzMmNqkrMknZbAA=
|
||||
github.com/thejerf/suture/v4 v4.0.5/go.mod h1:gu9Y4dXNUWFrByqRt30Rm9/UZ0wzRSt9AJS6xu/ZGxU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/thejerf/suture/v4 v4.0.6 h1:QsuCEsCqb03xF9tPAsWAj8QOAJBgQI1c0VqJNaingg8=
|
||||
github.com/thejerf/suture/v4 v4.0.6/go.mod h1:gu9Y4dXNUWFrByqRt30Rm9/UZ0wzRSt9AJS6xu/ZGxU=
|
||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
|
||||
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.15 h1:nuqt+pdC/KqswQKhETJjo7pvn/k4xMUxgW6liI7XpnM=
|
||||
github.com/urfave/cli v1.22.15/go.mod h1:wSan1hmo5zeyLGBjRJbzRTNk8gwoYa2B9n4q9dmRIc0=
|
||||
github.com/urfave/cli v1.22.16 h1:MH0k6uJxdwdeWQTwhSO42Pwr4YLrNLwBtg1MRgTqPdQ=
|
||||
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
|
||||
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 h1:okhMind4q9H1OxF44gNegWkiP4H/gsTFLalHFa4OOUI=
|
||||
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0/go.mod h1:TTbGUfE+cXXceWtbTHq6lqcTvYPBKLNejBEbnUsQJtU=
|
||||
github.com/willabides/kongplete v0.4.0 h1:eivXxkp5ud5+4+NVN9e4goxC5mSh3n1RHov+gsblM2g=
|
||||
github.com/willabides/kongplete v0.4.0/go.mod h1:0P0jtWD9aTsqPSUAl4de35DLghrr57XcayPyvqSi2X8=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
|
||||
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
|
||||
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
@@ -257,23 +259,25 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
@@ -284,18 +288,21 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -312,10 +319,7 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -323,38 +327,42 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -368,8 +376,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -378,8 +386,12 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
@@ -224,7 +224,6 @@ code.ng-binding{
}

.well, .form-control[readonly="readonly"], .popover { /* read-only fields*/
color: #666 !important;
border-color: #444 !important;
background-color: #111 !important;
}
@@ -278,3 +277,17 @@ code.ng-binding{
.reception {
filter: invert(77%) sepia(0%) saturate(724%) hue-rotate(146deg) brightness(91%) contrast(85%);
}

/* Disabled checkbox panels */

.checkbox[disabled] {
background-color: #222222;
}

.checkbox[disabled] * {
color: #666666;
}

.checkbox[disabled] .help-block {
color: #666666 !important;
}
@@ -228,7 +228,6 @@ code.ng-binding{
}

.well, .form-control[readonly="readonly"], .popover { /* read-only fields*/
color: #666 !important;
border-color: #424242 !important;
background-color: #3B3B3B !important;
}
@@ -289,4 +288,18 @@ code.ng-binding{
/* Remote Devices 'connection type'-icon color set to #aaa */
.reception {
filter: invert(77%) sepia(0%) saturate(724%) hue-rotate(146deg) brightness(91%) contrast(85%);
}
}

/* Disabled checkbox panels */

.checkbox[disabled] {
background-color: #3B3B3B;
}

.checkbox[disabled] * {
color: #999999;
}

.checkbox[disabled] .help-block {
color: #999999 !important;
}
@@ -304,11 +304,7 @@ a.toggler:hover {
text-decoration: none;
}

/**
 * Panel padding decrease
 */

.panel-collapse .panel-body {
.panel-body.less-padding {
padding: 5px;
}
@@ -452,7 +448,6 @@ ul.three-columns li, ul.two-columns li {
}

@media (max-width:479px) {

nav .dropdown-toggle {
font-size: 1em;
}
@@ -460,13 +455,7 @@ ul.three-columns li, ul.two-columns li {
.navbar-nav .open .dropdown-menu > li > a {
padding: 12px 15px 12px 25px;
}
}

.tab-content {
padding-top: 10px;
}

@media (max-width: 419px) {
/* The selectors are build to target only the content of folder and device
panels as it would "destroy" e.g. out of sync or recent changes listings.
The !important is needed to override .visible-xs that sets display to a
@@ -517,6 +506,10 @@ ul.three-columns li, ul.two-columns li {
}
}

.tab-content {
padding-top: 10px;
}

.form-horizontal .form-group {
margin-bottom: 5px;
}
@@ -70,12 +70,28 @@
"Connection Type": "Тып Падлучэння",
"Connections": "Падлучэнні",
"Connections via relays might be rate limited by the relay": "Падлучэнні праз рэтранслятар могуць быць абмежаванымі самім рэтранслятарам",
"Continuously watching for changes is now available within Syncthing. This will detect changes on disk and issue a scan on only the modified paths. The benefits are that changes are propagated quicker and that less full scans are required.": "Працяглы прагляд змен цяпер даступны без Syncthing. Ён выяўляе змены на дыску і скануе толькі мадыфікаваныя шляхі. Перавага метада ў тым, што змены распаўсюджваюцца хучэй і патарабуецца меньшая колькасць поўных сканаванняў.",
"Copied from elsewhere": "Скапіявана з іншага месца",
"Copied from original": "Скапіявана з арыгіналу",
"Copied!": "Скапіявана!",
"Copy": "Скапіяваць",
"Copy failed! Try to select and copy manually.": "Капіяванне не адбылося! Паспрабуйце вылучыць і скапіяваць уласнаручна.",
"Currently Shared With Devices": "Ужо Абагулена З Прыладамі",
"Custom Range": "Уласны Дыяпазон",
"Danger!": "Небязпечна!",
"Database Location": "Шлях Да Базы Даных",
"Debugging Facilities": "Сродкі Адладкі",
"Default": "Стандартныя",
"Default Configuration": "Стандартныя Налады",
"Default Device": "Стандартная Прылада",
"Default Folder": "Стандартная Тэчка",
"Default Ignore Patterns": "Шаблоны ігнаравання па змаўчанні",
"Defaults": "Па змаўчанні",
"Delete": "Выдаліць",
"Delete Unexpected Items": "Выдаліць Нечаканыя Элементы",
"Deleted {%file%}": "Выдалены {{file}}",
"Deselect All": "Зняць выбар з усіх",
"Device": "Прылада",
"Device ID": "ID прылады",
"Device Identification": "Ідэнтыфікацыя прылады",
"Device Name": "Назва прылады",
@@ -22,7 +22,7 @@
"Advanced Configuration": "Разширени настройки",
"All Data": "Всички данни",
"All Time": "През цялото време",
"All folders shared with this device must be protected by a password, such that all sent data is unreadable without the given password.": "Всички папки, споделени с устройството трябва да бъдат защитени с парола, така че данните да са недостъпни без нея.",
"All folders shared with this device must be protected by a password, such that all sent data is unreadable without the given password.": "Папките, споделени с устройството трябва да бъдат защитени с парола, така че данните да са недостъпни без нея.",
"Allow Anonymous Usage Reporting?": "Разрешавате ли анонимно отчитане на употребата?",
"Allowed Networks": "Разрешени мрежи",
"Alphabetic": "Азбучен ред",
@@ -111,7 +111,7 @@
"Disabled periodic scanning and failed setting up watching for changes, retrying every 1m:": "Изключено периодично обхождане и грешка при започване на наблюдението за промени, прави се опит всяка минута:",
"Disables comparing and syncing file permissions. Useful on systems with nonexistent or custom permissions (e.g. FAT, exFAT, Synology, Android).": "Изключва сравняването и синхронизацията на правата на файловете. Полезно за системи с липсващи или специфични права (като FAT, exFAT, Synology, Android).",
"Discard": "Отказване",
"Disconnected": "Не е свързано",
"Disconnected": "Няма връзка",
"Disconnected (Inactive)": "Не е свързано (неизползвано)",
"Disconnected (Unused)": "Не е свързано (неизползвано)",
"Discovered": "Открит",
@@ -134,8 +134,8 @@
"Edit Folder Defaults": "За нови папки",
"Editing {%path%}.": "Променяне на {{path}}.",
"Enable Crash Reporting": "Включване на доклад за срив",
"Enable NAT traversal": "Преминаване през NAT",
"Enable Relaying": "Препращане",
"Enable NAT traversal": "Обхождане на NAT",
"Enable Relaying": "Разрешаване на ретранслатори",
"Enabled": "Включено",
"Enables sending extended attributes to other devices, and applying incoming extended attributes. May require running with elevated privileges.": "Когато е отметнато разширените атрибути се изпращат към другите устройства, а получените разширени атрибути се прилагат. Обикновено изисква съответните за целта права.",
"Enables sending extended attributes to other devices, but not applying incoming extended attributes. This can have a significant performance impact. Always enabled when \"Sync Extended Attributes\" is enabled.": "Когато е отметнато разширените атрибути се изпращат към другите устройства, но получените разширени атрибути не се прилагат. Може да има значително неблагоприятно влияние върху производителността. Винаги е отметнато когато „Синхронизиране на разширени атрибути“ е отметнато.",
@@ -158,8 +158,8 @@
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Неуспешна връзка към сървъри по IPv6 може да се очаква ако няма свързаност по IPv6.",
"File Pull Order": "Ред на изтегляне",
"File Versioning": "Версии на файловете",
"Files are moved to .stversions directory when replaced or deleted by Syncthing.": "Файловете биват преместени в папка .stversions при заменяне или изтриване от Syncthing.",
"Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "Когато Syncthing замени или изтрие файл той бива преместен в папката .stversions и преименуван чрез добавяне на датата и часа.",
"Files are moved to .stversions directory when replaced or deleted by Syncthing.": "Когато Syncthing замени или премахне файл, негова версия се копира в папка .stversions.",
"Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "Когато Syncthing замени или премахне файл, негова версия се копира в папка .stversions, като в името му се добавят датата и часът.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Предпазва местните файлове от промени, идващи от другите устройства, но местните промени се изпращат.",
"Files are synchronized from the cluster, but any changes made locally will not be sent to other devices.": "Файловете се синхронизират от другите устройства, но местните промени не се изпращат.",
"Filesystem Watcher Errors": "Грешка при наблюдаване на файловата система",
@@ -205,7 +205,7 @@
"Ignored Folders": "Пренебрегнати папки",
"Ignored at": "Пренебрегнато на",
"Included Software": "Използван софтуер",
"Incoming Rate Limit (KiB/s)": "Ограничение при изтегляне (KiB/s)",
"Incoming Rate Limit (KiB/s)": "Ограничение при изтегляне (КиБ/с)",
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Неправилни настройки могат да повредят файлове и да попречат на синхронизирането.",
"Incorrect user name or password.": "Грешно потребителско име или парола.",
"Internally used paths:": "Вътрешно използвани пътища:",
@@ -215,7 +215,7 @@
"Inversion of the given condition (i.e. do not exclude)": "Обръща значението на условието (напр. да не се отхвърля)",
"Keep Versions": "Пазени версии",
"LDAP": "LDAP",
"Largest First": "Първо най-големи",
"Largest First": "Първо най-големите",
"Last 30 Days": "Последните 30 дена",
"Last 7 Days": "Последните 7 дена",
"Last Month": "Миналия месец",
@@ -261,7 +261,7 @@
"Never": "никога",
"New Device": "Ново устройство",
"New Folder": "Нова папка",
"Newest First": "Първо най-нови",
"Newest First": "Първо най-новите",
"No": "Не",
"No File Versioning": "Без пазене на версии",
"No files will be deleted as a result of this operation.": "В резултат на операцията няма да бъдат премахнати файлове.",
@@ -272,12 +272,12 @@
"Number of Connections": "Брой на връзките",
"OK": "Добре",
"Off": "Изключено",
"Oldest First": "Първо най-стари",
"Oldest First": "Първо най-старите",
"Optional descriptive label for the folder. Can be different on each device.": "Незадължително име на папката. Може да бъде различно на всяко устройство.",
"Options": "Настройки",
"Out of Sync": "Несинхронизирано",
"Out of Sync Items": "Несинхронизирани елементи",
"Outgoing Rate Limit (KiB/s)": "Ограничение при качване (KiB/s)",
"Outgoing Rate Limit (KiB/s)": "Ограничение при качване (КиБ/с)",
"Override": "Налагане",
"Override Changes": "Налагане на местни промени",
"Ownership": "Собственост",
@@ -307,11 +307,11 @@
"QUIC LAN": "QUIC LAN",
"QUIC WAN": "QUIC WAN",
"Quick guide to supported patterns": "Кратък наръчник на поддържаните шаблони",
"Random": "Произволен",
"Receive Encrypted": "Приема шифровани данни",
"Random": "В случаен ред",
"Receive Encrypted": "Получава шифровани данни",
"Receive Only": "Само получава",
"Received data is already encrypted": "Получените данни вече са шифровани",
"Recent Changes": "Последни промени",
"Recent Changes": "Скорошни промени",
"Reduced by ignore patterns": "Наложени са шаблони за пренебрегване",
"Relay LAN": "Препращане по LAN",
"Relay WAN": "Препращане по WAN",
@@ -374,7 +374,7 @@
"Simple File Versioning": "Обикновени версии",
"Single level wildcard (matches within a directory only)": "Заместващ символ за едно ниво (съвпада само с папка)",
"Size": "Размер",
"Smallest First": "Първо най-малки",
"Smallest First": "Първо най-малките",
"Some discovery methods could not be established for finding other devices or announcing this device:": "Следните методи за откриване не могат да бъдат използвани за намиране на други устройства или за обявяване на това устройство, за да бъде открито от останалите:",
"Some items could not be restored:": "Някои елементи не могат да бъдат възстановени:",
"Some listening addresses could not be enabled to accept connections:": "Някои от адресите, на които Syncthing очаква връзка не могат да бъдат настроени да получават входящи връзки:",
@@ -384,7 +384,7 @@
"Stable releases only": "Само стабилни версии",
"Staggered": "Разпределени",
"Staggered File Versioning": "Разпределени версии",
"Start Browser": "Отваряне в мрежов четец",
"Start Browser": "Отваряне на мрежов четец",
"Statistics": "Статистика",
"Stay logged in": "Оставане в системата",
"Stopped": "Спряна",
@@ -437,11 +437,11 @@
"The interval must be a positive number of seconds.": "Интервалът трябва да е положителен брой секунди.",
"The interval, in seconds, for running cleanup in the versions directory. Zero to disable periodic cleaning.": "Интервал, в секунди, на почистване на папката с версии. Нула изключва периодичното почистване.",
"The maximum age must be a number and cannot be blank.": "Максималната възраст трябва да е число, полето не може да бъде празно.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максимална продължителност за пазене на версия (в дни, за да не бъдат изтривани версии задайте 0).",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максимален срок за пазене на версия (в дни, 0 – без ограничение).",
"The number of connections must be a non-negative number.": "Броят на връзките трябва да бъде положително число.",
"The number of days must be a number and cannot be blank.": "Броят дни трябва да бъде число и не може да бъде празно.",
"The number of days to keep files in the trash can. Zero means forever.": "Брой дни за пазене на файловете в кошчето. Нула значи завинаги.",
"The number of old versions to keep, per file.": "Брой стари версии, които да бъдат пазени за всеки файл.",
"The number of old versions to keep, per file.": "Брой пазени стари версии на файл.",
"The number of versions must be a number and cannot be blank.": "Броят версии трябва да бъде число и не може да бъде празно.",
"The path cannot be blank.": "Пътят не може да бъде празен.",
"The rate limit is applied to the accumulated traffic of all connections to this device.": "Ограничението се прилага към общия трафик от всички връзки към това устройство.",
@@ -474,7 +474,7 @@
"Unexpected Items": "Неочаквани елементи",
"Unexpected items have been found in this folder.": "В папката са намерени неочаквани елементи.",
"Unignore": "Отменяне на пренебрегване",
"Unknown": "Неясно",
"Unknown": "Неизвестно",
"Unshared": "Несподелена",
"Unshared Devices": "Устройства, с които не е споделена",
"Unshared Folders": "Несподелени папки",
@@ -498,7 +498,7 @@
"Using a direct TCP connection over WAN": "Използване на директна свързаност с TCP през широкодостъпна мрежа",
"Version": "Издание",
"Versions": "Версии",
"Versions Path": "Път до версиите",
"Versions Path": "Папка с версиите",
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Версиите биват изтривани автоматично ако са по-стари от максималната възраст или надминават броя разрешени версии за определено време.",
"Waiting to Clean": "Изчаква за почистване",
"Waiting to Scan": "Изчаква за обхождане",
@@ -14,7 +14,7 @@
|
||||
"Add devices from the introducer to our device list, for mutually shared folders.": "Afegiu dispositius de l'introductor a la nostra llista de dispositius, per a carpetes compartides mútuament.",
|
||||
"Add filter entry": "Afegeix una entrada de filtre",
|
||||
"Add ignore patterns": "Afegiu patrons per ignorar",
|
||||
"Add new folder?": "Vols afegir una carpeta nova?",
|
||||
"Add new folder?": "Voleu afegir una carpeta nova?",
|
||||
"Additionally the full rescan interval will be increased (times 60, i.e. new default of 1h). You can also configure it manually for every folder later after choosing No.": "A més, s'augmentarà l'interval de reexploració complet (vegades 60, és a dir, el nou predeterminat d'1 h). També podeu configurar-lo manualment per a cada carpeta més tard després de triar No.",
|
||||
"Address": "Adreça",
|
||||
"Addresses": "Adreces",
|
||||
@@ -29,15 +29,17 @@
|
||||
"Altered by ignoring deletes.": "S'ha alterat ignorant les supressions.",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Una ordre externa gestiona la versió. Ha d'eliminar el fitxer de la carpeta compartida. Si el camí a l'aplicació conté espais, s'ha de citar.",
|
||||
"Anonymous Usage Reporting": "Informe anònim d'ús",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "El format de l'informe d'ús anònim ha canviat. Vols canviar a aquest nou format?",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "El format de l'informe d'ús anònim ha canviat. Voleu canviar a aquest nou format?",
|
||||
"Applied to LAN": "Aplicat a LAN",
|
||||
"Apply": "Aplica",
|
||||
"Are you sure you want to override all remote changes?": "Esteu segur que voleu anul·lar tots els canvis remots?",
|
||||
"Are you sure you want to permanently delete all these files?": "Estàs segur que vols esborrar tots aquests fitxers permanentment?",
|
||||
"Are you sure you want to remove device {%name%}?": "Estàs segur que vols esborrar el dispositiu {{name}}?",
|
||||
"Are you sure you want to remove folder {%label%}?": "Estàs segur que vols esborrar la carpeta {{label}}?",
|
||||
"Are you sure you want to restore {%count%} files?": "Estàs segur que vols restaurar {{count}} fitxers?",
|
||||
"Are you sure you want to permanently delete all these files?": "Segur que voleu esborrar tots aquests fitxers permanentment?",
|
||||
"Are you sure you want to remove device {%name%}?": "Segur que voleu eliminar el dispositiu {{name}}?",
|
||||
"Are you sure you want to remove folder {%label%}?": "Segur que voleu eliminar la carpeta {{label}}?",
|
||||
"Are you sure you want to restore {%count%} files?": "Segur que voleu restaurar {{count}} fitxers?",
|
||||
"Are you sure you want to revert all local changes?": "Esteu segur que voleu revertir tots els canvis locals?",
|
||||
"Are you sure you want to upgrade?": "Esteu segur que voleu actualitzar?",
|
||||
"Authentication Required": "Autenticació necessària",
|
||||
"Authors": "Autors",
|
||||
"Auto Accept": "Auto Acceptar",
|
||||
"Automatic Crash Reporting": "Informe automàtic d'incidències",
|
||||
@@ -55,7 +57,7 @@
|
||||
"Cleaning Versions": "Netejant versions",
|
||||
"Cleanup Interval": "Interval de neteja",
|
||||
"Click to see full identification string and QR code.": "Feu clic per veure la cadena d'identificació completa i el codi QR.",
|
||||
"Close": "Tancar",
|
||||
"Close": "Tanca",
|
||||
"Command": "Comando",
|
||||
"Comment, when used at the start of a line": "Comentari quan és usat al principi d'una línia",
|
||||
"Compression": "Compressió",
|
||||
@@ -64,6 +66,7 @@
|
||||
"Configured": "Configurat",
|
||||
"Connected (Unused)": "Connectat (no utilitzat)",
|
||||
"Connection Error": "Error de connexió",
|
||||
"Connection Management": "Gestió de connexions",
|
||||
"Connection Type": "Tipus de connexió",
|
||||
"Connections": "Connexions",
|
||||
"Connections via relays might be rate limited by the relay": "Les connexions mitjançant relés poden estar limitades pel relé",
|
||||
@@ -91,11 +94,12 @@
|
||||
"Deselect devices to stop sharing this folder with.": "Desseleccioneu els dispositius amb els quals deixar de compartir aquesta carpeta.",
|
||||
"Deselect folders to stop sharing with this device.": "Desseleccioneu les carpetes per deixar de compartir-les amb aquest dispositiu.",
|
||||
"Device": "Dispositiu",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "El dispositiu \"{{name}}\" ({{device}} a {{address}}) vol connectar-se. Vols afegir un dispositiu nou?",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "El dispositiu «{{name}}» ({{device}} a {{address}}) vol connectar-se. Voleu afegir un dispositiu nou?",
|
||||
"Device Certificate": "Certificat del dispositiu",
|
||||
"Device ID": "ID del dispositiu",
|
||||
"Device Identification": "Identificació del dispositiu",
|
||||
"Device Name": "Nom del dispositiu",
|
||||
"Device Status": "Estat del dispositiu",
|
||||
"Device is untrusted, enter encryption password": "El dispositiu no és de confiança, introduïu la contrasenya d'encriptació",
|
||||
"Device rate limits": "Límits de velocitat del dispositiu",
|
||||
"Device that last modified the item": "Dispositiu que ha modificat el fitxer per última vegada",
|
||||
@@ -118,13 +122,13 @@
|
||||
"Do not add it to the ignore list, so this notification may recur.": "No l'afegiu a la llista d'ignorar, de manera que aquesta notificació pot repetir-se.",
|
||||
"Do not restore": "No restaurar",
|
||||
"Do not restore all": "No restaurar-ho tot",
|
||||
"Do you want to enable watching for changes for all your folders?": "Vols activar la cerca de canvis a totes les teves carpetes?",
|
||||
"Do you want to enable watching for changes for all your folders?": "Voleu activar el control de canvis a totes les carpetes?",
|
||||
"Documentation": "Documentació",
|
||||
"Download Rate": "Tasca de descarrega",
|
||||
"Downloaded": "Descarregat",
|
||||
"Downloading": "Descarregant",
|
||||
"Edit": "Editar",
|
||||
"Edit Device": "Editar dispositiu",
|
||||
"Edit": "Edita",
|
||||
"Edit Device": "Edita el dispositiu",
|
||||
"Edit Device Defaults": "Edita els valors predeterminats del dispositiu",
|
||||
"Edit Folder": "Modificar carpeta",
|
||||
"Edit Folder Defaults": "Edita els valors per defecte de la carpeta",
|
||||
@@ -165,8 +169,9 @@
|
||||
"Folder ID": "ID de carpeta",
|
||||
"Folder Label": "Etiqueta de la carpeta",
|
||||
"Folder Path": "Camí de carpeta",
|
||||
"Folder Status": "Estat de la carpeta",
|
||||
"Folder Type": "Tipus de carpeta",
|
||||
"Folder type \"{%receiveEncrypted%}\" can only be set when adding a new folder.": "El tipus de carpeta \"{{receiveEncrypted}}\" només es pot definir quan s'afegeix una carpeta nova.",
|
||||
"Folder type \"{%receiveEncrypted%}\" can only be set when adding a new folder.": "El tipus de carpeta «{{receiveEncrypted}}» només es pot definir quan s'afegeix una carpeta nova.",
|
||||
"Folder type \"{%receiveEncrypted%}\" cannot be changed after adding the folder. You need to remove the folder, delete or decrypt the data on disk, and add the folder again.": "El tipus de carpeta \"{{receiveEncrypted}}\" no es pot canviar després d'afegir la carpeta. Heu d'eliminar la carpeta, suprimir o desxifrar les dades del disc i tornar a afegir la carpeta.",
|
||||
"Folders": "Carpetes",
|
||||
"For the following folders an error occurred while starting to watch for changes. It will be retried every minute, so the errors might go away soon. If they persist, try to fix the underlying issue and ask for help if you can't.": "A les carpetes següents s'ha produït un error en començar a buscar canvis. Es tornarà a provar cada minut, de manera que els errors poden desaparèixer aviat. Si persisteixen, intenteu solucionar el problema subjacent i demaneu ajuda si no podeu.",
|
||||
@@ -182,8 +187,8 @@
|
||||
"GUI Theme": "Tema de la GUI",
|
||||
"General": "General",
|
||||
"Generate": "Generar",
|
||||
"Global Discovery": "Descobriment Global",
|
||||
"Global Discovery Servers": "Servidors de Descobriment Global",
|
||||
"Global Discovery": "Descobriment global",
|
||||
"Global Discovery Servers": "Servidors de descobriment global",
|
||||
"Global State": "Estat global",
|
||||
"Help": "Ajuda",
|
||||
"Hint: only deny-rules detected while the default is deny. Consider adding \"permit any\" as last rule.": "Suggeriment: només s'han detectat regles de denegació mentre el valor predeterminat és denegació. Penseu a afegir \"permet qualsevol\" com a darrera regla.",
|
||||
@@ -202,9 +207,11 @@
|
||||
"Included Software": "Programari inclòs",
|
||||
"Incoming Rate Limit (KiB/s)": "Límit de velocitat d'entrada (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Una configuració incorrecta pot malmetre els continguts de la teva carpeta i que Syncthing esdevingui inoperatiu.",
|
||||
"Incorrect user name or password.": "Nom d'usuari o contrasenya incorrecta.",
|
||||
"Internally used paths:": "Camins utilitzats internament:",
|
||||
"Introduced By": "Introduït per",
|
||||
"Introducer": "Introductor",
|
||||
"Introduction": "Introducció",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversió del patrò introduït",
|
||||
"Keep Versions": "Mantenir Versions",
|
||||
"LDAP": "LDAP",
|
||||
@@ -224,13 +231,18 @@
|
||||
"Loading data...": "Carregant dades...",
|
||||
"Loading...": "Carregant...",
|
||||
"Local Additions": "Addicions locals",
|
||||
"Local Discovery": "Descobriment Local",
|
||||
"Local Discovery": "Descobriment local",
|
||||
"Local State": "Estat local",
|
||||
"Local State (Total)": "Estat local (Total)",
|
||||
"Locally Changed Items": "Elements canviats localment",
|
||||
"Log": "Registre",
|
||||
"Log File": "Fitxer de registre",
|
||||
"Log In": "Inicia la sessió",
|
||||
"Log Out": "Tanca la sessió",
|
||||
"Log in to see paths information.": "Inicieu sessió per veure la informació dels camins.",
|
||||
"Log in to see version information.": "Inicieu sessió per veure la informació de la versió.",
|
||||
"Log tailing paused. Scroll to the bottom to continue.": "S'ha posat en pausa el seguiment del registre. Desplaceu-vos cap a la part inferior per continuar.",
|
||||
"Login failed, see Syncthing logs for details.": "No s'ha pogut iniciar la sessió; consulteu els registres de Syncthing per obtenir més informació.",
|
||||
"Logs": "Registres",
|
||||
"Major Upgrade": "Actualització major",
|
||||
"Mass actions": "Accions massives",
|
||||
@@ -255,8 +267,9 @@
|
||||
"No files will be deleted as a result of this operation.": "No se suprimirà cap fitxer com a resultat d'aquesta operació.",
|
||||
"No rules set": "No hi ha regles establertes",
|
||||
"No upgrades": "No hi ha actualitzacions",
|
||||
"Not shared": "No compartit",
|
||||
"Not shared": "Sense compartir",
|
||||
"Notice": "Avís",
|
||||
"Number of Connections": "Nombre de connexions",
|
||||
"OK": "D'acord",
|
||||
"Off": "Desactivar",
|
||||
"Oldest First": "Més antic primer",
|
||||
@@ -268,13 +281,14 @@
|
||||
"Override": "Sobreescriu",
|
||||
"Override Changes": "Sobreescriure Canvis",
|
||||
"Ownership": "Propietat",
|
||||
"Password": "Contrasenya",
|
||||
"Path": "Ruta",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Ruta de la carpeta a l'equip local. Si no existeix serà creada. El caràcter (~) es pot fer servir com a drecera de",
|
||||
"Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "Ruta on s'han d'emmagatzemar les versions (deixeu buit per al directori predeterminat .stversions a la carpeta compartida).",
|
||||
"Paths": "Rutes",
|
||||
"Pause": "Pausa",
|
||||
"Pause": "Posa-ho en pausa",
|
||||
"Pause All": "Posa-ho tot en pausa",
|
||||
"Paused": "Pausat",
|
||||
"Paused": "En pausa",
|
||||
"Paused (Unused)": "En pausa (no utilitzat)",
|
||||
"Pending changes": "Canvis pendents",
|
||||
"Periodic scanning at given interval and disabled watching for changes": "Escaneig periòdic a un interval determinat i vigilància desactivada dels canvis",
|
||||
@@ -294,8 +308,8 @@
|
||||
"QUIC WAN": "Connexió QUIC WAN",
|
||||
"Quick guide to supported patterns": "Guia ràpida per als possibles patrons",
|
||||
"Random": "Aleatori",
|
||||
"Receive Encrypted": "Rebre xifrat",
|
||||
"Receive Only": "Només rebre",
|
||||
"Receive Encrypted": "Rep xifrat",
|
||||
"Receive Only": "Només rep",
|
||||
"Received data is already encrypted": "Les dades rebudes ja estan xifrades",
|
||||
"Recent Changes": "Canvis recents",
|
||||
"Reduced by ignore patterns": "Reduït per ignorar patrons",
|
||||
@@ -309,20 +323,21 @@
|
||||
"Remove Device": "Elimina el dispositiu",
|
||||
"Remove Folder": "Elimina la carpeta",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Identificador obligatori per a la carpeta. Ha de ser el mateix en tots els dispositius del clúster.",
|
||||
"Rescan": "Re-escanejar",
|
||||
"Rescan All": "Re-escanejar tot",
|
||||
"Rescan": "Reescaneja",
|
||||
"Rescan All": "Reescaneja-ho tot",
|
||||
"Rescans": "Escaneja de nou",
|
||||
"Restart": "Reiniciar",
|
||||
"Restart Needed": "És Necessari Reiniciar",
|
||||
"Restarting": "Reiniciant",
|
||||
"Restart": "Reinicia",
|
||||
"Restart Needed": "Cal un reinici",
|
||||
"Restarting": "S'està reiniciant",
|
||||
"Restore": "Restaura",
|
||||
"Restore Versions": "Restaura versions",
|
||||
"Resume": "Reprendre",
|
||||
"Resume All": "Reprèn tot",
|
||||
"Resume": "Reprèn-ho",
|
||||
"Resume All": "Reprèn-ho tot",
|
||||
"Reused": "Reutilitzat",
|
||||
"Revert": "Reverteix",
|
||||
"Revert Local Changes": "Reverteix els canvis locals",
|
||||
"Save": "Guardar",
|
||||
"Save": "Desa",
|
||||
"Saving changes": "S'estan desant els canvis",
|
||||
"Scan Time Remaining": "Temps d'escanejat restant",
|
||||
"Scanning": "Escanejant",
|
||||
"See external versioning help for supported templated command line parameters.": "Consulteu l'ajuda de versions externa per als paràmetres de línia d'ordres de plantilla compatibles.",
|
||||
@@ -332,28 +347,28 @@
|
||||
"Select additional folders to share with this device.": "Seleccioneu carpetes addicionals per compartir amb aquest dispositiu.",
|
||||
"Select latest version": "Seleccioneu la darrera versió",
|
||||
"Select oldest version": "Seleccioneu la versió més antiga",
|
||||
"Send & Receive": "Enviar i rebre",
|
||||
"Send & Receive": "Envia i rep",
|
||||
"Send Extended Attributes": "Envia atributs ampliats",
|
||||
"Send Only": "Només enviar",
|
||||
"Send Only": "Només envia",
|
||||
"Send Ownership": "Envia la propietat",
|
||||
"Set Ignores on Added Folder": "Estableix filtres per ignorar a la carpeta afegida",
|
||||
"Settings": "Preferències",
|
||||
"Share": "Compartir",
|
||||
"Share Folder": "Compartir carpeta",
|
||||
"Share": "Comparteix",
|
||||
"Share Folder": "Comparteix la carpeta",
|
||||
"Share by Email": "Comparteix per correu electrònic",
|
||||
"Share by SMS": "Comparteix per SMS",
|
||||
"Share this folder?": "Compartir aquesta carpeta?",
|
||||
"Share this folder?": "Voleu compartir la carpeta?",
|
||||
"Shared Folders": "Carpetes compartides",
|
||||
"Shared With": "Compartir Amb",
|
||||
"Shared With": "Compartida amb",
|
||||
"Sharing": "Compartint",
|
||||
"Show ID": "Mostrar ID",
|
||||
"Show ID": "Mostra l'ID",
|
||||
"Show QR": "Mostra QR",
|
||||
"Show detailed discovery status": "Mostra l'estat detallat del descobriment",
|
||||
"Show detailed listener status": "Mostra l'estat detallat de l'oient",
|
||||
"Show diff with previous version": "Mostra la diferència amb la versió anterior",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Mostrat en comptes del ID del Node en l'estat del cluster. Serà advertit als altres dispositius com un nom opcional per defecte.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Mostrat en comptes del ID del Node en l'estat del cluster. S'actualitzarà al nom del dispositiu si es deixa buit.",
|
||||
"Shutdown": "Apagar",
|
||||
"Shutdown": "Apaga",
|
||||
"Shutdown Complete": "Apagat complet",
|
||||
"Simple": "Simple",
|
||||
"Simple File Versioning": "Versionat de Fitxers Senzill",
|
||||
@@ -369,8 +384,9 @@
|
||||
"Stable releases only": "Només versions estables",
|
||||
"Staggered": "Esglaonat",
|
||||
"Staggered File Versioning": "Versionat de Fitxers Esglaonat",
|
||||
"Start Browser": "Arrancar Navegador",
|
||||
"Start Browser": "Inicia el navegador",
|
||||
"Statistics": "Estadístiques",
|
||||
"Stay logged in": "Mantén la sessió iniciada",
|
||||
"Stopped": "Aturat",
|
||||
"Stores and syncs only encrypted data. Folders on all connected devices need to be set up with the same password or be of type \"{%receiveEncrypted%}\" too.": "Emmagatzema i sincronitza només dades encriptades. Les carpetes de tots els dispositius connectats s'han de configurar amb la mateixa contrasenya o també ser del tipus \"{{receiveEncrypted}}\".",
|
||||
"Subject:": "Assumpte:",
|
||||
@@ -382,17 +398,18 @@
|
||||
"Sync Status": "Estat de sincronització",
|
||||
"Syncing": "Synthing",
|
||||
"Syncthing device ID for \"{%devicename%}\"": "ID del dispositiu Syncthing amb el nom \"{{devicename}}\"",
|
||||
"Syncthing has been shut down.": "S'ha aturat el synthing.",
|
||||
"Syncthing has been shut down.": "Synthing s'ha aturat.",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing inclou el següent programari o parts dels mateixos:",
|
||||
"Syncthing is Free and Open Source Software licensed as MPL v2.0.": "Syncthing és un programari lliure i de codi obert amb llicència MPL v2.0.",
|
||||
"Syncthing is a continuous file synchronization program. It synchronizes files between two or more computers in real time, safely protected from prying eyes. Your data is your data alone and you deserve to choose where it is stored, whether it is shared with some third party, and how it's transmitted over the internet.": "Syncthing és un programa de sincronització contínua de fitxers. Sincronitza fitxers entre dos o més ordinadors en temps real, protegit de manera segura de mirades indiscretes. Les vostres dades són només les vostres i mereixeu triar on s'emmagatzemen, si es comparteixen amb un tercer i com es transmeten per Internet.",
|
||||
"Syncthing is listening on the following network addresses for connection attempts from other devices:": "La sincronització està escoltant a les adreces de xarxa següents els intents de connexió des d'altres dispositius:",
|
||||
"Syncthing is not listening for connection attempts from other devices on any address. Only outgoing connections from this device may work.": "La sincronització no és escoltar els intents de connexió d'altres dispositius a cap adreça. Només poden funcionar les connexions sortints d'aquest dispositiu.",
|
||||
"Syncthing is restarting.": "Reiniciant syncthing.",
|
||||
"Syncthing is restarting.": "Syncthing s'està reiniciant.",
|
||||
"Syncthing is saving changes.": "Syncthing està desant els canvis.",
|
||||
"Syncthing is upgrading.": "Actualitzant syncthing.",
|
||||
"Syncthing now supports automatically reporting crashes to the developers. This feature is enabled by default.": "Syncthing ara admet la notificació automàtica d'errors als desenvolupadors. Aquesta funció està activada per defecte.",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Sembla que Syncthing no està funcionant o hi ha un problema amb la connexió a Internet. S'està tornant a provar…",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Sembla ser que Syncthing està tinguent problemes per processar la teva petició. Si us plau, refresca la pàgina o reinicia Syncthing si el problema persisteix.",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Sembla que Syncthing està tenint problemes per a processar la petició. Recarregueu la pàgina o reinicieu Syncthing si el problema persisteix.",
|
||||
"TCP LAN": "Connexió TCP LAN",
|
||||
"TCP WAN": "Connexió TCP WAN",
|
||||
"Take me back": "Porta'm enrere",
|
||||
@@ -401,7 +418,7 @@
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "La interfície d'administració de Syncthing està configurada per permetre l'accés remot sense contrasenya.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "Les estadístiques agregades estan disponibles públicament a l'URL següent.",
|
||||
"The cleanup interval cannot be blank.": "L'interval de neteja no pot estar en blanc.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configuració s'ha guardar però no s'ha activat. S'ha de reiniciar el synthing per activar la nova configuració.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configuració s'ha desat, però no s'ha activat. S'ha de reiniciar el Synthing per a activar la configuració nova.",
|
||||
"The device ID cannot be blank.": "El ID del dispositiu no pot estar en blanc.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "L'identificador del dispositiu que cal introduir aquí es pot trobar al diàleg \"Accions > Mostra l'ID\" de l'altre dispositiu. Els espais i els guions són opcionals (ignorats).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes, and app versions. If the reported data set is changed you will be prompted with this dialog again.": "L'informe d'ús encriptat s'envia diàriament. Es fa servir per rastrejar plataformes habituals, mides de carpetes i versions de l'aplicació. Si es canvia el conjunt de dades reportades es demanarà amb aquest diàleg de nou.",
|
||||
@@ -421,11 +438,13 @@
|
||||
"The interval, in seconds, for running cleanup in the versions directory. Zero to disable periodic cleaning.": "L'interval, en segons, per executar la neteja al directori de versions. Zero per desactivar la neteja periòdica.",
|
||||
"The maximum age must be a number and cannot be blank.": "La màxima antiguitat ha de ser un número i no pot estar en blanc.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Temps màxim en mantenir una versió (en dies, si es deixa en 0 es mantenen les versions per sempre).",
|
||||
"The number of connections must be a non-negative number.": "El nombre de connexions ha de ser un nombre no negatiu.",
|
||||
"The number of days must be a number and cannot be blank.": "El nombre de dies ha de ser un número i no pot estar en blanc.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "El nombre de dies per guardar els fitxers a la paperera. Zero significa per sempre.",
|
||||
"The number of old versions to keep, per file.": "El nombre de versions antigues que es mantenen per fitxer.",
|
||||
"The number of versions must be a number and cannot be blank.": "El nombre de versions ha de ser un número i no es pot deixar en blanc.",
|
||||
"The path cannot be blank.": "El camí no pot estar en blanc.",
|
||||
"The rate limit is applied to the accumulated traffic of all connections to this device.": "El límit de velocitat s'aplica al trànsit acumulat de totes les connexions a aquest dispositiu.",
|
||||
"The rate limit must be a non-negative number (0: no limit)": "El límit de velocitat ha de ser un nombre positiu (0: sense límit)",
|
||||
"The remote device has not accepted sharing this folder.": "El dispositiu remot no ha acceptat compartir aquesta carpeta.",
|
||||
"The remote device has paused this folder.": "El dispositiu remot ha posat en pausa aquesta carpeta.",
|
||||
@@ -456,7 +475,7 @@
|
||||
"Unexpected items have been found in this folder.": "S'han trobat elements inesperats en aquesta carpeta.",
|
||||
"Unignore": "No ignorar",
|
||||
"Unknown": "Desconegut",
|
||||
"Unshared": "No compartit",
|
||||
"Unshared": "Sense compartir",
|
||||
"Unshared Devices": "Dispositius no compartits",
|
||||
"Unshared Folders": "Carpetes no compartides",
|
||||
"Untrusted": "No fiable",
|
||||
@@ -470,8 +489,11 @@
|
||||
"Usage reporting is always enabled for candidate releases.": "Els informes d'ús sempre estan activats per a les versions candidates.",
|
||||
"Use HTTPS for GUI": "Utilitzar HTTPS pel GUI",
|
||||
"Use notifications from the filesystem to detect changed items.": "Utilitzeu les notificacions del sistema de fitxers per detectar elements canviats.",
|
||||
"User": "Usuari",
|
||||
"User Home": "Carpeta d'inici de l'usuari",
|
||||
"Username/Password has not been set for the GUI authentication. Please consider setting it up.": "El nom d'usuari/contrasenya no s'ha establert per a l'autenticació de la GUI. Penseu en configurar-lo.",
|
||||
"Username/Password has not been set for the GUI authentication. Please consider setting it up.": "El nom d'usuari/contrasenya no s'ha establert per a l'autenticació de la GUI. Penseu a configurar-lo.",
|
||||
"Using a QUIC connection over LAN": "Utilitzant una connexió QUIC per LAN",
|
||||
"Using a QUIC connection over WAN": "Utilitzant una connexió QUIC a través de WAN",
|
||||
"Using a direct TCP connection over LAN": "Utilitzant una connexió TCP directa per LAN",
|
||||
"Using a direct TCP connection over WAN": "Utilitzant una connexió TCP directa a través de WAN",
|
||||
"Version": "Versió",
|
||||
@@ -492,6 +514,7 @@
|
||||
"Watching for changes discovers most changes without periodic scanning.": "Observant els canvis descobreix la majoria dels canvis sense escanejar periòdicament.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Quan s'afegeix un nou dispositiu, recorda que aquest dispositiu tambè s'ha d'afegir a l'altre banda.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Quan s'afegeix una nova carpeta recorda que el ID d'aquesta s'utilitza per lligar repositoris entre els dispositius. Es distingeix entre majúscules i minúscules i ha de ser exactament iguals entre tots els dispositius.",
|
||||
"When set to more than one on both devices, Syncthing will attempt to establish multiple concurrent connections. If the values differ, the highest will be used. Set to zero to let Syncthing decide.": "Quan s'estableix en més d'un als dos dispositius, Syncthing intentarà establir diverses connexions simultànies. Si els valors són diferents, s'utilitzarà el més alt. Establiu a zero per deixar que Sincronització decideixi.",
|
||||
"Yes": "Si",
|
||||
"Yesterday": "Ahir",
|
||||
"You can also copy and paste the text into a new message manually.": "També podeu copiar i enganxar el text en un missatge nou manualment.",
|
||||
@@ -499,8 +522,8 @@
|
||||
"You can change your choice at any time in the Settings dialog.": "Pots canviar la teva elecció en qualsevol moment al quadre de preferències.",
|
||||
"You can read more about the two release channels at the link below.": "Podeu llegir més sobre els dos canals de llançament a l'enllaç següent.",
|
||||
"You have no ignored devices.": "No teniu cap dispositiu ignorat.",
|
||||
"You have no ignored folders.": "No tens carpetes compartides.",
|
||||
"You have unsaved changes. Do you really want to discard them?": "Tens canvis no desats. Realment les voleu descartar?",
|
||||
"You have no ignored folders.": "No teniu carpetes ignorades.",
|
||||
"You have unsaved changes. Do you really want to discard them?": "Teniu canvis no desats. Realment els voleu descartar?",
|
||||
"You must keep at least one version.": "Has de mantenir com a mínim una versió.",
|
||||
"You should never add or change anything locally in a \"{%receiveEncrypted%}\" folder.": "Mai no hauríeu d'afegir ni canviar res localment a una carpeta \"{{receiveEncrypted}}\".",
|
||||
"Your SMS app should open to let you choose the recipient and send it from your own number.": "La vostra aplicació SMS s'hauria d'obrir per permetre't triar el destinatari i enviar-lo des del teu propi número.",
|
||||
@@ -525,6 +548,7 @@
|
||||
"light": "Clar"
|
||||
}
|
||||
},
|
||||
"unknown device": "dispositiu desconegut",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} vol compartir la carpeta \"{{folder}}\".",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} vol compartir la carpeta \"{{folderlabel}}\" ({{folder}}).",
|
||||
"{%reintroducer%} might reintroduce this device.": "{{reintroducer}} podria tornar a introduir aquest dispositiu."
|
||||
|
||||
@@ -1,5 +1,5 @@
{
"A device with that ID is already added.": "Zařízení s takovým identifikátorem už je přidáno.",
"A device with that ID is already added.": "Zařízení s takovým identifikátorem je již přidáno.",
"A negative number of days doesn't make sense.": "Záporný počet dní nedává smysl.",
"A new major version may not be compatible with previous versions.": "Nová hlavní verze nemusí být kompatibilní s předchozími verzemi.",
"API Key": "Klíč k API",
@@ -11,93 +11,95 @@
"Add Device": "Přidat zařízení",
"Add Folder": "Přidat složku",
"Add Remote Device": "Přidat vzdálené zařízení",
"Add devices from the introducer to our device list, for mutually shared folders.": "Přidat zařízení z uvaděče do místního seznamu zařízení a získat tak vzájemně sdílené složky.",
"Add filter entry": "Přidej filtr",
"Add ignore patterns": "Přidat vzory ignorovaného",
"Add devices from the introducer to our device list, for mutually shared folders.": "Přidat zařízení od zavaděče do našeho seznamu zařízení pro vzájemné sdílení složek.",
"Add filter entry": "Přidat položku filtru",
"Add ignore patterns": "Přidat vzory ignorování",
"Add new folder?": "Přidat novou složku?",
"Additionally the full rescan interval will be increased (times 60, i.e. new default of 1h). You can also configure it manually for every folder later after choosing No.": "Dále bude prodloužen interval mezi plnými skeny (60krát, t.j. nová výchozí hodnota 1h). V případě, že nyní zvolíte Ne, stále ještě toto později můžete u každé složky jednotlivě ručně upravit.",
|
||||
"Additionally the full rescan interval will be increased (times 60, i.e. new default of 1h). You can also configure it manually for every folder later after choosing No.": "Kromě toho se prodlouží interval úplného opětovného skenování (60krát, t.j. nová výchozí hodnota 1h). Také to můžete později nastavit ručně pro každou složku, pokud zvolíte Ne.",
|
||||
"Address": "Adresa",
|
||||
"Addresses": "Adresy",
|
||||
"Advanced": "Pokročilé",
|
||||
"Advanced Configuration": "Pokročilá nastavení",
|
||||
"Advanced": "Rozšířené",
|
||||
"Advanced Configuration": "Rozšířená nastavení",
|
||||
"All Data": "Všechna data",
|
||||
"All Time": "Celou dobu",
|
||||
"All folders shared with this device must be protected by a password, such that all sent data is unreadable without the given password.": "Všechny složky sdílené s tímto zařízením musí být chráněna heslem, aby byla odesílaná data bez hesla nečitelná.",
|
||||
"All folders shared with this device must be protected by a password, such that all sent data is unreadable without the given password.": "Všechny složky sdílené s tímto zařízením musí být chráněny heslem, aby všechna odesílaná data byla bez zadaného hesla nečitelná.",
|
||||
"Allow Anonymous Usage Reporting?": "Povolit anonymní hlášení o používání?",
|
||||
"Allowed Networks": "Sítě, ze kterých je umožněn přístup",
|
||||
"Alphabetic": "Abecední",
|
||||
"Altered by ignoring deletes.": "Změněno pomocí vzorů ignorovaného.",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Správu verzí obstarává externí příkaz. U toho je třeba, aby neaktuální soubory jím byly odsouvány pryč ze sdílené složky. Pokud popis umístění tohoto příkazu obsahuje mezeru, je třeba popis umístění uzavřít do uvozovek.",
|
||||
"Allowed Networks": "Povolené sítě",
|
||||
"Alphabetic": "Abecedně",
|
||||
"Altered by ignoring deletes.": "Změněno ignorováním odstraněných.",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Verzování obstarává externí příkaz. Ten musí odebrat soubor ze sdílené složky. Pokud cesta k aplikaci obsahuje mezery, je potřeba ji uvést v uvozovkách.",
|
||||
"Anonymous Usage Reporting": "Anonymní hlášení o používání",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "Formát anonymního hlášení o používání byl změněn. Chcete přejít na nový formát?",
|
||||
"Applied to LAN": "Použité pro místní síť",
|
||||
"Apply": "Aplikovat",
|
||||
"Are you sure you want to override all remote changes?": "Skutečně si přejete přebít všechny vzdálené změny?",
|
||||
"Are you sure you want to permanently delete all these files?": "Skutečně chcete smazat všechny tyto soubory?",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "Formát anonymního hlášení o používání se změnil. Chcete přejít na nový formát?",
|
||||
"Applied to LAN": "Použité pro LAN",
|
||||
"Apply": "Použít",
|
||||
"Are you sure you want to override all remote changes?": "Opravdu chcete přepsat všechny vzdálené změny?",
|
||||
"Are you sure you want to permanently delete all these files?": "Opravdu chcete trvale odstranit všechny tyto soubory?",
|
||||
"Are you sure you want to remove device {%name%}?": "Opravdu chcete odebrat zařízení {{name}}?",
|
||||
"Are you sure you want to remove folder {%label%}?": "Opravdu chcete odebrat složku {{label}}?",
|
||||
"Are you sure you want to restore {%count%} files?": "Opravdu chcete obnovit {{count}} souborů?",
|
||||
"Are you sure you want to revert all local changes?": "Skutečně si přejete vrátit všechny lokální změny?",
|
||||
"Are you sure you want to upgrade?": "Skutečně chcete provést aktualizaci?",
|
||||
"Authentication Required": "Autentizace vyžadována",
|
||||
"Are you sure you want to revert all local changes?": "Opravdu chcete vrátit všechny místní změny?",
|
||||
"Are you sure you want to upgrade?": "Opravdu chcete provést aktualizaci?",
|
||||
"Authentication Required": "Požadováno ověření",
|
||||
"Authors": "Autoři",
|
||||
"Auto Accept": "Přijmout automaticky",
|
||||
"Automatic Crash Reporting": "Automatické hlášení pádů",
|
||||
"Automatic upgrade now offers the choice between stable releases and release candidates.": "Automatická aktualizace nyní nabízí volbu mezi stabilními vydáními a kandidáty na ně.",
|
||||
"Automatic upgrade now offers the choice between stable releases and release candidates.": "Automatická aktualizace nyní nabízí výběr mezi stabilními vydáními a kandidáty na vydání.",
|
||||
"Automatic upgrades": "Automatické aktualizace",
|
||||
"Automatic upgrades are always enabled for candidate releases.": "Automatické aktualizace jsou vždy povolené u kandidátů na vydání.",
|
||||
"Automatically create or share folders that this device advertises at the default path.": "Automaticky vytvářet nebo sdílet složky, které toto zařízení propaguje ve výchozím popisu umístění.",
|
||||
"Available debug logging facilities:": "Dostupná logovací zařízení pro ladění:",
|
||||
"Automatically create or share folders that this device advertises at the default path.": "Automaticky vytvářet nebo sdílet složky, které toto zařízení propaguje ve výchozí cestě.",
|
||||
"Available debug logging facilities:": "Dostupné možnosti protokolování ladění:",
|
||||
"Be careful!": "Buďte opatrní!",
|
||||
"Body:": "Obsah:",
|
||||
"Bugs": "Chyby",
|
||||
"Cancel": "Zrušit",
|
||||
"Changelog": "Seznam změn",
|
||||
"Clean out after": "Vyčistit po",
|
||||
"Cleaning Versions": "Mazání verzí",
|
||||
"Cleanup Interval": "Interval mazání",
|
||||
"Click to see full identification string and QR code.": "Kliknutím zobrazíte úplnou identifikaci a QR kód.",
|
||||
"Cleaning Versions": "Čištění verzí",
|
||||
"Cleanup Interval": "Interval čištění",
|
||||
"Click to see full identification string and QR code.": "Kliknutím zobrazíte celý identifikační řetězec a QR kód.",
|
||||
"Close": "Zavřít",
|
||||
"Command": "Příkaz",
|
||||
"Comment, when used at the start of a line": "Pokud použito na jeho začátku, je řádek považován za komentář",
|
||||
"Comment, when used at the start of a line": "Považováno za komentář, pokud je použito na začátku řádku",
|
||||
"Compression": "Komprese",
|
||||
"Configuration Directory": "Konfigurační složka",
|
||||
"Configuration Directory": "Konfigurační adresář",
|
||||
"Configuration File": "Konfigurační soubor",
|
||||
"Configured": "Nastaveno",
|
||||
"Connected (Unused)": "Připojeno (nepoužité)",
|
||||
"Connected (Unused)": "Připojeno (nepoužito)",
|
||||
"Connection Error": "Chyba připojení",
|
||||
"Connection Management": "Správa připojení",
|
||||
"Connection Type": "Typ připojení",
|
||||
"Connections": "Spojení",
|
||||
"Connections via relays might be rate limited by the relay": "Připojení přes přenašeč může být přenašečem omezena rychlost",
|
||||
"Continuously watching for changes is now available within Syncthing. This will detect changes on disk and issue a scan on only the modified paths. The benefits are that changes are propagated quicker and that less full scans are required.": "Syncthing nyní umožňuje nepřetržité sledování změn. To zachytí změny na úložišti a spustí sken pouze pro umístění, ve kterých se něco změnilo. Výhodami jsou rychlejší propagace změn a méně plných skenů.",
|
||||
"Connections": "Připojení",
|
||||
"Connections via relays might be rate limited by the relay": "Připojení přes relé může být omezeno rychlostí relé",
|
||||
"Continuously watching for changes is now available within Syncthing. This will detect changes on disk and issue a scan on only the modified paths. The benefits are that changes are propagated quicker and that less full scans are required.": "Syncthing nyní umožňuje průběžné sledování změn. To zachytí změny na disku a provede skenování pouze změněných cest. Výhodou je rychlejší šíření změn a menší počet úplných skenování.",
|
||||
"Copied from elsewhere": "Zkopírováno odjinud",
|
||||
"Copied from original": "Zkopírováno z originálu",
|
||||
"Copied!": "Zkopírováno!",
|
||||
"Copy": "Kopírovat",
|
||||
"Copy failed! Try to select and copy manually.": "Kopírování selhalo! Zkuste vybrat a zkopírovat manuálně.",
|
||||
"Copy failed! Try to select and copy manually.": "Kopírování selhalo! Zkuste vybrat a zkopírovat ručně.",
|
||||
"Currently Shared With Devices": "Aktuálně sdíleno se zařízeními",
|
||||
"Custom Range": "Přesný rozsah",
|
||||
"Custom Range": "Vlastní rozsah",
|
||||
"Danger!": "Nebezpečí!",
|
||||
"Database Location": "Umístění databáze",
|
||||
"Debugging Facilities": "Nástroje pro ladění",
|
||||
"Default": "Výchozí",
|
||||
"Default Configuration": "Výchozí nastavení",
|
||||
"Default Configuration": "Výchozí konfigurace",
|
||||
"Default Device": "Výchozí zařízení",
|
||||
"Default Folder": "Výchozí složka",
|
||||
"Default Ignore Patterns": "Výchozí vzory ignorovaného",
|
||||
"Default Ignore Patterns": "Výchozí vzory ignorování",
|
||||
"Defaults": "Výchozí hodnoty",
|
||||
"Delete": "Smazat",
|
||||
"Delete Unexpected Items": "Smazat neočekávané položky",
|
||||
"Deleted {%file%}": "Smazáno {{file}}",
|
||||
"Delete": "Odstranit",
|
||||
"Delete Unexpected Items": "Odstranit neočekávané položky",
|
||||
"Deleted {%file%}": "Odstraněn {{file}}",
|
||||
"Deselect All": "Zrušit výběr všeho",
|
||||
"Deselect devices to stop sharing this folder with.": "Zrušte výběr zařízení, se kterými již nemá být tato složka sdílena.",
|
||||
"Deselect folders to stop sharing with this device.": "Zrušte výběr složek, které se mají přestat sdílet s tímto zařízením.",
|
||||
"Deselect devices to stop sharing this folder with.": "Zrušením výběru zařízení s ním přestanete tuto složku sdílet.",
|
||||
"Deselect folders to stop sharing with this device.": "Zrušením výběru složek zastavíte sdílení s tímto zařízením.",
|
||||
"Device": "Zařízení",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Zařízení „{{name}}“ ({{device}} na {{address}}) se chce připojit. Přidat nové zařízení?",
|
||||
"Device Certificate": "Certifikát zařízení",
|
||||
"Device ID": "Identifikátor zařízení",
|
||||
"Device Identification": "Identifikace zařízení",
|
||||
"Device Name": "Název zařízení",
|
||||
"Device Status": "Stav zařízení",
|
||||
"Device is untrusted, enter encryption password": "Zařízení nemá důvěru, zadejte šifrovací heslo.",
|
||||
"Device rate limits": "Omezení přenosové rychlosti pro zařízení",
|
||||
"Device that last modified the item": "Zařízení, které položku změnilo naposledy",
|
||||
@@ -219,13 +221,14 @@
|
||||
"Last seen": "Naposledy spatřen",
|
||||
"Latest Change": "Poslední změna",
|
||||
"Learn more": "Zjistěte více",
|
||||
"Learn more at {%url%}": "Více na {{url}}",
|
||||
"Limit": "Limit",
|
||||
"Listener Failures": "Selhání při naslouchání",
|
||||
"Listener Status": "Stav naslouchání",
|
||||
"Listeners": "Naslouchající",
|
||||
"Loading data...": "Načítání dat…",
|
||||
"Loading...": "Načítání…",
|
||||
"Local Additions": "Místní příbytky",
|
||||
"Local Additions": "Místní přebytky",
|
||||
"Local Discovery": "Místní objevování",
|
||||
"Local State": "Místní status",
|
||||
"Local State (Total)": "Místní status (Celkem)",
|
||||
@@ -234,11 +237,16 @@
|
||||
"Log File": "Soubor logů",
|
||||
"Log In": "Přihlásit se",
|
||||
"Log Out": "Odhlásit se",
|
||||
"Log in to see paths information.": "Pro zobrazení informací o cestě se přihlaste.",
|
||||
"Log in to see version information.": "Pro zobrazení informací o verzi se přihlaste.",
|
||||
"Log tailing paused. Scroll to the bottom to continue.": "Zaznamenávání událostí pozastaveno. Sjeďte dolů pro pokračování.",
|
||||
"Login failed, see Syncthing logs for details.": "Přihlášení selhalo, detaily najdete v Syncthing logu.",
|
||||
"Logs": "Záznamy událostí",
|
||||
"Major Upgrade": "Aktualizace hlavní verze",
|
||||
"Mass actions": "Hromadné akce",
|
||||
"Maximum Age": "Maximální časový limit",
|
||||
"Maximum single entry size": "Maximální velikost jedné položky",
|
||||
"Maximum total size": "Maximální celková velikost",
|
||||
"Metadata Only": "Pouze metadata",
|
||||
"Minimum Free Disk Space": "Minimální velikost volného místa na úložišti",
|
||||
"Mod. Device": "Zařízení, které provedlo změnu",
|
||||
@@ -408,15 +416,17 @@
|
||||
"The folder path cannot be blank.": "Popis umístění složky nemůže zůstat nevyplněný.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Jsou použity následující intervaly: za první hodinu jsou ponechány verze pro každých 30 sekund, za první den jsou ponechány verze pro každou hodinu, za prvních 30 dní jsou ponechány verze pro každý den a do nejvyššího nastaveného stáří jsou ponechány verze pro každý týden.",
|
||||
"The following items could not be synchronized.": "Následující položky nemohly být synchronizovány.",
|
||||
"The following items were changed locally.": "Tyto položky byly změněny lokálně",
|
||||
"The following items were changed locally.": "Tyto položky byly změněny lokálně.",
|
||||
"The following methods are used to discover other devices on the network and announce this device to be found by others:": "K objevování ostatních zařízení a oznamování tohoto zařízení se používají následující metody:",
|
||||
"The following text will automatically be inserted into a new message.": "Následující text bude automaticky vložen do nové zprávy.",
|
||||
"The following unexpected items were found.": "Byly nalezeny tyto neočekávané položky.",
|
||||
"The interval must be a positive number of seconds.": "Interval musí být kladný počet sekund.",
|
||||
"The interval, in seconds, for running cleanup in the versions directory. Zero to disable periodic cleaning.": "Interval (v sekundách) pro spouštění čištění ve složce s verzemi. Nula pravidelné čištění vypíná.",
|
||||
"The maximum age must be a number and cannot be blank.": "Nejvyšší stáří je třeba zadat v podobě čísla a nemůže být prázdné.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maximální doba pro zachování verze (dny, zapsáním hodnoty 0 bude ponecháno navždy).",
|
||||
"The number of days must be a number and cannot be blank.": "Je třeba, aby počet dní bylo číslo a nemůže zůstat nevyplněné.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "Počet dní, po který budou soubory uchovány v koši. Nula znamená navždy.",
|
||||
"The number of connections must be a non-negative number.": "Počet spojení musí být nezáporné číslo.",
|
||||
"The number of days must be a number and cannot be blank.": "Počet dní musí být číslo a nemůže zůstat nevyplněné.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "Počet dní, po které budou soubory uchovány v koši. Nula znamená navždy.",
|
||||
"The number of old versions to keep, per file.": "Počet uchovávaných starších verzí každého ze souborů.",
|
||||
"The number of versions must be a number and cannot be blank.": "Je třeba, aby počet verzí bylo číslo a nemůže zůstat nevyplněné.",
|
||||
"The path cannot be blank.": "Popis umístění nemůže zůstat nevyplněný.",
|
||||
@@ -475,8 +485,8 @@
|
||||
"Warning, this path is a parent directory of an existing folder \"{%otherFolder%}\".": "Varování, tento popis umístění je nadřazenou složkou existující „{{otherFolder}}“.",
|
||||
"Warning, this path is a parent directory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "Varování, tento popis umístění je nadřazenou složkou existující „{{otherFolderLabel}}“ ({{otherFolder}}).",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Varování: toto umístění je podsložkou existující „{{otherFolder}}“.",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "Varování, toto umístění e podsložkou existující „{{otherFolderLabel}}“ ({{otherFolder}}).",
|
||||
"Warning: If you are using an external watcher like {%syncthingInotify%}, you should make sure it is deactivated.": "Pozor: Pokud používáte externí sledování změn jako {{syncthingInotify}}, měly byste se ujistit, že je toto sledování vypnuto.",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "Varování, toto umístění je podsložkou existující „{{otherFolderLabel}}“ ({{otherFolder}}).",
|
||||
"Warning: If you are using an external watcher like {%syncthingInotify%}, you should make sure it is deactivated.": "Pozor: Pokud používáte externí sledování změn jako {{syncthingInotify}}, měli byste se ujistit, že je toto sledování vypnuto.",
|
||||
"Watch for Changes": "Sledovat změny",
|
||||
"Watching for Changes": "Sledování změn",
|
||||
"Watching for changes discovers most changes without periodic scanning.": "Sledování změn odhalí většinu změn ještě před periodickým skenováním.",
|
||||
@@ -499,6 +509,8 @@
|
||||
"folder": "složka",
|
||||
"full documentation": "úplná dokumentace",
|
||||
"items": "položky",
|
||||
"modified": "změněno",
|
||||
"permit": "povolit",
|
||||
"seconds": "sekund",
|
||||
"theme": {
|
||||
"name": {
|
||||
|
||||
@@ -187,7 +187,7 @@
"GUI Theme": "GUI-tema",
"General": "Generelt",
"Generate": "Opret",
"Global Discovery": "Globalt opslag",
"Global Discovery": "Globalt opdagelse",
"Global Discovery Servers": "Globale opslagsservere",
"Global State": "Global tilstand",
"Help": "Hjælp",
@@ -386,6 +386,7 @@
"Staggered File Versioning": "Forskudt filversionering",
"Start Browser": "Start browser",
"Statistics": "Statistikker",
"Stay logged in": "Forbliv logget ind",
"Stopped": "Stoppet",
"Stores and syncs only encrypted data. Folders on all connected devices need to be set up with the same password or be of type \"{%receiveEncrypted%}\" too.": "Gemmer og synkroniserer kun krypterede data. Mapper på alle tilsluttede enheder skal være oprettet med samme adgangskode eller også være af typen \"{{receiveEncrypted}}\".",
"Subject:": "Emne:",

@@ -64,7 +64,7 @@
"Configuration Directory": "Konfigurationsverzeichnis",
"Configuration File": "Konfigurationsdatei",
"Configured": "Konfiguriert",
"Connected (Unused)": "Verbunden (Nicht genutzt)",
"Connected (Unused)": "Verbunden (nicht genutzt)",
"Connection Error": "Verbindungsfehler",
"Connection Management": "Verbindungsverwaltung",
"Connection Type": "Verbindungstyp",
@@ -95,7 +95,7 @@
"Deselect folders to stop sharing with this device.": "Ordner abwählen, um sie nicht mehr für mit diesem Gerät zu teilen.",
"Device": "Gerät",
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Gerät „{{name}}“ ({{device}} {{address}}) möchte sich verbinden. Gerät hinzufügen?",
"Device Certificate": "Geräte-Zertifikat",
"Device Certificate": "Gerätezertifikat",
"Device ID": "Gerätekennung",
"Device Identification": "Geräteidentifikation",
"Device Name": "Gerätename",
@@ -112,8 +112,8 @@
"Disables comparing and syncing file permissions. Useful on systems with nonexistent or custom permissions (e.g. FAT, exFAT, Synology, Android).": "Deaktiviert Vergleich und Synchronisierung der Dateiberechtigungen. Dies ist hilfreich für Dateisysteme ohne konfigurierbare Berechtigungsparameter (z. B. FAT, exFAT, Synology, Android).",
"Discard": "Verwerfen",
"Disconnected": "Getrennt",
"Disconnected (Inactive)": "Getrennt (Inaktiv)",
"Disconnected (Unused)": "Getrennt (Nicht genutzt)",
"Disconnected (Inactive)": "Getrennt (inaktiv)",
"Disconnected (Unused)": "Getrennt (nicht genutzt)",
"Discovered": "Ermittelt",
"Discovery": "Gerätesuche",
"Discovery Failures": "Gerätesuchfehler",
@@ -132,7 +132,7 @@
"Edit Device Defaults": "Gerätevorgaben bearbeiten",
"Edit Folder": "Ordner bearbeiten",
"Edit Folder Defaults": "Ordnervorgaben bearbeiten",
"Editing {%path%}.": "Bearbeite {{path}}.",
"Editing {%path%}.": "Bearbeiten von {{path}}.",
"Enable Crash Reporting": "Absturzmeldung aktivieren",
"Enable NAT traversal": "NAT-Durchdringung aktivieren",
"Enable Relaying": "Weiterleitung aktivieren",
@@ -142,7 +142,7 @@
"Enables sending ownership information to other devices, and applying incoming ownership information. Typically requires running with elevated privileges.": "Bewirkt das Senden der Besitzinformation an andere Geräte und das Anwenden empfangener Besitzinformation. Erfordert üblicherweise die Ausführung mit höheren Zugriffsrechten.",
"Enables sending ownership information to other devices, but not applying incoming ownership information. This can have a significant performance impact. Always enabled when \"Sync Ownership\" is enabled.": "Bewirkt das Senden von Besitzinformation an andere Geräte, jedoch ohne empfangene Besitzinformation anzuwenden. Kann zu einem merklichen Leistungseinbruch führen. Immer aktiviert, wenn „Besitzinformation synchronisieren“ eingeschaltet ist.",
"Enter a non-negative number (e.g., \"2.35\") and select a unit. Percentages are as part of the total disk size.": "Geben Sie eine positive Zahl ein (z. B. „2.35“) und wählen Sie eine Einheit. Prozentsätze sind Teil der gesamten Festplattengröße.",
"Enter a non-privileged port number (1024 - 65535).": "Geben Sie eine nichtprivilegierte Portnummer ein (1024 - 65535).",
"Enter a non-privileged port number (1024 - 65535).": "Geben Sie eine nicht privilegierte Portnummer ein (1024 - 65535).",
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Kommagetrennte Adressen („tcp://ip:port“, „tcp://host:port“) oder „dynamic“ eingeben, um die Adresse automatisch zu ermitteln.",
"Enter ignore patterns, one per line.": "Geben Sie Ignoriermuster ein, eines pro Zeile.",
"Enter up to three octal digits.": "Tragen Sie bis zu drei oktale Ziffern ein.",
@@ -158,13 +158,13 @@
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Ein Verbindungsfehler zu IPv6-Servern ist zu erwarten, wenn es keine IPv6-Konnektivität gibt.",
"File Pull Order": "Dateiübertragungsreihenfolge",
"File Versioning": "Dateiversionierung",
"Files are moved to .stversions directory when replaced or deleted by Syncthing.": "Dateien werden in den .stversions Ordner verschoben, wenn sie von Syncthing ersetzt oder gelöscht wurden.",
"Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "Dateien werden mit Datumsstempel versioniert und in den .stversions Ordner verschoben, wenn sie von Syncthing ersetzt oder gelöscht wurden.",
"Files are moved to .stversions directory when replaced or deleted by Syncthing.": "Dateien werden in den Ordner .stversions verschoben, wenn sie von Syncthing ersetzt oder gelöscht wurden.",
"Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "Dateien werden mit Datumsstempel versioniert und in den Ordner .stversions verschoben, wenn sie von Syncthing ersetzt oder gelöscht wurden.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Dateien sind auf diesem Gerät schreibgeschützt. Auf diesem Gerät durchgeführte Veränderungen werden aber auf den Rest des Verbunds übertragen.",
"Files are synchronized from the cluster, but any changes made locally will not be sent to other devices.": "Dateien werden vom Verbund synchronisiert, aber lokal vorgenommene Änderungen werden nicht an andere Geräte gesendet.",
"Filesystem Watcher Errors": "Fehler im Dateisystembeobachter",
"Filter by date": "Nach Datum sortieren",
"Filter by name": "Nach Name sortieren",
"Filter by name": "Nach Namen sortieren",
"Folder": "Ordner",
"Folder ID": "Ordnerkennung",
"Folder Label": "Ordnerbezeichnung",
@@ -182,9 +182,9 @@
"GUI Authentication Password": "Passwort für Zugang zur Benutzeroberfläche",
"GUI Authentication User": "Benutzername für Zugang zur Benutzeroberfläche",
"GUI Authentication: Set User and Password": "Authentifizierung für die Benutzeroberfläche: Geben Sie Benutzer und Passwort ein.",
"GUI Listen Address": "Addresse der Benutzeroberfläche",
"GUI Listen Address": "Adresse der Benutzeroberfläche",
"GUI Override Directory": "GUI-Ersatz-Verzeichnis",
"GUI Theme": "GUI Design",
"GUI Theme": "GUI-Design",
"General": "Allgemein",
"Generate": "Generieren",
"Global Discovery": "Globale Gerätesuche",
@@ -212,7 +212,7 @@
"Introduced By": "Verteilt von",
"Introducer": "Verteilergerät",
"Introduction": "Einführung",
"Inversion of the given condition (i.e. do not exclude)": "Umkehrung der angegebenen Bedingung (d. h. schließe nicht aus)",
"Inversion of the given condition (i.e. do not exclude)": "Umkehrung der angegebenen Bedingung (d. h. nicht ausschließen)",
"Keep Versions": "Versionen erhalten",
"LDAP": "LDAP",
"Largest First": "Größte zuerst",
@@ -223,7 +223,7 @@
"Last seen": "Zuletzt online",
"Latest Change": "Letzte Änderung",
"Learn more": "Mehr erfahren",
"Learn more at {%url%}": "Erfahre mehr unter {{url}}",
"Learn more at {%url%}": "Erfahren Sie mehr unter {{url}}",
"Limit": "Limit",
"Listener Failures": "Fehler bei Listener",
"Listener Status": "Status der Listener",
@@ -233,7 +233,7 @@
"Local Additions": "Lokal hinzugefügte Elemente",
"Local Discovery": "Lokale Gerätesuche",
"Local State": "Lokaler Status",
"Local State (Total)": "Lokaler Status (Gesamt)",
"Local State (Total)": "Lokaler Status (gesamt)",
"Locally Changed Items": "Lokal geänderte Elemente",
"Log": "Protokoll",
"Log File": "Protokolldatei",
@@ -283,23 +283,23 @@
"Ownership": "Besitzinformation",
"Password": "Passwort",
"Path": "Pfad",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Ordner auf dem lokalen Gerät. Ordner wird erzeugt, wenn er nicht existiert. Das Tilden-Zeichen (~) kann als Abkürzung benutzt werden für",
"Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "Pfad in dem Versionen gespeichert werden sollen (leer lassen, wenn der Standard .stversions Ordner für den geteilten Ordner verwendet werden soll).",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Ordner auf dem lokalen Gerät. Ordner wird erzeugt, wenn er nicht existiert. Das Tilde-Zeichen (~) kann als Abkürzung verwendet werden für",
"Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "Pfad, in dem Versionen gespeichert werden sollen (leer lassen für den Standardordner .stversions im freigegebenen Ordner).",
"Paths": "Pfade",
"Pause": "Pause",
"Pause All": "Alles pausieren",
"Paused": "Pausiert",
"Paused (Unused)": "Pausiert (Nicht genutzt)",
"Paused (Unused)": "Pausiert (nicht genutzt)",
"Pending changes": "Ausstehende Änderungen",
"Periodic scanning at given interval and disabled watching for changes": "Periodischer Scan im angegebenen Intervall und Überwachung von Änderungen deaktiviert",
"Periodic scanning at given interval and enabled watching for changes": "Periodischer Scan im angegebenen Intervall und Überwachung von Änderungen aktiviert",
"Periodic scanning at given interval and failed setting up watching for changes, retrying every 1m:": "Periodischer Scan im angegebenen Intervall, Überwachung von Änderungen fehlgeschlagen, erneuter Versuch jede Minute:",
"Permanently add it to the ignore list, suppressing further notifications.": "Permanent zur Ignorierliste hinzufügen, um weitere Benachrichtigungen zu unterdrücken.",
"Please consult the release notes before performing a major upgrade.": "Bitte lesen Sie die Veröffentlichungshinweise bevor Sie eine Hauptversionsaktualisierung installieren.",
"Please set a GUI Authentication User and Password in the Settings dialog.": "Bitte lege einen Benutzer und ein Passwort für die Benutzeroberfläche in den Einstellungen fest.",
"Please consult the release notes before performing a major upgrade.": "Bitte lesen Sie die Veröffentlichungshinweise, bevor Sie eine Hauptversionsaktualisierung installieren.",
"Please set a GUI Authentication User and Password in the Settings dialog.": "Bitte legen Sie in den Einstellungen einen Benutzer und ein Passwort für die Benutzeroberfläche fest.",
"Please wait": "Bitte warten",
"Prefix indicating that the file can be deleted if preventing directory removal": "Präfix, das anzeigt, dass die Datei gelöscht werden kann, wenn sie die Entfernung des Ordners verhindert",
"Prefix indicating that the pattern should be matched without case sensitivity": "Präfix, das anzeigt, dass das Muster ohne Beachtung der Groß- / Kleinschreibung übereinstimmen soll",
"Prefix indicating that the pattern should be matched without case sensitivity": "Präfix, das anzeigt, dass das Muster ohne Beachtung der Groß-/Kleinschreibung abgeglichen werden soll",
"Preparing to Sync": "Vorbereiten auf die Synchronisation",
"Preview": "Vorschau",
"Preview Usage Report": "Vorschau des Nutzungsberichts",
@@ -308,7 +308,7 @@
"QUIC WAN": "QUIC WAN",
"Quick guide to supported patterns": "Schnellanleitung zu den unterstützten Mustern",
"Random": "Zufall",
"Receive Encrypted": "Empfange verschlüsselt",
"Receive Encrypted": "Verschlüsselt empfangen",
"Receive Only": "Nur empfangen",
"Received data is already encrypted": "Empfangene Daten sind bereits verschlüsselt",
"Recent Changes": "Letzte Änderungen",
@@ -362,20 +362,20 @@
"Shared With": "Geteilt mit",
"Sharing": "Teilen",
"Show ID": "Eigene Kennung",
"Show QR": "Zeige QR Code",
"Show QR": "QR-Code anzeigen",
"Show detailed discovery status": "Status der Gerätesuche anzeigen",
"Show detailed listener status": "Detaillierten Listener-Status anzeigen",
"Show diff with previous version": "Unterschied zur vorherigen Version anzeigen",
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Wird anstatt der Gerätekennung im Verbund-Status angezeigt. Wird als optionaler Standardname an andere Geräte bekannt gegeben.",
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Wird auf diesem Gerät als Gerätename angezeigt und an die anderen Geräte im Geräte-Verbund weitergegeben. Wenn kein Gerätename angegeben wird, wird der Name des entfernten Gerätes genommen.",
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Wird anstelle der Gerätekennung im Verbundstatus angezeigt. Wird anderen Geräten als optionaler Standardname bekannt gegeben.",
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Wird anstelle der Gerätekennung im Verbundstatus angezeigt. Wird auf den Namen aktualisiert, den das Gerät anzeigt, wenn er leer bleibt.",
"Shutdown": "Herunterfahren",
"Shutdown Complete": "Vollständig Heruntergefahren",
"Shutdown Complete": "Vollständig heruntergefahren",
"Simple": "Einfach",
"Simple File Versioning": "Einfache Dateiversionierung",
"Single level wildcard (matches within a directory only)": "Einzelnes Maskenzeichen (wird für einen einzelnen Ordner verwendet)",
"Size": "Größe",
"Smallest First": "Kleinstes zuerst",
"Some discovery methods could not be established for finding other devices or announcing this device:": "Zum Auffinden anderer Geräte, oder um dieses Gerät anzukündigen, konnten manche Methoden nicht eingerichtet werden:",
"Some discovery methods could not be established for finding other devices or announcing this device:": "Einige Erkennungsmethoden zum Auffinden anderer Geräte oder zur Meldung dieses Geräts konnten nicht eingerichtet werden:",
"Some items could not be restored:": "Einige Elemente konnten nicht wiederhergestellt werden:",
"Some listening addresses could not be enabled to accept connections:": "An manchen Netzwerkadressen kann nicht gelauscht werden, um Verbindungen anzunehmen:",
"Source Code": "Quellcode",
@@ -388,7 +388,7 @@
"Statistics": "Statistiken",
"Stay logged in": "Angemeldet bleiben",
"Stopped": "Gestoppt",
"Stores and syncs only encrypted data. Folders on all connected devices need to be set up with the same password or be of type \"{%receiveEncrypted%}\" too.": "Speichert und synchronisiert nur verschlüsselte Daten. Ordner auf allen verbundenen Geräten müssen mit dem selben Passwort eingerichtet werden oder vom Typ „{{receiveEncrypted}}“ sein.",
"Stores and syncs only encrypted data. Folders on all connected devices need to be set up with the same password or be of type \"{%receiveEncrypted%}\" too.": "Speichert und synchronisiert nur verschlüsselte Daten. Ordner auf allen verbundenen Geräten müssen mit demselben Passwort eingerichtet werden oder vom Typ „{{receiveEncrypted}}“ sein.",
"Subject:": "Betreff:",
"Support": "Support",
"Support Bundle": "Supportpaket",
@@ -396,7 +396,7 @@
"Sync Ownership": "Besitzinformation synchronisieren",
"Sync Protocol Listen Addresses": "Adresse(n) für das Synchronisierungsprotokoll",
"Sync Status": "Status der Synchronisierung",
"Syncing": "Synchronisiere",
"Syncing": "Wird synchronisiert",
"Syncthing device ID for \"{%devicename%}\"": "Syncthing-Geräte-ID für „{{devicename}}“",
"Syncthing has been shut down.": "Syncthing wurde heruntergefahren.",
"Syncthing includes the following software or portions thereof:": "Syncthing enthält die folgende Software oder Teile von:",
@@ -409,24 +409,24 @@
"Syncthing is upgrading.": "Syncthing wird aktualisiert.",
"Syncthing now supports automatically reporting crashes to the developers. This feature is enabled by default.": "Syncthing unterstützt jetzt automatische Absturzberichte an die Entwickler. Diese Funktion ist standardmäßig aktiviert.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing scheint nicht erreichbar zu sein oder es gibt ein Problem mit der Internetverbindung. Erneuter Versuch …",
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing scheint ein Problem mit der Verarbeitung Deiner Eingabe zu haben. Bitte lade die Seite neu oder führe einen Neustart durch, falls das Problem weiterhin besteht.",
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing scheint ein Problem mit der Verarbeitung Ihrer Eingabe zu haben. Laden Sie bitte die Seite neu oder führen einen Neustart durch, falls das Problem weiterhin besteht.",
"TCP LAN": "TCP LAN",
"TCP WAN": "TCP WAN",
"Take me back": "Führe mich zurück",
"Take me back": "Führen Sie mich zurück",
"The GUI address is overridden by startup options. Changes here will not take effect while the override is in place.": "Die GUI-Adresse wird durch Startoptionen überschrieben. Hier vorgenommene Änderungen werden nicht wirksam, solange die Überschreibung besteht.",
"The Syncthing Authors": "Die Syncthing-Autoren",
"The Syncthing admin interface is configured to allow remote access without a password.": "Die Syncthing-Oberfläche erlaubt mit den jetzigen Einstellungen einen Zugriff ohne Passwort.",
"The aggregated statistics are publicly available at the URL below.": "Die gesammelten Statistiken sind öffentlich unter der nachfolgenden URL verfügbar.",
"The cleanup interval cannot be blank.": "Das Bereinigungsintervall darf nicht leer sein.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Die Konfiguration wurde gespeichert, aber noch nicht aktiviert. Syncthing muss neugestartet werden, um die neue Konfiguration zu übernehmen.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Die Konfiguration wurde gespeichert, aber noch nicht aktiviert. Syncthing muss neu gestartet werden, um die neue Konfiguration zu übernehmen.",
"The device ID cannot be blank.": "Die Gerätekennung darf nicht leer sein.",
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Die hier einzutragende Gerätekennung kann im Dialog „Aktionen > Kennung anzeigen“ auf dem anderen Gerät gefunden werden. Leerzeichen und Bindestriche sind optional (werden ignoriert).",
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes, and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Nutzungsbericht wird täglich gesendet. Er wird verwendet, um Statistiken über verwendete Betriebssysteme, Ordnergrößen und Programmversionen zu erstellen. Sollte der Bericht in Zukunft weitere Daten erfassen, wird dieses Fenster erneut angezeigt.",
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Die eingegebene Gerätekennung scheint nicht gültig zu sein. Es sollte eine 52 oder 56 stellige Zeichenkette aus Buchstaben und Nummern sein. Leerzeichen und Bindestriche sind optional.",
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Die eingegebene Gerätekennung scheint nicht gültig zu sein. Es sollte eine 52- oder 56-stellige Zeichenkette aus Buchstaben und Zahlen sein. Leerzeichen und Bindestriche sind optional.",
"The folder ID cannot be blank.": "Die Ordnerkennung darf nicht leer sein.",
"The folder ID must be unique.": "Die Ordnerkennung muss eindeutig sein.",
"The folder content on other devices will be overwritten to become identical with this device. Files not present here will be deleted on other devices.": "Der Ordnerinhalt auf anderen Geräten wird überschrieben, um mit diesem Gerät identisch zu werden. Dateien, die hier nicht vorhanden sind, werden auf anderen Geräten gelöscht.",
"The folder content on this device will be overwritten to become identical with other devices. Files newly added here will be deleted.": "Der Ordnerinhalt auf diesem Gerät wird überschrieben, um mit anderen Geräten identisch zu werden. Dateien, die hier neu hinzugefügt wurden, werden gelöscht.",
"The folder content on this device will be overwritten to become identical with other devices. Files newly added here will be deleted.": "Der Ordnerinhalt auf diesem Gerät wird überschrieben, damit er mit anderen Geräten identisch ist. Dateien, die hier neu hinzugefügt wurden, werden gelöscht.",
"The folder path cannot be blank.": "Der Ordnerpfad darf nicht leer sein.",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Es wird in folgenden Abständen versioniert: In der ersten Stunde wird alle 30 Sekunden eine Version behalten, am ersten Tag eine jede Stunde, in den ersten 30 Tagen eine jeden Tag. Danach wird bis zum angegebenen Höchstalter eine Version pro Woche behalten.",
"The following items could not be synchronized.": "Die folgenden Elemente konnten nicht synchronisiert werden.",
@@ -435,12 +435,12 @@
"The following text will automatically be inserted into a new message.": "Der folgende Text wird automatisch in eine neue Nachricht eingefügt.",
"The following unexpected items were found.": "Die folgenden unerwarteten Elemente wurden gefunden.",
"The interval must be a positive number of seconds.": "Das Intervall muss eine positive Zahl von Sekunden sein.",
"The interval, in seconds, for running cleanup in the versions directory. Zero to disable periodic cleaning.": "Das Intervall, in Sekunden, zwischen den Bereinigungen im Versionsverzeichnis. Null um das regelmäßige Bereinigen zu deaktivieren.",
"The interval, in seconds, for running cleanup in the versions directory. Zero to disable periodic cleaning.": "Das Intervall in Sekunden zwischen den Bereinigungen im Versionsverzeichnis. Null, um das regelmäßige Bereinigen zu deaktivieren.",
"The maximum age must be a number and cannot be blank.": "Das Höchstalter muss angegeben werden und eine Zahl sein.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Die längste Zeit, die alte Versionen vorgehalten werden (in Tagen) (0 um alte Versionen für immer zu behalten).",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Die längste Zeit, die alte Versionen vorgehalten werden (in Tagen) (0, um alte Versionen für immer zu behalten).",
"The number of connections must be a non-negative number.": "Die Anzahl der Verbindungen muss eine nicht-negative Zahl sein.",
"The number of days must be a number and cannot be blank.": "Die Anzahl der Tage muss eine Ganzzahl sein und darf nicht leer sein.",
"The number of days to keep files in the trash can. Zero means forever.": "Dauer in Tagen für welche die Dateien aufgehoben werden sollen. 0 bedeutet für immer.",
"The number of days to keep files in the trash can. Zero means forever.": "Die Anzahl der Tage, die Dateien im Papierkorb verbleiben sollen. Null bedeutet für immer.",
"The number of old versions to keep, per file.": "Anzahl der alten Versionen, die von jeder Datei behalten werden sollen.",
"The number of versions must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Ganzzahl und darf nicht leer sein.",
"The path cannot be blank.": "Der Pfad darf nicht leer sein.",
@@ -456,16 +456,16 @@
"This Device": "Dieses Gerät",
"This Month": "Dieser Monat",
"This can easily give hackers access to read and change any files on your computer.": "Dies kann dazu führen, dass Unberechtigte relativ einfach auf Ihre Dateien zugreifen und diese ändern können.",
"This device cannot automatically discover other devices or announce its own address to be found by others. Only devices with statically configured addresses can connect.": "Dieses Gerät kann nicht automatisch andere Geräte auffinden, oder seine eigene Adresse bekannt geben, um von anderen gefunden zu werden. Nur Geräte mit statisch konfigurierten Adressen können sich verbinden.",
"This device cannot automatically discover other devices or announce its own address to be found by others. Only devices with statically configured addresses can connect.": "Dieses Gerät kann nicht automatisch andere Geräte auffinden oder seine eigene Adresse bekannt geben, um von anderen gefunden zu werden. Nur Geräte mit statisch konfigurierten Adressen können sich verbinden.",
"This is a major version upgrade.": "Dies ist eine Hauptversionsaktualisierung.",
"This setting controls the free space required on the home (i.e., index database) disk.": "Diese Einstellung regelt den freien Speicherplatz, der für den Systemordner (d.h. Indexdatenbank) erforderlich ist.",
"This setting controls the free space required on the home (i.e., index database) disk.": "Diese Einstellung regelt den freien Speicherplatz, der für den Systemordner (d. h. Indexdatenbank) erforderlich ist.",
"Time": "Zeit",
"Time the item was last modified": "Zeit der letzten Änderung des Elements",
"To connect with the Syncthing device named \"{%devicename%}\", add a new remote device on your end with this ID:": "Um sich mit dem Syncthing-Gerät namens „{{devicename}}“ zu verbinden, fügen Sie ein neues Gerät mit dieser ID hinzu:",
"To permit a rule, have the checkbox checked. To deny a rule, leave it unchecked.": "Zum Erlauben einer Regel Häkchen setzen. Zum Verweigern einer Regel frei lassen.",
"Today": "Heute",
"Trash Can": "Papierkorb",
"Trash Can File Versioning": "Papierkorb Dateiversionierung",
"Trash Can File Versioning": "Papierkorb-Dateiversionierung",
"Type": "Typ",
"UNIX Permissions": "UNIX-Berechtigungen",
"Unavailable": "Nicht verfügbar",
@@ -491,7 +491,7 @@
"Use notifications from the filesystem to detect changed items.": "Benachrichtigungen des Dateisystems nutzen, um Änderungen zu erkennen.",
"User": "Benutzer",
"User Home": "Benutzer-Stammverzeichnis",
"Username/Password has not been set for the GUI authentication. Please consider setting it up.": "Benutzername / Passwort wurde für die Benutzeroberfläche nicht gesetzt. Bitte erwägen Sie dies einzurichten.",
"Username/Password has not been set for the GUI authentication. Please consider setting it up.": "Benutzername / Passwort wurde für die Benutzeroberfläche nicht gesetzt. Bitte erwägen Sie, dies einzurichten.",
"Using a QUIC connection over LAN": "Verwendet eine QUIC-Verbindung über LAN",
"Using a QUIC connection over WAN": "Verwendet eine QUIC-Verbindung über WAN",
"Using a direct TCP connection over LAN": "Verwendet eine direkte TCP-Verbindung über LAN",
@@ -512,8 +512,8 @@
"Watch for Changes": "Änderungen überwachen",
"Watching for Changes": "Überwachung von Änderungen",
"Watching for changes discovers most changes without periodic scanning.": "Das Überwachen von Änderungen entdeckt die meisten Änderungen ohne regelmäßiges Scannen.",
"When adding a new device, keep in mind that this device must be added on the other side too.": "Beachte beim Hinzufügen eines neuen Gerätes, dass dieses Gerät auch auf den anderen Geräten hinzugefügt werden muss.",
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Beachte bitte beim Hinzufügen eines neuen Ordners, dass die Ordnerkennung dazu verwendet wird, Ordner zwischen Geräten zu verbinden. Die Kennung muss also auf allen Geräten gleich sein, die Groß- und Kleinschreibung muss dabei beachtet werden.",
"When adding a new device, keep in mind that this device must be added on the other side too.": "Beachten Sie beim Hinzufügen eines neuen Gerätes, dass dieses Gerät auch auf den anderen Geräten hinzugefügt werden muss.",
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Beachten Sie bitte beim Hinzufügen eines neuen Ordners, dass die Ordnerkennung dazu verwendet wird, Ordner zwischen Geräten zu verbinden. Die Kennung muss also auf allen Geräten gleich sein, die Groß- und Kleinschreibung muss dabei beachtet werden.",
"When set to more than one on both devices, Syncthing will attempt to establish multiple concurrent connections. If the values differ, the highest will be used. Set to zero to let Syncthing decide.": "Wenn auf beiden Geräten der Wert höher als eins eingestellt ist, versucht Syncthing, mehrere gleichzeitige Verbindungen herzustellen. Wenn die Werte unterschiedlich sind, wird der höchste Wert verwendet. Den Wert auf Null setzen, um Syncthing entscheiden zu lassen.",
"Yes": "Ja",
"Yesterday": "Gestern",
@@ -524,8 +524,8 @@
"You have no ignored devices.": "Sie haben keine ignorierten Geräte.",
"You have no ignored folders.": "Sie haben keine ignorierten Ordner.",
"You have unsaved changes. Do you really want to discard them?": "Sie haben nicht gespeicherte Änderungen. Wollen Sie diese wirklich verwerfen?",
"You must keep at least one version.": "Du musst mindestens eine Version behalten.",
"You should never add or change anything locally in a \"{%receiveEncrypted%}\" folder.": "Sie sollten nie etwas im „{{receiveEncrypted}}“ Ordner lokal ändern oder hinzufügen.",
"You must keep at least one version.": "Sie müssen zumindest eine Version behalten.",
"You should never add or change anything locally in a \"{%receiveEncrypted%}\" folder.": "Sie sollten nie etwas im Ordner „{{receiveEncrypted}}“ lokal ändern oder hinzufügen.",
"Your SMS app should open to let you choose the recipient and send it from your own number.": "Ihre SMS-App sollte sich öffnen, damit Sie den Empfänger auswählen und die Nachricht von Ihrer eigenen Nummer aus versenden können.",
"Your email app should open to let you choose the recipient and send it from your own address.": "Ihre E-Mail-App sollte sich öffnen, damit Sie den Empfänger auswählen und die Nachricht von Ihrer eigenen Adresse aus versenden können.",
"days": "Tage",

Some files were not shown because too many files have changed in this diff.