Mirror of https://github.com/syncthing/syncthing.git, synced 2025-12-24 06:28:10 -05:00
Compare commits
276 Commits
SHA1s of the compared commits:

0945304a79, 9703dd9f57, 259e9ef08e, 6a0c6128d8, b05ece0681, e9133ef82b, 67ba20d777, 21da0d7890,
ebbe57d0ab, f4abc71dcc, 8aa02da93a, 0e560486db, 57d413099d, 1fdf07933c, c50678618f, 8094b459e4,
6765867a2e, 4fb8ee6a6f, 674834ccf4, 3bd2bff23b, 40660c5fb7, d940d094a1, 9d67727989, 6f51700a7f,
598915193a, 905e5ec07f, 4075b886d0, cade790198, 98555a9a80, 48b757cac1, 58c85fc9db, ddd98a818a,
64b5a1b738, 1a131a56f2, beda37f28b, 2532ac35cf, bcd30ceaec, 9a3493c2f4, fa404d5a0d, 73ad18fbfb,
1dd264894a, 8c3d2f3bc5, 702ed8ecc1, b038650810, a16bf555c0, cd6ea60fa1, 0bf21d9db2, f61843ef2e,
23e8366f8d, 93e72cc83f, 190dff142c, c667ada63a, 93ae30d889, 486eebc4ac, ff33d976d1, 69890b4282,
533c9a6ab0, 9521bb3931, e46a0f99c3, ed97e365b2, b4776ea4e0, b5ffd0a796, c74299b59a, 8b6d837483,
3e74b3dee2, 2902da996c, f6f144bf17, ab5c42f4a0, 7db3f7eaac, f0b666269b, 190a59842c, 40888c1a66,
fa0d933e49, 8372c0288f, 5f5d672a7d, d23cd197e1, d7ca483df1, e48be98cd5, e9a2ff3aa6, 2301f72c5b,
f7c8efd93c, 3e7ccf7c48, 6bc2784e9a, f15d50c2e8, f9007ed106, 05cc6b0f43, 1efcfeb3ad, 93195911bd,
6085e3a5eb, e5b72da607, 0d6117d585, 629971687d, 3c955a9706, 7f3c8dbff1, 7762e39fb3, 4235b2c406,
3fd090bfa7, 6dfa54efa6, 67575e1736, 65923fc255, aea763868f, 26b134ae7b, 893071d2ba, 435f2d2178,
8461ca539b, fb977dc61d, ee7ab4ce25, c3ce9713d9, 6a147091c5, 6208c36417, 453fd20eeb, 28f0cffdb6,
87c16c6cf5, 5495c98e63, da7d5ce608, b300c297c6, 124673f7a8, 0395cf2bc0, 36cd70040a, 1fbd396ffa,
2834bad85e, 516f3e29e8, ab20c16982, 0c1df81ee9, d324b2ac86, 0231089b99, 74ffb85467, dcd280e6e2,
4e56dbd883, 13d7881b80, 1d2c53bf3c, 9c449c966b, ec2d4638e3, 0ce92befc8, 2167ce9656, 0dc85d74aa,
b6e3f8037b, 00e7161a8f, b5a7879eca, 4355dc69ea, 371ba69447, 79ef57d0ea, 8bd6bdd397, ce3248cea7,
99a6f3a5b6, c2e10dc156, fc914f3237, 811d3752d0, 00827dd5c1, a981c21d27, 163dc122f3, 83727e0824,
529d3ef764, fefbf4dcc8, b9c6d3ae09, 7bea8c758a, 479c0d3f16, d9ce7c3166, 69979996d9, da58e5c50c,
44e259142f, 77970d5113, 2b8ee4c7a5, be952e5f2d, 43ebac4242, f08a0ed01c, 612fdff377, 8ccb7f1924,
65d0ca8aa9, e82ed6e3d3, 4b815fc086, 7eaf843de2, 110e1ae6f9, 896f9725ec, 1a529e9d5d, 36ef17df8f,
955ac7775e, 8f69e874c4, ac06fd97e9, 3726b7d112, 377200591e, 4afc898c2f, ff7e4fef55, 9ffddb1923,
896b857fc4, acc5d2675b, 6ece4c1fd2, cc09f0170d, bb234d6c0e, e6acc64758, f18cf545b9, 6d64daaba3,
47f48faed7, cfa834177b, c454fc8baa, dbe7fa9155, 4d842f7d3b, 0e68221c91, 19f63c7ea3, fb939ec496,
39df3173d4, 429672e0b4, 605fd6d726, 3c476542d2, 31874f3ebb, 77942747db, fe01b396ba, 3583949706,
23fc22ebc5, cba163a1fd, a8e2c8edb6, 3e501d9036, 9ca101756d, a873d12c65, 8ff670c564, b1ed2802fb,
b70cb580c8, 28be3ba788, d4770ddc77, cbe1220680, 0b95c5fa76, 0343bca257, 878016db39, 1f4fde9525,
5b9d8a838f, 8b19cb1e11, ce1e259bb4, 2238a288d9, c8ee2a5cf6, 1704827d04, a156e88eef, 94d0195b63,
1616edcee3, 6505e123bb, 63e4659282, f3f5557c8e, b794726e1f, 3d59740a0a, 66fb65b01f, 5c2fcbfd19,
f9b72330a8, 822b6ac36b, 77f7778292, aed2c66e52, 68a1fd010f, ac8b3342ac, 0ea90dd932, 718b1ce2b7,
29f7510f5a, a7f9ed4a80, 1baefea410, 563cec8923, a3c340ece9, cb24638ec9, 2fb24dc2cc, 9aa2d2c92f,
d1c5100c98, 42e677c055, 27bba2c0c2, feff334547, 713cf357ce, 5342bec1b7, 7df75e681d, 8dc826b234,
9ef37e1485, 7517d18fbb, 42d0fee536, 2ca9d3b5c5, 9cde068f2a, 1243083831, 356c5055ad, 19693734a3,
17e60b9e0c, ac22b2d00a, de0b4270df, e738af7c56
.github/CODEOWNERS (vendored, 2 changes)
@@ -1,2 +0,0 @@
/AUTHORS @calmh
/*.md @calmh
.github/ISSUE_TEMPLATE/01-feature.yml (vendored, 1 change)
@@ -1,6 +1,7 @@
name: Feature request
description: File a new feature request
labels: ["enhancement", "needs-triage"]
type: Feature
body:

- type: textarea
.github/ISSUE_TEMPLATE/02-bug.yml (vendored, 1 change)
@@ -1,6 +1,7 @@
name: Bug report
description: If you're actually looking for support instead, see "I need help / I have a question".
labels: ["bug", "needs-triage"]
type: Bug
body:
- type: markdown
attributes:
.github/dependabot.yml (vendored, 4 changes)
@@ -3,11 +3,11 @@ updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: weekly
interval: monthly
open-pull-requests-limit: 10

- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: weekly
interval: monthly
open-pull-requests-limit: 10
.github/labeler.yml (vendored, new file, 23 changes)
@@ -0,0 +1,23 @@
version: 1
labels:

- label: enhancement
title: ^feat\b

- label: bug
title: ^fix\b

- label: documentation
title: ^docs\b

- label: chore
title: ^chore\b

- label: chore
title: ^refactor\b

- label: build
title: ^build\b

- label: dependencies
title: ^build\(deps\)\b
.github/release.yml (vendored, new file, 17 changes)
@@ -0,0 +1,17 @@
changelog:
exclude:
labels:
- dependencies

categories:
- title: Fixes
labels:
- bug

- title: Features
labels:
- enhancement

- title: Other
labels:
- '*'
.github/workflows/build-infra-dockers.yaml (vendored, 17 changes)
@@ -7,11 +7,15 @@ on:
- infra-*

env:
GO_VERSION: "~1.22.3"
GO_VERSION: "~1.24.0"
CGO_ENABLED: "0"
BUILD_USER: docker
BUILD_HOST: github.syncthing.net

permissions:
contents: read
packages: write

jobs:
docker-syncthing:
name: Build and push Docker images
@@ -41,6 +45,13 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build binaries
run: |
for arch in arm64 amd64; do
@@ -53,13 +64,13 @@ jobs:

- name: Set Docker tags (all branches)
run: |
tags=syncthing/${{ matrix.pkg }}:${{ github.sha }}
tags=docker.io/syncthing/${{ matrix.pkg }}:${{ github.sha }},ghcr.io/syncthing/infra/${{ matrix.pkg }}:${{ github.sha }}
echo "TAGS=$tags" >> $GITHUB_ENV

- name: Set Docker tags (latest)
if: github.ref == 'refs/heads/infrastructure'
run: |
tags=syncthing/${{ matrix.pkg }}:latest,${{ env.TAGS }}
tags=docker.io/syncthing/${{ matrix.pkg }}:latest,ghcr.io/syncthing/infra/${{ matrix.pkg }}:latest,${{ env.TAGS }}
echo "TAGS=$tags" >> $GITHUB_ENV

- name: Build and push
.github/workflows/build-nightly.yaml (vendored, new file, 18 changes)
@@ -0,0 +1,18 @@
name: Build Syncthing (Nightly)

on:
schedule:
# Run nightly build at 05:00 UTC
- cron: '00 05 * * *'
workflow_dispatch:

permissions:
contents: write
packages: write

jobs:
build-syncthing:
uses: ./.github/workflows/build-syncthing.yaml
# if we only want nightlies to run for specific users:
# if: contains(fromJSON('["syncthing", "calmh"]'), github.repository_owner)
secrets: inherit
.github/workflows/build-syncthing.yaml (vendored, 363 changes)
@@ -3,16 +3,17 @@ name: Build Syncthing
on:
pull_request:
push:
schedule:
# Run nightly build at 05:00 UTC
- cron: '00 05 * * *'
branches-ignore:
- release
- release-rc*
workflow_call:
workflow_dispatch:

env:
# The go version to use for builds. We set check-latest to true when
# installing, so we get the latest patch version that matches the
# expression.
GO_VERSION: "~1.22.3"
GO_VERSION: "~1.24.0"

# Optimize compatibility on the slow archictures.
GO386: softfloat
@@ -48,7 +49,7 @@ jobs:
runner: ["windows-latest", "ubuntu-latest", "macos-latest"]
# The oldest version in this list should match what we have in our go.mod.
# Variables don't seem to be supported here, or we could have done something nice.
go: ["~1.21.7", "~1.22.3"]
go: ["~1.23.0", "~1.24.0"]
runs-on: ${{ matrix.runner }}
steps:
- name: Set git to use LF
@@ -83,31 +84,11 @@ jobs:
go run build.go test | go-test-json-to-loki
env:
GOFLAGS: "-json"
LOKI_URL: ${{ vars.LOKI_URL }}
LOKI_USER: ${{ vars.LOKI_USER }}
LOKI_URL: ${{ secrets.LOKI_URL }}
LOKI_USER: ${{ secrets.LOKI_USER }}
LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }}
LOKI_LABELS: "go=${{ matrix.go }},runner=${{ matrix.runner }},repo=${{ github.repository }},ref=${{ github.ref }}"

#
# Meta checks for formatting, copyright, etc
#

correctness:
name: Check correctness
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4

- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true

- name: Check correctness
run: |
go test -v ./meta

#
# The basic checks job is a virtual one that depends on the matrix tests,
# the correctness checks, and various builds that we always do. This makes
@@ -122,11 +103,11 @@ jobs:
runs-on: ubuntu-latest
needs:
- build-test
- correctness
- package-linux
- package-cross
- package-source
- package-debian
- package-windows
- govulncheck
steps:
- uses: actions/checkout@v4
@@ -137,8 +118,6 @@ jobs:

package-windows:
name: Package for Windows
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
runs-on: windows-latest
steps:
- name: Set git to use LF
@@ -153,6 +132,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/setup-go@v5
with:
@@ -178,22 +158,74 @@ jobs:

- name: Create packages
run: |
go run build.go -goarch amd64 zip
go run build.go -goarch arm zip
go run build.go -goarch arm64 zip
go run build.go -goarch 386 zip
$targets = 'syncthing', 'stdiscosrv', 'strelaysrv'
$archs = 'amd64', 'arm', 'arm64', '386'
foreach ($arch in $archs) {
foreach ($tgt in $targets) {
go run build.go -goarch $arch zip $tgt
}
}
env:
CGO_ENABLED: "0"
CODESIGN_SIGNTOOL: ${{ secrets.CODESIGN_SIGNTOOL }}
CODESIGN_CERTIFICATE_BASE64: ${{ secrets.CODESIGN_CERTIFICATE_BASE64 }}
CODESIGN_CERTIFICATE_PASSWORD: ${{ secrets.CODESIGN_CERTIFICATE_PASSWORD }}
CODESIGN_TIMESTAMP_SERVER: ${{ secrets.CODESIGN_TIMESTAMP_SERVER }}

- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: unsigned-packages-windows
path: "*.zip"

codesign-windows:
name: Codesign for Windows
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
runs-on: windows-latest
needs:
- package-windows
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: unsigned-packages-windows
path: packages

- name: Extract packages
working-directory: packages
run: |
$files = Get-ChildItem "." -Filter *.zip
foreach ($file in $files) {
7z x $file.Name
}

- name: Sign files with Trusted Signing
uses: azure/trusted-signing-action@v0.5.1
with:
azure-tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
azure-client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
azure-client-secret: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_SECRET }}
endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
trusted-signing-account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT }}
certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_PROFILE }}
files-folder: ${{ github.workspace }}\packages
files-folder-filter: exe
files-folder-recurse: true
file-digest: SHA256
timestamp-rfc3161: http://timestamp.acs.microsoft.com
timestamp-digest: SHA256

- name: Repackage packages
working-directory: packages
run: |
$files = Get-ChildItem "." -Filter *.zip
foreach ($file in $files) {
Remove-Item $file.Name
7z a -tzip $file.Name $file.BaseName
}

- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-windows
path: syncthing-windows-*.zip
path: "packages/*.zip"

#
# Linux
@@ -206,6 +238,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/setup-go@v5
with:
@@ -229,7 +262,9 @@ jobs:
run: |
archs=$(go tool dist list | grep linux | sed 's#linux/##')
for goarch in $archs ; do
go run build.go -goarch "$goarch" tar
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -goarch "$goarch" tar "$tgt"
done
done
env:
CGO_ENABLED: "0"
@@ -238,7 +273,9 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: packages-linux
path: syncthing-linux-*.tar.gz
path: |
*.tar.gz
compat.json

#
# macOS
@@ -246,13 +283,14 @@ jobs:

package-macos:
name: Package for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/setup-go@v5
with:
@@ -299,7 +337,9 @@ jobs:

- name: Create package (amd64)
run: |
go run build.go -goarch amd64 zip
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -goarch amd64 zip "$tgt"
done
env:
CGO_ENABLED: "1"

@@ -313,7 +353,9 @@ jobs:
go "\$@"
EOT
chmod 755 xgo.sh
go run build.go -gocmd ./xgo.sh -goarch arm64 zip
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -gocmd ./xgo.sh -goarch arm64 zip "$tgt"
done
env:
CGO_ENABLED: "1"

@@ -337,15 +379,14 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: packages-macos
path: syncthing-*.zip
path: "*.zip"

notarize-macos:
name: Notarize for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
needs:
- package-macos
- basics
runs-on: macos-latest
steps:
- name: Download artifacts
@@ -357,7 +398,7 @@ jobs:
run: |
APPSTORECONNECT_API_KEY_PATH="$RUNNER_TEMP/apikey.p8"
echo "$APPSTORECONNECT_API_KEY" | base64 -d -o "$APPSTORECONNECT_API_KEY_PATH"
for file in syncthing-macos-*.zip ; do
for file in *-macos-*.zip ; do
xcrun notarytool submit \
-k "$APPSTORECONNECT_API_KEY_PATH" \
-d "$APPSTORECONNECT_API_KEY_ID" \
@@ -380,6 +421,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/setup-go@v5
with:
@@ -422,9 +464,11 @@ jobs:
goos="${plat%/*}"
goarch="${plat#*/}"
echo "::group ::$plat"
if ! go run build.go -goos "$goos" -goarch "$goarch" tar 2>/dev/null; then
echo "::warning ::Failed to build for $plat"
fi
for tgt in syncthing stdiscosrv strelaysrv ; do
if ! go run build.go -goos "$goos" -goarch "$goarch" tar "$tgt" 2>/dev/null; then
echo "::warning ::Failed to build $tgt for $plat"
fi
done
echo "::endgroup::"
done
env:
@@ -434,7 +478,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: packages-other
path: syncthing-*.tar.gz
path: "*.tar.gz"

#
# Source
@@ -447,6 +491,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/setup-go@v5
with:
@@ -482,11 +527,10 @@ jobs:

sign-for-upgrade:
name: Sign for upgrade
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
needs:
- basics
- package-windows
- codesign-windows
- package-linux
- package-macos
- package-cross
@@ -496,6 +540,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/checkout@v4
with:
@@ -514,7 +559,7 @@ jobs:

- name: Install signing tool
run: |
go install ./cmd/stsigtool
go install ./cmd/dev/stsigtool

- name: Sign archives
run: |
@@ -529,30 +574,44 @@ jobs:
env:
STSIGTOOL_PRIVATE_KEY: ${{ secrets.STSIGTOOL_PRIVATE_KEY }}

- name: Create and sign .asc files
- name: Create shasum files
run: |
sudo apt update
sudo apt -y install gnupg

export SIGNING_KEY="$RUNNER_TEMP/gpg-secret.asc"
echo "$GNUPG_SIGNING_KEY_BASE64" | base64 -d > "$SIGNING_KEY"
gpg --import < "$SIGNING_KEY"

pushd packages
files=(*.tar.gz *.zip)
sha1sum "${files[@]}" | gpg --clearsign > sha1sum.txt.asc
sha256sum "${files[@]}" | gpg --clearsign > sha256sum.txt.asc
gpg --sign --armour --detach syncthing-source-*.tar.gz
sha1sum "${files[@]}" > sha1sum.txt
sha256sum "${files[@]}" > sha256sum.txt
popd
rm -f "$SIGNING_KEY" .gnupg

version=$(go run build.go version)
echo "VERSION=$version" >> $GITHUB_ENV

- name: Sign shasum files
uses: docker://ghcr.io/kastelo/ezapt:latest
with:
args:
sign
packages/sha1sum.txt packages/sha256sum.txt
env:
GNUPG_SIGNING_KEY_BASE64: ${{ secrets.GNUPG_SIGNING_KEY_BASE64 }}
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}

- name: Sign source
uses: docker://ghcr.io/kastelo/ezapt:latest
with:
args:
sign --detach --ascii
packages/syncthing-source-${{ env.VERSION }}.tar.gz
env:
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}

- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-signed
path: packages/*
path: |
packages/*.tar.gz
packages/*.zip
packages/*.asc
packages/*.json

#
# Debian
@@ -565,6 +624,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/setup-go@v5
with:
@@ -595,7 +655,9 @@ jobs:
- name: Package for Debian
run: |
for arch in amd64 i386 armhf armel arm64 ; do
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb "$tgt"
done
done
env:
BUILD_USER: debian
@@ -613,10 +675,9 @@ jobs:
publish-nightly:
name: Publish nightly build
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && startsWith(github.ref, 'refs/heads/release-nightly')
environment: signing
environment: release
needs:
- sign-for-upgrade
- notarize-macos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -655,7 +716,7 @@ jobs:
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
with:
args: sync packages objstore:${{ secrets.S3_BUCKET }}/nightly
args: sync -v --no-update-modtime packages objstore:nightly

#
# Push release artifacts to Spaces
@@ -663,8 +724,10 @@ jobs:

publish-release-files:
name: Publish release files
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/release'
environment: signing
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/tags/v'))
environment: release
permissions:
contents: write
needs:
- sign-for-upgrade
- package-debian
@@ -673,6 +736,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- name: Download signed packages
uses: actions/download-artifact@v4
@@ -708,7 +772,7 @@ jobs:
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
with:
args: sync packages objstore:${{ secrets.S3_BUCKET }}/release/${{ env.VERSION }}
args: sync -v --no-update-modtime packages objstore:release/${{ env.VERSION }}

- name: Push to object store (latest)
uses: docker://docker.io/rclone/rclone:latest
@@ -721,7 +785,121 @@ jobs:
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
with:
args: sync objstore:${{ secrets.S3_BUCKET }}/release/${{ env.VERSION }} objstore:${{ secrets.S3_BUCKET }}/release/latest
args: sync -v --no-update-modtime objstore:release/${{ env.VERSION }} objstore:release/latest

- name: Create GitHub releases and push binaries
run: |
maybePrerelease=""
if [[ $VERSION == *-* ]]; then
maybePrerelease="--prerelease"
fi
export GH_PROMPT_DISABLED=1
if ! gh release view --json name "$VERSION" >/dev/null 2>&1 ; then
gh release create "$VERSION" \
$maybePrerelease \
--title "$VERSION" \
--notes-from-tag
fi
gh release upload --clobber "$VERSION" \
packages/*.asc packages/*.json \
packages/syncthing-*.tar.gz \
packages/syncthing-*.zip \
packages/syncthing_*.deb

PKGS=$(pwd)/packages
cd /tmp # gh will not release for repo x while inside repo y
for repo in relaysrv discosrv ; do
export GH_REPO="syncthing/$repo"
if ! gh release view --json name "$VERSION" >/dev/null 2>&1 ; then
gh release create "$VERSION" \
$maybePrerelease \
--title "$VERSION" \
--notes "https://github.com/syncthing/syncthing/releases/tag/$VERSION"
fi
gh release upload --clobber "$VERSION" \
$PKGS/*.asc \
$PKGS/*${repo}*
done
env:
GH_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

#
# Push Debian/APT archive
#

publish-apt:
name: Publish APT
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
needs:
- package-debian
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- name: Download packages
uses: actions/download-artifact@v4
with:
name: debian-packages
path: packages

- name: Set version
run: |
version=$(go run build.go version)
echo "Version: $version"
echo "VERSION=$version" >> $GITHUB_ENV

# Decide whether packages should go to stable, candidate or nightly
- name: Prepare packages
run: |
kind=stable
if [[ $VERSION == *-rc.[0-9] ]] ; then
kind=candidate
elif [[ $VERSION == *-* ]] ; then
kind=nightly
fi
echo "Kind: $kind"
mkdir -p packages/syncthing/$kind
mv packages/*.deb packages/syncthing/$kind

- name: Pull archive
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_OBJSTORE_TYPE: s3
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
with:
args: sync objstore:apt/dists dists

- name: Update archive
uses: docker://ghcr.io/kastelo/ezapt:latest
with:
args:
publish
--add packages
--dists dists
env:
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}

- name: Push archive
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_OBJSTORE_TYPE: s3
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
with:
args: sync -v --no-update-modtime dists objstore:apt/dists

#
# Build and push to Docker Hub
@@ -730,8 +908,11 @@ jobs:
docker-syncthing:
name: Build and push Docker images
runs-on: ubuntu-latest
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || github.ref == 'refs/heads/main' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/heads/release-'))
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release-nightly' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/tags/v'))
environment: docker
permissions:
contents: read
packages: write
strategy:
matrix:
pkg:
@@ -752,6 +933,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882

- uses: actions/setup-go@v5
with:
@@ -791,9 +973,18 @@ jobs:
uses: docker/login-action@v3
if: env.DOCKER_PUSH == 'true'
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Login to GHCR
uses: docker/login-action@v3
if: env.DOCKER_PUSH == 'true'
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

@@ -805,13 +996,13 @@ jobs:
echo Release version, pushing to :latest and version tags
major=${version%.*.*}
minor=${version%.*}
tags=${{ matrix.image }}:$version,${{ matrix.image }}:$major,${{ matrix.image }}:$minor,${{ matrix.image }}:latest
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:$major,ghcr.io/${{ matrix.image }}:$major,docker.io/${{ matrix.image }}:$minor,ghcr.io/${{ matrix.image }}:$minor,docker.io/${{ matrix.image }}:latest,ghcr.io/${{ matrix.image }}:latest
elif [[ $version == *-rc.@([0-9]|[0-9][0-9]) ]] ; then
echo Release candidate, pushing to :rc
tags=${{ matrix.image }}:rc
echo Release candidate, pushing to :rc and version tags
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:rc,ghcr.io/${{ matrix.image }}:rc
else
echo Development version, pushing to :edge
tags=${{ matrix.image }}:edge
tags=docker.io/${{ matrix.image }}:edge,ghcr.io/${{ matrix.image }}:edge
fi
echo "DOCKER_TAGS=$tags" >> $GITHUB_ENV
echo "VERSION=$version" >> $GITHUB_ENV
.github/workflows/pr-linters.yaml (vendored, new file, 49 changes)
@@ -0,0 +1,49 @@
name: Run PR linters

on:
pull_request:
workflow_dispatch:

permissions:
contents: read
pull-requests: read

jobs:

#
# golangci-lint runs a suite of static analysis checks on the code
#

golangci:
runs-on: ubuntu-latest
name: Golangci-lint
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 'stable'

- name: ensure asset generation
run: go run build.go assets

- name: golangci-lint
uses: golangci/golangci-lint-action@v8
with:
only-new-issues: true

#
# Meta checks for formatting, copyright, etc
#

meta:
name: Meta checks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 'stable'

- run: |
go run build.go assets
go test -v ./meta
.github/workflows/pr-metadata.yaml (vendored, new file, 27 changes)
@@ -0,0 +1,27 @@
name: PR metadata

on:
pull_request_target:
types:
- opened
- reopened
- edited
- synchronize

permissions:
contents: read
pull-requests: write

jobs:

#
# Set labels on PRs, which are then used to categorise release notes
#

labels:
name: Set labels
runs-on: ubuntu-latest
steps:
- uses: srvaroa/labeler@v1
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/release-syncthing.yaml (vendored, new file, 60 changes)
@@ -0,0 +1,60 @@
name: Release Syncthing

on:
push:
branches:
- release
- release-rc*

permissions:
contents: write

jobs:
create-release-tag:
name: Create release tag
runs-on: ubuntu-latest
environment: release
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

- uses: actions/setup-go@v5
with:
go-version: stable

- name: Determine version to release
run: |
if [[ "$GITHUB_REF_NAME" == "release" ]] ; then
next=$(go run ./script/next-version.go)
else
next=$(go run ./script/next-version.go --pre)
fi
echo "NEXT=$next" >> $GITHUB_ENV
echo "Next version is $next"

prev=$(git describe --exclude "*-*" --abbrev=0)
echo "PREV=$prev" >> $GITHUB_ENV
echo "Previous version is $prev"

- name: Determine release notes
run: |
go run ./script/relnotes.go --new-ver "$NEXT" --branch "$GITHUB_REF_NAME" --prev-ver "$PREV" > notes.md
env:
GITHUB_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

- name: Create and push tag
run: |
git config --global user.name 'Syncthing Release Automation'
git config --global user.email 'release@syncthing.net'
git tag -a -F notes.md --cleanup=whitespace "$NEXT"
git push origin "$NEXT"

- name: Trigger the build
uses: benc-uk/workflow-dispatch@v1
with:
workflow: build-syncthing.yaml
ref: refs/tags/${{ env.NEXT }}
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
.gitignore (vendored, 1 change)
@@ -18,3 +18,4 @@ deb
/repos
/proto/scripts/protoc-gen-gosyncthing
/gui/next-gen-gui
/compat.json
@@ -1,26 +1,67 @@
linters-settings:
maligned:
suggest-new: true

version: "2"
linters:
enable-all: true
default: all
disable:
- goimports
- cyclop
- depguard
- lll
- gochecknoinits
- gochecknoglobals
- gofmt
- scopelint
- gocyclo
- exhaustive
- exhaustruct
- forbidigo
- funlen
- wsl
- gochecknoglobals
- gochecknoinits
- gocognit
- goconst
- gocyclo
- godox

service:
golangci-lint-version: 1.21.x
prepare:
- rm -f go.sum # 1.12 -> 1.13 issues with QUIC-go
- GO111MODULE=on go mod vendor
- go run build.go assets
- gomoddirectives
- inamedparam
- interfacebloat
- ireturn
- lll
- maintidx
- mnd
- musttag
- nestif
- nlreturn
- nonamedreturns
- paralleltest
- prealloc
- predeclared
- protogetter
- recvcheck
- revive
- tagalign
- tagliatelle
- testpackage
- usetesting # go 1.24
- varnamelen
- whitespace
- wrapcheck
- wsl
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- internal/gen
- cmd/dev
- repos
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofumpt
exclusions:
generated: lax
paths:
- internal/gen
- cmd/dev
- repos
- third_party$
- builtin$
- examples$
.policy.yml (new file, 98 changes)
@@ -0,0 +1,98 @@
# This is the policy-bot configuration for this repository. It controls
# which approvals are required for any given pull request. The format is
# described at https://github.com/palantir/policy-bot. The syntax of the
# policy can be verified by the bot:
# curl https://pb.syncthing.net/api/validate -X PUT -T .policy.yml

# The policy below is what is required for any pull request.
policy:
approval:
- subject is conventional commit
- project metadata requires maintainer approval
- or:
- is approved by a syncthing contributor
- is a translation or dependency update by a contributor
- is a trivial change by a contributor

# Additionally, contributors can disapprove of a PR
disapproval:
requires:
teams:
- syncthing/contributors

# The rules for the policy are described below.

approval_rules:

# All commits (PRs before squashing) should have a valid conventional
# commit type subject.
- name: subject is conventional commit
requires:
conditions:
title:
matches:
- '^(feat|fix|docs|chore|refactor|build): [a-z].+'
- '^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+'

# Changes to important project metadata and documentation, including this
# policy, require signoff by a maintainer
- name: project metadata requires maintainer approval
if:
changed_files:
paths:
- ^[^/]+\.md
- ^\.policy\.yml
- ^LICENSE
requires:
count: 1
teams:
- syncthing/maintainers
options:
ignore_update_merges: true
allow_contributor: true

# Regular pull requests require approval by an active contributor
- name: is approved by a syncthing contributor
requires:
count: 1
teams:
- syncthing/contributors
options:
ignore_update_merges: true
allow_contributor: true

# Changes to some files (translations, dependencies, compatibility) do not
# require approval if they were proposed by a contributor and have a
# matching commit subject
- name: is a translation or dependency update by a contributor
if:
only_changed_files:
paths:
- ^gui/default/assets/lang/
- ^go\.mod$
- ^go\.sum$
- ^compat\.yaml$
title:
matches:
- '^chore\(gui\):'
- '^build\(deps\):'
- '^build\(compat\):'
has_author_in:
teams:
- syncthing/contributors

# If the change is small and the label "trivial" is added, we accept that
# on trust. These PRs can be audited after the fact as appropriate.
# Features are not trivial.
- name: is a trivial change by a contributor
if:
modified_lines:
total: "< 25"
title:
not_matches:
- '^feat'
has_labels:
- trivial
has_author_in:
teams:
- syncthing/contributors
AUTHORS (22 changes)
@@ -20,6 +20,7 @@ Alan Pope <alan@popey.com>
Alberto Donato <albertodonato@users.noreply.github.com>
Aleksey Vasenev <margtu-fivt@ya.ru>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alex Ionescu <github@ionescu.sh>
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
Alex Xu <alex.hello71@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
@@ -47,6 +48,7 @@ Arkadiusz Tymiński <gevleeog@gmail.com>
Aroun <login@b-vo.fr>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Artur Zubilewicz <AkaZecik@users.noreply.github.com>
Ashish Bhate <bhate.ashish@gmail.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
Aurélien Rainone <476650+arl@users.noreply.github.com>
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
@@ -96,6 +98,7 @@ Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@u
Daniel Martí (mvdan) <mvdan@mvdan.cc>
Daniel Padrta <64928366+danpadcz@users.noreply.github.com>
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
dashangcun <907225865@qq.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
DeflateAwning <11021263+DeflateAwning@users.noreply.github.com>
@@ -111,6 +114,7 @@ diemade <spamkill@posteo.ch>
digital <didev@dinid.net>
Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
domain <32405309+szu17dmy@users.noreply.github.com>
Domenic Horner <domenic@tgxn.net>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
@@ -141,10 +145,13 @@ greatroar <61184462+greatroar@users.noreply.github.com>
Greg <gco@jazzhaiku.com>
guangwu <guoguangwu@magic-shield.com>
gudvinr <gudvinr@gmail.com>
Gusted <postmaster@gusted.xyz> <williamzijl7@hotmail.com>
Han Boetes <han@boetes.org>
HansK-p <42314815+HansK-p@users.noreply.github.com>
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
Hazem Krimi <me@hazemkrimi.tech>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Hireworks <129852174+hireworksltd@users.noreply.github.com>
Hugo Locurcio <hugo.locurcio@hugo.pro>
Iain Barnett <iainspeed@gmail.com>
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
@@ -188,6 +195,7 @@ Jörg Thalheim <Mic92@users.noreply.github.com>
Jędrzej Kula <kula.jedrek@gmail.com>
K.B.Dharun Krishna <kbdharunkrishna@gmail.com>
Kalle Laine <pahakalle@protonmail.com>
Kapil Sareen <kapilsareen584@gmail.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kebin Liu <lkebin@gmail.com>
Keith Harrison <keithh@protonmail.com>
@@ -216,8 +224,10 @@ luzpaz <luzpaz@users.noreply.github.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcel Meyer <mm.marcelmeyer@gmail.com>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
marco-m <marco.molteni@laposte.net>
Marcus B Spencer <marcus@marcusspencer.xyz> <marcus@marcusspencer.us>
Marcus Legendre <marcus.legendre@gmail.com>
Mario Majila <mariustshipichik@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au>
@@ -225,6 +235,7 @@ Martchus <martchus@gmx.net>
Martin Polehla <p0l0us@users.noreply.github.com>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Mateusz Ż <thedead4fun@live.com>
mathias4833 <67101597+mathias4833@users.noreply.github.com>
Matic Potočnik <hairyfotr@gmail.com>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
@@ -232,6 +243,7 @@ Matteo Ruina <matteo.ruina@gmail.com>
Maurizio Tomasi <ziotom78@gmail.com>
Max <github@germancoding.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
maxice8 <30738253+maxice8@users.noreply.github.com>
MaximAL <almaximal@ya.ru>
Maxime Thirouin <m@moox.io>
Maximilian <maxi.rostock@outlook.de> <public@complexvector.space>
@@ -269,6 +281,7 @@ Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
Pablo <pbaeyens31+github@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Paul Brit <paulbrit44@gmail.com>
Paul Donald <newtwen+github@gmail.com>
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
Paweł Rozlach <vespian@users.noreply.github.com>
perewa <cavalcante.ten@gmail.com>
@@ -282,7 +295,9 @@ Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Pier Paolo Ramon <ramonpierre@gmail.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
polyfloyd <polyfloyd@users.noreply.github.com>
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
pullmerge <166967364+pullmerge@users.noreply.github.com>
Quentin Hibon <qh.public@yahoo.com>
Rahmi Pruitt <rjpruitt16@gmail.com>
red_led <red-led@users.noreply.github.com>
@@ -305,7 +320,9 @@ Severin von Wnuck-Lipinski <ss7@live.de>
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
Simon Frei (imsodin) <freisim93@gmail.com>
Simon Mwepu <simonmwepu@gmail.com>
Simon Pickup <simon@pickupinfinity.com>
Sly_tom_cat <slytomcat@mail.ru>
Sonu Kumar Saw <31889738+dev-saw99@users.noreply.github.com>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
@@ -313,18 +330,23 @@ Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Sven Bachmann <dev@mcbachmann.de>
Syncthing Automation <automation@syncthing.net>
Syncthing Release Automation <release@syncthing.net>
Sébastien WENSKE <sebastien@wenske.fr>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Terrance <git@terrance.allofti.me>
TheCreeper <TheCreeper@users.noreply.github.com>
Thomas <9749173+uhthomas@users.noreply.github.com>
Thomas Hipp <thomashipp@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tim Nordenfur <tim@gurka.se>
Tobias Frölich <40638719+tobifroe@users.noreply.github.com>
Tobias Klauser <tobias.klauser@gmail.com>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tom Jakubowski <tom@crystae.net>
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
Tommy Thorn <tommy-github-email@thorn.ws>
Tommy van der Vorst <tommy-github@pixelspark.nl> <tommy@pixelspark.nl>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Tyler Kropp <kropptyler@gmail.com>
@@ -49,6 +49,11 @@ services:
- 22000:22000/udp # QUIC file transfers
- 21027:21027/udp # Receive local discovery broadcasts
restart: unless-stopped
healthcheck:
test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
interval: 1m
timeout: 10s
retries: 3
```

## Discovery
@@ -84,6 +89,11 @@ services:
- /wherever/st-sync:/var/syncthing
network_mode: host
restart: unless-stopped
healthcheck:
test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
interval: 1m
timeout: 10s
retries: 3
```

Be aware that syncthing alone is now in control of what interfaces and ports it
README.md (12 changes)
@@ -82,13 +82,11 @@ build process.

## Signed Releases

As of v0.10.15 and onwards, release binaries are GPG signed with the key
D26E6ED000654A3E, available from https://syncthing.net/security/ and
most key servers.

There is also a built-in automatic upgrade mechanism (disabled in some
distribution channels) which uses a compiled in ECDSA signature. macOS
binaries are also properly code signed.
Release binaries are GPG signed with the key available from
https://syncthing.net/security/. There is also a built-in automatic
upgrade mechanism (disabled in some distribution channels) which uses a
compiled in ECDSA signature. macOS and Windows binaries are also
code-signed.

## Documentation
buf.gen.yaml (new file, 12 changes)
@@ -0,0 +1,12 @@
version: v2
managed:
enabled: true
override:
- file_option: go_package_prefix
value: github.com/syncthing/syncthing/internal/gen
plugins:
- remote: buf.build/protocolbuffers/go:v1.35.1
out: .
opt: module=github.com/syncthing/syncthing
inputs:
- directory: proto
buf.yaml (new file, 10 changes)
@@ -0,0 +1,10 @@
version: v2
modules:
- path: proto
name: github.com/syncthing/syncthing
lint:
use:
- STANDARD
breaking:
use:
- WIRE_JSON
build.go (307 changes)
@@ -4,8 +4,8 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:build ignore
// +build ignore
//go:build tools
// +build tools

package main

@@ -15,7 +15,6 @@ import (
"bytes"
"compress/flate"
"compress/gzip"
"encoding/base64"
"encoding/json"
"errors"
"flag"
@@ -34,6 +33,8 @@ import (
"time"

buildpkg "github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/upgrade"
"sigs.k8s.io/yaml"
)

var (
@@ -84,7 +85,6 @@ var targets = map[string]target{
"all": {
// Only valid for the "build" and "install" commands as it lacks all
// the archive creation stuff. buildPkgs gets filled out in init()
tags: []string{"purego"},
},
"syncthing": {
// The default target for "build", "install", "tar", "zip", "deb", etc.
@@ -95,40 +95,40 @@ var targets = map[string]target{
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/syncthing"},
binaryName: "syncthing", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "README.md", dst: "README.txt", perm: 0644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
{src: "README.md", dst: "README.txt", perm: 0o644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
// All files from etc/ and extra/ added automatically in init().
},
systemdService: "syncthing@*.service",
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0644},
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0644},
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0644},
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0644},
{src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0644},
{src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0644},
{src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0644},
{src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0644},
{src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0644},
{src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0644},
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0o644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0o644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0o644},
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0o644},
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0o644},
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0o644},
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0o644},
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0o644},
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0o644},
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0o644},
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0o644},
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0o644},
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0o644},
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0o644},
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0o644},
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0o644},
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0o644},
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0o644},
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0o644},
{src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0o644},
},
},
"stdiscosrv": {
@@ -140,23 +140,22 @@ var targets = map[string]target{
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stdiscosrv"},
binaryName: "stdiscosrv", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
},
|
||||
systemdService: "stdiscosrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0o644},
|
||||
},
|
||||
tags: []string{"purego"},
|
||||
},
|
||||
"strelaysrv": {
|
||||
name: "strelaysrv",
|
||||
@@ -167,61 +166,47 @@ var targets = map[string]target{
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaysrv"},
|
||||
binaryName: "strelaysrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
},
|
||||
systemdService: "strelaysrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0o644},
|
||||
},
|
||||
},
|
||||
"strelaypoolsrv": {
|
||||
name: "strelaypoolsrv",
|
||||
debname: "syncthing-relaypoolsrv",
|
||||
debdeps: []string{"libc6"},
|
||||
description: "Syncthing Relay Pool Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv",
|
||||
},
|
||||
"stupgrades": {
|
||||
name: "stupgrades",
|
||||
description: "Syncthing Upgrade Check Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stupgrades"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stupgrades"},
|
||||
binaryName: "stupgrades",
|
||||
},
|
||||
"stcrashreceiver": {
|
||||
name: "stcrashreceiver",
|
||||
description: "Syncthing Crash Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stcrashreceiver"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stcrashreceiver"},
|
||||
binaryName: "stcrashreceiver",
|
||||
},
|
||||
"ursrv": {
|
||||
name: "ursrv",
|
||||
description: "Syncthing Usage Reporting Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/ursrv"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/ursrv"},
|
||||
binaryName: "ursrv",
|
||||
},
|
||||
}
|
||||
@@ -230,15 +215,11 @@ func initTargets() {
|
||||
all := targets["all"]
|
||||
pkgs, _ := filepath.Glob("cmd/*")
|
||||
for _, pkg := range pkgs {
|
||||
pkg = filepath.Base(pkg)
|
||||
if strings.HasPrefix(pkg, ".") {
|
||||
// ignore dotfiles
|
||||
if files, err := filepath.Glob(pkg + "/*.go"); err != nil || len(files) == 0 {
|
||||
// No go files in the directory
|
||||
continue
|
||||
}
|
||||
if noupgrade && pkg == "stupgrades" {
|
||||
continue
|
||||
}
|
||||
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/cmd/%s", pkg))
|
||||
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/%s", pkg))
|
||||
}
|
||||
targets["all"] = all
|
||||
|
||||
@@ -246,13 +227,13 @@ func initTargets() {
|
||||
// and "extra" dirs.
|
||||
syncthingPkg := targets["syncthing"]
|
||||
for _, file := range listFiles("etc") {
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
|
||||
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0o644})
|
||||
}
|
||||
targets["syncthing"] = syncthingPkg
|
||||
}
|
||||
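Most of the churn in this hunk is mechanical: the file-mode constants move from legacy octal literals (0644, 0755) to the explicit 0o-prefixed form. The two spellings denote the same value; a minimal standalone sketch (not part of the diff) illustrates this:

```go
package main

import "fmt"

func main() {
	// 0755 and 0o755 are the same untyped octal constant; only the
	// spelling differs. The 0o prefix makes the base explicit.
	fmt.Println(0755 == 0o755, 0644 == 0o644) // true true
}
```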
@@ -342,12 +323,14 @@ func runCommand(cmd string, target target) {

case "tar":
buildTar(target, tags)
writeCompatJSON()

case "zip":
buildZip(target, tags)
writeCompatJSON()

case "deb":
buildDeb(target)
buildDeb(target, tags)

case "vet":
metalintShort()
@@ -405,7 +388,6 @@ func parseFlags() {
func test(tags []string, pkgs ...string) {
lazyRebuildAssets()

tags = append(tags, "purego")
args := []string{"test", "-tags", strings.Join(tags, " ")}
if long {
timeout = longTimeout
@@ -439,7 +421,7 @@ func bench(tags []string, pkgs ...string) {
func integration(bench bool) {
lazyRebuildAssets()
args := []string{"test", "-v", "-timeout", "60m", "-tags"}
tags := "purego,integration"
tags := "integration"
if bench {
tags += ",benchmark"
}
@@ -627,7 +609,7 @@ func buildZip(target target, tags []string) {
fmt.Println(filename)
}

func buildDeb(target target) {
func buildDeb(target target, tags []string) {
os.RemoveAll("deb")

// "goarch" here is set to whatever the Debian packages expect. We correct
@@ -641,7 +623,7 @@ func buildDeb(target target) {
goarch = "arm"
}

build(target, []string{"noupgrade"})
build(target, append(tags, "noupgrade"))

for i := range target.installationFiles {
target.installationFiles[i].src = strings.Replace(target.installationFiles[i].src, "{{binary}}", target.BinaryName(), 1)
@@ -663,6 +645,9 @@ func buildDeb(target target) {
// than just 0.14.26. This rectifies that.
debver = strings.Replace(debver, "-", "~", -1)
}
if strings.Contains(debver, "_") {
debver = strings.Replace(debver, "_", "~", -1)
}
args := []string{
"-t", "deb",
"-s", "dir",
@@ -750,7 +735,7 @@ func shouldBuildSyso(dir string) (string, error) {
}

jsonPath := filepath.Join(dir, "versioninfo.json")
err = os.WriteFile(jsonPath, bs, 0644)
err = os.WriteFile(jsonPath, bs, 0o644)
if err != nil {
return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
}
@@ -809,7 +794,7 @@ func copyFile(src, dst string, perm os.FileMode) error {
}

copy:
os.MkdirAll(filepath.Dir(dst), 0777)
os.MkdirAll(filepath.Dir(dst), 0o777)
if err := os.WriteFile(dst, in, perm); err != nil {
return err
}
@@ -834,12 +819,12 @@ func listFiles(dir string) []string {

func rebuildAssets() {
os.Setenv("SOURCE_DATE_EPOCH", fmt.Sprint(buildStamp()))
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto")
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto")
}

func lazyRebuildAssets() {
shouldRebuild := shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") ||
shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui")
shouldRebuildAssets("cmd/infra/strelaypoolsrv/auto/gui.files.go", "cmd/infra/strelaypoolsrv/gui")

if withNextGenGUI {
shouldRebuild = buildNextGenGUI() || shouldRebuild
@@ -869,7 +854,7 @@ func buildNextGenGUI() bool {
for _, src := range listFiles("next-gen-gui/dist") {
rel, _ := filepath.Rel("next-gen-gui/dist", src)
dst := filepath.Join("gui", rel)
if err := copyFile(src, dst, 0644); err != nil {
if err := copyFile(src, dst, 0o644); err != nil {
fmt.Println("copy:", err)
os.Exit(1)
}
@@ -925,22 +910,9 @@ func updateDependencies() {
}

func proto() {
pv := protobufVersion()
repo := "https://github.com/gogo/protobuf.git"
path := filepath.Join("repos", "protobuf")

runPrint(goCmd, "install", fmt.Sprintf("github.com/gogo/protobuf/protoc-gen-gogofast@%v", pv))
os.MkdirAll("repos", 0755)

if _, err := os.Stat(path); err != nil {
runPrint("git", "clone", repo, path)
} else {
runPrintInDir(path, "git", "fetch")
}
runPrintInDir(path, "git", "checkout", pv)

runPrint(goCmd, "generate", "github.com/syncthing/syncthing/cmd/stdiscosrv")
runPrint(goCmd, "generate", "proto/generate.go")
// buf needs to be installed
// https://buf.build/docs/installation/
runPrint("buf", "generate")
}

func testmocks() {
@@ -1375,10 +1347,7 @@ func zipFile(out string, files []archiveFile) {
}

func codesign(target target) {
switch goos {
case "windows":
windowsCodesign(target.BinaryName())
case "darwin":
if goos == "darwin" {
macosCodesign(target.BinaryName())
}
}
@@ -1402,70 +1371,6 @@ func macosCodesign(file string) {
}
}

func windowsCodesign(file string) {
st := "signtool.exe"

if path := os.Getenv("CODESIGN_SIGNTOOL"); path != "" {
st = path
}

for i, algo := range []string{"sha1", "sha256"} {
args := []string{"sign", "/fd", algo}
if f := os.Getenv("CODESIGN_CERTIFICATE_FILE"); f != "" {
args = append(args, "/f", f)
} else if b := os.Getenv("CODESIGN_CERTIFICATE_BASE64"); b != "" {
// Decode the PFX certificate from base64.
bs, err := base64.RawStdEncoding.DecodeString(b)
if err != nil {
log.Println("Codesign: signing failed: decoding base64:", err)
return
}

// Write it to a temporary file
f, err := os.CreateTemp("", "codesign-*.pfx")
if err != nil {
log.Println("Codesign: signing failed: creating temp file:", err)
return
}
_ = f.Chmod(0600) // best effort remove other users' access
defer os.Remove(f.Name())
if _, err := f.Write(bs); err != nil {
log.Println("Codesign: signing failed: writing temp file:", err)
return
}
if err := f.Close(); err != nil {
log.Println("Codesign: signing failed: closing temp file:", err)
return
}

// Use that when signing
args = append(args, "/f", f.Name())
}
if p := os.Getenv("CODESIGN_CERTIFICATE_PASSWORD"); p != "" {
args = append(args, "/p", p)
}
if tr := os.Getenv("CODESIGN_TIMESTAMP_SERVER"); tr != "" {
switch algo {
case "sha256":
args = append(args, "/tr", tr, "/td", algo)
default:
args = append(args, "/t", tr)
}
}
if i > 0 {
args = append(args, "/as")
}
args = append(args, file)

bs, err := runError(st, args...)
if err != nil {
log.Printf("Codesign: signing failed: %v: %s", err, string(bs))
return
}
log.Println("Codesign: successfully signed", file, "using", algo)
}
}

func metalint() {
lazyRebuildAssets()
runPrint(goCmd, "test", "-run", "Metalint", "./meta")
@@ -1483,14 +1388,6 @@ func (t target) BinaryName() string {
return t.binaryName
}

func protobufVersion() string {
bs, err := runError(goCmd, "list", "-f", "{{.Version}}", "-m", "github.com/gogo/protobuf")
if err != nil {
log.Fatal("Getting protobuf version:", err)
}
return string(bs)
}

func currentAndLatestVersions(n int) ([]string, error) {
bs, err := runError("git", "tag", "--sort", "taggerdate")
if err != nil {
@@ -1557,3 +1454,29 @@ func nextPatchVersion(ver string) string {
digits[len(digits)-1] = strconv.Itoa(n + 1)
return strings.Join(digits, ".")
}

func writeCompatJSON() {
bs, err := os.ReadFile("compat.yaml")
if err != nil {
log.Fatal("Reading compat.yaml:", err)
}

var entries []upgrade.ReleaseCompatibility
if err := yaml.Unmarshal(bs, &entries); err != nil {
log.Fatal("Parsing compat.yaml:", err)
}

rt := runtime.Version()
for _, e := range entries {
if !strings.HasPrefix(rt, e.Runtime) {
continue
}
bs, _ := json.MarshalIndent(e, "", " ")
if err := os.WriteFile("compat.json", bs, 0o644); err != nil {
log.Fatal("Writing compat.json:", err)
}
return
}

log.Fatalf("runtime %v not found in compat.yaml", rt)
}
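The new writeCompatJSON picks the compat.yaml entry whose Runtime field is a prefix of the local Go runtime version and writes it out as compat.json. A minimal sketch of just that matching step, with made-up entries for illustration (the real entries are upgrade.ReleaseCompatibility values):

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	// Hypothetical compat entries; only the Runtime prefix matters here.
	entries := []string{"go1.21", "go1.22", "go1.23"}
	rt := runtime.Version() // e.g. "go1.22.7"
	for _, e := range entries {
		if strings.HasPrefix(rt, e) {
			fmt.Println("matched compat entry for", e)
			return
		}
	}
	fmt.Println("runtime", rt, "not found in compat.yaml")
}
```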
3 build.sh
@@ -23,10 +23,11 @@ case "${1:-default}" in

prerelease)
script authors
script copyrights
build weblate
pushd man ; ./refresh.sh ; popd
git add -A gui man AUTHORS
git commit -m 'gui, man, authors: Update docs, translations, and contributors'
git commit -m 'chore(gui, man, authors): update docs, translations, and contributors'
;;

*)

@@ -7,6 +7,7 @@
package main

import (
"crypto/sha256"
"errors"
"flag"
"fmt"
@@ -16,7 +17,6 @@ import (
"path/filepath"

_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256"
)

func main() {
@@ -15,6 +15,9 @@ import (
"strings"
"time"

"google.golang.org/protobuf/proto"

"github.com/syncthing/syncthing/internal/gen/discoproto"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/beacon"
"github.com/syncthing/syncthing/lib/discover"
@@ -75,20 +78,21 @@ func recv(bc beacon.Interface) {
continue
}

var ann discover.Announce
ann.Unmarshal(data[4:])
var ann discoproto.Announce
proto.Unmarshal(data[4:], &ann)

if ann.ID == myID {
id, _ := protocol.DeviceIDFromBytes(ann.Id)
if id == myID {
// This is one of our own fake packets, don't print it.
continue
}

// Print announcement details for the first packet from a given
// device ID and source address, or if -all was given.
key := ann.ID.String() + src.String()
key := id.String() + src.String()
if all || !seen[key] {
log.Printf("Announcement from %v\n", src)
log.Printf(" %v at %s\n", ann.ID, strings.Join(ann.Addresses, ", "))
log.Printf(" %v at %s\n", id, strings.Join(ann.Addresses, ", "))
seen[key] = true
}
}
@@ -96,11 +100,11 @@ func recv(bc beacon.Interface) {

// sends fake discovery announcements once every second
func send(bc beacon.Interface) {
ann := discover.Announce{
ID: myID,
ann := &discoproto.Announce{
Id: myID[:],
Addresses: []string{"tcp://fake.example.com:12345"},
}
bs, _ := ann.Marshal()
bs, _ := proto.Marshal(ann)

for {
bc.Send(bs)
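The discovery tool now sends and receives the protobuf-generated discoproto.Announce, whose Id field carries raw bytes rather than a protocol.DeviceID. A hedged sketch of the round trip the new code relies on, using the import paths visible in the hunk above (the zero device ID is just for illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	"github.com/syncthing/syncthing/internal/gen/discoproto"
	"github.com/syncthing/syncthing/lib/protocol"
)

func main() {
	var myID protocol.DeviceID // zero-valued ID, illustration only

	// Sending: a DeviceID is a byte array, so myID[:] becomes the raw
	// bytes the protobuf message carries.
	out := &discoproto.Announce{Id: myID[:], Addresses: []string{"tcp://fake.example.com:12345"}}
	bs, _ := proto.Marshal(out)

	// Receiving: convert the bytes back before comparing device IDs.
	var in discoproto.Announce
	_ = proto.Unmarshal(bs, &in)
	id, _ := protocol.DeviceIDFromBytes(in.Id)
	fmt.Println(id == myID) // true for our own announcements
}
```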
@@ -7,6 +7,7 @@
package main

import (
"crypto/sha256"
"flag"
"fmt"
"io"
@@ -14,7 +15,6 @@ import (
"time"

_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/sha256"
)

func main() {
@@ -8,6 +8,7 @@ package main

import (
"bytes"
"cmp"
"compress/gzip"
"context"
"io"
@@ -15,7 +16,7 @@ import (
"math"
"os"
"path/filepath"
"sort"
"slices"
"time"
)

@@ -177,8 +178,8 @@ func (d *diskStore) inventory() error {
})
return nil
})
sort.Slice(d.currentFiles, func(i, j int) bool {
return d.currentFiles[i].mtime < d.currentFiles[j].mtime
slices.SortFunc(d.currentFiles, func(a, b currentFile) int {
return cmp.Compare(a.mtime, b.mtime)
})
var oldest time.Duration
if len(d.currentFiles) > 0 {
@@ -14,6 +14,7 @@ package main

import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
@@ -29,7 +30,6 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/ur"
)

@@ -71,7 +71,6 @@ func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]b

url := urlPrefix + l.version + filename[idx:]
resp, err := l.client.Get(url)

if err != nil {
fmt.Println("Loading source:", err)
return nil, 0
@@ -9,14 +9,13 @@ package main
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"time"

"github.com/syncthing/syncthing/lib/sha256"
)

// userIDFor returns a string we can use as the user ID for the purpose of
@@ -53,5 +52,5 @@ func compressAndWrite(bs []byte, fullPath string) error {
gw.Close()

// Create an output file with the compressed report
return os.WriteFile(fullPath, buf.Bytes(), 0644)
return os.WriteFile(fullPath, buf.Bytes(), 0o644)
}
@@ -10,7 +10,7 @@ to NAT or firewall issues.

There is very little reason why you'd want to run this yourself, as
`relaypoolsrv` is just used for announcement and lookup of public relay
servers. If you are looking to setup a private or a public relay, please
servers. If you are looking to set up a private or a public relay, please
check the documentation for
[relaysrv](https://github.com/syncthing/relaysrv), which also explains how
to join the default public pool.
@@ -21,4 +21,3 @@ See `relaypoolsrv -help` for configuration options.

[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).

[lib/pq](https://github.com/lib/pq), Copyright (C) 2011-2013 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany.
@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:generate go run ../../../script/genassets.go -o gui.files.go ../gui
//go:generate go run ../../../../script/genassets.go -o gui.files.go ../gui

// Package auto contains auto generated files for web assets.
package auto
@@ -23,11 +23,13 @@ import (
lru "github.com/hashicorp/golang-lru/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"

"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto"
"github.com/syncthing/syncthing/lib/assets"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/geoip"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/relay/client"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/tlsutil"
@@ -109,6 +111,7 @@ var (
requestProcessors = 8
geoipLicenseKey = os.Getenv("GEOIP_LICENSE_KEY")
geoipAccountID, _ = strconv.Atoi(os.Getenv("GEOIP_ACCOUNT_ID"))
maxRelaysReturned = 100

requests chan request

@@ -140,6 +143,7 @@ func main() {
flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")
flag.StringVar(&geoipLicenseKey, "geoip-license-key", geoipLicenseKey, "License key for GeoIP database")
flag.IntVar(&maxRelaysReturned, "max-relays-returned", maxRelaysReturned, "Maximum number of relays returned for a normal endpoint query")

flag.Parse()

@@ -330,6 +334,10 @@ func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
relays = append(relays, relayShort{URL: slimURL(r.URL)})
}
mut.RUnlock()
if len(relays) > maxRelaysReturned {
rand.Shuffle(relays)
relays = relays[:maxRelaysReturned]
}

_ = json.NewEncoder(rw).Encode(map[string][]relayShort{
"relays": relays,
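handleEndpointShort now caps its response at maxRelaysReturned by shuffling and truncating, so a large pool returns a random subset rather than a fixed prefix of the relay list. A standalone sketch of the same pattern using only the standard library (the real code uses syncthing's lib/rand.Shuffle):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	relays := []string{"relay-a", "relay-b", "relay-c", "relay-d", "relay-e"}
	maxRelaysReturned := 3

	if len(relays) > maxRelaysReturned {
		// Shuffle first so the truncation below keeps a random subset.
		rand.Shuffle(len(relays), func(i, j int) { relays[i], relays[j] = relays[j], relays[i] })
		relays = relays[:maxRelaysReturned]
	}
	fmt.Println(relays)
}
```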
374 cmd/infra/stupgrades/main.go Normal file
@@ -0,0 +1,374 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
type cli struct {
|
||||
Listen string `default:":8080" help:"Listen address"`
|
||||
MetricsListen string `default:":8082" help:"Listen address for metrics"`
|
||||
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
|
||||
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
|
||||
CacheTime time.Duration `default:"15m" help:"Cache time"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
|
||||
Level: slog.LevelInfo,
|
||||
})))
|
||||
|
||||
if err := server(¶ms); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func server(params *cli) error {
|
||||
if params.MetricsListen != "" {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
metricsListen, err := net.Listen("tcp", params.MetricsListen)
|
||||
if err != nil {
|
||||
return fmt.Errorf("metrics: %w", err)
|
||||
}
|
||||
slog.Info("Metrics listener started", "addr", params.MetricsListen)
|
||||
go func() {
|
||||
if err := http.Serve(metricsListen, mux); err != nil {
|
||||
slog.Warn("Metrics server returned", "error", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
cache := &cachedReleases{url: params.URL}
|
||||
if err := cache.Update(context.Background()); err != nil {
|
||||
return fmt.Errorf("initial cache update: %w", err)
|
||||
} else {
|
||||
slog.Info("Initial cache update done")
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range time.NewTicker(params.CacheTime).C {
|
||||
slog.Info("Refreshing cached releases", "url", params.URL)
|
||||
if err := cache.Update(context.Background()); err != nil {
|
||||
slog.Error("Failed to refresh cached releases", "url", params.URL, "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ghRels := &githubReleases{cache: cache}
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/ping", ghRels.servePing)
|
||||
mux.HandleFunc("/meta.json", ghRels.serveReleases)
|
||||
|
||||
for _, fwd := range params.Forward {
|
||||
path, url, ok := strings.Cut(fwd, "->")
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid forward: %q", fwd)
|
||||
}
|
||||
slog.Info("Forwarding", "from", path, "to", url)
|
||||
name := strings.ReplaceAll(path, "/", "_")
|
||||
mux.Handle(path, httpcache.SinglePath(&proxy{name: name, url: url}, params.CacheTime))
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: params.Listen,
|
||||
Handler: mux,
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
srv.SetKeepAlivesEnabled(false)
|
||||
|
||||
srvListener, err := net.Listen("tcp", params.Listen)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listen: %w", err)
|
||||
}
|
||||
slog.Info("Main listener started", "addr", params.Listen)
|
||||
|
||||
return srv.Serve(srvListener)
|
||||
}
|
||||
|
||||
type githubReleases struct {
|
||||
cache *cachedReleases
|
||||
}
|
||||
|
||||
func (p *githubReleases) servePing(w http.ResponseWriter, req *http.Request) {
|
||||
rels := p.cache.Releases()
|
||||
|
||||
if len(rels) == 0 {
|
||||
http.Error(w, "No releases available", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Syncthing-Num-Releases", strconv.Itoa(len(rels)))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (p *githubReleases) serveReleases(w http.ResponseWriter, req *http.Request) {
|
||||
rels := p.cache.Releases()
|
||||
|
||||
ua := req.Header.Get("User-Agent")
|
||||
osv := req.Header.Get("Syncthing-Os-Version")
|
||||
if ua != "" && osv != "" {
|
||||
// We should determine the compatibility of the releases.
|
||||
rels = filterForCompabitility(rels, ua, osv)
|
||||
} else {
|
||||
metricFilterCalls.WithLabelValues("no-ua-or-osversion").Inc()
|
||||
}
|
||||
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Vary", "User-Agent, Syncthing-Os-Version")
|
||||
_ = json.NewEncoder(w).Encode(rels)
|
||||
|
||||
metricUpgradeChecks.Inc()
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
name string
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
metricHTTPRequests.WithLabelValues(p.name, "success").Inc()
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
w.Header().Set("Content-Type", ct)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
}
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
if strings.HasPrefix(ct, "application/json") {
|
||||
// Special JSON handling; clean it up a bit.
|
||||
var v interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
} else {
|
||||
_, _ = io.Copy(w, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// filterForLatest returns the latest stable and prerelease only. If the
|
||||
// stable version is newer (comes first in the list) there is no need to go
|
||||
// looking for a prerelease at all.
|
||||
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
|
||||
var filtered []upgrade.Release
|
||||
havePre := make(map[string]bool)
|
||||
haveStable := make(map[string]bool)
|
||||
for _, rel := range rels {
|
||||
major, _, _ := strings.Cut(rel.Tag, ".")
|
||||
if !rel.Prerelease && !haveStable[major] {
|
||||
// Remember the first non-pre for each major
|
||||
filtered = append(filtered, rel)
|
||||
haveStable[major] = true
|
||||
continue
|
||||
}
|
||||
if rel.Prerelease && !havePre[major] && !haveStable[major] {
|
||||
// We remember the first prerelease we find, unless we've
|
||||
// already found a non-pre of the same major.
|
||||
filtered = append(filtered, rel)
|
||||
havePre[major] = true
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
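filterForLatest keeps only the first stable release and, at most, the first pre-release per major version, in the order the (newest-first) release list arrives. A hedged usage sketch, not part of the file, with made-up tags:

```go
// Input, newest first, as the GitHub releases API returns it:
//   v1.28.0-rc.2 (prerelease), v1.27.3, v1.27.2
rels := []upgrade.Release{
	{Tag: "v1.28.0-rc.2", Prerelease: true},
	{Tag: "v1.27.3"},
	{Tag: "v1.27.2"},
}
// filterForLatest keeps v1.28.0-rc.2 (first prerelease, no stable of
// major "v1" seen yet) and v1.27.3 (first stable). Had v1.27.3 come
// first, the rc would have been dropped entirely.
latest := filterForLatest(rels)
_ = latest
```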
|
||||
var userAgentOSArchExp = regexp.MustCompile(`^syncthing.*\(.+ (\w+)-(\w+)\)$`)
|
||||
|
||||
func filterForCompabitility(rels []upgrade.Release, ua, osv string) []upgrade.Release {
|
||||
osArch := userAgentOSArchExp.FindStringSubmatch(ua)
|
||||
if len(osArch) != 3 {
|
||||
metricFilterCalls.WithLabelValues("bad-os-arch").Inc()
|
||||
return rels
|
||||
}
|
||||
os := osArch[1]
|
||||
|
||||
var filtered []upgrade.Release
|
||||
for _, rel := range rels {
|
||||
if rel.Compatibility == nil {
|
||||
// No requirements means it's compatible with everything.
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
|
||||
req, ok := rel.Compatibility.Requirements[os]
|
||||
if !ok {
|
||||
// No entry for the current OS means it's compatible.
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
|
||||
if upgrade.CompareVersions(osv, req) >= 0 {
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(filtered) != len(rels) {
|
||||
metricFilterCalls.WithLabelValues("filtered").Inc()
|
||||
} else {
|
||||
metricFilterCalls.WithLabelValues("unchanged").Inc()
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
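userAgentOSArchExp pulls the OS and architecture out of the client's User-Agent header; only the OS part is then used to look up the per-OS requirement in the release's compat metadata. A small runnable sketch with an illustrative UA string (the version numbers are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

var userAgentOSArchExp = regexp.MustCompile(`^syncthing.*\(.+ (\w+)-(\w+)\)$`)

func main() {
	ua := "syncthing v1.27.6 (go1.21.8 linux-amd64)"
	m := userAgentOSArchExp.FindStringSubmatch(ua)
	// m[1] is the OS ("linux"), m[2] the architecture ("amd64").
	fmt.Println(m[1], m[2])
}
```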
|
||||
type cachedReleases struct {
|
||||
url string
|
||||
mut sync.RWMutex
|
||||
current []upgrade.Release
|
||||
latestRel, latestPre string
|
||||
}
|
||||
|
||||
func (c *cachedReleases) Releases() []upgrade.Release {
|
||||
c.mut.RLock()
|
||||
defer c.mut.RUnlock()
|
||||
return c.current
|
||||
}
|
||||
|
||||
func (c *cachedReleases) Update(ctx context.Context) error {
|
||||
rels, err := fetchGithubReleases(ctx, c.url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
latestRel, latestPre := "", ""
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease && latestRel == "" {
|
||||
latestRel = rel.Tag
|
||||
}
|
||||
if rel.Prerelease && latestPre == "" {
|
||||
latestPre = rel.Tag
|
||||
}
|
||||
if latestRel != "" && latestPre != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mut.Lock()
|
||||
c.current = rels
|
||||
if latestRel != c.latestRel || latestPre != c.latestPre {
|
||||
metricLatestReleaseInfo.DeleteLabelValues(c.latestRel, c.latestPre)
|
||||
metricLatestReleaseInfo.WithLabelValues(latestRel, latestPre).Set(1)
|
||||
c.latestRel = latestRel
|
||||
c.latestPre = latestPre
|
||||
}
|
||||
c.mut.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchGithubReleases(ctx context.Context, url string) ([]upgrade.Release, error) {
|
||||
req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var rels []upgrade.Release
|
||||
if err := json.NewDecoder(resp.Body).Decode(&rels); err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "success").Inc()
|
||||
|
||||
// Move the URL used for browser downloads to the URL field, and remove
|
||||
// the browser URL field. This avoids going via the GitHub API for
|
||||
// downloads, since Syncthing uses the URL field.
|
||||
for _, rel := range rels {
|
||||
for j, asset := range rel.Assets {
|
||||
rel.Assets[j].URL = asset.BrowserURL
|
||||
rel.Assets[j].BrowserURL = ""
|
||||
}
|
||||
}
|
||||
|
||||
addReleaseCompatibility(ctx, rels)
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
return rels, nil
|
||||
}
|
||||
|
||||
func addReleaseCompatibility(ctx context.Context, rels []upgrade.Release) {
|
||||
for i := range rels {
|
||||
rel := &rels[i]
|
||||
for i, asset := range rel.Assets {
|
||||
if asset.Name != "compat.json" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Load compat.json into the Compatibility field
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, asset.URL, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
break
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
break
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
resp.Body.Close()
|
||||
break
|
||||
}
|
||||
_ = json.NewDecoder(io.LimitReader(resp.Body, 10<<10)).Decode(&rel.Compatibility)
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "success").Inc()
|
||||
resp.Body.Close()
|
||||
|
||||
// Remove compat.json from the asset list since it's been processed
|
||||
rel.Assets = append(rel.Assets[:i], rel.Assets[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
36 cmd/infra/stupgrades/metrics.go Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
metricUpgradeChecks = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "metadata_requests",
|
||||
})
|
||||
metricFilterCalls = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "filter_calls",
|
||||
}, []string{"result"})
|
||||
metricHTTPRequests = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "http_requests",
|
||||
}, []string{"target", "result"})
|
||||
metricLatestReleaseInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "latest_release_info",
|
||||
Help: "Release information",
|
||||
}, []string{"latest_release", "latest_pre"})
|
||||
)
|
||||
@@ -8,22 +8,22 @@ package main

import (
"log"
"log/slog"
"os"

"github.com/alecthomas/kong"
"github.com/syncthing/syncthing/cmd/ursrv/aggregate"
"github.com/syncthing/syncthing/cmd/ursrv/serve"
"github.com/syncthing/syncthing/cmd/infra/ursrv/serve"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
)

type CLI struct {
Serve serve.CLI `cmd:"" default:""`
Aggregate aggregate.CLI `cmd:""`
Serve serve.CLI `cmd:"" default:""`
}

func main() {
log.SetFlags(log.Ltime | log.Ldate | log.Lshortfile)
log.SetOutput(os.Stdout)
slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
Level: slog.LevelInfo,
})))

var cli CLI
ctx := kong.Parse(&cli)
46 cmd/infra/ursrv/serve/metrics.go Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright (C) 2023 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "incoming_reports_total",
|
||||
}, []string{"result"})
|
||||
metricsCollectsTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collects_total",
|
||||
})
|
||||
metricsCollectSecondsTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collect_seconds_total",
|
||||
})
|
||||
metricsCollectSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collect_seconds_last",
|
||||
})
|
||||
metricsWriteSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "write_seconds_last",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
metricReportsTotal.WithLabelValues("fail")
|
||||
metricReportsTotal.WithLabelValues("replace")
|
||||
metricReportsTotal.WithLabelValues("accept")
|
||||
}
|
||||
314 cmd/infra/ursrv/serve/prometheus.go Normal file
@@ -0,0 +1,314 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/syncthing/syncthing/lib/ur/contract"
|
||||
)
|
||||
|
||||
const namePrefix = "syncthing_usage_"
|
||||
|
||||
type metricsSet struct {
|
||||
srv *server
|
||||
|
||||
gauges map[string]prometheus.Gauge
|
||||
gaugeVecs map[string]*prometheus.GaugeVec
|
||||
gaugeVecLabels map[string][]string
|
||||
summaries map[string]*metricSummary
|
||||
|
||||
collectMut sync.Mutex
|
||||
collectCutoff time.Duration
|
||||
}
|
||||
|
||||
func newMetricsSet(srv *server) *metricsSet {
|
||||
s := &metricsSet{
|
||||
srv: srv,
|
||||
gauges: make(map[string]prometheus.Gauge),
|
||||
gaugeVecs: make(map[string]*prometheus.GaugeVec),
|
||||
gaugeVecLabels: make(map[string][]string),
|
||||
summaries: make(map[string]*metricSummary),
|
||||
collectCutoff: -24 * time.Hour,
|
||||
}
|
||||
|
||||
var initForType func(reflect.Type)
|
||||
initForType = func(t reflect.Type) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if field.Type.Kind() == reflect.Struct {
|
||||
initForType(field.Type)
|
||||
continue
|
||||
}
|
||||
name, typ, label := fieldNameTypeLabel(field)
|
||||
sname, labels := nameConstLabels(name)
|
||||
switch typ {
|
||||
case "gauge":
|
||||
s.gauges[name] = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: namePrefix + sname,
|
||||
ConstLabels: labels,
|
||||
})
|
||||
case "summary":
|
||||
s.summaries[name] = newMetricSummary(namePrefix+sname, nil, labels)
|
||||
case "gaugeVec":
|
||||
s.gaugeVecLabels[name] = append(s.gaugeVecLabels[name], label)
|
||||
case "summaryVec":
|
||||
s.summaries[name] = newMetricSummary(namePrefix+sname, []string{label}, labels)
|
||||
}
|
||||
}
|
||||
}
|
||||
initForType(reflect.ValueOf(contract.Report{}).Type())
|
||||
|
||||
for name, labels := range s.gaugeVecLabels {
|
||||
s.gaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: namePrefix + name,
|
||||
}, labels)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func fieldNameTypeLabel(rf reflect.StructField) (string, string, string) {
|
||||
metric := rf.Tag.Get("metric")
|
||||
name, typ, ok := strings.Cut(metric, ",")
|
||||
if !ok {
|
||||
return "", "", ""
|
||||
}
|
||||
gv, label, ok := strings.Cut(typ, ":")
|
||||
if ok {
|
||||
typ = gv
|
||||
}
|
||||
return name, typ, label
|
||||
}
|
||||
|
||||
func nameConstLabels(name string) (string, prometheus.Labels) {
|
||||
if name == "-" {
|
||||
return "", nil
|
||||
}
|
||||
name, labels, ok := strings.Cut(name, "{")
|
||||
if !ok {
|
||||
return name, nil
|
||||
}
|
||||
lls := strings.Split(labels[:len(labels)-1], ",")
|
||||
m := make(map[string]string)
|
||||
for _, l := range lls {
|
||||
k, v, _ := strings.Cut(l, "=")
|
||||
m[k] = v
|
||||
}
|
||||
return name, m
|
||||
}
|
||||
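fieldNameTypeLabel and nameConstLabels together decode the `metric` struct tags on contract.Report fields: the part before the comma is the metric name (optionally with `{key=value}` constant labels), the part after the comma is the metric kind, optionally followed by `:labelname` for vector types. A hedged walkthrough of one such tag; the field name and tag below are illustrative, not copied from contract.Report:

```go
// Field declaration as it might appear in the report struct:
//     Distribution string `metric:"reports_total{channel=stable},gaugeVec:distribution"`
//
// fieldNameTypeLabel returns:
//     name  = "reports_total{channel=stable}"
//     typ   = "gaugeVec"
//     label = "distribution"
//
// nameConstLabels(name) then splits off the constant labels:
//     sname  = "reports_total"
//     labels = prometheus.Labels{"channel": "stable"}
```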
|
||||
func (s *metricsSet) addReport(r *contract.Report) {
|
||||
gaugeVecs := make(map[string][]string)
|
||||
s.addReportStruct(reflect.ValueOf(r).Elem(), gaugeVecs)
|
||||
for name, lv := range gaugeVecs {
|
||||
s.gaugeVecs[name].WithLabelValues(lv...).Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) addReportStruct(v reflect.Value, gaugeVecs map[string][]string) {
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if field.Kind() == reflect.Struct {
|
||||
s.addReportStruct(field, gaugeVecs)
|
||||
continue
|
||||
}
|
||||
|
||||
name, typ, label := fieldNameTypeLabel(t.Field(i))
|
||||
switch typ {
|
||||
case "gauge":
|
||||
switch v := field.Interface().(type) {
|
||||
case int:
|
||||
s.gauges[name].Add(float64(v))
|
||||
case string:
|
||||
s.gaugeVecs[name].WithLabelValues(v).Add(1)
|
||||
case bool:
|
||||
if v {
|
||||
s.gauges[name].Add(1)
|
||||
}
|
||||
}
|
||||
case "gaugeVec":
|
||||
var labelValue string
|
||||
switch v := field.Interface().(type) {
|
||||
case string:
|
||||
labelValue = v
|
||||
case int:
|
||||
labelValue = strconv.Itoa(v)
|
||||
case map[string]int:
|
||||
for k, v := range v {
|
||||
labelValue = k
|
||||
field.SetInt(int64(v))
|
||||
break
|
||||
}
|
||||
}
|
||||
if _, ok := gaugeVecs[name]; !ok {
|
||||
gaugeVecs[name] = make([]string, len(s.gaugeVecLabels[name]))
|
||||
}
|
||||
for i, l := range s.gaugeVecLabels[name] {
|
||||
if l == label {
|
||||
gaugeVecs[name][i] = labelValue
|
||||
break
|
||||
}
|
||||
}
|
||||
case "summary", "summaryVec":
|
||||
switch v := field.Interface().(type) {
|
||||
case int:
|
||||
s.summaries[name].Observe("", float64(v))
|
||||
case float64:
|
||||
s.summaries[name].Observe("", v)
|
||||
case []int:
|
||||
for _, v := range v {
|
||||
s.summaries[name].Observe("", float64(v))
|
||||
}
|
||||
case map[string]int:
|
||||
for k, v := range v {
|
||||
if k == "" {
|
||||
// avoid empty string labels as those are the sign
|
||||
// of a non-vec summary
|
||||
k = "unknown"
|
||||
}
|
||||
s.summaries[name].Observe(k, float64(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) Describe(c chan<- *prometheus.Desc) {
|
||||
for _, g := range s.gauges {
|
||||
g.Describe(c)
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Describe(c)
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Describe(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) Collect(c chan<- prometheus.Metric) {
|
||||
s.collectMut.Lock()
|
||||
defer s.collectMut.Unlock()
|
||||
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
dur := time.Since(t0).Seconds()
|
||||
metricsCollectSecondsLast.Set(dur)
|
||||
metricsCollectSecondsTotal.Add(dur)
|
||||
metricsCollectsTotal.Inc()
|
||||
}()
|
||||
|
||||
for _, g := range s.gauges {
|
||||
g.Set(0)
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Reset()
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Reset()
|
||||
}
|
||||
|
||||
cutoff := time.Now().Add(s.collectCutoff)
|
||||
s.srv.reports.Range(func(key string, r *contract.Report) bool {
|
||||
if s.collectCutoff < 0 && r.Received.Before(cutoff) {
|
||||
s.srv.reports.Delete(key)
|
||||
return true
|
||||
}
|
||||
s.addReport(r)
|
||||
return true
|
||||
})
|
||||
|
||||
for _, g := range s.gauges {
|
||||
c <- g
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Collect(c)
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Collect(c)
|
||||
}
|
||||
}
|
||||
|
||||
type metricSummary struct {
|
||||
name string
|
||||
values map[string][]float64
|
||||
zeroes map[string]int
|
||||
|
||||
qDesc *prometheus.Desc
|
||||
countDesc *prometheus.Desc
|
||||
sumDesc *prometheus.Desc
|
||||
zDesc *prometheus.Desc
|
||||
}
|
||||
|
||||
func newMetricSummary(name string, labels []string, constLabels prometheus.Labels) *metricSummary {
|
||||
return &metricSummary{
|
||||
name: name,
|
||||
values: make(map[string][]float64),
|
||||
zeroes: make(map[string]int),
|
||||
qDesc: prometheus.NewDesc(name, "", append(labels, "quantile"), constLabels),
|
||||
countDesc: prometheus.NewDesc(name+"_nonzero_count", "", labels, constLabels),
|
||||
sumDesc: prometheus.NewDesc(name+"_sum", "", labels, constLabels),
|
||||
zDesc: prometheus.NewDesc(name+"_zero_count", "", labels, constLabels),
|
||||
}
|
||||
}
|
||||
|
||||
func (q *metricSummary) Observe(labelValue string, v float64) {
|
||||
if v == 0 {
|
||||
q.zeroes[labelValue]++
|
||||
return
|
||||
}
|
||||
q.values[labelValue] = append(q.values[labelValue], v)
|
||||
}
|
||||
|
||||
func (q *metricSummary) Describe(c chan<- *prometheus.Desc) {
|
||||
c <- q.qDesc
|
||||
c <- q.countDesc
|
||||
c <- q.sumDesc
|
||||
c <- q.zDesc
|
||||
}
|
||||
|
||||
func (q *metricSummary) Collect(c chan<- prometheus.Metric) {
|
||||
for lv, vs := range q.values {
|
||||
var labelVals []string
|
||||
if lv != "" {
|
||||
labelVals = []string{lv}
|
||||
}
|
||||
|
||||
c <- prometheus.MustNewConstMetric(q.countDesc, prometheus.GaugeValue, float64(len(vs)), labelVals...)
|
||||
c <- prometheus.MustNewConstMetric(q.zDesc, prometheus.GaugeValue, float64(q.zeroes[lv]), labelVals...)
|
||||
|
||||
var sum float64
|
||||
for _, v := range vs {
|
||||
sum += v
|
||||
}
|
||||
c <- prometheus.MustNewConstMetric(q.sumDesc, prometheus.GaugeValue, sum, labelVals...)
|
||||
|
||||
if len(vs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
slices.Sort(vs)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[0], append(labelVals, "0")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*5/100], append(labelVals, "0.05")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)/2], append(labelVals, "0.5")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*9/10], append(labelVals, "0.9")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*95/100], append(labelVals, "0.95")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)-1], append(labelVals, "1")...)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *metricSummary) Reset() {
|
||||
clear(q.values)
|
||||
clear(q.zeroes)
|
||||
}
|
||||
422 cmd/infra/ursrv/serve/serve.go Normal file
@@ -0,0 +1,422 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/syncthing/syncthing/internal/blob"
|
||||
"github.com/syncthing/syncthing/internal/blob/azureblob"
|
||||
"github.com/syncthing/syncthing/internal/blob/s3"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/geoip"
|
||||
"github.com/syncthing/syncthing/lib/ur/contract"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
Listen string `env:"UR_LISTEN" help:"Usage reporting & metrics endpoint listen address" default:"0.0.0.0:8080"`
|
||||
ListenInternal string `env:"UR_LISTEN_INTERNAL" help:"Internal metrics endpoint listen address" default:"0.0.0.0:8082"`
|
||||
GeoIPLicenseKey string `env:"UR_GEOIP_LICENSE_KEY"`
|
||||
GeoIPAccountID int `env:"UR_GEOIP_ACCOUNT_ID"`
|
||||
DumpFile string `env:"UR_DUMP_FILE" default:"reports.jsons.gz"`
|
||||
DumpInterval time.Duration `env:"UR_DUMP_INTERVAL" default:"5m"`
|
||||
|
||||
S3Endpoint string `name:"s3-endpoint" env:"UR_S3_ENDPOINT"`
|
||||
S3Region string `name:"s3-region" env:"UR_S3_REGION"`
|
||||
S3Bucket string `name:"s3-bucket" env:"UR_S3_BUCKET"`
|
||||
S3AccessKeyID string `name:"s3-access-key-id" env:"UR_S3_ACCESS_KEY_ID"`
|
||||
S3SecretKey string `name:"s3-secret-key" env:"UR_S3_SECRET_KEY"`
|
||||
|
||||
AzureBlobAccount string `name:"azure-blob-account" env:"UR_AZUREBLOB_ACCOUNT"`
|
||||
AzureBlobKey string `name:"azure-blob-key" env:"UR_AZUREBLOB_KEY"`
|
||||
AzureBlobContainer string `name:"azure-blob-container" env:"UR_AZUREBLOB_CONTAINER"`
|
||||
}
|
||||
|
||||
var (
|
||||
compilerRe = regexp.MustCompile(`\(([A-Za-z0-9()., -]+) \w+-\w+(?:| android| default)\) ([\w@.-]+)`)
|
||||
knownDistributions = []distributionMatch{
|
||||
// Maps well known builders to the official distribution method that
|
||||
// they represent
|
||||
|
||||
{regexp.MustCompile(`\steamcity@build\.syncthing\.net`), "GitHub"},
|
||||
{regexp.MustCompile(`\sjenkins@build\.syncthing\.net`), "GitHub"},
|
||||
{regexp.MustCompile(`\sbuilder@github\.syncthing\.net`), "GitHub"},
|
||||
|
||||
{regexp.MustCompile(`\sdeb@build\.syncthing\.net`), "APT"},
|
||||
{regexp.MustCompile(`\sdebian@github\.syncthing\.net`), "APT"},
|
||||
|
||||
{regexp.MustCompile(`\sdocker@syncthing\.net`), "Docker Hub"},
|
||||
{regexp.MustCompile(`\sdocker@build.syncthing\.net`), "Docker Hub"},
|
||||
{regexp.MustCompile(`\sdocker@github.syncthing\.net`), "Docker Hub"},
|
||||
|
||||
{regexp.MustCompile(`\sandroid-builder@github\.syncthing\.net`), "Google Play"},
|
||||
{regexp.MustCompile(`\sandroid-.*teamcity@build\.syncthing\.net`), "Google Play"},
|
||||
|
||||
{regexp.MustCompile(`\sandroid-.*vagrant@basebox-stretch64`), "F-Droid"},
|
||||
{regexp.MustCompile(`\svagrant@bullseye`), "F-Droid"},
|
||||
{regexp.MustCompile(`\svagrant@bookworm`), "F-Droid"},
|
||||
|
||||
{regexp.MustCompile(`Anwender@NET2017`), "Syncthing-Fork (3rd party)"},
|
||||
|
||||
{regexp.MustCompile(`\sbuilduser@(archlinux|svetlemodry)`), "Arch (3rd party)"},
|
||||
{regexp.MustCompile(`\ssyncthing@archlinux`), "Arch (3rd party)"},
|
||||
{regexp.MustCompile(`@debian`), "Debian (3rd party)"},
|
||||
{regexp.MustCompile(`@fedora`), "Fedora (3rd party)"},
|
||||
{regexp.MustCompile(`@openSUSE`), "openSUSE (3rd party)"},
|
||||
{regexp.MustCompile(`\sbrew@`), "Homebrew (3rd party)"},
|
||||
{regexp.MustCompile(`\sroot@buildkitsandbox`), "LinuxServer.io (3rd party)"},
|
||||
{regexp.MustCompile(`\sports@freebsd`), "FreeBSD (3rd party)"},
|
||||
{regexp.MustCompile(`\snix@nix`), "Nix (3rd party)"},
|
||||
{regexp.MustCompile(`.`), "Others"},
|
||||
}
|
||||
)
|
||||
|
||||
type distributionMatch struct {
|
||||
matcher *regexp.Regexp
|
||||
distribution string
|
||||
}
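Each report's LongVersion is matched against the knownDistributions table above in order; the first hit wins, and the final catch-all entry maps everything else to "Others". A minimal, self-contained sketch of the same first-match lookup, using a two-entry subset of the table and a made-up LongVersion string:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Two-entry subset of knownDistributions, for illustration only.
        known := []struct {
            matcher      *regexp.Regexp
            distribution string
        }{
            {regexp.MustCompile(`\sdeb@build\.syncthing\.net`), "APT"},
            {regexp.MustCompile(`.`), "Others"},
        }
        // Made-up LongVersion in the usual format.
        longVersion := `syncthing v1.27.0 "Gold Grasshopper" (go1.21.5 linux-amd64) deb@build.syncthing.net 2023-12-05`
        for _, d := range known {
            if d.matcher.MatchString(longVersion) {
                fmt.Println(d.distribution) // prints "APT": the builder matches the APT pattern
                break
            }
        }
    }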
|
||||
|
||||
func (cli *CLI) Run() error {
|
||||
slog.Info("Starting", "version", build.Version)
|
||||
|
||||
// Listening
|
||||
|
||||
urListener, err := net.Listen("tcp", cli.Listen)
|
||||
if err != nil {
|
||||
slog.Error("Failed to listen (usage reports)", "error", err)
|
||||
return err
|
||||
}
|
||||
slog.Info("Listening (usage reports)", "address", urListener.Addr())
|
||||
|
||||
internalListener, err := net.Listen("tcp", cli.ListenInternal)
|
||||
if err != nil {
|
||||
slog.Error("Failed to listen (internal)", "error", err)
|
||||
return err
|
||||
}
|
||||
slog.Info("Listening (internal)", "address", internalListener.Addr())
|
||||
|
||||
var geo *geoip.Provider
|
||||
if cli.GeoIPAccountID != 0 && cli.GeoIPLicenseKey != "" {
|
||||
geo, err = geoip.NewGeoLite2CityProvider(context.Background(), cli.GeoIPAccountID, cli.GeoIPLicenseKey, os.TempDir())
|
||||
if err != nil {
|
||||
slog.Error("Failed to load GeoIP", "error", err)
|
||||
return err
|
||||
}
|
||||
go geo.Serve(context.TODO())
|
||||
}
|
||||
|
||||
// Blob storage
|
||||
|
||||
var blobs blob.Store
|
||||
if cli.S3Endpoint != "" {
|
||||
blobs, err = s3.NewSession(cli.S3Endpoint, cli.S3Region, cli.S3Bucket, cli.S3AccessKeyID, cli.S3SecretKey)
|
||||
if err != nil {
|
||||
slog.Error("Failed to create S3 session", "error", err)
|
||||
return err
|
||||
}
|
||||
} else if cli.AzureBlobAccount != "" {
|
||||
blobs, err = azureblob.NewBlobStore(cli.AzureBlobAccount, cli.AzureBlobKey, cli.AzureBlobContainer)
|
||||
if err != nil {
|
||||
slog.Error("Failed to create Azure blob store", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(cli.DumpFile); err != nil && blobs != nil {
|
||||
if err := cli.downloadDumpFile(blobs); err != nil {
|
||||
slog.Error("Failed to download dump file", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// server
|
||||
|
||||
srv := &server{
|
||||
geo: geo,
|
||||
reports: xsync.NewMapOf[string, *contract.Report](),
|
||||
}
|
||||
|
||||
if fd, err := os.Open(cli.DumpFile); err == nil {
|
||||
gr, err := gzip.NewReader(fd)
|
||||
if err == nil {
|
||||
srv.load(gr)
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range time.Tick(cli.DumpInterval) {
|
||||
if err := cli.saveDumpFile(srv, blobs); err != nil {
|
||||
slog.Error("Failed to write dump file", "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// The internal metrics endpoint just serves metrics about what the
|
||||
// server is doing.
|
||||
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
internalSrv := http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
}
|
||||
go internalSrv.Serve(internalListener)
|
||||
|
||||
// New external metrics endpoint accepts reports from clients and serves
|
||||
// aggregated usage reporting metrics.
|
||||
|
||||
ms := newMetricsSet(srv)
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(ms)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
|
||||
mux.HandleFunc("/newdata", srv.handleNewData)
|
||||
mux.HandleFunc("/ping", srv.handlePing)
|
||||
|
||||
metricsSrv := http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
Handler: mux,
|
||||
}
|
||||
|
||||
slog.Info("Ready to serve")
|
||||
return metricsSrv.Serve(urListener)
|
||||
}
|
||||
|
||||
func (cli *CLI) downloadDumpFile(blobs blob.Store) error {
|
||||
latestKey, err := blobs.LatestKey(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("list latest S3 key: %w", err)
|
||||
}
|
||||
fd, err := os.Create(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create dump file: %w", err)
|
||||
}
|
||||
if err := blobs.Download(context.Background(), latestKey, fd); err != nil {
|
||||
_ = fd.Close()
|
||||
return fmt.Errorf("download dump file: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("close dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file downloaded", "key", latestKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *CLI) saveDumpFile(srv *server, blobs blob.Store) error {
|
||||
fd, err := os.Create(cli.DumpFile + ".tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating dump file: %w", err)
|
||||
}
|
||||
gw := gzip.NewWriter(fd)
|
||||
if err := srv.save(gw); err != nil {
|
||||
return fmt.Errorf("saving dump file: %w", err)
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
fd.Close()
|
||||
return fmt.Errorf("closing gzip writer: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("closing dump file: %w", err)
|
||||
}
|
||||
if err := os.Rename(cli.DumpFile+".tmp", cli.DumpFile); err != nil {
|
||||
return fmt.Errorf("renaming dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file saved")
|
||||
|
||||
if blobs != nil {
|
||||
key := fmt.Sprintf("reports-%s.jsons.gz", time.Now().UTC().Format("2006-01-02"))
|
||||
fd, err := os.Open(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening dump file: %w", err)
|
||||
}
|
||||
if err := blobs.Upload(context.Background(), key, fd); err != nil {
|
||||
return fmt.Errorf("uploading dump file: %w", err)
|
||||
}
|
||||
_ = fd.Close()
|
||||
slog.Info("Dump file uploaded")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
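The blob key embeds the UTC date, so repeated saves on the same day reuse the same key and the store ends up with at most one uploaded dump per day. A small illustration of the key produced for a fixed (arbitrary) timestamp:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ts := time.Date(2024, 5, 17, 23, 30, 0, 0, time.UTC) // arbitrary example timestamp
        key := fmt.Sprintf("reports-%s.jsons.gz", ts.Format("2006-01-02"))
        fmt.Println(key) // reports-2024-05-17.jsons.gz
    }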
|
||||
|
||||
type server struct {
|
||||
geo *geoip.Provider
|
||||
reports *xsync.MapOf[string, *contract.Report]
|
||||
}
|
||||
|
||||
func (s *server) handlePing(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (s *server) handleNewData(w http.ResponseWriter, r *http.Request) {
|
||||
result := "fail"
|
||||
defer func() {
|
||||
// result is "accept" (new report), "replace" (existing report) or
|
||||
// "fail"
|
||||
metricReportsTotal.WithLabelValues(result).Inc()
|
||||
}()
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
addr := r.Header.Get("X-Forwarded-For")
|
||||
if addr != "" {
|
||||
addr = strings.Split(addr, ", ")[0]
|
||||
} else {
|
||||
addr = r.RemoteAddr
|
||||
}
|
||||
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
|
||||
log := slog.With("addr", addr)
|
||||
|
||||
if net.ParseIP(addr) == nil {
|
||||
addr = ""
|
||||
}
|
||||
|
||||
var rep contract.Report
|
||||
|
||||
lr := &io.LimitedReader{R: r.Body, N: 40 * 1024}
|
||||
bs, _ := io.ReadAll(lr)
|
||||
if err := json.Unmarshal(bs, &rep); err != nil {
|
||||
log.Error("Failed to decode JSON", "error", err)
|
||||
http.Error(w, "JSON Decode Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
rep.Received = time.Now()
|
||||
rep.Date = rep.Received.UTC().Format("20060102")
|
||||
rep.Address = addr
|
||||
|
||||
if err := rep.Validate(); err != nil {
|
||||
log.Error("Failed to validate report", "error", err)
|
||||
http.Error(w, "Validation Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if s.addReport(&rep) {
|
||||
result = "replace"
|
||||
} else {
|
||||
result = "accept"
|
||||
}
|
||||
}
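From the client side, a report is a single JSON document POSTed to /newdata; the body is read through a 40 KiB limit before decoding and validation. A hedged sketch of a minimal submission follows; the endpoint host and the report fields shown are placeholders, the authoritative schema being contract.Report:

    package main

    import (
        "fmt"
        "log"
        "net/http"
        "strings"
    )

    func main() {
        // Placeholder endpoint and illustrative, minimal report body.
        body := strings.NewReader(`{"uniqueID":"example-id","version":"v1.27.0","platform":"linux-amd64","longVersion":"syncthing v1.27.0"}`)
        resp, err := http.Post("https://ur.example.com/newdata", "application/json", body)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status) // plain 200 OK on accept/replace
    }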
|
||||
|
||||
func (s *server) addReport(rep *contract.Report) bool {
|
||||
if s.geo != nil {
|
||||
if ip := net.ParseIP(rep.Address); ip != nil {
|
||||
if city, err := s.geo.City(ip); err == nil {
|
||||
rep.Country = city.Country.Names["en"]
|
||||
rep.CountryCode = city.Country.IsoCode
|
||||
}
|
||||
}
|
||||
}
|
||||
if rep.Country == "" {
|
||||
rep.Country = "Unknown"
|
||||
}
|
||||
if rep.CountryCode == "" {
|
||||
rep.CountryCode = "ZZ"
|
||||
}
|
||||
|
||||
rep.Version = transformVersion(rep.Version)
|
||||
if strings.Contains(rep.Version, ".") {
|
||||
split := strings.SplitN(rep.Version, ".", 3)
|
||||
if len(split) == 3 {
|
||||
rep.MajorVersion = strings.Join(split[:2], ".")
|
||||
}
|
||||
}
|
||||
rep.OS, rep.Arch, _ = strings.Cut(rep.Platform, "-")
|
||||
|
||||
if m := compilerRe.FindStringSubmatch(rep.LongVersion); len(m) == 3 {
|
||||
rep.Compiler = m[1]
|
||||
rep.Builder = m[2]
|
||||
}
|
||||
for _, d := range knownDistributions {
|
||||
if d.matcher.MatchString(rep.LongVersion) {
|
||||
rep.Distribution = d.distribution
|
||||
break
|
||||
}
|
||||
}
|
||||
rep.DistDist = rep.Distribution
|
||||
rep.DistOS = rep.OS
|
||||
rep.DistArch = rep.Arch
|
||||
|
||||
_, loaded := s.reports.LoadAndStore(rep.UniqueID, rep)
|
||||
return loaded
|
||||
}
|
||||
|
||||
func (s *server) save(w io.Writer) error {
|
||||
bw := bufio.NewWriter(w)
|
||||
enc := json.NewEncoder(bw)
|
||||
var err error
|
||||
s.reports.Range(func(k string, v *contract.Report) bool {
|
||||
err = enc.Encode(v)
|
||||
return err == nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bw.Flush()
|
||||
}
|
||||
|
||||
func (s *server) load(r io.Reader) {
|
||||
dec := json.NewDecoder(r)
|
||||
s.reports.Clear()
|
||||
for {
|
||||
var rep contract.Report
|
||||
if err := dec.Decode(&rep); errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if err != nil {
|
||||
slog.Error("Failed to load record", "error", err)
|
||||
break
|
||||
}
|
||||
s.addReport(&rep)
|
||||
}
|
||||
slog.Info("Loaded reports", "count", s.reports.Size())
|
||||
}
|
||||
|
||||
var (
    plusRe  = regexp.MustCompile(`(\+.*|[.-]dev\..*)$`)
    plusStr = "-dev"
)

// transformVersion returns a version number formatted correctly, with all
// development versions aggregated into one.
func transformVersion(v string) string {
    if v == "unknown-dev" {
        return v
    }
    if !strings.HasPrefix(v, "v") {
        v = "v" + v
    }
    v = plusRe.ReplaceAllString(v, plusStr)

    return v
}
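A quick table test, not part of the change itself, with expectations derived from the regexp above: a leading "v" is added when missing, and anything after a "+" build suffix or a ".dev."/"-dev." segment collapses to "-dev". It assumes the standard testing package and the same package as transformVersion:

    func TestTransformVersionExamples(t *testing.T) {
        cases := map[string]string{
            "1.27.0":                  "v1.27.0",
            "v1.27.0":                 "v1.27.0",
            "v1.28.0-dev.23.g0123456": "v1.28.0-dev",
            "v1.27.0-rc.1+special":    "v1.27.0-rc.1-dev",
            "unknown-dev":             "unknown-dev",
        }
        for in, want := range cases {
            if got := transformVersion(in); got != want {
                t.Errorf("transformVersion(%q) = %q, want %q", in, got, want)
            }
        }
    }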
|
||||
@@ -10,9 +10,15 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/thejerf/suture/v4"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/internal/protoutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type amqpReplicator struct {
|
||||
@@ -20,7 +26,7 @@ type amqpReplicator struct {
|
||||
broker string
|
||||
sender *amqpSender
|
||||
receiver *amqpReceiver
|
||||
outbox chan ReplicationRecord
|
||||
outbox chan *discosrv.ReplicationRecord
|
||||
}
|
||||
|
||||
func newAMQPReplicator(broker, clientID string, db database) *amqpReplicator {
|
||||
@@ -29,7 +35,7 @@ func newAMQPReplicator(broker, clientID string, db database) *amqpReplicator {
|
||||
sender := &amqpSender{
|
||||
broker: broker,
|
||||
clientID: clientID,
|
||||
outbox: make(chan ReplicationRecord, replicationOutboxSize),
|
||||
outbox: make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
svc.Add(sender)
|
||||
|
||||
@@ -45,18 +51,18 @@ func newAMQPReplicator(broker, clientID string, db database) *amqpReplicator {
|
||||
broker: broker,
|
||||
sender: sender,
|
||||
receiver: receiver,
|
||||
outbox: make(chan ReplicationRecord, replicationOutboxSize),
|
||||
outbox: make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *amqpReplicator) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
func (s *amqpReplicator) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
|
||||
s.sender.send(key, ps, seen)
|
||||
}
|
||||
|
||||
type amqpSender struct {
|
||||
broker string
|
||||
clientID string
|
||||
outbox chan ReplicationRecord
|
||||
outbox chan *discosrv.ReplicationRecord
|
||||
}
|
||||
|
||||
func (s *amqpSender) Serve(ctx context.Context) error {
|
||||
@@ -71,12 +77,12 @@ func (s *amqpSender) Serve(ctx context.Context) error {
|
||||
for {
|
||||
select {
|
||||
case rec := <-s.outbox:
|
||||
size := rec.Size()
|
||||
size := proto.Size(rec)
|
||||
if len(buf) < size {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
|
||||
n, err := rec.MarshalTo(buf)
|
||||
n, err := protoutil.MarshalTo(buf, rec)
|
||||
if err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication marshal: %w", err)
|
||||
@@ -109,9 +115,9 @@ func (s *amqpSender) String() string {
|
||||
return fmt.Sprintf("amqpSender(%q)", s.broker)
|
||||
}
|
||||
|
||||
func (s *amqpSender) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
item := ReplicationRecord{
|
||||
Key: key,
|
||||
func (s *amqpSender) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
|
||||
item := &discosrv.ReplicationRecord{
|
||||
Key: key[:],
|
||||
Addresses: ps,
|
||||
Seen: seen,
|
||||
}
|
||||
@@ -156,13 +162,22 @@ func (s *amqpReceiver) Serve(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
var rec ReplicationRecord
|
||||
if err := rec.Unmarshal(msg.Body); err != nil {
|
||||
var rec discosrv.ReplicationRecord
|
||||
if err := proto.Unmarshal(msg.Body, &rec); err != nil {
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication unmarshal: %w", err)
|
||||
}
|
||||
id, err := protocol.DeviceIDFromBytes(rec.Key)
|
||||
if err != nil {
|
||||
id, err = protocol.DeviceIDFromString(string(rec.Key))
|
||||
}
|
||||
if err != nil {
|
||||
log.Println("Replication device ID:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.db.merge(rec.Key, rec.Addresses, rec.Seen); err != nil {
|
||||
if err := s.db.merge(&id, rec.Addresses, rec.Seen); err != nil {
|
||||
return fmt.Errorf("replication database merge: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -22,12 +22,13 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/stringutil"
|
||||
)
|
||||
@@ -45,10 +46,14 @@ type apiSrv struct {
|
||||
listener net.Listener
|
||||
repl replicator // optional
|
||||
useHTTP bool
|
||||
missesIncrease int
|
||||
compression bool
|
||||
gzipWriters sync.Pool
|
||||
seenTracker *retryAfterTracker
|
||||
notSeenTracker *retryAfterTracker
|
||||
}
|
||||
|
||||
mapsMut sync.Mutex
|
||||
misses map[string]int32
|
||||
type replicator interface {
|
||||
send(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64)
|
||||
}
|
||||
|
||||
type requestID int64
|
||||
@@ -61,19 +66,30 @@ type contextKey int
|
||||
|
||||
const idKey contextKey = iota
|
||||
|
||||
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool, missesIncrease int) *apiSrv {
|
||||
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP, compression bool, desiredNotFoundRate float64) *apiSrv {
|
||||
return &apiSrv{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
db: db,
|
||||
repl: repl,
|
||||
useHTTP: useHTTP,
|
||||
misses: make(map[string]int32),
|
||||
missesIncrease: missesIncrease,
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
db: db,
|
||||
repl: repl,
|
||||
useHTTP: useHTTP,
|
||||
compression: compression,
|
||||
seenTracker: &retryAfterTracker{
|
||||
name: "seenTracker",
|
||||
bucketStarts: time.Now(),
|
||||
desiredRate: desiredNotFoundRate / 2,
|
||||
currentDelay: notFoundRetryUnknownMinSeconds,
|
||||
},
|
||||
notSeenTracker: &retryAfterTracker{
|
||||
name: "notSeenTracker",
|
||||
bucketStarts: time.Now(),
|
||||
desiredRate: desiredNotFoundRate / 2,
|
||||
currentDelay: notFoundRetryUnknownMaxSeconds / 2,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Serve(_ context.Context) error {
|
||||
func (s *apiSrv) Serve(ctx context.Context) error {
|
||||
if s.useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
@@ -104,8 +120,15 @@ func (s *apiSrv) Serve(_ context.Context) error {
|
||||
ReadTimeout: httpReadTimeout,
|
||||
WriteTimeout: httpWriteTimeout,
|
||||
MaxHeaderBytes: httpMaxHeaderBytes,
|
||||
ErrorLog: log.New(io.Discard, "", 0),
|
||||
}
|
||||
if !debug {
|
||||
srv.ErrorLog = log.New(io.Discard, "", 0)
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
srv.Shutdown(context.Background())
|
||||
}()
|
||||
|
||||
err := srv.Serve(s.listener)
|
||||
if err != nil {
|
||||
@@ -175,7 +198,7 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "bad device param")
|
||||
log.Println(reqID, "bad device param:", err)
|
||||
}
|
||||
lookupRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
@@ -183,8 +206,7 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
key := deviceID.String()
|
||||
rec, err := s.db.get(key)
|
||||
rec, err := s.db.get(&deviceID)
|
||||
if err != nil {
|
||||
// some sort of internal error
|
||||
lookupRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
@@ -194,27 +216,14 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
}
|
||||
|
||||
if len(rec.Addresses) == 0 {
|
||||
lookupRequestsTotal.WithLabelValues("not_found").Inc()
|
||||
|
||||
s.mapsMut.Lock()
|
||||
misses := s.misses[key]
|
||||
if misses < rec.Misses {
|
||||
misses = rec.Misses
|
||||
var afterS int
|
||||
if rec.Seen == 0 {
|
||||
afterS = s.notSeenTracker.retryAfterS()
|
||||
lookupRequestsTotal.WithLabelValues("not_found_ever").Inc()
|
||||
} else {
|
||||
afterS = s.seenTracker.retryAfterS()
|
||||
lookupRequestsTotal.WithLabelValues("not_found_recent").Inc()
|
||||
}
|
||||
misses += int32(s.missesIncrease)
|
||||
s.misses[key] = misses
|
||||
s.mapsMut.Unlock()
|
||||
|
||||
if misses >= notFoundMissesWriteInterval {
|
||||
rec.Misses = misses
|
||||
rec.Missed = time.Now().UnixNano()
|
||||
rec.Addresses = nil
|
||||
// rec.Seen retained from get
|
||||
s.db.put(key, rec)
|
||||
}
|
||||
|
||||
afterS := notFoundRetryAfterSeconds(int(misses))
|
||||
retryAfterHistogram.Observe(float64(afterS))
|
||||
w.Header().Set("Retry-After", strconv.Itoa(afterS))
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
@@ -226,10 +235,16 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
var bw io.Writer = w
|
||||
|
||||
// Use compression if the client asks for it
|
||||
if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
|
||||
if s.compression && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
|
||||
gw, ok := s.gzipWriters.Get().(*gzip.Writer)
|
||||
if ok {
|
||||
gw.Reset(w)
|
||||
} else {
|
||||
gw = gzip.NewWriter(w)
|
||||
}
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
gw := gzip.NewWriter(bw)
|
||||
defer gw.Close()
|
||||
defer s.gzipWriters.Put(gw)
|
||||
bw = gw
|
||||
}
|
||||
|
||||
@@ -268,6 +283,9 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
|
||||
addresses := fixupAddresses(remoteAddr, ann.Addresses)
|
||||
if len(addresses) == 0 {
|
||||
if debug {
|
||||
log.Println(reqID, "no addresses")
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
@@ -275,6 +293,9 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
}
|
||||
|
||||
if err := s.handleAnnounce(deviceID, addresses); err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "handle:", err)
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
@@ -285,6 +306,9 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
|
||||
w.Header().Set("Reannounce-After", reannounceAfterString())
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
if debug {
|
||||
log.Println(reqID, "announced", deviceID, addresses)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Stop() {
|
||||
@@ -292,29 +316,31 @@ func (s *apiSrv) Stop() {
|
||||
}
|
||||
|
||||
func (s *apiSrv) handleAnnounce(deviceID protocol.DeviceID, addresses []string) error {
|
||||
key := deviceID.String()
|
||||
now := time.Now()
|
||||
expire := now.Add(addressExpiryTime).UnixNano()
|
||||
|
||||
dbAddrs := make([]DatabaseAddress, len(addresses))
|
||||
for i := range addresses {
|
||||
dbAddrs[i].Address = addresses[i]
|
||||
dbAddrs[i].Expires = expire
|
||||
}
|
||||
|
||||
// The address slice must always be sorted for database merges to work
|
||||
// properly.
|
||||
sort.Sort(databaseAddressOrder(dbAddrs))
|
||||
slices.Sort(addresses)
|
||||
addresses = slices.Compact(addresses)
|
||||
|
||||
dbAddrs := make([]*discosrv.DatabaseAddress, len(addresses))
|
||||
for i := range addresses {
|
||||
dbAddrs[i] = &discosrv.DatabaseAddress{
|
||||
Address: addresses[i],
|
||||
Expires: expire,
|
||||
}
|
||||
}
|
||||
|
||||
seen := now.UnixNano()
|
||||
if s.repl != nil {
|
||||
s.repl.send(key, dbAddrs, seen)
|
||||
s.repl.send(&deviceID, dbAddrs, seen)
|
||||
}
|
||||
return s.db.merge(key, dbAddrs, seen)
|
||||
return s.db.merge(&deviceID, dbAddrs, seen)
|
||||
}
|
||||
|
||||
func handlePing(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(204)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
@@ -360,7 +386,7 @@ func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
}
|
||||
|
||||
bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr})
|
||||
} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
|
||||
} else if cert := req.Header.Get("X-Forwarded-Tls-Client-Cert"); cert != "" {
|
||||
// Traefik 2 passtlsclientcert
|
||||
//
|
||||
// The certificate is in PEM format, maybe with URL encoding
|
||||
@@ -368,19 +394,36 @@ func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
// statements. We need to decode, reinstate the newlines every 64
|
||||
// characters and add statements for the PEM decoder
|
||||
|
||||
if strings.Contains(hdr, "%") {
|
||||
if unesc, err := url.QueryUnescape(hdr); err == nil {
|
||||
hdr = unesc
|
||||
if strings.Contains(cert, "%") {
|
||||
if unesc, err := url.QueryUnescape(cert); err == nil {
|
||||
cert = unesc
|
||||
}
|
||||
}
|
||||
|
||||
for i := 64; i < len(hdr); i += 65 {
|
||||
hdr = hdr[:i] + "\n" + hdr[i:]
|
||||
const (
|
||||
header = "-----BEGIN CERTIFICATE-----"
|
||||
footer = "-----END CERTIFICATE-----"
|
||||
)
|
||||
|
||||
var b bytes.Buffer
|
||||
b.Grow(len(header) + 1 + len(cert) + len(cert)/64 + 1 + len(footer) + 1)
|
||||
|
||||
b.WriteString(header)
|
||||
b.WriteByte('\n')
|
||||
|
||||
for i := 0; i < len(cert); i += 64 {
|
||||
end := i + 64
|
||||
if end > len(cert) {
|
||||
end = len(cert)
|
||||
}
|
||||
b.WriteString(cert[i:end])
|
||||
b.WriteByte('\n')
|
||||
}
|
||||
|
||||
hdr = "-----BEGIN CERTIFICATE-----\n" + hdr
|
||||
hdr += "\n-----END CERTIFICATE-----\n"
|
||||
bs = []byte(hdr)
|
||||
b.WriteString(footer)
|
||||
b.WriteByte('\n')
|
||||
|
||||
bs = b.Bytes()
|
||||
}
|
||||
|
||||
if bs == nil {
|
||||
@@ -482,7 +525,7 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func addressStrs(dbAddrs []DatabaseAddress) []string {
|
||||
func addressStrs(dbAddrs []*discosrv.DatabaseAddress) []string {
|
||||
res := make([]string, len(dbAddrs))
|
||||
for i, a := range dbAddrs {
|
||||
res[i] = a.Address
|
||||
@@ -494,15 +537,44 @@ func errorRetryAfterString() string {
|
||||
return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds))
|
||||
}
|
||||
|
||||
func notFoundRetryAfterSeconds(misses int) int {
|
||||
retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses
|
||||
if retryAfterS > notFoundRetryMaxSeconds {
|
||||
retryAfterS = notFoundRetryMaxSeconds
|
||||
}
|
||||
retryAfterS += rand.Intn(notFoundRetryFuzzSeconds)
|
||||
return retryAfterS
|
||||
}
|
||||
|
||||
func reannounceAfterString() string {
|
||||
return strconv.Itoa(reannounceAfterSeconds + rand.Intn(reannounzeFuzzSeconds))
|
||||
}

type retryAfterTracker struct {
    name        string
    desiredRate float64 // requests per second

    mut          sync.Mutex
    lastCount    int       // requests in the last bucket
    curCount     int       // requests in the current bucket
    bucketStarts time.Time // start of the current bucket
    currentDelay int       // current delay in seconds
}

func (t *retryAfterTracker) retryAfterS() int {
    now := time.Now()
    t.mut.Lock()
    if durS := now.Sub(t.bucketStarts).Seconds(); durS > float64(t.currentDelay) {
        t.bucketStarts = now
        t.lastCount = t.curCount
        lastRate := float64(t.lastCount) / durS

        switch {
        case t.currentDelay > notFoundRetryUnknownMinSeconds &&
            lastRate < 0.75*t.desiredRate:
            t.currentDelay = max(8*t.currentDelay/10, notFoundRetryUnknownMinSeconds)
        case t.currentDelay < notFoundRetryUnknownMaxSeconds &&
            lastRate > 1.25*t.desiredRate:
            t.currentDelay = min(3*t.currentDelay/2, notFoundRetryUnknownMaxSeconds)
        }

        t.curCount = 0
    }
    if t.curCount == 0 {
        retryAfterLevel.WithLabelValues(t.name).Set(float64(t.currentDelay))
    }
    t.curCount++
    t.mut.Unlock()
    return t.currentDelay + rand.Intn(t.currentDelay/4)
}
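The tracker nudges the advertised Retry-After toward a delay at which the observed not-found rate stays near the configured target: when the last bucket ran hot the delay grows by 50%, when it ran cold it shrinks by 20%, clamped to the package's min/max constants. A standalone sketch of just that adjustment step, with made-up bounds standing in for notFoundRetryUnknownMinSeconds and notFoundRetryUnknownMaxSeconds:

    package main

    import "fmt"

    const (
        minDelayS = 60   // stand-in for notFoundRetryUnknownMinSeconds
        maxDelayS = 3600 // stand-in for notFoundRetryUnknownMaxSeconds
    )

    // adjust mirrors the switch in retryAfterS for one completed bucket.
    func adjust(delay int, lastRate, desiredRate float64) int {
        switch {
        case delay > minDelayS && lastRate < 0.75*desiredRate:
            return max(8*delay/10, minDelayS) // traffic is low: let clients come back sooner
        case delay < maxDelayS && lastRate > 1.25*desiredRate:
            return min(3*delay/2, maxDelayS) // traffic is high: push clients back harder
        }
        return delay
    }

    func main() {
        delay := minDelayS
        for _, rate := range []float64{400, 400, 150, 60, 60} {
            delay = adjust(delay, rate, 100) // desired rate: 100 requests/s
            fmt.Printf("observed %.0f req/s -> next Retry-After ~%d s\n", rate, delay)
        }
    }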
|
||||
|
||||
@@ -7,9 +7,20 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
)
|
||||
|
||||
func TestFixupAddresses(t *testing.T) {
|
||||
@@ -94,3 +105,79 @@ func addr(host string, port int) *net.TCPAddr {
|
||||
Port: port,
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAPIRequests(b *testing.B) {
|
||||
db := newInMemoryStore(b.TempDir(), 0, nil)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
go db.Serve(ctx)
|
||||
api := newAPISrv("127.0.0.1:0", tls.Certificate{}, db, nil, true, true, 1000)
|
||||
srv := httptest.NewServer(http.HandlerFunc(api.handler))
|
||||
|
||||
kf := b.TempDir() + "/cert"
|
||||
crt, err := tlsutil.NewCertificate(kf+".crt", kf+".key", "localhost", 7)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
certBs, err := os.ReadFile(kf + ".crt")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
certBs = regexp.MustCompile(`---[^\n]+---\n`).ReplaceAll(certBs, nil)
|
||||
certString := string(strings.ReplaceAll(string(certBs), "\n", " "))
|
||||
|
||||
devID := protocol.NewDeviceID(crt.Certificate[0])
|
||||
devIDString := devID.String()
|
||||
|
||||
b.Run("Announce", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(`{"addresses":["tcp://10.10.10.10:42000"]}`))
|
||||
req.Header.Set("X-Forwarded-Tls-Client-Cert", certString)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("Lookup", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("LookupNoCompression", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
req.Header.Set("Accept-Encoding", "identity") // disable compression
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,23 +4,31 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../proto/scripts/protofmt.go database.proto
|
||||
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"cmp"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"sort"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sliceutil"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/blob"
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/internal/protoutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
)
|
||||
|
||||
type clock interface {
|
||||
@@ -34,380 +42,401 @@ func (defaultClock) Now() time.Time {
|
||||
}
|
||||
|
||||
type database interface {
|
||||
put(key string, rec DatabaseRecord) error
|
||||
merge(key string, addrs []DatabaseAddress, seen int64) error
|
||||
get(key string) (DatabaseRecord, error)
|
||||
put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error
|
||||
merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error
|
||||
get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error)
|
||||
}
|
||||
|
||||
type levelDBStore struct {
|
||||
db *leveldb.DB
|
||||
inbox chan func()
|
||||
clock clock
|
||||
marshalBuf []byte
|
||||
type inMemoryStore struct {
|
||||
m *xsync.MapOf[protocol.DeviceID, *discosrv.DatabaseRecord]
|
||||
dir string
|
||||
flushInterval time.Duration
|
||||
blobs blob.Store
|
||||
objKey string
|
||||
clock clock
|
||||
}
|
||||
|
||||
func newLevelDBStore(dir string) (*levelDBStore, error) {
|
||||
db, err := leveldb.OpenFile(dir, levelDBOptions)
|
||||
func newInMemoryStore(dir string, flushInterval time.Duration, blobs blob.Store) *inMemoryStore {
|
||||
hn, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
hn = rand.String(8)
|
||||
}
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newMemoryLevelDBStore() (*levelDBStore, error) {
|
||||
db, err := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
s := &inMemoryStore{
|
||||
m: xsync.NewMapOf[protocol.DeviceID, *discosrv.DatabaseRecord](),
|
||||
dir: dir,
|
||||
flushInterval: flushInterval,
|
||||
blobs: blobs,
|
||||
objKey: hn + ".db",
|
||||
clock: defaultClock{},
|
||||
}
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
rc := make(chan error)
|
||||
|
||||
s.inbox <- func() {
|
||||
size := rec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
nr, err := s.read()
|
||||
if os.IsNotExist(err) && blobs != nil {
|
||||
// Try to read from blob storage
|
||||
latestKey, cerr := blobs.LatestKey(context.Background())
|
||||
if cerr != nil {
|
||||
log.Println("Error finding database from blob storage:", cerr)
|
||||
return s
|
||||
}
|
||||
n, _ := rec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
fd, cerr := os.Create(path.Join(s.dir, "records.db"))
|
||||
if cerr != nil {
|
||||
log.Println("Error creating database file:", cerr)
|
||||
return s
|
||||
}
|
||||
if cerr := blobs.Download(context.Background(), latestKey, fd); cerr != nil {
|
||||
log.Printf("Error downloading database from blob storage: %v", cerr)
|
||||
}
|
||||
_ = fd.Close()
|
||||
nr, err = s.read()
|
||||
}
|
||||
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
|
||||
log.Println("Error reading database:", err)
|
||||
}
|
||||
|
||||
return err
|
||||
log.Printf("Read %d records from database", nr)
|
||||
s.expireAndCalculateStatistics()
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *levelDBStore) merge(key string, addrs []DatabaseAddress, seen int64) error {
|
||||
func (s *inMemoryStore) put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
s.m.Store(*key, rec)
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
|
||||
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
|
||||
return nil
|
||||
}
|
||||
|
||||
rc := make(chan error)
|
||||
newRec := DatabaseRecord{
|
||||
func (s *inMemoryStore) merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error {
|
||||
t0 := time.Now()
|
||||
|
||||
newRec := &discosrv.DatabaseRecord{
|
||||
Addresses: addrs,
|
||||
Seen: seen,
|
||||
}
|
||||
|
||||
s.inbox <- func() {
|
||||
// grab the existing record
|
||||
oldRec, err := s.get(key)
|
||||
if err != nil {
|
||||
// "not found" is not an error from get, so this is serious
|
||||
// stuff only
|
||||
rc <- err
|
||||
return
|
||||
}
|
||||
newRec = merge(newRec, oldRec)
|
||||
|
||||
// We replicate s.put() functionality here ourselves instead of
|
||||
// calling it because we want to serialize our get above together
|
||||
// with the put in the same function.
|
||||
size := newRec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
}
|
||||
n, _ := newRec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
if oldRec, ok := s.m.Load(*key); ok {
|
||||
newRec = merge(oldRec, newRec)
|
||||
}
|
||||
s.m.Store(*key, newRec)
|
||||
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
|
||||
}
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
|
||||
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
|
||||
func (s *inMemoryStore) get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
keyBs := []byte(key)
|
||||
val, err := s.db.Get(keyBs, nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
rec, ok := s.m.Load(*key)
|
||||
if !ok {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
|
||||
return DatabaseRecord{}, err
|
||||
return &discosrv.DatabaseRecord{}, nil
|
||||
}
|
||||
|
||||
var rec DatabaseRecord
|
||||
|
||||
if err := rec.Unmarshal(val); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResUnmarshalError).Inc()
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
|
||||
rec.Addresses = expire(rec.Addresses, s.clock.Now().UnixNano())
|
||||
rec.Addresses = expire(rec.Addresses, s.clock.Now())
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) Serve(ctx context.Context) error {
|
||||
t := time.NewTimer(0)
|
||||
defer t.Stop()
|
||||
defer s.db.Close()
|
||||
func (s *inMemoryStore) Serve(ctx context.Context) error {
|
||||
if s.flushInterval <= 0 {
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start the statistics serve routine. It will exit with us when
|
||||
// statisticsTrigger is closed.
|
||||
statisticsTrigger := make(chan struct{})
|
||||
statisticsDone := make(chan struct{})
|
||||
go s.statisticsServe(statisticsTrigger, statisticsDone)
|
||||
t := time.NewTimer(s.flushInterval)
|
||||
defer t.Stop()
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case fn := <-s.inbox:
|
||||
// Run function in serialized order.
|
||||
fn()
|
||||
|
||||
case <-t.C:
|
||||
// Trigger the statistics routine to do its thing in the
|
||||
// background.
|
||||
statisticsTrigger <- struct{}{}
|
||||
|
||||
case <-statisticsDone:
|
||||
// The statistics routine is done with one iteration, schedule
|
||||
// the next.
|
||||
t.Reset(databaseStatisticsInterval)
|
||||
log.Println("Calculating statistics")
|
||||
s.expireAndCalculateStatistics()
|
||||
log.Println("Flushing database")
|
||||
if err := s.write(); err != nil {
|
||||
log.Println("Error writing database:", err)
|
||||
}
|
||||
log.Println("Finished flushing database")
|
||||
t.Reset(s.flushInterval)
|
||||
|
||||
case <-ctx.Done():
|
||||
// We're done.
|
||||
close(statisticsTrigger)
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
// Also wait for statisticsServe to return
|
||||
<-statisticsDone
|
||||
return s.write()
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) expireAndCalculateStatistics() {
|
||||
now := s.clock.Now()
|
||||
cutoff24h := now.Add(-24 * time.Hour).UnixNano()
|
||||
cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
current, currentIPv4, currentIPv6, currentIPv6GUA, last24h, last1w := 0, 0, 0, 0, 0, 0
|
||||
|
||||
n := 0
|
||||
s.m.Range(func(key protocol.DeviceID, rec *discosrv.DatabaseRecord) bool {
|
||||
if n%1000 == 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
n++
|
||||
|
||||
addresses := expire(rec.Addresses, now)
|
||||
if len(addresses) == 0 {
|
||||
rec.Addresses = nil
|
||||
s.m.Store(key, rec)
|
||||
} else if len(addresses) != len(rec.Addresses) {
|
||||
rec.Addresses = addresses
|
||||
s.m.Store(key, rec)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(rec.Addresses) > 0:
|
||||
current++
|
||||
seenIPv4, seenIPv6, seenIPv6GUA := false, false, false
|
||||
for _, addr := range rec.Addresses {
|
||||
// We do fast and loose matching on strings here instead of
|
||||
// parsing the address and the IP and doing "proper" checks,
|
||||
// to keep things fast and generate less garbage.
|
||||
if strings.Contains(addr.Address, "[") {
|
||||
seenIPv6 = true
|
||||
if strings.Contains(addr.Address, "[2") {
|
||||
seenIPv6GUA = true
|
||||
}
|
||||
} else {
|
||||
seenIPv4 = true
|
||||
}
|
||||
if seenIPv4 && seenIPv6 && seenIPv6GUA {
|
||||
break
|
||||
}
|
||||
}
|
||||
if seenIPv4 {
|
||||
currentIPv4++
|
||||
}
|
||||
if seenIPv6 {
|
||||
currentIPv6++
|
||||
}
|
||||
if seenIPv6GUA {
|
||||
currentIPv6GUA++
|
||||
}
|
||||
case rec.Seen > cutoff24h:
|
||||
last24h++
|
||||
case rec.Seen > cutoff1w:
|
||||
last1w++
|
||||
default:
|
||||
// drop the record if it's older than a week
|
||||
s.m.Delete(key)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
databaseKeys.WithLabelValues("current").Set(float64(current))
|
||||
databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
|
||||
databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
|
||||
databaseKeys.WithLabelValues("currentIPv6GUA").Set(float64(currentIPv6GUA))
|
||||
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
|
||||
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
|
||||
databaseStatisticsSeconds.Set(time.Since(now).Seconds())
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) write() (err error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
if err == nil {
|
||||
databaseWriteSeconds.Set(time.Since(t0).Seconds())
|
||||
databaseLastWritten.Set(float64(t0.Unix()))
|
||||
}
|
||||
}()
|
||||
|
||||
dbf := path.Join(s.dir, "records.db")
|
||||
fd, err := os.Create(dbf + ".tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bw := bufio.NewWriter(fd)
|
||||
|
||||
var buf []byte
|
||||
var rangeErr error
|
||||
now := s.clock.Now()
|
||||
cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
n := 0
|
||||
s.m.Range(func(key protocol.DeviceID, value *discosrv.DatabaseRecord) bool {
|
||||
if n%1000 == 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
n++
|
||||
|
||||
if value.Seen < cutoff1w {
|
||||
// drop the record if it's older than a week
|
||||
return true
|
||||
}
|
||||
rec := &discosrv.ReplicationRecord{
|
||||
Key: key[:],
|
||||
Addresses: value.Addresses,
|
||||
Seen: value.Seen,
|
||||
}
|
||||
s := proto.Size(rec)
|
||||
if s+4 > len(buf) {
|
||||
buf = make([]byte, s+4)
|
||||
}
|
||||
n, err := protoutil.MarshalTo(buf[4:], rec)
|
||||
if err != nil {
|
||||
rangeErr = err
|
||||
return false
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
if _, err := bw.Write(buf[:n+4]); err != nil {
|
||||
rangeErr = err
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if rangeErr != nil {
|
||||
_ = fd.Close()
|
||||
return rangeErr
|
||||
}
|
||||
|
||||
if err := bw.Flush(); err != nil {
|
||||
_ = fd.Close
|
||||
return err
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Rename(dbf+".tmp", dbf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Upload to blob storage
|
||||
if s.blobs != nil {
|
||||
fd, err = os.Open(dbf)
|
||||
if err != nil {
|
||||
log.Printf("Error uploading database to blob storage: %v", err)
|
||||
return nil
|
||||
}
|
||||
defer fd.Close()
|
||||
if err := s.blobs.Upload(context.Background(), s.objKey, fd); err != nil {
|
||||
log.Printf("Error uploading database to blob storage: %v", err)
|
||||
}
|
||||
log.Println("Finished uploading database")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
|
||||
defer close(done)
|
||||
func (s *inMemoryStore) read() (int, error) {
|
||||
fd, err := os.Open(path.Join(s.dir, "records.db"))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
for range trigger {
|
||||
t0 := time.Now()
|
||||
nowNanos := t0.UnixNano()
|
||||
cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
|
||||
cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
|
||||
current, currentIPv4, currentIPv6, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0, 0, 0
|
||||
|
||||
iter := s.db.NewIterator(&util.Range{}, nil)
|
||||
for iter.Next() {
|
||||
// Attempt to unmarshal the record and count the
|
||||
// failure if there's something wrong with it.
|
||||
var rec DatabaseRecord
|
||||
if err := rec.Unmarshal(iter.Value()); err != nil {
|
||||
errors++
|
||||
continue
|
||||
}
|
||||
|
||||
// If there are addresses that have not expired it's a current
|
||||
// record, otherwise account it based on when it was last seen
|
||||
// (last 24 hours or last week) or finally as inactive.
|
||||
addrs := expire(rec.Addresses, nowNanos)
|
||||
switch {
|
||||
case len(addrs) > 0:
|
||||
current++
|
||||
seenIPv4, seenIPv6 := false, false
|
||||
for _, addr := range addrs {
|
||||
uri, err := url.Parse(addr.Address)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
host, _, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil && ip.To4() != nil {
|
||||
seenIPv4 = true
|
||||
} else if ip != nil {
|
||||
seenIPv6 = true
|
||||
}
|
||||
if seenIPv4 && seenIPv6 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if seenIPv4 {
|
||||
currentIPv4++
|
||||
}
|
||||
if seenIPv6 {
|
||||
currentIPv6++
|
||||
}
|
||||
case rec.Seen > cutoff24h:
|
||||
last24h++
|
||||
case rec.Seen > cutoff1w:
|
||||
last1w++
|
||||
case rec.Seen > cutoff2Mon:
|
||||
inactive++
|
||||
case rec.Missed < cutoff2Mon:
|
||||
// It hasn't been seen lately and we haven't recorded
|
||||
// someone asking for this device in a long time either;
|
||||
// delete the record.
|
||||
if err := s.db.Delete(iter.Key(), nil); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResSuccess).Inc()
|
||||
}
|
||||
default:
|
||||
inactive++
|
||||
br := bufio.NewReader(fd)
|
||||
var buf []byte
|
||||
nr := 0
|
||||
for {
|
||||
var n uint32
|
||||
if err := binary.Read(br, binary.BigEndian, &n); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
return nr, err
|
||||
}
|
||||
if int(n) > len(buf) {
|
||||
buf = make([]byte, n)
|
||||
}
|
||||
if _, err := io.ReadFull(br, buf[:n]); err != nil {
|
||||
return nr, err
|
||||
}
|
||||
rec := &discosrv.ReplicationRecord{}
|
||||
if err := proto.Unmarshal(buf[:n], rec); err != nil {
|
||||
return nr, err
|
||||
}
|
||||
key, err := protocol.DeviceIDFromBytes(rec.Key)
|
||||
if err != nil {
|
||||
key, err = protocol.DeviceIDFromString(string(rec.Key))
|
||||
}
|
||||
if err != nil {
|
||||
log.Println("Bad device ID:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
iter.Release()
|
||||
|
||||
databaseKeys.WithLabelValues("current").Set(float64(current))
|
||||
databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
|
||||
databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
|
||||
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
|
||||
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
|
||||
databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
|
||||
databaseKeys.WithLabelValues("error").Set(float64(errors))
|
||||
databaseStatisticsSeconds.Set(time.Since(t0).Seconds())
|
||||
|
||||
// Signal that we are done and can be scheduled again.
|
||||
done <- struct{}{}
|
||||
slices.SortFunc(rec.Addresses, Cmp)
|
||||
rec.Addresses = slices.CompactFunc(rec.Addresses, Equal)
|
||||
s.m.Store(key, &discosrv.DatabaseRecord{
|
||||
Addresses: expire(rec.Addresses, s.clock.Now()),
|
||||
Seen: rec.Seen,
|
||||
})
|
||||
nr++
|
||||
}
|
||||
return nr, nil
|
||||
}
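On disk, each record is framed as a 4-byte big-endian length followed by that many bytes of marshalled protobuf; write produces this framing and read consumes it. A self-contained sketch of the same framing, with plain byte strings standing in for the protobuf payload:

    package main

    import (
        "bufio"
        "bytes"
        "encoding/binary"
        "errors"
        "fmt"
        "io"
    )

    func main() {
        var buf bytes.Buffer
        bw := bufio.NewWriter(&buf)
        for _, rec := range [][]byte{[]byte("first record"), []byte("second record")} {
            // 4-byte big-endian length prefix, then the payload bytes.
            binary.Write(bw, binary.BigEndian, uint32(len(rec)))
            bw.Write(rec)
        }
        bw.Flush()

        br := bufio.NewReader(&buf)
        for {
            var n uint32
            if err := binary.Read(br, binary.BigEndian, &n); errors.Is(err, io.EOF) {
                break
            }
            rec := make([]byte, n)
            io.ReadFull(br, rec)
            fmt.Printf("%s\n", rec)
        }
    }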
|
||||
|
||||
// merge returns the merged result of the two database records a and b. The
|
||||
// result is the union of the two address sets, with the newer expiry time
|
||||
// chosen for any duplicates.
|
||||
func merge(a, b DatabaseRecord) DatabaseRecord {
|
||||
// chosen for any duplicates. The address list in a is overwritten and
|
||||
// reused for the result.
|
||||
func merge(a, b *discosrv.DatabaseRecord) *discosrv.DatabaseRecord {
|
||||
// Both lists must be sorted for this to work.
|
||||
if !sort.IsSorted(databaseAddressOrder(a.Addresses)) {
|
||||
log.Println("Warning: bug: addresses not correctly sorted in merge")
|
||||
a.Addresses = sortedAddressCopy(a.Addresses)
|
||||
}
|
||||
if !sort.IsSorted(databaseAddressOrder(b.Addresses)) {
|
||||
// no warning because this is the side we read from disk and it may
|
||||
// legitimately predate correct sorting.
|
||||
b.Addresses = sortedAddressCopy(b.Addresses)
|
||||
}
|
||||
|
||||
res := DatabaseRecord{
|
||||
Addresses: make([]DatabaseAddress, 0, len(a.Addresses)+len(b.Addresses)),
|
||||
Seen: a.Seen,
|
||||
}
|
||||
if b.Seen > a.Seen {
|
||||
res.Seen = b.Seen
|
||||
}
|
||||
a.Seen = max(a.Seen, b.Seen)
|
||||
|
||||
aIdx := 0
|
||||
bIdx := 0
|
||||
aAddrs := a.Addresses
|
||||
bAddrs := b.Addresses
|
||||
loop:
|
||||
for {
|
||||
switch {
|
||||
case aIdx == len(aAddrs) && bIdx == len(bAddrs):
|
||||
// both lists are exhausted, we are done
|
||||
break loop
|
||||
|
||||
case aIdx == len(aAddrs):
|
||||
// a is exhausted, pick from b and continue
|
||||
res.Addresses = append(res.Addresses, bAddrs[bIdx])
|
||||
bIdx++
|
||||
continue
|
||||
|
||||
case bIdx == len(bAddrs):
|
||||
// b is exhausted, pick from a and continue
|
||||
res.Addresses = append(res.Addresses, aAddrs[aIdx])
|
||||
aIdx++
|
||||
continue
|
||||
}
|
||||
|
||||
// We have values left on both sides.
|
||||
aVal := aAddrs[aIdx]
|
||||
bVal := bAddrs[bIdx]
|
||||
|
||||
switch {
|
||||
case aVal.Address == bVal.Address:
|
||||
// update for same address, pick newer
|
||||
if aVal.Expires > bVal.Expires {
|
||||
res.Addresses = append(res.Addresses, aVal)
|
||||
} else {
|
||||
res.Addresses = append(res.Addresses, bVal)
|
||||
}
|
||||
for aIdx < len(a.Addresses) && bIdx < len(b.Addresses) {
|
||||
switch cmp.Compare(a.Addresses[aIdx].Address, b.Addresses[bIdx].Address) {
|
||||
case 0:
|
||||
// a == b, choose the newer expiry time
|
||||
a.Addresses[aIdx].Expires = max(a.Addresses[aIdx].Expires, b.Addresses[bIdx].Expires)
|
||||
aIdx++
|
||||
bIdx++
|
||||
|
||||
case aVal.Address < bVal.Address:
|
||||
// a is smallest, pick it and continue
|
||||
res.Addresses = append(res.Addresses, aVal)
|
||||
case -1:
|
||||
// a < b, keep a and move on
|
||||
aIdx++
|
||||
|
||||
default:
|
||||
// b is smallest, pick it and continue
|
||||
res.Addresses = append(res.Addresses, bVal)
|
||||
case 1:
|
||||
// a > b, insert b before a
|
||||
a.Addresses = append(a.Addresses[:aIdx], append([]*discosrv.DatabaseAddress{b.Addresses[bIdx]}, a.Addresses[aIdx:]...)...)
|
||||
bIdx++
|
||||
}
|
||||
}
|
||||
return res
|
||||
if bIdx < len(b.Addresses) {
|
||||
a.Addresses = append(a.Addresses, b.Addresses[bIdx:]...)
|
||||
}
|
||||
|
||||
return a
|
||||
}
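merge walks the two sorted address lists in lockstep: equal addresses keep the newer expiry, b-only addresses are spliced in before the current a element, and whatever remains of b is appended, so the result stays sorted and free of duplicates. A small standalone sketch of the same two-pointer merge on a stand-in address type (not the generated discosrv.DatabaseAddress):

    package main

    import (
        "cmp"
        "fmt"
    )

    // addr is a stand-in for discosrv.DatabaseAddress in this sketch.
    type addr struct {
        Address string
        Expires int64
    }

    // mergeAddrs follows the same two-pointer walk as merge above.
    func mergeAddrs(a, b []addr) []addr {
        ai, bi := 0, 0
        for ai < len(a) && bi < len(b) {
            switch cmp.Compare(a[ai].Address, b[bi].Address) {
            case 0:
                a[ai].Expires = max(a[ai].Expires, b[bi].Expires) // same address, keep newer expiry
                ai++
                bi++
            case -1:
                ai++ // a < b, keep a and move on
            case 1:
                // a > b, insert b before a
                a = append(a[:ai], append([]addr{b[bi]}, a[ai:]...)...)
                bi++
            }
        }
        if bi < len(b) {
            a = append(a, b[bi:]...)
        }
        return a
    }

    func main() {
        a := []addr{{"tcp://10.0.0.1:22000", 100}, {"tcp://10.0.0.3:22000", 100}}
        b := []addr{{"tcp://10.0.0.1:22000", 200}, {"tcp://10.0.0.2:22000", 150}}
        fmt.Println(mergeAddrs(a, b))
        // [{tcp://10.0.0.1:22000 200} {tcp://10.0.0.2:22000 150} {tcp://10.0.0.3:22000 100}]
    }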
|
||||
|
||||
// expire returns the list of addresses after removing expired entries.
|
||||
// Expiration happens in place, so the slice given as the parameter is
|
||||
// destroyed. Internal order is not preserved.
|
||||
func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
|
||||
i := 0
|
||||
for i < len(addrs) {
|
||||
if addrs[i].Expires < now {
|
||||
addrs = sliceutil.RemoveAndZero(addrs, i)
|
||||
// destroyed. Internal order is preserved.
|
||||
func expire(addrs []*discosrv.DatabaseAddress, now time.Time) []*discosrv.DatabaseAddress {
|
||||
cutoff := now.UnixNano()
|
||||
naddrs := addrs[:0]
|
||||
for i := range addrs {
|
||||
if i > 0 && addrs[i].Address == addrs[i-1].Address {
|
||||
// Skip duplicates
|
||||
continue
|
||||
}
|
||||
i++
|
||||
if addrs[i].Expires >= cutoff {
|
||||
naddrs = append(naddrs, addrs[i])
|
||||
}
|
||||
}
|
||||
return addrs
|
||||
if len(naddrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return naddrs
|
||||
}
|
||||
|
||||
func sortedAddressCopy(addrs []DatabaseAddress) []DatabaseAddress {
    sorted := make([]DatabaseAddress, len(addrs))
    copy(sorted, addrs)
    sort.Sort(databaseAddressOrder(sorted))
    return sorted
func Cmp(d, other *discosrv.DatabaseAddress) (n int) {
    if c := cmp.Compare(d.Address, other.Address); c != 0 {
        return c
    }
    return cmp.Compare(d.Expires, other.Expires)
}

type databaseAddressOrder []DatabaseAddress

func (s databaseAddressOrder) Less(a, b int) bool {
    return s[a].Address < s[b].Address
}

func (s databaseAddressOrder) Swap(a, b int) {
    s[a], s[b] = s[b], s[a]
}

func (s databaseAddressOrder) Len() int {
    return len(s)
func Equal(d, other *discosrv.DatabaseAddress) bool {
    return d.Address == other.Address
}
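The old sort.Interface plumbing (databaseAddressOrder with Less/Swap/Len) gives way to a plain comparator: with Go 1.21's slices.SortFunc, the Cmp function above is all that is needed to sort, and Equal covers deduplication by address. A minimal sketch; Addr and cmpAddr are illustrative stand-ins for the real types.

// Sketch: a Cmp-style comparator replaces a full sort.Interface
// implementation when used with slices.SortFunc.
package main

import (
    "cmp"
    "fmt"
    "slices"
)

type Addr struct {
    Address string
    Expires int64
}

// cmpAddr orders by address first, then by expiry, mirroring Cmp above.
func cmpAddr(a, b Addr) int {
    if c := cmp.Compare(a.Address, b.Address); c != 0 {
        return c
    }
    return cmp.Compare(a.Expires, b.Expires)
}

func main() {
    addrs := []Addr{{"c", 1}, {"a", 2}, {"a", 1}}
    slices.SortFunc(addrs, cmpAddr)
    fmt.Println(addrs) // [{a 1} {a 2} {c 1}]
}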
|
||||
|
||||
@@ -1,847 +0,0 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type DatabaseRecord struct {
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"`
|
||||
Misses int32 `protobuf:"varint,2,opt,name=misses,proto3" json:"misses,omitempty"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
Missed int64 `protobuf:"varint,4,opt,name=missed,proto3" json:"missed,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Reset() { *m = DatabaseRecord{} }
|
||||
func (m *DatabaseRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseRecord) ProtoMessage() {}
|
||||
func (*DatabaseRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{0}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseRecord.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseRecord proto.InternalMessageInfo
|
||||
|
||||
type ReplicationRecord struct {
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Reset() { *m = ReplicationRecord{} }
|
||||
func (m *ReplicationRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReplicationRecord) ProtoMessage() {}
|
||||
func (*ReplicationRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{1}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ReplicationRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReplicationRecord.Merge(m, src)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReplicationRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReplicationRecord proto.InternalMessageInfo
|
||||
|
||||
type DatabaseAddress struct {
|
||||
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
|
||||
Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Reset() { *m = DatabaseAddress{} }
|
||||
func (m *DatabaseAddress) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseAddress) ProtoMessage() {}
|
||||
func (*DatabaseAddress) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{2}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseAddress.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseAddress.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseAddress.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseAddress proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*DatabaseRecord)(nil), "main.DatabaseRecord")
|
||||
proto.RegisterType((*ReplicationRecord)(nil), "main.ReplicationRecord")
|
||||
proto.RegisterType((*DatabaseAddress)(nil), "main.DatabaseAddress")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("database.proto", fileDescriptor_b90fe3356ea5df07) }
|
||||
|
||||
var fileDescriptor_b90fe3356ea5df07 = []byte{
|
||||
// 270 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4a, 0xc4, 0x30,
|
||||
0x18, 0x85, 0x9b, 0x49, 0x1d, 0x99, 0x08, 0xa3, 0x06, 0x94, 0x20, 0x12, 0x4b, 0xdd, 0x74, 0xd5,
|
||||
0x01, 0x5d, 0xb9, 0x74, 0xd0, 0x0b, 0xe4, 0x06, 0xe9, 0xe4, 0x77, 0x08, 0x3a, 0x4d, 0x49, 0x2a,
|
||||
0xe8, 0x29, 0xf4, 0x58, 0x5d, 0xce, 0xd2, 0x95, 0x68, 0x7b, 0x11, 0x69, 0x26, 0x55, 0x14, 0x37,
|
||||
0xb3, 0x7b, 0xdf, 0xff, 0xbf, 0x97, 0xbc, 0x84, 0x4c, 0x95, 0xac, 0x65, 0x21, 0x1d, 0xe4, 0x95,
|
||||
0x35, 0xb5, 0xa1, 0xf1, 0x4a, 0xea, 0xf2, 0xe4, 0xdc, 0x42, 0x65, 0xdc, 0xcc, 0x8f, 0x8a, 0xc7,
|
||||
0xbb, 0xd9, 0xd2, 0x2c, 0x8d, 0x07, 0xaf, 0x36, 0xd6, 0xf4, 0x05, 0x91, 0xe9, 0x4d, 0x48, 0x0b,
|
||||
0x58, 0x18, 0xab, 0xe8, 0x15, 0x99, 0x48, 0xa5, 0x2c, 0x38, 0x07, 0x8e, 0xa1, 0x04, 0x67, 0x7b,
|
||||
0x17, 0x47, 0x79, 0x7f, 0x62, 0x3e, 0x18, 0xaf, 0x37, 0xeb, 0x79, 0xdc, 0xbc, 0x9f, 0x45, 0xe2,
|
||||
0xc7, 0x4d, 0x8f, 0xc9, 0x78, 0xa5, 0x7d, 0x6e, 0x94, 0xa0, 0x6c, 0x47, 0x04, 0xa2, 0x94, 0xc4,
|
||||
0x0e, 0xa0, 0x64, 0x38, 0x41, 0x19, 0x16, 0x5e, 0x7f, 0x7b, 0x15, 0x8b, 0xfd, 0x34, 0x50, 0x5a,
|
||||
0x93, 0x43, 0x01, 0xd5, 0x83, 0x5e, 0xc8, 0x5a, 0x9b, 0x32, 0x74, 0x3a, 0x20, 0xf8, 0x1e, 0x9e,
|
||||
0x19, 0x4a, 0x50, 0x36, 0x11, 0xbd, 0xfc, 0xdd, 0x72, 0xb4, 0x55, 0xcb, 0x7f, 0xda, 0xa4, 0xb7,
|
||||
0x64, 0xff, 0x4f, 0x8e, 0x32, 0xb2, 0x1b, 0x32, 0xe1, 0xde, 0x01, 0xfb, 0x0d, 0x3c, 0x55, 0xda,
|
||||
0x86, 0x77, 0x62, 0x31, 0xe0, 0xfc, 0xb4, 0xf9, 0xe4, 0x51, 0xd3, 0x72, 0xb4, 0x6e, 0x39, 0xfa,
|
||||
0x68, 0x39, 0x7a, 0xed, 0x78, 0xb4, 0xee, 0x78, 0xf4, 0xd6, 0xf1, 0xa8, 0x18, 0xfb, 0x3f, 0xbf,
|
||||
0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xa2, 0xf6, 0x1e, 0xb0, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Missed != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Missed))
|
||||
i--
|
||||
dAtA[i] = 0x20
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Misses))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
i -= len(m.Key)
|
||||
copy(dAtA[i:], m.Key)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Key)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Expires != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Expires))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Address) > 0 {
|
||||
i -= len(m.Address)
|
||||
copy(dAtA[i:], m.Address)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Address)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintDatabase(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovDatabase(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
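encodeVarintDatabase above emits standard protobuf varints, writing them backwards into a pre-sized buffer: seven payload bits per byte, with the high bit set on every byte except the last. The sketch below shows the same wire format using encoding/binary from the standard library; it is a comparison aid, not the generated code's API.

// Sketch of protobuf-style varint encoding with the standard library.
package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    var buf [binary.MaxVarintLen64]byte
    n := binary.PutUvarint(buf[:], 300)
    fmt.Printf("% x\n", buf[:n]) // ac 02: low 7 bits 0x2c with the high bit set, then 0x02

    v, _ := binary.Uvarint(buf[:n])
    fmt.Println(v) // 300
}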
|
||||
func (m *DatabaseRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Misses))
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
if m.Missed != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Missed))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Address)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if m.Expires != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Expires))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDatabase(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozDatabase(x uint64) (n int) {
|
||||
return sovDatabase(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
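sovDatabase computes a varint's encoded length as (bits.Len64(x|1)+6)/7, which is ceil(bitlen/7) with the |1 making zero occupy one byte, and sozDatabase applies the zigzag mapping first so small negative values stay short on the wire. A quick check of both against the standard encoder; the helper names here are mine, not the generated code's.

// Sketch checking the generated size formulas against encoding/binary.
package main

import (
    "encoding/binary"
    "fmt"
    "math/bits"
)

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// zigzag maps signed integers to small unsigned ones: 0,-1,1,-2,... -> 0,1,2,3,...
func zigzag(x int64) uint64 { return uint64((x << 1) ^ (x >> 63)) }

func main() {
    for _, x := range []uint64{0, 1, 127, 128, 300, 1 << 40} {
        var buf [binary.MaxVarintLen64]byte
        n := binary.PutUvarint(buf[:], x)
        fmt.Println(x, sov(x), n) // the two byte counts agree
    }
    fmt.Println(zigzag(-1), zigzag(1)) // 1 2
}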
|
||||
func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Misses", wireType)
|
||||
}
|
||||
m.Misses = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Misses |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Missed", wireType)
|
||||
}
|
||||
m.Missed = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Missed |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Address = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType)
|
||||
}
|
||||
m.Expires = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Expires |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDatabase(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupDatabase
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDatabase = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDatabase = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupDatabase = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
||||
@@ -1,36 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

syntax = "proto3";

package main;

import "repos/protobuf/gogoproto/gogo.proto";

option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_unkeyed_all) = false;
option (gogoproto.goproto_unrecognized_all) = false;
option (gogoproto.goproto_sizecache_all) = false;

message DatabaseRecord {
    repeated DatabaseAddress addresses = 1 [(gogoproto.nullable) = false];
    int32 misses = 2; // Number of lookups* without hits
    int64 seen = 3;   // Unix nanos, last device announce
    int64 missed = 4; // Unix nanos, last* failed lookup
}

// *) Not every lookup results in a write, so may not be completely accurate

message ReplicationRecord {
    string key = 1;
    repeated DatabaseAddress addresses = 2 [(gogoproto.nullable) = false];
    int64 seen = 3; // Unix nanos, last device announce
}

message DatabaseAddress {
    string address = 1;
    int64 expires = 2; // Unix nanos
}
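The schema keeps every timestamp (seen, missed, expires) as an int64 of Unix nanoseconds, and (gogoproto.nullable) = false makes the repeated addresses a slice of values rather than pointers in the generated Go. A small sketch of the nanosecond convention, using only the standard time package.

// Sketch of the Unix-nano convention used for seen/missed/expires:
// store time.Time as int64 nanoseconds and compare as plain integers.
package main

import (
    "fmt"
    "time"
)

func main() {
    expires := time.Now().Add(time.Minute).UnixNano() // int64, as in the schema
    cutoff := time.Now().UnixNano()

    fmt.Println(expires > cutoff) // true: not yet expired

    expiresAt := time.Unix(0, expires) // back to time.Time when needed
    fmt.Println(expiresAt.After(time.Unix(0, cutoff))) // true
}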
|
||||
@@ -11,29 +11,26 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func TestDatabaseGetSet(t *testing.T) {
|
||||
db, err := newMemoryLevelDBStore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db := newInMemoryStore(t.TempDir(), 0, nil)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go db.Serve(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Check missing record
|
||||
|
||||
rec, err := db.get("abcd")
|
||||
rec, err := db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Error("not found should not be an error")
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Error("addresses should be empty")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Error("missing should be zero")
|
||||
}
|
||||
|
||||
// Set up a clock
|
||||
|
||||
@@ -43,16 +40,16 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
// Put a record
|
||||
|
||||
rec.Addresses = []DatabaseAddress{
|
||||
rec.Addresses = []*discosrv.DatabaseAddress{
|
||||
{Address: "tcp://1.2.3.4:5", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.put("abcd", rec); err != nil {
|
||||
if err := db.put(&protocol.EmptyDeviceID, rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -69,16 +66,16 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
tc.wind(30 * time.Second)
|
||||
|
||||
addrs := []DatabaseAddress{
|
||||
addrs := []*discosrv.DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge("abcd", addrs, tc.Now().UnixNano()); err != nil {
|
||||
if err := db.merge(&protocol.EmptyDeviceID, addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -101,7 +98,7 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -114,40 +111,18 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
t.Error("incorrect address")
|
||||
}
|
||||
|
||||
// Put a record with misses
|
||||
|
||||
rec = DatabaseRecord{Misses: 42, Missed: tc.Now().UnixNano()}
|
||||
if err := db.put("efgh", rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("efgh")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have no addresses")
|
||||
}
|
||||
if rec.Misses != 42 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("incorrect misses")
|
||||
}
|
||||
|
||||
// Set an address
|
||||
|
||||
addrs = []DatabaseAddress{
|
||||
addrs = []*discosrv.DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge("efgh", addrs, tc.Now().UnixNano()); err != nil {
|
||||
if err := db.merge(&protocol.GlobalDeviceID, addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("efgh")
|
||||
rec, err = db.get(&protocol.GlobalDeviceID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -155,48 +130,126 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have one address")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("should have no misses")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter(t *testing.T) {
|
||||
// all cases are expired with t=10
|
||||
cases := []struct {
|
||||
a []DatabaseAddress
|
||||
b []DatabaseAddress
|
||||
a []*discosrv.DatabaseAddress
|
||||
b []*discosrv.DatabaseAddress
|
||||
}{
|
||||
{
|
||||
a: nil,
|
||||
b: nil,
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
|
||||
b: []DatabaseAddress{},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
|
||||
b: []*discosrv.DatabaseAddress{},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
|
||||
b: []DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
res := expire(tc.a, 10)
|
||||
res := expire(tc.a, time.Unix(0, 10))
|
||||
if fmt.Sprint(res) != fmt.Sprint(tc.b) {
|
||||
t.Errorf("Incorrect result %v, expected %v", res, tc.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
cases := []struct {
|
||||
a, b, res []*discosrv.DatabaseAddress
|
||||
}{
|
||||
{nil, nil, nil},
|
||||
{
|
||||
nil,
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
},
|
||||
{
|
||||
nil,
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "d", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}, {Address: "d", Expires: 10}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
rec := merge(&discosrv.DatabaseRecord{Addresses: tc.a}, &discosrv.DatabaseRecord{Addresses: tc.b})
|
||||
if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
|
||||
t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
|
||||
}
|
||||
rec = merge(&discosrv.DatabaseRecord{Addresses: tc.b}, &discosrv.DatabaseRecord{Addresses: tc.a})
|
||||
if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
|
||||
t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMergeEqual(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
ar := []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}}
|
||||
br := []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 10}}
|
||||
res := merge(&discosrv.DatabaseRecord{Addresses: ar}, &discosrv.DatabaseRecord{Addresses: br})
|
||||
if len(res.Addresses) != 2 {
|
||||
b.Fatal("wrong length")
|
||||
}
|
||||
if res.Addresses[0].Address != "a" || res.Addresses[1].Address != "b" {
|
||||
b.Fatal("wrong address")
|
||||
}
|
||||
if res.Addresses[0].Expires != 15 || res.Addresses[1].Expires != 15 {
|
||||
b.Fatal("wrong expiry")
|
||||
}
|
||||
}
|
||||
b.ReportAllocs() // should be zero per operation
|
||||
}
|
||||
|
||||
type testClock struct {
|
||||
now time.Time
|
||||
}
|
||||
|
||||
@@ -9,23 +9,26 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/thejerf/suture/v4"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/blob"
|
||||
"github.com/syncthing/syncthing/internal/blob/azureblob"
|
||||
"github.com/syncthing/syncthing/internal/blob/s3"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/thejerf/suture/v4"
|
||||
)
|
||||
|
||||
const (
@@ -39,17 +42,12 @@ const (
    errorRetryAfterSeconds = 1500
    errorRetryFuzzSeconds  = 300

    // Retry for not found is minSeconds + failures * incSeconds +
    // random(fuzz), where failures is the number of consecutive lookups
    // with no answer, up to maxSeconds. The fuzz is applied after capping
    // to maxSeconds.
    notFoundRetryMinSeconds  = 60
    notFoundRetryMaxSeconds  = 3540
    notFoundRetryIncSeconds  = 10
    notFoundRetryFuzzSeconds = 60

    // How often (in requests) we serialize the missed counter to database.
    notFoundMissesWriteInterval = 10
    // Retry for not found is notFoundRetrySeenSeconds for records we have
    // seen an announcement for (but it's not active right now) and
    // notFoundRetryUnknownSeconds for records we have never seen (or not
    // seen within the last week).
    notFoundRetryUnknownMinSeconds = 60
    notFoundRetryUnknownMaxSeconds = 3600

    httpReadTimeout  = 5 * time.Second
    httpWriteTimeout = 5 * time.Second
@@ -59,184 +57,123 @@ const (
    replicationOutboxSize = 10000
)
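The comment in the removed block spells the not-found retry out as minSeconds plus failures times incSeconds, capped at maxSeconds, with a random fuzz added after the cap. Below is a direct transcription of that formula as a helper; the function name and the use of math/rand are my own, not the server's code, and the constants are the removed values quoted above.

// Sketch of the (pre-change) not-found retry formula:
// retry = min(minSeconds + failures*incSeconds, maxSeconds) + random(fuzz).
package main

import (
    "fmt"
    "math/rand"
)

const (
    notFoundRetryMinSeconds  = 60
    notFoundRetryMaxSeconds  = 3540
    notFoundRetryIncSeconds  = 10
    notFoundRetryFuzzSeconds = 60
)

func notFoundRetryAfter(failures int) int {
    retry := notFoundRetryMinSeconds + failures*notFoundRetryIncSeconds
    if retry > notFoundRetryMaxSeconds {
        retry = notFoundRetryMaxSeconds
    }
    // Fuzz is applied after capping, so the ceiling can be slightly exceeded.
    return retry + rand.Intn(notFoundRetryFuzzSeconds)
}

func main() {
    fmt.Println(notFoundRetryAfter(0))   // 60..119
    fmt.Println(notFoundRetryAfter(500)) // 3540..3599
}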
|
||||
|
||||
// These options make the database a little more optimized for writes, at
|
||||
// the expense of some memory usage and risk of losing writes in a (system)
|
||||
// crash.
|
||||
var levelDBOptions = &opt.Options{
|
||||
NoSync: true,
|
||||
WriteBuffer: 32 << 20, // default 4<<20
|
||||
}
|
||||
|
||||
var debug = false
|
||||
|
||||
type CLI struct {
|
||||
Cert string `group:"Listen" help:"Certificate file" default:"./cert.pem" env:"DISCOVERY_CERT_FILE"`
|
||||
Key string `group:"Listen" help:"Key file" default:"./key.pem" env:"DISCOVERY_KEY_FILE"`
|
||||
HTTP bool `group:"Listen" help:"Listen on HTTP (behind an HTTPS proxy)" env:"DISCOVERY_HTTP"`
|
||||
Compression bool `group:"Listen" help:"Enable GZIP compression of responses" env:"DISCOVERY_COMPRESSION"`
|
||||
Listen string `group:"Listen" help:"Listen address" default:":8443" env:"DISCOVERY_LISTEN"`
|
||||
MetricsListen string `group:"Listen" help:"Metrics listen address" env:"DISCOVERY_METRICS_LISTEN"`
|
||||
DesiredNotFoundRate float64 `group:"Listen" help:"Desired maximum rate of not-found replies (/s)" default:"1000"`
|
||||
|
||||
DBDir string `group:"Database" help:"Database directory" default:"." env:"DISCOVERY_DB_DIR"`
|
||||
DBFlushInterval time.Duration `group:"Database" help:"Interval between database flushes" default:"5m" env:"DISCOVERY_DB_FLUSH_INTERVAL"`
|
||||
|
||||
DBS3Endpoint string `name:"db-s3-endpoint" group:"Database (S3 backup)" hidden:"true" help:"S3 endpoint for database" env:"DISCOVERY_DB_S3_ENDPOINT"`
|
||||
DBS3Region string `name:"db-s3-region" group:"Database (S3 backup)" hidden:"true" help:"S3 region for database" env:"DISCOVERY_DB_S3_REGION"`
|
||||
DBS3Bucket string `name:"db-s3-bucket" group:"Database (S3 backup)" hidden:"true" help:"S3 bucket for database" env:"DISCOVERY_DB_S3_BUCKET"`
|
||||
DBS3AccessKeyID string `name:"db-s3-access-key-id" group:"Database (S3 backup)" hidden:"true" help:"S3 access key ID for database" env:"DISCOVERY_DB_S3_ACCESS_KEY_ID"`
|
||||
DBS3SecretKey string `name:"db-s3-secret-key" group:"Database (S3 backup)" hidden:"true" help:"S3 secret key for database" env:"DISCOVERY_DB_S3_SECRET_KEY"`
|
||||
|
||||
DBAzureBlobAccount string `name:"db-azure-blob-account" env:"DISCOVERY_DB_AZUREBLOB_ACCOUNT"`
|
||||
DBAzureBlobKey string `name:"db-azure-blob-key" env:"DISCOVERY_DB_AZUREBLOB_KEY"`
|
||||
DBAzureBlobContainer string `name:"db-azure-blob-container" env:"DISCOVERY_DB_AZUREBLOB_CONTAINER"`
|
||||
|
||||
AMQPAddress string `group:"AMQP replication" hidden:"true" help:"Address to AMQP broker" env:"DISCOVERY_AMQP_ADDRESS"`
|
||||
|
||||
Debug bool `short:"d" help:"Print debug output" env:"DISCOVERY_DEBUG"`
|
||||
Version bool `short:"v" help:"Print version and exit"`
|
||||
}
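The rewrite swaps the flag package for kong: struct tags carry help text, defaults, env fallbacks, and groups, and kong.Parse fills the struct in one call. A stripped-down sketch of the same pattern with just two fields; the precedence noted in the comment (flag over env over default) reflects my understanding of kong's resolution order, not something stated in this diff.

// Minimal sketch of the kong pattern used by the CLI struct above.
package main

import (
    "fmt"

    "github.com/alecthomas/kong"
)

type CLI struct {
    Listen string `help:"Listen address" default:":8443" env:"DISCOVERY_LISTEN"`
    Debug  bool   `short:"d" help:"Print debug output" env:"DISCOVERY_DEBUG"`
}

func main() {
    var cli CLI
    kong.Parse(&cli) // command-line flag, then env var, then default
    fmt.Println(cli.Listen, cli.Debug)
}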
|
||||
|
||||
func main() {
|
||||
var listen string
|
||||
var dir string
|
||||
var metricsListen string
|
||||
var replicationListen string
|
||||
var replicationPeers string
|
||||
var certFile string
|
||||
var keyFile string
|
||||
var replCertFile string
|
||||
var replKeyFile string
|
||||
var useHTTP bool
|
||||
var largeDB bool
|
||||
var amqpAddress string
|
||||
missesIncrease := 1
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.StringVar(&certFile, "cert", "./cert.pem", "Certificate file")
|
||||
flag.StringVar(&keyFile, "key", "./key.pem", "Key file")
|
||||
flag.StringVar(&dir, "db-dir", "./discovery.db", "Database directory")
|
||||
flag.BoolVar(&debug, "debug", false, "Print debug output")
|
||||
flag.BoolVar(&useHTTP, "http", false, "Listen on HTTP (behind an HTTPS proxy)")
|
||||
flag.StringVar(&listen, "listen", ":8443", "Listen address")
|
||||
flag.StringVar(&metricsListen, "metrics-listen", "", "Metrics listen address")
|
||||
flag.StringVar(&replicationPeers, "replicate", "", "Replication peers, id@address, comma separated")
|
||||
flag.StringVar(&replicationListen, "replication-listen", ":19200", "Replication listen address")
|
||||
flag.StringVar(&replCertFile, "replication-cert", "", "Certificate file for replication")
|
||||
flag.StringVar(&replKeyFile, "replication-key", "", "Key file for replication")
|
||||
flag.BoolVar(&largeDB, "large-db", false, "Use larger database settings")
|
||||
flag.StringVar(&amqpAddress, "amqp-address", "", "Address to AMQP broker")
|
||||
flag.IntVar(&missesIncrease, "misses-increase", 1, "How many times to increase the misses counter on each miss")
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
var cli CLI
|
||||
kong.Parse(&cli)
|
||||
debug = cli.Debug
|
||||
|
||||
log.Println(build.LongVersionFor("stdiscosrv"))
|
||||
if *showVersion {
|
||||
if cli.Version {
|
||||
return
|
||||
}
|
||||
|
||||
buildInfo.WithLabelValues(build.Version, runtime.Version(), build.User, build.Date.UTC().Format("2006-01-02T15:04:05Z")).Set(1)
|
||||
|
||||
if largeDB {
|
||||
levelDBOptions.BlockCacheCapacity = 64 << 20
|
||||
levelDBOptions.BlockSize = 64 << 10
|
||||
levelDBOptions.CompactionTableSize = 16 << 20
|
||||
levelDBOptions.CompactionTableSizeMultiplier = 2.0
|
||||
levelDBOptions.WriteBuffer = 64 << 20
|
||||
levelDBOptions.CompactionL0Trigger = 8
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if os.IsNotExist(err) {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Fatalln("Failed to load keypair:", err)
|
||||
}
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
|
||||
replCert := cert
|
||||
if replCertFile != "" && replKeyFile != "" {
|
||||
replCert, err = tls.LoadX509KeyPair(replCertFile, replKeyFile)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to load replication keypair:", err)
|
||||
}
|
||||
}
|
||||
replDevID := protocol.NewDeviceID(replCert.Certificate[0])
|
||||
log.Println("Replication device ID is", replDevID)
|
||||
|
||||
// Parse the replication specs, if any.
|
||||
var allowedReplicationPeers []protocol.DeviceID
|
||||
var replicationDestinations []string
|
||||
parts := strings.Split(replicationPeers, ",")
|
||||
for _, part := range parts {
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
fields := strings.Split(part, "@")
|
||||
switch len(fields) {
|
||||
case 2:
|
||||
// This is an id@address specification. Grab the address for the
|
||||
// destination list. Try to resolve it once to catch obvious
|
||||
// syntax errors here rather than having the sender service fail
|
||||
// repeatedly later.
|
||||
_, err := net.ResolveTCPAddr("tcp", fields[1])
|
||||
var cert tls.Certificate
|
||||
if !cli.HTTP {
|
||||
var err error
|
||||
cert, err = tls.LoadX509KeyPair(cli.Cert, cli.Key)
|
||||
if os.IsNotExist(err) {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(cli.Cert, cli.Key, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Resolving address:", err)
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
replicationDestinations = append(replicationDestinations, fields[1])
|
||||
fallthrough // N.B.
|
||||
|
||||
case 1:
|
||||
// The first part is always a device ID.
|
||||
id, err := protocol.DeviceIDFromString(fields[0])
|
||||
if err != nil {
|
||||
log.Fatalln("Parsing device ID:", err)
|
||||
}
|
||||
if id == protocol.EmptyDeviceID {
|
||||
log.Fatalf("Missing device ID for peer in %q", part)
|
||||
}
|
||||
allowedReplicationPeers = append(allowedReplicationPeers, id)
|
||||
|
||||
default:
|
||||
log.Fatalln("Unrecognized replication spec:", part)
|
||||
} else if err != nil {
|
||||
log.Fatalln("Failed to load keypair:", err)
|
||||
}
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
}
|
||||
|
||||
// Root of the service tree.
|
||||
main := suture.New("main", suture.Spec{
|
||||
PassThroughPanics: true,
|
||||
Timeout: 2 * time.Minute,
|
||||
})
|
||||
|
||||
// Start the database.
|
||||
db, err := newLevelDBStore(dir)
|
||||
if err != nil {
|
||||
log.Fatalln("Open database:", err)
|
||||
// If configured, use blob storage for database backups.
|
||||
var blobs blob.Store
|
||||
var err error
|
||||
if cli.DBS3Endpoint != "" {
|
||||
blobs, err = s3.NewSession(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
|
||||
} else if cli.DBAzureBlobAccount != "" {
|
||||
blobs, err = azureblob.NewBlobStore(cli.DBAzureBlobAccount, cli.DBAzureBlobKey, cli.DBAzureBlobContainer)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create blob store: %v", err)
|
||||
}
|
||||
|
||||
// Start the database.
|
||||
db := newInMemoryStore(cli.DBDir, cli.DBFlushInterval, blobs)
|
||||
main.Add(db)
|
||||
|
||||
// Start any replication senders.
|
||||
var repl replicationMultiplexer
|
||||
for _, dst := range replicationDestinations {
|
||||
rs := newReplicationSender(dst, replCert, allowedReplicationPeers)
|
||||
main.Add(rs)
|
||||
repl = append(repl, rs)
|
||||
}
|
||||
|
||||
// If we have replication configured, start the replication listener.
|
||||
if len(allowedReplicationPeers) > 0 {
|
||||
rl := newReplicationListener(replicationListen, replCert, allowedReplicationPeers, db)
|
||||
main.Add(rl)
|
||||
}
|
||||
|
||||
// If we have an AMQP broker, start that
|
||||
if amqpAddress != "" {
|
||||
// If we have an AMQP broker for replication, start that
|
||||
var repl replicator
|
||||
if cli.AMQPAddress != "" {
|
||||
clientID := rand.String(10)
|
||||
kr := newAMQPReplicator(amqpAddress, clientID, db)
|
||||
repl = append(repl, kr)
|
||||
kr := newAMQPReplicator(cli.AMQPAddress, clientID, db)
|
||||
main.Add(kr)
|
||||
repl = kr
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range time.NewTicker(time.Second).C {
|
||||
for _, r := range repl {
|
||||
r.send("<heartbeat>", nil, time.Now().UnixNano())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Start the main API server.
|
||||
qs := newAPISrv(listen, cert, db, repl, useHTTP, missesIncrease)
|
||||
qs := newAPISrv(cli.Listen, cert, db, repl, cli.HTTP, cli.Compression, cli.DesiredNotFoundRate)
|
||||
main.Add(qs)
|
||||
|
||||
// If we have a metrics port configured, start a metrics handler.
|
||||
if metricsListen != "" {
|
||||
if cli.MetricsListen != "" {
|
||||
go func() {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(metricsListen, mux))
|
||||
log.Fatal(http.ListenAndServe(cli.MetricsListen, mux))
|
||||
}()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Cancel on signal
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, os.Interrupt)
|
||||
go func() {
|
||||
sig := <-signalChan
|
||||
log.Printf("Received signal %s; shutting down", sig)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
// Engage!
|
||||
main.Serve(context.Background())
|
||||
main.Serve(ctx)
|
||||
}
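main wires the database, replication senders/listener, and the API server into a suture/v4 supervisor: anything with a Serve(ctx) error method can be Add()ed, and the supervisor restarts it until the context ends (with panics passing through, per the Spec above). A toy sketch of that lifecycle; the ticker service is invented purely for illustration.

// Toy sketch of the suture/v4 wiring used in main above.
package main

import (
    "context"
    "log"
    "time"

    "github.com/thejerf/suture/v4"
)

type ticker struct{}

// Serve makes ticker a suture service: run until the context is done.
func (ticker) Serve(ctx context.Context) error {
    t := time.NewTicker(time.Second)
    defer t.Stop()
    for {
        select {
        case <-t.C:
            log.Println("tick")
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}

func main() {
    sup := suture.New("main", suture.Spec{Timeout: 2 * time.Minute})
    sup.Add(ticker{})

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    if err := sup.Serve(ctx); err != nil {
        log.Println("supervisor stopped:", err)
    }
}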
|
||||
|
||||
@@ -1,325 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
io "io"
|
||||
"log"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
replicationReadTimeout = time.Minute
|
||||
replicationWriteTimeout = 30 * time.Second
|
||||
replicationHeartbeatInterval = time.Second * 30
|
||||
)
|
||||
|
||||
type replicator interface {
|
||||
send(key string, addrs []DatabaseAddress, seen int64)
|
||||
}
|
||||
|
||||
// a replicationSender tries to connect to the remote address and provide
|
||||
// them with a feed of replication updates.
|
||||
type replicationSender struct {
|
||||
dst string
|
||||
cert tls.Certificate // our certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
outbox chan ReplicationRecord
|
||||
}
|
||||
|
||||
func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protocol.DeviceID) *replicationSender {
|
||||
return &replicationSender{
|
||||
dst: dst,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
outbox: make(chan ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *replicationSender) Serve(ctx context.Context) error {
|
||||
// Sleep a little at startup. Peers often restart at the same time, and
|
||||
// this avoids the service failing and entering backoff state
|
||||
// unnecessarily, while also reducing the reconnect rate to something
|
||||
// reasonable by default.
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{s.cert},
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
// Dial the TLS connection.
|
||||
conn, err := tls.Dial("tcp", s.dst, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
// The replication stream is not especially latency sensitive, but it is
|
||||
// quite a lot of data in small writes. Make it more efficient.
|
||||
if tcpc, ok := conn.NetConn().(*net.TCPConn); ok {
|
||||
_ = tcpc.SetNoDelay(false)
|
||||
}
|
||||
|
||||
// Get the other side device ID.
|
||||
remoteID, err := deviceID(conn)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify it's in the set of allowed device IDs.
|
||||
if !deviceIDIn(remoteID, s.allowedIDs) {
|
||||
log.Println("Replication connect: unexpected device ID:", remoteID)
|
||||
return err
|
||||
}
|
||||
|
||||
heartBeatTicker := time.NewTicker(replicationHeartbeatInterval)
|
||||
defer heartBeatTicker.Stop()
|
||||
|
||||
// Send records.
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
select {
|
||||
case <-heartBeatTicker.C:
|
||||
if len(s.outbox) > 0 {
|
||||
// No need to send heartbeats if there are events/previous
|
||||
// heartbeats to send, they will keep the connection alive.
|
||||
continue
|
||||
}
|
||||
// Empty replication message is the heartbeat:
|
||||
s.outbox <- ReplicationRecord{}
|
||||
|
||||
case rec := <-s.outbox:
|
||||
// Buffer must hold record plus four bytes for size
|
||||
size := rec.Size()
|
||||
if len(buf) < size+4 {
|
||||
buf = make([]byte, size+4)
|
||||
}
|
||||
|
||||
// Record comes after the four bytes size
|
||||
n, err := rec.MarshalTo(buf[4:])
|
||||
if err != nil {
|
||||
// odd to get an error here, but we haven't sent anything
|
||||
// yet so it's not fatal
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication marshal:", err)
|
||||
continue
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
|
||||
// Send
|
||||
conn.SetWriteDeadline(time.Now().Add(replicationWriteTimeout))
|
||||
if _, err := conn.Write(buf[:4+n]); err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication write:", err)
|
||||
// Yes, we are losing the replication event here.
|
||||
return err
|
||||
}
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
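Both ends of the replication link frame records the same way: a four-byte big-endian length prefix followed by the marshalled ReplicationRecord, with a zero-length frame acting as the heartbeat. Below is a sketch of that framing over any io.Reader/io.Writer; writeFrame, readFrame, and the raw byte payload are illustrative stand-ins for the real marshalling code.

// Sketch of the replication wire framing: 4-byte big-endian length,
// then the payload; a length of zero is the heartbeat.
package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

func writeFrame(w io.Writer, payload []byte) error {
    var size [4]byte
    binary.BigEndian.PutUint32(size[:], uint32(len(payload)))
    if _, err := w.Write(size[:]); err != nil {
        return err
    }
    _, err := w.Write(payload)
    return err
}

func readFrame(r io.Reader) ([]byte, error) {
    var size [4]byte
    if _, err := io.ReadFull(r, size[:]); err != nil {
        return nil, err
    }
    n := binary.BigEndian.Uint32(size[:])
    if n == 0 {
        return nil, nil // heartbeat: nothing to apply
    }
    buf := make([]byte, n)
    _, err := io.ReadFull(r, buf)
    return buf, err
}

func main() {
    var conn bytes.Buffer
    writeFrame(&conn, nil)                    // heartbeat
    writeFrame(&conn, []byte("record-bytes")) // a marshalled record would go here
    for {
        p, err := readFrame(&conn)
        if err != nil {
            break // io.EOF once the buffer is drained
        }
        fmt.Printf("frame: %q\n", p)
    }
}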
|
||||
|
||||
func (s *replicationSender) String() string {
|
||||
return fmt.Sprintf("replicationSender(%q)", s.dst)
|
||||
}
|
||||
|
||||
func (s *replicationSender) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
item := ReplicationRecord{
|
||||
Key: key,
|
||||
Addresses: ps,
|
||||
Seen: seen,
|
||||
}
|
||||
|
||||
// The send should never block. The inbox is suitably buffered for at
|
||||
// least a few seconds of stalls, which shouldn't happen in practice.
|
||||
select {
|
||||
case s.outbox <- item:
|
||||
default:
|
||||
replicationSendsTotal.WithLabelValues("drop").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// a replicationMultiplexer sends to multiple replicators
type replicationMultiplexer []replicator

func (m replicationMultiplexer) send(key string, ps []DatabaseAddress, seen int64) {
	for _, s := range m {
		// each send is nonblocking
		s.send(key, ps, seen)
	}
}

// replicationListener accepts incoming connections and reads replication
// items from them. Incoming items are applied to the KV store.
type replicationListener struct {
	addr       string
	cert       tls.Certificate
	allowedIDs []protocol.DeviceID
	db         database
}

func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []protocol.DeviceID, db database) *replicationListener {
	return &replicationListener{
		addr:       addr,
		cert:       cert,
		allowedIDs: allowedIDs,
		db:         db,
	}
}

func (l *replicationListener) Serve(ctx context.Context) error {
	tlsCfg := &tls.Config{
		Certificates:       []tls.Certificate{l.cert},
		ClientAuth:         tls.RequestClientCert,
		MinVersion:         tls.VersionTLS12,
		InsecureSkipVerify: true,
	}

	lst, err := tls.Listen("tcp", l.addr, tlsCfg)
	if err != nil {
		log.Println("Replication listen:", err)
		return err
	}
	defer lst.Close()

	for {
		select {
		case <-ctx.Done():
			return nil
		default:
		}

		// Accept a connection
		conn, err := lst.Accept()
		if err != nil {
			log.Println("Replication accept:", err)
			return err
		}

		// Figure out the other side device ID
		remoteID, err := deviceID(conn.(*tls.Conn))
		if err != nil {
			log.Println("Replication accept:", err)
			conn.SetWriteDeadline(time.Now().Add(time.Second))
			conn.Close()
			continue
		}

		// Verify it is in the set of allowed device IDs
		if !deviceIDIn(remoteID, l.allowedIDs) {
			log.Println("Replication accept: unexpected device ID:", remoteID)
			conn.SetWriteDeadline(time.Now().Add(time.Second))
			conn.Close()
			continue
		}

		go l.handle(ctx, conn)
	}
}

func (l *replicationListener) String() string {
	return fmt.Sprintf("replicationListener(%q)", l.addr)
}

func (l *replicationListener) handle(ctx context.Context, conn net.Conn) {
	defer func() {
		conn.SetWriteDeadline(time.Now().Add(time.Second))
		conn.Close()
	}()

	buf := make([]byte, 1024)

	for {
		select {
		case <-ctx.Done():
			return
		default:
		}

		conn.SetReadDeadline(time.Now().Add(replicationReadTimeout))

		// First four bytes are the size
		if _, err := io.ReadFull(conn, buf[:4]); err != nil {
			log.Println("Replication read size:", err)
			replicationRecvsTotal.WithLabelValues("error").Inc()
			return
		}

		// Read the rest of the record
		size := int(binary.BigEndian.Uint32(buf[:4]))
		if len(buf) < size {
			buf = make([]byte, size)
		}

		if size == 0 {
			// Heartbeat, ignore
			continue
		}

		if _, err := io.ReadFull(conn, buf[:size]); err != nil {
			log.Println("Replication read record:", err)
			replicationRecvsTotal.WithLabelValues("error").Inc()
			return
		}

		// Unmarshal
		var rec ReplicationRecord
		if err := rec.Unmarshal(buf[:size]); err != nil {
			log.Println("Replication unmarshal:", err)
			replicationRecvsTotal.WithLabelValues("error").Inc()
			continue
		}

		// Store
		l.db.merge(rec.Key, rec.Addresses, rec.Seen)
		replicationRecvsTotal.WithLabelValues("success").Inc()
	}
}

func deviceID(conn *tls.Conn) (protocol.DeviceID, error) {
	// Handshake may not be complete on the server side yet, which we need
	// to get the client certificate.
	if !conn.ConnectionState().HandshakeComplete {
		if err := conn.Handshake(); err != nil {
			return protocol.DeviceID{}, err
		}
	}

	// We expect exactly one certificate.
	certs := conn.ConnectionState().PeerCertificates
	if len(certs) != 1 {
		return protocol.DeviceID{}, fmt.Errorf("unexpected number of certificates (%d != 1)", len(certs))
	}

	return protocol.NewDeviceID(certs[0].Raw), nil
}

func deviceIDIn(id protocol.DeviceID, ids []protocol.DeviceID) bool {
	for _, candidate := range ids {
		if id == candidate {
			return true
		}
	}
	return false
}
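For reference, the framing both sides agree on above is a four-byte big-endian length prefix followed by the marshaled ReplicationRecord, with a zero length acting as the heartbeat. A minimal sketch of that framing, using only the standard library "encoding/binary" and "io" packages; the frame/readFrame helper names are hypothetical and not part of the source:

// frame prepends the four-byte big-endian length to an already marshaled
// record. A zero-length payload is the heartbeat.
func frame(payload []byte) []byte {
	buf := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(buf, uint32(len(payload)))
	copy(buf[4:], payload)
	return buf
}

// readFrame reads one length-prefixed record from r; a nil payload with a nil
// error means a heartbeat was received.
func readFrame(r io.Reader) ([]byte, error) {
	var hdr [4]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, err
	}
	size := binary.BigEndian.Uint32(hdr[:])
	if size == 0 {
		return nil, nil // heartbeat
	}
	payload := make([]byte, size)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}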
@@ -96,13 +96,28 @@ var (
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	}, []string{"operation"})

	retryAfterHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "syncthing",
		Subsystem: "discovery",
		Name:      "retry_after_seconds",
		Help:      "Retry-After header value in seconds.",
		Buckets:   prometheus.ExponentialBuckets(60, 2, 7), // 60, 120, 240, 480, 960, 1920, 3840
	})
	databaseWriteSeconds = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "syncthing",
			Subsystem: "discovery",
			Name:      "database_write_seconds",
			Help:      "Time spent writing the database.",
		})
	databaseLastWritten = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "syncthing",
			Subsystem: "discovery",
			Name:      "database_last_written",
			Help:      "Timestamp of the last successful database write.",
		})

	retryAfterLevel = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "syncthing",
			Subsystem: "discovery",
			Name:      "retry_after_seconds",
			Help:      "Retry-After header value in seconds.",
		}, []string{"name"})
)

const (
@@ -123,5 +138,6 @@ func init() {
	replicationSendsTotal, replicationRecvsTotal,
	databaseKeys, databaseStatisticsSeconds,
	databaseOperations, databaseOperationSeconds,
	retryAfterHistogram)
	databaseWriteSeconds, databaseLastWritten,
	retryAfterLevel)
}

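The two new gauges are plain Prometheus gauges, so updating them around a database flush would look roughly like the sketch below; the write() helper and its call site are assumptions for illustration, not code from this change:

	// Hypothetical call site: record duration and completion time of a write.
	t0 := time.Now()
	if err := write(); err == nil {
		databaseWriteSeconds.Set(time.Since(t0).Seconds())
		databaseLastWritten.Set(float64(time.Now().Unix()))
	}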
@@ -12,9 +12,8 @@ import (
|
||||
"time"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -185,7 +184,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config, token strin
|
||||
continue
|
||||
}
|
||||
// requestedPeer is the server, id is the client
|
||||
ses := newSession(requestedPeer, id, sessionLimiter, globalLimiter)
|
||||
ses := newSession(requestedPeer, id, sessionLimitBps, globalLimiter)
|
||||
|
||||
go ses.Serve()
|
||||
|
||||
|
||||
@@ -19,6 +19,8 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -30,7 +32,6 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
_ "github.com/syncthing/syncthing/lib/upnp"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -50,7 +51,6 @@ var (
|
||||
globalLimitBps int
|
||||
overLimit atomic.Bool
|
||||
descriptorLimit int64
|
||||
sessionLimiter *rate.Limiter
|
||||
globalLimiter *rate.Limiter
|
||||
networkBufferSize int
|
||||
|
||||
@@ -227,9 +227,6 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
if sessionLimitBps > 0 {
|
||||
sessionLimiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
|
||||
}
|
||||
if globalLimitBps > 0 {
|
||||
globalLimiter = rate.NewLimiter(rate.Limit(globalLimitBps), 2*globalLimitBps)
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ var (
|
||||
bytesProxied atomic.Int64
|
||||
)
|
||||
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionLimitBps int, globalRateLimit *rate.Limiter) *session {
|
||||
serverkey := make([]byte, 32)
|
||||
_, err := rand.Read(serverkey)
|
||||
if err != nil {
|
||||
@@ -40,12 +40,17 @@ func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit,
|
||||
return nil
|
||||
}
|
||||
|
||||
var sessionRateLimit *rate.Limiter
|
||||
if sessionLimitBps > 0 {
|
||||
sessionRateLimit = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
|
||||
}
|
||||
ses := &session{
|
||||
serverkey: serverkey,
|
||||
serverid: serverid,
|
||||
clientkey: clientkey,
|
||||
clientid: clientid,
|
||||
rateLimit: makeRateLimitFunc(sessionRateLimit, globalRateLimit),
|
||||
limiter: sessionRateLimit,
|
||||
connsChan: make(chan net.Conn),
|
||||
conns: make([]net.Conn, 0, 2),
|
||||
}
|
||||
@@ -68,7 +73,6 @@ func findSession(key string) *session {
|
||||
ses, ok := pendingSessions[key]
|
||||
if !ok {
|
||||
return nil
|
||||
|
||||
}
|
||||
delete(pendingSessions, key)
|
||||
return ses
|
||||
@@ -110,6 +114,7 @@ type session struct {
|
||||
clientid syncthingprotocol.DeviceID
|
||||
|
||||
rateLimit func(bytes int)
|
||||
limiter *rate.Limiter
|
||||
|
||||
connsChan chan net.Conn
|
||||
conns []net.Conn
|
||||
|
||||
@@ -1,171 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
type cli struct {
|
||||
Listen string `default:":8080" help:"Listen address"`
|
||||
MetricsListen string `default:":8081" help:"Listen address for metrics"`
|
||||
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
|
||||
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
|
||||
CacheTime time.Duration `default:"15m" help:"Cache time"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
if err := server(¶ms); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func server(params *cli) error {
|
||||
if params.MetricsListen != "" {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
go func() {
|
||||
log.Println("Listening for metrics on", params.MetricsListen)
|
||||
if err := http.ListenAndServe(params.MetricsListen, mux); err != nil {
|
||||
log.Fatalf("Failed to start metrics server: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime))
|
||||
|
||||
for _, fwd := range params.Forward {
|
||||
path, url, ok := strings.Cut(fwd, "->")
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid forward: %q", fwd)
|
||||
}
|
||||
log.Println("Forwarding", path, "to", url)
|
||||
mux.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: params.Listen,
|
||||
Handler: mux,
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
srv.SetKeepAlivesEnabled(false)
|
||||
return srv.ListenAndServe()
|
||||
}
|
||||
|
||||
type githubReleases struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *githubReleases) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
rels := upgrade.FetchLatestReleases(p.url, "")
|
||||
if rels == nil {
|
||||
http.Error(w, "no releases", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
// Move the URL used for browser downloads to the URL field, and remove
|
||||
// the browser URL field. This avoids going via the GitHub API for
|
||||
// downloads, since Syncthing uses the URL field.
|
||||
for _, rel := range rels {
|
||||
for j, asset := range rel.Assets {
|
||||
rel.Assets[j].URL = asset.BrowserURL
|
||||
rel.Assets[j].BrowserURL = ""
|
||||
}
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_ = json.NewEncoder(buf).Encode(rels)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
w.Header().Set("Content-Type", ct)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
}
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
if strings.HasPrefix(ct, "application/json") {
|
||||
// Special JSON handling; clean it up a bit.
|
||||
var v interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
} else {
|
||||
_, _ = io.Copy(w, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// filterForLatest returns the latest stable and prerelease only. If the
|
||||
// stable version is newer (comes first in the list) there is no need to go
|
||||
// looking for a prerelease at all.
|
||||
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
|
||||
var filtered []upgrade.Release
|
||||
var havePre bool
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease {
|
||||
// We found a stable version, we're good now.
|
||||
filtered = append(filtered, rel)
|
||||
break
|
||||
}
|
||||
if rel.Prerelease && !havePre {
|
||||
// We remember the first prerelease we find.
|
||||
filtered = append(filtered, rel)
|
||||
havePre = true
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
@@ -11,6 +11,10 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/internal/gen/dbproto"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
@@ -33,19 +37,19 @@ func indexDump() error {
|
||||
name := nulString(key[1+4+4:])
|
||||
fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
var f bep.FileInfo
|
||||
err := proto.Unmarshal(it.Value(), &f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
fmt.Printf(" V:%v\n", &f)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
flv.Unmarshal(it.Value())
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)
|
||||
var flv dbproto.VersionList
|
||||
proto.Unmarshal(it.Value(), &flv)
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, &flv)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
@@ -94,11 +98,11 @@ func indexDump() error {
|
||||
case db.KeyTypeFolderMeta:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
fmt.Printf("[foldermeta] F:%d", folder)
|
||||
var cs db.CountsSet
|
||||
if err := cs.Unmarshal(it.Value()); err != nil {
|
||||
var cs dbproto.CountsSet
|
||||
if err := proto.Unmarshal(it.Value(), &cs); err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", cs)
|
||||
fmt.Printf(" V:%v\n", &cs)
|
||||
}
|
||||
|
||||
case db.KeyTypeMiscData:
|
||||
@@ -125,20 +129,20 @@ func indexDump() error {
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
fmt.Printf("[version] H:%x", key[1:])
|
||||
var v protocol.Vector
|
||||
err := v.Unmarshal(it.Value())
|
||||
var v bep.Vector
|
||||
err := proto.Unmarshal(it.Value(), &v)
|
||||
if err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", v)
|
||||
fmt.Printf(" V:%v\n", &v)
|
||||
}
|
||||
|
||||
case db.KeyTypePendingFolder:
|
||||
device := binary.BigEndian.Uint32(key[1:])
|
||||
folder := string(key[5:])
|
||||
var of db.ObservedFolder
|
||||
of.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, of)
|
||||
var of dbproto.ObservedFolder
|
||||
proto.Unmarshal(it.Value(), &of)
|
||||
fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, &of)
|
||||
|
||||
case db.KeyTypePendingDevice:
|
||||
device := "<invalid>"
|
||||
@@ -146,9 +150,9 @@ func indexDump() error {
|
||||
if err == nil {
|
||||
device = dev.String()
|
||||
}
|
||||
var od db.ObservedDevice
|
||||
od.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingDevice] D:%v V:%v\n", device, od)
|
||||
var od dbproto.ObservedDevice
|
||||
proto.Unmarshal(it.Value(), &od)
|
||||
fmt.Printf("[pendingDevice] D:%v V:%v\n", device, &od)
|
||||
|
||||
default:
|
||||
fmt.Printf("[??? %d]\n %x\n %x\n", key[0], key, it.Value())
|
||||
|
||||
@@ -7,9 +7,10 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
)
|
||||
@@ -77,8 +78,8 @@ func indexDumpSize() error {
|
||||
elems = append(elems, ele)
|
||||
}
|
||||
|
||||
sort.Slice(elems, func(i, j int) bool {
|
||||
return elems[i].size > elems[j].size
|
||||
slices.SortFunc(elems, func(a, b sizedElement) int {
|
||||
return cmp.Compare(b.size, a.size)
|
||||
})
|
||||
for _, ele := range elems {
|
||||
fmt.Println(ele.key, ele.size)
|
||||
|
||||
@@ -8,11 +8,16 @@ package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/internal/gen/dbproto"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
@@ -42,12 +47,12 @@ func indexCheck() (err error) {
|
||||
folders := make(map[uint32]string)
|
||||
devices := make(map[uint32]string)
|
||||
deviceToIDs := make(map[string]uint32)
|
||||
fileInfos := make(map[fileInfoKey]protocol.FileInfo)
|
||||
globals := make(map[globalKey]db.VersionList)
|
||||
fileInfos := make(map[fileInfoKey]*bep.FileInfo)
|
||||
globals := make(map[globalKey]*dbproto.VersionList)
|
||||
sequences := make(map[sequenceKey]string)
|
||||
needs := make(map[globalKey]struct{})
|
||||
blocklists := make(map[string]struct{})
|
||||
versions := make(map[string]protocol.Vector)
|
||||
versions := make(map[string]*bep.Vector)
|
||||
usedBlocklists := make(map[string]struct{})
|
||||
usedVersions := make(map[string]struct{})
|
||||
var localDeviceKey uint32
|
||||
@@ -74,26 +79,26 @@ func indexCheck() (err error) {
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
var f bep.FileInfo
|
||||
err := proto.Unmarshal(it.Value(), &f)
|
||||
if err != nil {
|
||||
fmt.Println("Unable to unmarshal FileInfo:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
fileInfos[fileInfoKey{folder, device, name}] = f
|
||||
fileInfos[fileInfoKey{folder, device, name}] = &f
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
if err := flv.Unmarshal(it.Value()); err != nil {
|
||||
var flv dbproto.VersionList
|
||||
if err := proto.Unmarshal(it.Value(), &flv); err != nil {
|
||||
fmt.Println("Unable to unmarshal VersionList:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
globals[globalKey{folder, name}] = flv
|
||||
globals[globalKey{folder, name}] = &flv
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
@@ -124,13 +129,13 @@ func indexCheck() (err error) {
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
hash := string(key[1:])
|
||||
var v protocol.Vector
|
||||
if err := v.Unmarshal(it.Value()); err != nil {
|
||||
var v bep.Vector
|
||||
if err := proto.Unmarshal(it.Value(), &v); err != nil {
|
||||
fmt.Println("Unable to unmarshal Vector:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
versions[hash] = v
|
||||
versions[hash] = &v
|
||||
}
|
||||
}
|
||||
|
||||
@@ -203,11 +208,11 @@ func indexCheck() (err error) {
|
||||
|
||||
// Aggregate the ranges of missing sequence entries, print them
|
||||
|
||||
sort.Slice(missingSeq, func(a, b int) bool {
|
||||
if missingSeq[a].folder != missingSeq[b].folder {
|
||||
return missingSeq[a].folder < missingSeq[b].folder
|
||||
slices.SortFunc(missingSeq, func(a, b sequenceKey) int {
|
||||
if a.folder != b.folder {
|
||||
return cmp.Compare(a.folder, b.folder)
|
||||
}
|
||||
return missingSeq[a].sequence < missingSeq[b].sequence
|
||||
return cmp.Compare(a.sequence, b.sequence)
|
||||
})
|
||||
|
||||
var folder uint32
|
||||
@@ -248,25 +253,27 @@ func indexCheck() (err error) {
|
||||
if fi.VersionHash != nil {
|
||||
fiv = versions[string(fi.VersionHash)]
|
||||
}
|
||||
if !fiv.Equal(version) {
|
||||
if !protocol.VectorFromWire(fiv).Equal(version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, version, fi.Version)
|
||||
success = false
|
||||
}
|
||||
if fi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, fi.IsInvalid())
|
||||
ffi := protocol.FileInfoFromDB(fi)
|
||||
if ffi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, ffi.IsInvalid())
|
||||
success = false
|
||||
}
|
||||
if fi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, fi.IsDeleted())
|
||||
if ffi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, ffi.IsDeleted())
|
||||
success = false
|
||||
}
|
||||
}
|
||||
for i, fv := range vl.RawVersions {
|
||||
for i, fv := range vl.Versions {
|
||||
ver := protocol.VectorFromWire(fv.Version)
|
||||
for _, device := range fv.Devices {
|
||||
checkGlobal(i, device, fv.Version, false, fv.Deleted)
|
||||
checkGlobal(i, device, ver, false, fv.Deleted)
|
||||
}
|
||||
for _, device := range fv.InvalidDevices {
|
||||
checkGlobal(i, device, fv.Version, true, fv.Deleted)
|
||||
checkGlobal(i, device, ver, true, fv.Deleted)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -276,10 +283,10 @@ func indexCheck() (err error) {
|
||||
if needsLocally(vl) {
|
||||
_, ok := needs[gk]
|
||||
if !ok {
|
||||
fv, _ := vl.GetGlobal()
|
||||
devB, _ := fv.FirstDevice()
|
||||
fv, _ := vlGetGlobal(vl)
|
||||
devB, _ := fvFirstDevice(fv)
|
||||
dev := deviceToIDs[string(devB)]
|
||||
fi := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
fi := protocol.FileInfoFromDB(fileInfos[fileInfoKey{gk.folder, dev, gk.name}])
|
||||
if !fi.IsDeleted() && !fi.IsIgnored() {
|
||||
fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
|
||||
}
|
||||
@@ -345,11 +352,84 @@ func indexCheck() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func needsLocally(vl db.VersionList) bool {
|
||||
gfv, gok := vl.GetGlobal()
|
||||
func needsLocally(vl *dbproto.VersionList) bool {
|
||||
gfv, gok := vlGetGlobal(vl)
|
||||
if !gok { // That's weird, but we hardly need something non-existent
|
||||
return false
|
||||
}
|
||||
fv, ok := vl.Get(protocol.LocalDeviceID[:])
|
||||
return db.Need(gfv, ok, fv.Version)
|
||||
fv, ok := vlGet(vl, protocol.LocalDeviceID[:])
|
||||
return db.Need(gfv, ok, protocol.VectorFromWire(fv.Version))
|
||||
}
|
||||
|
||||
// vlGet returns a FileVersion that contains the given device and whether it has
|
||||
// been found at all.
|
||||
func vlGet(vl *dbproto.VersionList, device []byte) (*dbproto.FileVersion, bool) {
|
||||
_, i, _, ok := vlFindDevice(vl, device)
|
||||
if !ok {
|
||||
return &dbproto.FileVersion{}, false
|
||||
}
|
||||
return vl.Versions[i], true
|
||||
}
|
||||
|
||||
// vlGetGlobal returns the current global FileVersion. The returned FileVersion
|
||||
// may be invalid, if all FileVersions are invalid. Returns false only if
|
||||
// VersionList is empty.
|
||||
func vlGetGlobal(vl *dbproto.VersionList) (*dbproto.FileVersion, bool) {
|
||||
i := vlFindGlobal(vl)
|
||||
if i == -1 {
|
||||
return nil, false
|
||||
}
|
||||
return vl.Versions[i], true
|
||||
}
|
||||
|
||||
// vlFindGlobal returns the first version that isn't invalid, or, if all versions
// are invalid, just the first version (i.e. 0), or -1 if there are no versions at all.
|
||||
func vlFindGlobal(vl *dbproto.VersionList) int {
|
||||
for i := range vl.Versions {
|
||||
if !fvIsInvalid(vl.Versions[i]) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
if len(vl.Versions) == 0 {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// vlFindDevice returns whether the device is in InvalidVersions or Versions and
|
||||
// in InvalidDevices or Devices (true for invalid), the positions in the version
|
||||
// and device slices and whether it has been found at all.
|
||||
func vlFindDevice(vl *dbproto.VersionList, device []byte) (bool, int, int, bool) {
|
||||
for i, v := range vl.Versions {
|
||||
if j := deviceIndex(v.Devices, device); j != -1 {
|
||||
return false, i, j, true
|
||||
}
|
||||
if j := deviceIndex(v.InvalidDevices, device); j != -1 {
|
||||
return true, i, j, true
|
||||
}
|
||||
}
|
||||
return false, -1, -1, false
|
||||
}
|
||||
|
||||
func deviceIndex(devices [][]byte, device []byte) int {
|
||||
for i, dev := range devices {
|
||||
if bytes.Equal(device, dev) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func fvFirstDevice(fv *dbproto.FileVersion) ([]byte, bool) {
|
||||
if len(fv.Devices) != 0 {
|
||||
return fv.Devices[0], true
|
||||
}
|
||||
if len(fv.InvalidDevices) != 0 {
|
||||
return fv.InvalidDevices[0], true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func fvIsInvalid(fv *dbproto.FileVersion) bool {
|
||||
return fv == nil || len(fv.Devices) == 0
|
||||
}
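A small usage sketch of the helpers above, chaining vlGetGlobal and fvFirstDevice the same way indexCheck resolves the device holding the global version; purely illustrative, the vl value is assumed to be a *dbproto.VersionList from the iteration:

	if fv, ok := vlGetGlobal(vl); ok {
		if dev, ok := fvFirstDevice(fv); ok {
			fmt.Printf("global version held by device %x\n", dev)
		}
	}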
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
"github.com/kballard/go-shellquote"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -67,7 +67,7 @@ func (*stdinCommand) Run() error {
|
||||
fmt.Println("Reading commands from stdin...", args)
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
input, err := shellquote.Split(scanner.Text())
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing input: %w", err)
|
||||
}
|
||||
|
||||
@@ -9,15 +9,14 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -38,7 +37,9 @@ func uploadPanicLogs(ctx context.Context, urlBase, dir string) {
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(files)))
|
||||
slices.SortFunc(files, func(a, b string) int {
|
||||
return strings.Compare(b, a)
|
||||
})
|
||||
for _, file := range files {
|
||||
if strings.Contains(file, ".reported.") {
|
||||
// We've already sent this file. It'll be cleaned out at some
|
||||
|
||||
@@ -10,6 +10,4 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
)
|
||||
var l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
|
||||
@@ -17,6 +17,9 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
@@ -280,10 +283,11 @@ func loadEncryptedFileInfo(fd fs.File) (*protocol.FileInfo, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encFi protocol.FileInfo
|
||||
if err := encFi.Unmarshal(trailer); err != nil {
|
||||
var encFi bep.FileInfo
|
||||
if err := proto.Unmarshal(trailer, &encFi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi := protocol.FileInfoFromWire(&encFi)
|
||||
|
||||
return &encFi, nil
|
||||
return &fi, nil
|
||||
}
|
||||
|
||||
@@ -23,14 +23,14 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime/pprof"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/gofrs/flock"
|
||||
"github.com/thejerf/suture/v4"
|
||||
"github.com/willabides/kongplete"
|
||||
|
||||
@@ -38,10 +38,10 @@ import (
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/decrypt"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/generate"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/dialer"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
@@ -92,11 +92,6 @@ above.
|
||||
STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STHASHING Select the SHA256 hashing package to use. Possible values
|
||||
are "standard" for the Go standard library implementation,
|
||||
"minio" for the github.com/minio/sha256-simd implementation,
|
||||
and blank (the default) for auto detection.
|
||||
|
||||
STVERSIONEXTRA Add extra information to the version string in logs and the
|
||||
version line in the GUI. Can be set to the name of a wrapper
|
||||
or tool controlling syncthing to communicate this to the end
|
||||
@@ -354,10 +349,12 @@ func (options serveOptions) Run() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure that our home directory exists.
|
||||
if err := syncthing.EnsureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0o700); err != nil {
|
||||
l.Warnln("Failure on home directory:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
// Ensure that our config and data directories exist.
|
||||
for _, loc := range []locations.BaseDirEnum{locations.ConfigBaseDir, locations.DataBaseDir} {
|
||||
if err := syncthing.EnsureDir(locations.GetBaseDir(loc), 0o700); err != nil {
|
||||
l.Warnln("Failed to ensure directory exists:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
}
|
||||
|
||||
if options.UpgradeTo != "" {
|
||||
@@ -381,15 +378,18 @@ func (options serveOptions) Run() error {
|
||||
if options.Upgrade {
|
||||
release, err := checkUpgrade()
|
||||
if err == nil {
|
||||
// Use leveldb database locks to protect against concurrent upgrades
|
||||
var ldb backend.Backend
|
||||
ldb, err = syncthing.OpenDBBackend(locations.Get(locations.Database), config.TuningAuto)
|
||||
lf := flock.New(locations.Get(locations.LockFile))
|
||||
locked, err := lf.TryLock()
|
||||
if err != nil {
|
||||
l.Warnln("Upgrade:", err)
|
||||
os.Exit(1)
|
||||
} else if locked {
|
||||
err = upgradeViaRest()
|
||||
} else {
|
||||
_ = ldb.Close()
|
||||
err = upgrade.To(release)
|
||||
}
|
||||
_ = lf.Unlock()
|
||||
_ = os.Remove(locations.Get(locations.LockFile))
|
||||
}
|
||||
if err != nil {
|
||||
l.Warnln("Upgrade:", err)
|
||||
@@ -443,7 +443,7 @@ func debugFacilities() string {
|
||||
maxLen = len(name)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
slices.Sort(names)
|
||||
|
||||
// Format the choices
|
||||
b := new(bytes.Buffer)
|
||||
@@ -549,6 +549,17 @@ func syncthingMain(options serveOptions) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Ensure we are the only running instance
|
||||
lf := flock.New(locations.Get(locations.LockFile))
|
||||
locked, err := lf.TryLock()
|
||||
if err != nil {
|
||||
l.Warnln("Failed to acquire lock:", err)
|
||||
os.Exit(1)
|
||||
} else if !locked {
|
||||
l.Warnln("Failed to acquire lock: is another Syncthing instance already running?")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@@ -567,6 +578,7 @@ func syncthingMain(options serveOptions) {
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
earlyService.Add(cfgWrapper)
|
||||
config.RegisterInfoMetrics(cfgWrapper)
|
||||
|
||||
// Candidate builds should auto upgrade. Make sure the option is set,
|
||||
// unless we are in a build where it's disabled or the STNOUPGRADE
|
||||
@@ -629,9 +641,21 @@ func syncthingMain(options serveOptions) {
|
||||
DBRecheckInterval: options.DebugDBRecheckInterval,
|
||||
DBIndirectGCInterval: options.DebugDBIndirectGCInterval,
|
||||
}
|
||||
if options.Audit {
|
||||
appOpts.AuditWriter = auditWriter(options.AuditFile)
|
||||
|
||||
if options.Audit || cfgWrapper.Options().AuditEnabled {
|
||||
l.Infoln("Auditing is enabled.")
|
||||
|
||||
auditFile := cfgWrapper.Options().AuditFile
|
||||
|
||||
// Ignore config option if command-line option is set
|
||||
if options.AuditFile != "" {
|
||||
l.Debugln("Using the audit file from the command-line parameter.")
|
||||
auditFile = options.AuditFile
|
||||
}
|
||||
|
||||
appOpts.AuditWriter = auditWriter(auditFile)
|
||||
}
|
||||
|
||||
if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil {
|
||||
appOpts.DBRecheckInterval = dur
|
||||
}
|
||||
@@ -685,6 +709,10 @@ func syncthingMain(options serveOptions) {
|
||||
pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
// Best-effort removal of the lockfile; it doesn't matter if it succeeds
|
||||
_ = lf.Unlock()
|
||||
_ = os.Remove(locations.Get(locations.LockFile))
|
||||
|
||||
os.Exit(int(status))
|
||||
}
|
||||
|
||||
@@ -835,6 +863,10 @@ func initialAutoUpgradeCheck(misc *db.NamespacedKV) (upgrade.Release, error) {
|
||||
if err != nil {
|
||||
return upgrade.Release{}, err
|
||||
}
|
||||
if upgrade.CompareVersions(release.Tag, build.Version) == upgrade.MajorNewer {
|
||||
return upgrade.Release{}, errors.New("higher major version")
|
||||
}
|
||||
|
||||
if lastVersion, ok, err := misc.String(upgradeVersionKey); err == nil && ok && lastVersion == release.Tag {
|
||||
// Only check time if we try to upgrade to the same release.
|
||||
if lastTime, ok, err := misc.Time(upgradeTimeKey); err == nil && ok && time.Since(lastTime) < upgradeRetryInterval {
|
||||
|
||||
@@ -64,7 +64,7 @@ func monitorMain(options serveOptions) {
|
||||
fileDst, err = open(logFile)
|
||||
}
|
||||
if err != nil {
|
||||
l.Warnln("Failed to setup logging to file, proceeding with logging to stdout only:", err)
|
||||
l.Warnln("Failed to set up logging to file, proceeding with logging to stdout only:", err)
|
||||
} else {
|
||||
if build.IsWindows {
|
||||
// Translate line breaks to Windows standard
|
||||
@@ -238,35 +238,18 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
|
||||
return
|
||||
}
|
||||
|
||||
if panicFd == nil {
|
||||
dst.Write([]byte(line))
|
||||
dst.Write([]byte(line))
|
||||
|
||||
if strings.Contains(line, "SIGILL") {
|
||||
l.Warnln(`
|
||||
*******************************************************************************
|
||||
* Crash due to illegal instruction detected. This is most likely due to a CPU *
|
||||
* incompatibility with the high performance hashing package. Switching to the *
|
||||
* standard hashing package instead. Please report this issue at: *
|
||||
* *
|
||||
* https://github.com/syncthing/syncthing/issues *
|
||||
* *
|
||||
* Include the details of your CPU. *
|
||||
*******************************************************************************
|
||||
`)
|
||||
os.Setenv("STHASHING", "standard")
|
||||
return
|
||||
if panicFd == nil && (strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:")) {
|
||||
panicFd, err = os.Create(locations.GetTimestamped(locations.PanicLog))
|
||||
if err != nil {
|
||||
l.Warnln("Create panic log:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
|
||||
panicFd, err = os.Create(locations.GetTimestamped(locations.PanicLog))
|
||||
if err != nil {
|
||||
l.Warnln("Create panic log:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
|
||||
if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
|
||||
l.Warnln(`
|
||||
l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
|
||||
if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
|
||||
l.Warnln(`
|
||||
*********************************************************************************
|
||||
* Crash due to corrupt database. *
|
||||
* *
|
||||
@@ -279,22 +262,21 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
|
||||
* https://docs.syncthing.net/users/faq.html#my-syncthing-database-is-corrupt *
|
||||
*********************************************************************************
|
||||
`)
|
||||
} else {
|
||||
l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
|
||||
l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
|
||||
}
|
||||
|
||||
stdoutMut.Lock()
|
||||
for _, line := range stdoutFirstLines {
|
||||
panicFd.WriteString(line)
|
||||
}
|
||||
panicFd.WriteString("...\n")
|
||||
for _, line := range stdoutLastLines {
|
||||
panicFd.WriteString(line)
|
||||
}
|
||||
stdoutMut.Unlock()
|
||||
} else {
|
||||
l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
|
||||
l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
|
||||
}
|
||||
|
||||
stdoutMut.Lock()
|
||||
for _, line := range stdoutFirstLines {
|
||||
panicFd.WriteString(line)
|
||||
}
|
||||
panicFd.WriteString("...\n")
|
||||
for _, line := range stdoutLastLines {
|
||||
panicFd.WriteString(line)
|
||||
}
|
||||
stdoutMut.Unlock()
|
||||
|
||||
panicFd.WriteString("Panic at " + time.Now().Format(time.RFC3339) + "\n")
|
||||
}
|
||||
|
||||
|
||||
@@ -140,7 +140,7 @@ func checkNotExist(t *testing.T, name string) {
|
||||
func TestAutoClosedFile(t *testing.T) {
|
||||
os.RemoveAll("_autoclose")
|
||||
defer os.RemoveAll("_autoclose")
|
||||
os.Mkdir("_autoclose", 0755)
|
||||
os.Mkdir("_autoclose", 0o755)
|
||||
file := filepath.FromSlash("_autoclose/tmp")
|
||||
data := []byte("hello, world\n")
|
||||
|
||||
|
||||
@@ -1,226 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package aggregate
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
DBConn string `env:"UR_DB_URL" default:"postgres://user:password@localhost/ur?sslmode=disable"`
|
||||
}
|
||||
|
||||
func (cli *CLI) Run() error {
|
||||
log.SetFlags(log.Ltime | log.Ldate)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
db, err := sql.Open("postgres", cli.DBConn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("database: %w", err)
|
||||
}
|
||||
err = setupDB(db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("database: %w", err)
|
||||
}
|
||||
|
||||
for {
|
||||
runAggregation(db)
|
||||
// Sleep until one minute past next midnight
|
||||
sleepUntilNext(24*time.Hour, 1*time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func runAggregation(db *sql.DB) {
|
||||
since := maxIndexedDay(db, "VersionSummary")
|
||||
log.Println("Aggregating VersionSummary data since", since)
|
||||
rows, err := aggregateVersionSummary(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
since = maxIndexedDay(db, "Performance")
|
||||
log.Println("Aggregating Performance data since", since)
|
||||
rows, err = aggregatePerformance(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
since = maxIndexedDay(db, "BlockStats")
|
||||
log.Println("Aggregating BlockStats data since", since)
|
||||
rows, err = aggregateBlockStats(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
}
|
||||
|
||||
func sleepUntilNext(intv, margin time.Duration) {
|
||||
now := time.Now().UTC()
|
||||
next := now.Truncate(intv).Add(intv).Add(margin)
|
||||
log.Println("Sleeping until", next)
|
||||
time.Sleep(next.Sub(now))
|
||||
}
|
||||
|
||||
func setupDB(db *sql.DB) error {
|
||||
_, err := db.Exec(`CREATE TABLE IF NOT EXISTS VersionSummary (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
Version VARCHAR(8) NOT NULL,
|
||||
Count INTEGER NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Performance (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
TotFiles INTEGER NOT NULL,
|
||||
TotMiB INTEGER NOT NULL,
|
||||
SHA256Perf DOUBLE PRECISION NOT NULL,
|
||||
MemorySize INTEGER NOT NULL,
|
||||
MemoryUsageMiB INTEGER NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS BlockStats (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
Reports INTEGER NOT NULL,
|
||||
Total BIGINT NOT NULL,
|
||||
Renamed BIGINT NOT NULL,
|
||||
Reused BIGINT NOT NULL,
|
||||
Pulled BIGINT NOT NULL,
|
||||
CopyOrigin BIGINT NOT NULL,
|
||||
CopyOriginShifted BIGINT NOT NULL,
|
||||
CopyElsewhere BIGINT NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var t string
|
||||
|
||||
row := db.QueryRow(`SELECT 'UniqueDayVersionIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'VersionDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'BlockStatsDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func maxIndexedDay(db *sql.DB, table string) time.Time {
|
||||
var t time.Time
|
||||
row := db.QueryRow("SELECT MAX(DATE_TRUNC('day', Day)) FROM " + table)
|
||||
err := row.Scan(&t)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO VersionSummary (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
SUBSTRING(Report->>'version' FROM '^v\d.\d+') AS Ver,
|
||||
COUNT(*) AS Count
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND Report->>'version' like 'v_.%'
|
||||
GROUP BY Day, Ver
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
|
||||
func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO Performance (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
AVG((Report->>'totFiles')::numeric) As TotFiles,
|
||||
AVG((Report->>'totMiB')::numeric) As TotMiB,
|
||||
AVG((Report->>'sha256Perf')::numeric) As SHA256Perf,
|
||||
AVG((Report->>'memorySize')::numeric) As MemorySize,
|
||||
AVG((Report->>'memoryUsageMiB')::numeric) As MemoryUsageMiB
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND Report->>'version' like 'v_.%'
|
||||
/* Some custom implementations reported bytes when we expect megabytes, cap at petabyte */
|
||||
AND (Report->>'memorySize')::numeric < 1073741824
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
|
||||
func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
|
||||
// Filter out anything prior to 0.14.41, as those reports had sum aggregations
// which made no sense.
|
||||
res, err := db.Exec(`INSERT INTO BlockStats (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
COUNT(1) As Reports,
|
||||
SUM((Report->'blockStats'->>'total')::numeric)::bigint AS Total,
|
||||
SUM((Report->'blockStats'->>'renamed')::numeric)::bigint AS Renamed,
|
||||
SUM((Report->'blockStats'->>'reused')::numeric)::bigint AS Reused,
|
||||
SUM((Report->'blockStats'->>'pulled')::numeric)::bigint AS Pulled,
|
||||
SUM((Report->'blockStats'->>'copyOrigin')::numeric)::bigint AS CopyOrigin,
|
||||
SUM((Report->'blockStats'->>'copyOriginShifted')::numeric)::bigint AS CopyOriginShifted,
|
||||
SUM((Report->'blockStats'->>'copyElsewhere')::numeric)::bigint AS CopyElsewhere
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND (Report->>'urVersion')::numeric >= 3
|
||||
AND Report->>'version' like 'v_.%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.40%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.39%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.38%'
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
@@ -1,276 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type analytic struct {
|
||||
Key string
|
||||
Count int
|
||||
Percentage float64
|
||||
Items []analytic `json:",omitempty"`
|
||||
}
|
||||
|
||||
type analyticList []analytic
|
||||
|
||||
func (l analyticList) Less(a, b int) bool {
|
||||
if l[a].Key == "Others" {
|
||||
return false
|
||||
}
|
||||
if l[b].Key == "Others" {
|
||||
return true
|
||||
}
|
||||
return l[b].Count < l[a].Count // inverse
|
||||
}
|
||||
|
||||
func (l analyticList) Swap(a, b int) {
|
||||
l[a], l[b] = l[b], l[a]
|
||||
}
|
||||
|
||||
func (l analyticList) Len() int {
|
||||
return len(l)
|
||||
}
|
||||
|
||||
// Returns a list of frequency analytics for a given list of strings.
|
||||
func analyticsFor(ss []string, cutoff int) []analytic {
|
||||
m := make(map[string]int)
|
||||
t := 0
|
||||
for _, s := range ss {
|
||||
m[s]++
|
||||
t++
|
||||
}
|
||||
|
||||
l := make([]analytic, 0, len(m))
|
||||
for k, c := range m {
|
||||
l = append(l, analytic{
|
||||
Key: k,
|
||||
Count: c,
|
||||
Percentage: 100 * float64(c) / float64(t),
|
||||
})
|
||||
}
|
||||
|
||||
sort.Sort(analyticList(l))
|
||||
|
||||
if cutoff > 0 && len(l) > cutoff {
|
||||
c := 0
|
||||
for _, i := range l[cutoff:] {
|
||||
c += i.Count
|
||||
}
|
||||
l = append(l[:cutoff], analytic{
|
||||
Key: "Others",
|
||||
Count: c,
|
||||
Percentage: 100 * float64(c) / float64(t),
|
||||
})
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// Find the points at which certain penetration levels are met
|
||||
func penetrationLevels(as []analytic, points []float64) []analytic {
|
||||
sort.Slice(as, func(a, b int) bool {
|
||||
return versionLess(as[b].Key, as[a].Key)
|
||||
})
|
||||
|
||||
var res []analytic
|
||||
|
||||
idx := 0
|
||||
sum := 0.0
|
||||
for _, a := range as {
|
||||
sum += a.Percentage
|
||||
if sum >= points[idx] {
|
||||
a.Count = int(points[idx])
|
||||
a.Percentage = sum
|
||||
res = append(res, a)
|
||||
idx++
|
||||
if idx == len(points) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForInts(data []int) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Ints(data)
|
||||
res[0] = float64(data[int(float64(len(data))*0.05)])
|
||||
res[1] = float64(data[len(data)/2])
|
||||
res[2] = float64(data[int(float64(len(data))*0.95)])
|
||||
res[3] = float64(data[len(data)-1])
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForInt64s(data []int64) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Slice(data, func(a, b int) bool {
|
||||
return data[a] < data[b]
|
||||
})
|
||||
|
||||
res[0] = float64(data[int(float64(len(data))*0.05)])
|
||||
res[1] = float64(data[len(data)/2])
|
||||
res[2] = float64(data[int(float64(len(data))*0.95)])
|
||||
res[3] = float64(data[len(data)-1])
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForFloats(data []float64) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Float64s(data)
|
||||
res[0] = data[int(float64(len(data))*0.05)]
|
||||
res[1] = data[len(data)/2]
|
||||
res[2] = data[int(float64(len(data))*0.95)]
|
||||
res[3] = data[len(data)-1]
|
||||
return res
|
||||
}
|
||||
|
||||
func group(by func(string) string, as []analytic, perGroup int, otherPct float64) []analytic {
|
||||
var res []analytic
|
||||
|
||||
next:
|
||||
for _, a := range as {
|
||||
group := by(a.Key)
|
||||
for i := range res {
|
||||
if res[i].Key == group {
|
||||
res[i].Count += a.Count
|
||||
res[i].Percentage += a.Percentage
|
||||
if len(res[i].Items) < perGroup {
|
||||
res[i].Items = append(res[i].Items, a)
|
||||
}
|
||||
continue next
|
||||
}
|
||||
}
|
||||
res = append(res, analytic{
|
||||
Key: group,
|
||||
Count: a.Count,
|
||||
Percentage: a.Percentage,
|
||||
Items: []analytic{a},
|
||||
})
|
||||
}
|
||||
|
||||
sort.Sort(analyticList(res))
|
||||
|
||||
if otherPct > 0 {
|
||||
// Groups with less than otherPct go into "Other"
|
||||
other := analytic{
|
||||
Key: "Other",
|
||||
}
|
||||
for i := 0; i < len(res); i++ {
|
||||
if res[i].Percentage < otherPct || res[i].Key == "Other" {
|
||||
other.Count += res[i].Count
|
||||
other.Percentage += res[i].Percentage
|
||||
res = append(res[:i], res[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if other.Count > 0 {
|
||||
res = append(res, other)
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func byVersion(s string) string {
|
||||
parts := strings.Split(s, ".")
|
||||
if len(parts) >= 2 {
|
||||
return strings.Join(parts[:2], ".")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func byPlatform(s string) string {
|
||||
parts := strings.Split(s, "-")
|
||||
if len(parts) >= 2 {
|
||||
return parts[0]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var numericGoVersion = regexp.MustCompile(`^go[0-9]\.[0-9]+`)
|
||||
|
||||
func byCompiler(s string) string {
|
||||
if m := numericGoVersion.FindString(s); m != "" {
|
||||
return m
|
||||
}
|
||||
return "Other"
|
||||
}
|
||||
|
||||
func versionLess(a, b string) bool {
|
||||
arel, apre := versionParts(a)
|
||||
brel, bpre := versionParts(b)
|
||||
|
||||
minlen := len(arel)
|
||||
if l := len(brel); l < minlen {
|
||||
minlen = l
|
||||
}
|
||||
|
||||
for i := 0; i < minlen; i++ {
|
||||
if arel[i] != brel[i] {
|
||||
return arel[i] < brel[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Longer version is newer, when the preceding parts are equal
|
||||
if len(arel) != len(brel) {
|
||||
return len(arel) < len(brel)
|
||||
}
|
||||
|
||||
if apre != bpre {
|
||||
// "(+dev)" versions are ahead
|
||||
if apre == plusStr {
|
||||
return false
|
||||
}
|
||||
if bpre == plusStr {
|
||||
return true
|
||||
}
|
||||
return apre < bpre
|
||||
}
|
||||
|
||||
// don't actually care how the prerelease stuff compares for our purposes
|
||||
return false
|
||||
}
|
||||
|
||||
// Split a version as returned from transformVersion into parts.
|
||||
// "1.2.3-beta.2" -> []int{1, 2, 3}, "beta.2"}
|
||||
func versionParts(v string) ([]int, string) {
|
||||
parts := strings.SplitN(v[1:], " ", 2) // " (+dev)" versions
|
||||
if len(parts) == 1 {
|
||||
parts = strings.SplitN(parts[0], "-", 2) // "-rc.1" type versions
|
||||
}
|
||||
fields := strings.Split(parts[0], ".")
|
||||
|
||||
release := make([]int, len(fields))
|
||||
for i, s := range fields {
|
||||
v, _ := strconv.Atoi(s)
|
||||
release[i] = v
|
||||
}
|
||||
|
||||
var prerelease string
|
||||
if len(parts) > 1 {
|
||||
prerelease = parts[1]
|
||||
}
|
||||
|
||||
return release, prerelease
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
    "bytes"
    "fmt"
    "strings"
)

type NumberType int

const (
    NumberMetric NumberType = iota
    NumberBinary
    NumberDuration
)

func number(ntype NumberType, v float64) string {
    switch ntype {
    case NumberMetric:
        return metric(v)
    case NumberDuration:
        return duration(v)
    case NumberBinary:
        return binary(v)
    default:
        return metric(v)
    }
}

type suffix struct {
    Suffix     string
    Multiplier float64
}

var metricSuffixes = []suffix{
    {"G", 1e9},
    {"M", 1e6},
    {"k", 1e3},
}

var binarySuffixes = []suffix{
    {"Gi", 1 << 30},
    {"Mi", 1 << 20},
    {"Ki", 1 << 10},
}

var durationSuffix = []suffix{
    {"year", 365 * 24 * 60 * 60},
    {"month", 30 * 24 * 60 * 60},
    {"day", 24 * 60 * 60},
    {"hour", 60 * 60},
    {"minute", 60},
    {"second", 1},
}

func metric(v float64) string {
    return withSuffix(v, metricSuffixes, false)
}

func binary(v float64) string {
    return withSuffix(v, binarySuffixes, false)
}

func duration(v float64) string {
    return withSuffix(v, durationSuffix, true)
}

func withSuffix(v float64, ps []suffix, pluralize bool) string {
    for _, p := range ps {
        if v >= p.Multiplier {
            suffix := p.Suffix
            if pluralize && v/p.Multiplier != 1.0 {
                suffix += "s"
            }
            // If the number only has decimal zeroes, strip em off.
            num := strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v/p.Multiplier), "0"), ".")
            return fmt.Sprintf("%s %s", num, suffix)
        }
    }
    return strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v), "0"), ".")
}

// commatize returns a number with sep as thousands separators. Handles
// integers and plain floats.
func commatize(sep, s string) string {
    // If no dot, don't do anything.
    if !strings.ContainsRune(s, '.') {
        return s
    }
    var b bytes.Buffer
    fs := strings.SplitN(s, ".", 2)

    l := len(fs[0])
    for i := range fs[0] {
        b.Write([]byte{s[i]})
        if i < l-1 && (l-i)%3 == 1 {
            b.WriteString(sep)
        }
    }

    if len(fs) > 1 && len(fs[1]) > 0 {
        b.WriteString(".")
        b.WriteString(fs[1])
    }

    return b.String()
}

func proportion(m map[string]int, count int) float64 {
    total := 0
    isMax := true
    for _, n := range m {
        total += n
        if n > count {
            isMax = false
        }
    }
    pct := (100 * float64(count)) / float64(total)
    // To avoid rounding errors in the template surpassing 100% and breaking
    // the progress bars.
    if isMax && len(m) > 1 && count != total {
        pct -= 0.01
    }
    return pct
}
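Note: the template further down pipes values through number and then commatize (e.g. {{index .Values 0 | number .Type | commatize " "}}). As a rough, self-contained illustration of the thousands-separator step, here is the commatize helper copied into a hypothetical standalone program (not part of the diff):

package main

import (
    "bytes"
    "fmt"
    "strings"
)

// commatize copied verbatim from the deleted file above, since it is
// unexported in package serve.
func commatize(sep, s string) string {
    if !strings.ContainsRune(s, '.') {
        return s
    }
    var b bytes.Buffer
    fs := strings.SplitN(s, ".", 2)
    l := len(fs[0])
    for i := range fs[0] {
        b.Write([]byte{s[i]})
        if i < l-1 && (l-i)%3 == 1 {
            b.WriteString(sep)
        }
    }
    if len(fs) > 1 && len(fs[1]) > 0 {
        b.WriteString(".")
        b.WriteString(fs[1])
    }
    return b.String()
}

func main() {
    fmt.Println(commatize(" ", "1234567.8")) // "1 234 567.8"
    fmt.Println(commatize(",", "999.0"))     // "999.0"
    fmt.Println(commatize(" ", "1234567"))   // unchanged: no decimal point
}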
@@ -1,26 +0,0 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
    Namespace: "syncthing",
    Subsystem: "ursrv",
    Name:      "reports_total",
}, []string{"version"})

func init() {
    metricReportsTotal.WithLabelValues("fail")
    metricReportsTotal.WithLabelValues("duplicate")
    metricReportsTotal.WithLabelValues("v1")
    metricReportsTotal.WithLabelValues("v2")
    metricReportsTotal.WithLabelValues("v3")
}
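Note: the init block above only pre-creates the label values so each series exists at zero before any report arrives. A hedged sketch of how a report handler in package serve might bump the counter (the actual handler is not shown in this excerpt):

// Hypothetical call sites in package serve; the real report handler is not
// part of this excerpt.
func countReport(urVersion int, failed, duplicate bool) {
    switch {
    case failed:
        metricReportsTotal.WithLabelValues("fail").Inc()
    case duplicate:
        metricReportsTotal.WithLabelValues("duplicate").Inc()
    case urVersion >= 3:
        metricReportsTotal.WithLabelValues("v3").Inc()
    case urVersion == 2:
        metricReportsTotal.WithLabelValues("v2").Inc()
    default:
        metricReportsTotal.WithLabelValues("v1").Inc()
    }
}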
@@ -1,623 +0,0 @@
<!DOCTYPE html>
<!--
Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
Use of this source code is governed by an MIT-style license that can be
found in the LICENSE file.
-->
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="">
<link rel="shortcut icon" href="static/assets/img/favicon.png">

<title>Syncthing Usage Reports</title>
<link href="static/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script type="text/javascript" src="static/bootstrap/js/bootstrap.min.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.js"></script>
<script src="https://cdn.jsdelivr.net/npm/heatmapjs@2.0.2/heatmap.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/leaflet-heatmap@1.0.0/leaflet-heatmap.js"></script>

<style type="text/css">
body {
margin: 40px;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
}
tr.main td {
font-weight: bold;
}
tr.child td.first {
padding-left: 2em;
}
.progress-bar {
overflow:hidden;
white-space:nowrap;
text-overflow: ellipsis;
}
</style>
<script type="text/javascript"
src='https://www.google.com/jsapi?autoload={
"modules":[{
"name":"visualization",
"version":"1",
"packages":["corechart"]
}]
}'></script>

<script type="text/javascript">
google.setOnLoadCallback(drawVersionChart);
google.setOnLoadCallback(drawBlockStatsChart);
google.setOnLoadCallback(drawPerformanceCharts);

function drawVersionChart() {
// Summary version chart for versions that at some point in the chart
// reach 250 devices. This filters out versions that are old and
// uninteresting yet linger forever with like four users.
var jsonData = $.ajax({url: "summary.json?min=250", dataType:"json", async: false}).responseText;
var rows = JSON.parse(jsonData);

var data = new google.visualization.DataTable();
data.addColumn('date', 'Day');
for (var i = 1; i < rows[0].length; i++){
data.addColumn('number', rows[0][i]);
}
for (var i = 1; i < rows.length; i++){
rows[i][0] = new Date(rows[i][0]);
data.addRow(rows[i]);
};

var options = {
legend: { position: 'bottom', alignment: 'center' },
isStacked: true,
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
};

var chart = new google.visualization.AreaChart(document.getElementById('versionChart'));
chart.draw(data, options);
}

function formatGibibytes(gibibytes, decimals) {
if(gibibytes == 0) return '0 GiB';
var k = 1024,
dm = decimals || 2,
sizes = ['GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'],
i = Math.floor(Math.log(gibibytes) / Math.log(k));
if (i < 0) {
sizes = 'MiB';
} else {
sizes = sizes[i];
}
return parseFloat((gibibytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes;
}


function drawBlockStatsChart() {
var jsonData = $.ajax({url: "blockstats.json", dataType:"json", async: false}).responseText;
var rows = JSON.parse(jsonData);

var data = new google.visualization.DataTable();
data.addColumn('date', 'Day');
for (var i = 1; i < rows[0].length; i++){
data.addColumn('number', rows[0][i]);
}

var totals = [0, 0, 0, 0, 0, 0];
for (var i = 1; i < rows.length; i++){
rows[i][0] = new Date(rows[i][0]);
for (var j = 2; j < rows[i].length; j++) {
totals[j-2] += rows[i][j];
}
data.addRow(rows[i]);
};

var totalTotals = totals.reduce(function(a, b) { return a + b; }, 0);

if (totalTotals > 0) {
var content = "<table class='table'>\n"
for (var j = 2; j < rows[0].length; j++) {
content += "<tr><td><b>" + rows[0][j].replace(' (GiB)', '') + "</b></td><td>" + formatGibibytes(totals[j-2].toFixed(2)) + " (" + ((100*totals[j-2])/totalTotals).toFixed(2) +"%)</td></tr>\n";
}
content += "</table>";
document.getElementById("data-to-date").innerHTML = content;
} else {
// No data, hide it.
document.getElementById("block-stats").outerHTML = "";
return;
}

var options = {
focusTarget: 'category',
vAxes: {0: {}, 1: {}},
series: {0: {type: 'line', targetAxisIndex:1}},
isStacked: true,
legend: {position: 'none'},
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
};

var chart = new google.visualization.AreaChart(document.getElementById('blockStatsChart'));
chart.draw(data, options);
}

function drawPerformanceCharts() {
var jsonData = $.ajax({url: "/performance.json", dataType:"json", async: false}).responseText;
var rows = JSON.parse(jsonData);
for (var i = 1; i < rows.length; i++){
rows[i][0] = new Date(rows[i][0]);
}

drawChart(rows, 1, 'Total Number of Files', 'totFilesChart', 1e6, 1);
drawChart(rows, 2, 'Total Folder Size (GiB)', 'totMiBChart', 1e6, 1024);
drawChart(rows, 3, 'Hash Performance (MiB/s)', 'hashPerfChart', 1000, 1);
drawChart(rows, 4, 'System RAM Size (GiB)', 'memSizeChart', 1e6, 1024);
drawChart(rows, 5, 'Memory Usage (MiB)', 'memUsageChart', 250, 1);
}

function drawChart(rows, index, title, id, cutoff, divisor) {
var data = new google.visualization.DataTable();
data.addColumn('date', 'Day');
data.addColumn('number', title);

var row;
for (var i = 1; i < rows.length; i++){
row = [rows[i][0], rows[i][index] / divisor];
if (row[1] > cutoff) {
row[1] = null;
}
data.addRow(row);
}

var options = {
legend: { position: 'bottom', alignment: 'center' },
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
vAxes: {0: {minValue: 0}},
};

var chart = new google.visualization.LineChart(document.getElementById(id));
chart.draw(data, options);
}

var locations = [];
{{range $location, $weight := .locations}}
locations.push({lat:{{- $location.Latitude -}},lng:{{- $location.Longitude -}},count:Math.min(100, {{- $weight -}})});
{{- end}}

function drawHeatMap() {
if (locations.length == 0) {
return;
}
var testData = {
data: locations
};

var baseLayer = L.tileLayer(
'https://tile.openstreetmap.org/{z}/{x}/{y}.png',{
attribution: '...',
maxZoom: 18
}
);
var cfg = {
"radius": 1,
"minOpacity": .25,
"maxOpacity": .8,
"scaleRadius": true,
"useLocalExtrema": true,
latField: 'lat',
lngField: 'lng',
valueField: 'count',
gradient: {
'.1': 'cyan',
'.8': 'blue',
'.95': 'red'
}
};
var heatmapLayer = new HeatmapOverlay(cfg);

var map = new L.Map('map', {
center: new L.LatLng(25, 0),
zoom: 1,
layers: [baseLayer, heatmapLayer]
});
heatmapLayer.setData(testData);
}
</script>
</head>

<body>
<div class="container">
<div class="row">
<div class="col-md-12">
<h1>Syncthing Usage Data</h1>

<h4 id="active-users">Active Users per Day and Version</h4>
<p>
This is the total number of unique users with reporting enabled, per day. Area color represents the major version.
</p>
<div class="img-thumbnail" id="versionChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

<div id="block-stats">
<h4>Data Transfers per Day</h4>
<p>
This is the total data transferred per day. It also shows how much data was saved (not transferred) by each of the methods syncthing uses.
</p>
<div class="img-thumbnail" id="blockStatsChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
<h4 id="totals-to-date">Totals to date</h4>
<p id="data-to-date">
No data
</p>
</div>

<h4 id="metrics">Usage Metrics</h4>
<p>
This is the aggregated usage report data for the last 24 hours. Data based on <b>{{.nodes}}</b> devices that have reported in.
</p>

{{if .locations}}
<div class="img-thumbnail" id="map" style="width: 1130px; height: 400px; padding: 10px;"></div>
<p class="text-muted">
Heatmap max intensity is capped at 100 reports within a location.
</p>
<div class="panel panel-default">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" href="#collapseTwo">Break down per country</a>
</h4>
</div>
<div id="collapseTwo" class="panel-collapse collapse">
<div class="panel-body">
<div class="row">
<div class="col-md-6">
<table class="table table-striped">
<tbody>
{{range .countries | slice 2 1}}
<tr>
<td style="width: 45%">{{.Key}}</td>
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
<td style="width: 5%" class="text-right">{{.Count}}</td>
<td>
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
</td>
</tr>
{{end}}
</tbody>
</table>
</div>
<div class="col-md-6">
<table class="table table-striped">
<tbody>
{{range .countries | slice 2 2}}
<tr>
<td style="width: 45%">{{.Key}}</td>
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
<td style="width: 5%" class="text-right">{{.Count}}</td>
<td>
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
</td>
</tr>
{{end}}
</tbody>
</table>
</div>
</div>
</div>
</div>
</div>
{{end}}
<table class="table table-striped">
<thead>
<tr>
<th></th>
<th colspan="4" class="text-center">
<a href="https://en.wikipedia.org/wiki/Percentile">Percentile</a>
</th>
</tr>
<tr>
<th></th>
<th class="text-right">5%</th>
<th class="text-right">50%</th>
<th class="text-right">95%</th>
<th class="text-right">100%</th>
</tr>
</thead>
<tbody>
{{range .categories}}
<tr>
<td>{{.Descr}}</td>
<td class="text-right">{{index .Values 0 | number .Type | commatize " "}}{{.Unit}}</td>
<td class="text-right">{{index .Values 1 | number .Type | commatize " "}}{{.Unit}}</td>
<td class="text-right">{{index .Values 2 | number .Type | commatize " "}}{{.Unit}}</td>
<td class="text-right">{{index .Values 3 | number .Type | commatize " "}}{{.Unit}}</td>
</tr>
{{end}}
</tbody>
</table>
</div>
</div>

<div class="row">
<div class="col-md-6">
<table class="table table-striped">
<thead>
<tr>
<th>Version</th><th class="text-right">Devices</th><th class="text-right">Share</th>
</tr>
</thead>
<tbody>
{{range .versions}}
{{if gt .Percentage 0.1}}
<tr class="main">
<td>{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{range .Items}}
{{if gt .Percentage 0.1}}
<tr class="child">
<td class="first">{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{end}}
{{end}}
{{end}}
{{end}}
</tbody>
</table>
<table class="table table-striped">
<thead>
<tr>
<th>Penetration Level</th>
<th>Version</th>
<th class="text-right">Actual</th>
</tr>
</thead>
<tbody>
{{range .versionPenetrations}}
<tr>
<td>{{.Count}}%</td>
<td>≥ {{.Key}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{end}}
</tbody>
</table>
</div>

<div class="col-md-6">
<table class="table table-striped">
<thead>
<tr>
<th>Platform</th>
<th class="text-right">Devices</th>
<th class="text-right">Share</th>
</tr>
</thead>
<tbody>
{{range .platforms}}
<tr class="main">
<td>{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{range .Items}}
<tr class="child">
<td class="first">{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{end}}
{{end}}
</tbody>
</table>
</div>

</div>
<div class="row">

<div class="col-md-6">
<table class="table table-striped">
<thead>
<tr>
<th>Compiler</th>
<th class="text-right">Devices</th>
<th class="text-right">Share</th>
</tr>
</thead>
<tbody>
{{range .compilers}}
<tr class="main">
<td>{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{range .Items}}
{{if or (gt .Percentage 0.1) (eq .Key "Others")}}
<tr class="child">
<td class="first">{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{end}}
{{end}}
{{end}}
</tbody>
</table>
</div>

<div class="col-md-6">
<table class="table table-striped">
<thead>
<tr>
<th>Distribution Channel</th>
<th class="text-right">Devices</th>
<th class="text-right">Share</th>
</tr>
</thead>
<tbody>
{{range .distributions}}
<tr>
<td>{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{end}}
</tbody>
</table>
</div>

<div class="col-md-6">
<table class="table table-striped">
<thead>
<tr>
<th>Builder</th>
<th class="text-right">Devices</th>
<th class="text-right">Share</th>
</tr>
</thead>
<tbody>
{{range .builders}}
<tr>
<td>{{.Key}}</td>
<td class="text-right">{{.Count}}</td>
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
</tr>
{{end}}
</tbody>
</table>
</div>

</div>

<div class="row">
<div class="col-md-12">
<h4 id="features">Feature Usage</h4>
<p>
The following lists feature usage. Some features are reported per report, some are per sum of units within a report (e.g. devices with static addresses among all known devices per report).
Currently there are <b>{{.versionNodes.v2}}</b> devices reporting for version 2 and <b>{{.versionNodes.v3}}</b> for version 3.
</p>
</div>
</div>


<div class="row">
{{$i := counter}}
{{range $featureName := .featureOrder}}
{{$featureValues := index $.features $featureName }}
{{if $i.DrawTwoDivider}}
</div>
<div class="row">
{{end}}
{{ $i.Increment }}
<div class="col-md-6">
<table class="table table-striped">
<thead><tr>
<th>{{$featureName}} Features</th><th colspan="2" class="text-center">Usage</th>
</tr></thead>
<tbody>
{{range $featureValues}}
<tr>
<td style="width: 50%">{{.Key}} ({{.Version}})</td>
<td style="width: 10%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
<td style="width: 40%" {{if lt .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}>
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px" {{if ge .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}></div>
</td>
</tr>
{{end}}
</tbody>
</table>
</div>
{{end}}
</div>

<div class="row">
<div class="col-md-12">
<h4 id="features">Feature Group Usage</h4>
<p>
The following lists feature usage groups, which might include multiple occurrences of a feature use per report.
</p>
</div>
</div>

<div class="row">
{{$i := counter}}
{{range $featureName := .featureOrder}}
{{$featureValues := index $.featureGroups $featureName }}
{{if $i.DrawTwoDivider}}
</div>
<div class="row">
{{end}}
{{ $i.Increment }}
<div class="col-md-6">
<table class="table table-striped">
<thead><tr>
<th>{{$featureName}} Group Features</th><th colspan="2" class="text-center">Usage</th>
</tr></thead>
<tbody>
{{range $featureValues}}
{{$counts := .Counts}}
<tr>
<td style="width: 50%">
<div data-toggle="tooltip" title='{{range $key, $value := .Counts}}{{$key}} ({{$value | proportion $counts | printf "%.02f"}}% - {{$value}})</br>{{end}}'>
{{.Key}} ({{.Version}})
</div>
</td>
<td style="width: 50%">
<div class="progress" role="progressbar" style="width: 100%">
{{$j := counter}}
{{range $key, $value := .Counts}}
{{with $valuePct := $value | proportion $counts}}
<div class="progress-bar {{ $j.Current | progressBarClassByIndex }}" style='width: {{$valuePct | printf "%.02f"}}%' data-toggle="tooltip" title='{{$key}} ({{$valuePct | printf "%.02f"}}% - {{$value}})'>
{{if ge $valuePct 30.0}}{{$key}}{{end}}
</div>
{{end}}
{{ $j.Increment }}
{{end}}
</div>
</td>
</tr>
{{end}}
</tbody>
</table>
</div>
{{end}}
</div>
<div class="row">
<div class="col-md-12">
<h1 id="performance-charts">Historical Performance Data</h1>
<p>These charts are all the average of the corresponding metric, for the entire population of a given day.</p>

<h4 id="hash-performance">Hash Performance (MiB/s)</h4>
<div class="img-thumbnail" id="hashPerfChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

<h4 id="memory-usage">Memory Usage (MiB)</h4>
<div class="img-thumbnail" id="memUsageChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

<h4 id="total-files">Total Number of Files</h4>
<div class="img-thumbnail" id="totFilesChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

<h4 id="total-size">Total Folder Size (GiB)</h4>
<div class="img-thumbnail" id="totMiBChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

<h4 id="system-ram">System RAM Size (GiB)</h4>
<div class="img-thumbnail" id="memSizeChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
</div>
</div>
</div>
<hr>
<p>
<a href="https://github.com/syncthing/syncthing/tree/main/cmd/ursrv">Source code</a>.
This product includes GeoLite2 data created by MaxMind, available from
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
</p>
<script type="text/javascript">
$('[data-toggle="tooltip"]').tooltip({html:true});
drawHeatMap();
</script>
</body>
</html>
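Note: helpers such as number, commatize and proportion are referenced by the template above as template functions. In Go templates a piped value becomes the final argument of the next call, so {{index .Values 0 | number .Type | commatize " "}} evaluates commatize(" ", number(.Type, index(.Values, 0))). A minimal, hypothetical sketch of that wiring with a trivial stand-in helper (not the actual cmd/ursrv setup):

package main

import (
    "html/template"
    "os"
)

func main() {
    funcs := template.FuncMap{
        // Trivial stand-in; the real commatize helper is shown earlier.
        "commatize": func(sep, s string) string { return s },
    }
    const row = `<td class="text-right">{{ .Value | commatize " " }}{{ .Unit }}</td>` + "\n"
    tpl := template.Must(template.New("row").Funcs(funcs).Parse(row))
    _ = tpl.Execute(os.Stdout, map[string]string{"Value": "12 345.6", "Unit": " MiB/s"})
}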
compat.yaml
@@ -0,0 +1,34 @@
- runtime: go1.21
  requirements:
    # See https://en.wikipedia.org/wiki/MacOS_version_history#Releases
    #
    # macOS 10.15 (Catalina) per https://go.dev/doc/go1.22#darwin
    darwin: "19"
    # Per https://go.dev/doc/go1.23#linux
    linux: "2.6.32"
    # Windows 10's initial release was 10.0.10240.16405, per
    # https://learn.microsoft.com/en-us/windows/release-health/release-information
    # and Windows 11's initial release was 10.0.22000.194 per
    # https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information
    #
    # Windows 10/Windows Server 2016 per https://go.dev/doc/go1.21#windows
    windows: "10.0"

- runtime: go1.22
  requirements:
    darwin: "19"
    linux: "2.6.32"
    windows: "10.0"

- runtime: go1.23
  requirements:
    # macOS 11 (Big Sur) per https://tip.golang.org/doc/go1.23#darwin
    darwin: "20"
    linux: "2.6.32"
    windows: "10.0"

- runtime: go1.24
  requirements:
    darwin: "20"
    linux: "3.2"
    windows: "10.0"
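Note: the consumer of compat.yaml is not shown in this excerpt. A hedged sketch of one way to read it, using the sigs.k8s.io/yaml module that appears in go.mod below (struct and file names here are illustrative, not the repo's actual loader):

package main

import (
    "fmt"
    "os"

    "sigs.k8s.io/yaml"
)

// compatEntry mirrors one list item in compat.yaml; sigs.k8s.io/yaml maps
// YAML keys via JSON struct tags.
type compatEntry struct {
    Runtime      string            `json:"runtime"`
    Requirements map[string]string `json:"requirements"`
}

func main() {
    bs, err := os.ReadFile("compat.yaml")
    if err != nil {
        panic(err)
    }
    var entries []compatEntry
    if err := yaml.Unmarshal(bs, &entries); err != nil {
        panic(err)
    }
    for _, e := range entries {
        fmt.Println(e.Runtime, "minimum OS/kernel versions:", e.Requirements)
    }
}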
@@ -2,7 +2,7 @@
Name=Start Syncthing
GenericName=File synchronization
Comment=Starts the main syncthing process in the background.
Exec=/usr/bin/syncthing serve --no-browser --logfile=default
Exec=syncthing serve --no-browser --logfile=default
Icon=syncthing
Terminal=false
Type=Application
@@ -2,7 +2,7 @@
Name=Syncthing Web UI
GenericName=File synchronization UI
Comment=Opens Syncthing's Web UI in the default browser (Syncthing must already be started).
Exec=/usr/bin/syncthing --browser-only
Exec=syncthing --browser-only
Icon=syncthing
Terminal=false
Type=Application
go.mod
@@ -1,93 +1,100 @@
module github.com/syncthing/syncthing

go 1.21.0
go 1.23.0

require (
    github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f
    github.com/alecthomas/kong v0.9.0
    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
    github.com/alecthomas/kong v1.11.0
    github.com/aws/aws-sdk-go v1.55.7
    github.com/calmh/incontainer v1.0.0
    github.com/calmh/xdr v1.1.0
    github.com/ccding/go-stun v0.1.4
    github.com/calmh/xdr v1.2.0
    github.com/ccding/go-stun v0.1.5
    github.com/chmduquesne/rollinghash v4.0.0+incompatible
    github.com/d4l3k/messagediff v1.2.1
    github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
    github.com/getsentry/raven-go v0.2.0
    github.com/go-ldap/ldap/v3 v3.4.8
    github.com/go-ldap/ldap/v3 v3.4.11
    github.com/gobwas/glob v0.2.3
    github.com/gogo/protobuf v1.3.2
    github.com/gofrs/flock v0.12.1
    github.com/greatroar/blobloom v0.8.0
    github.com/hashicorp/golang-lru/v2 v2.0.7
    github.com/jackpal/gateway v1.0.15
    github.com/jackpal/gateway v1.0.16
    github.com/jackpal/go-nat-pmp v1.0.2
    github.com/julienschmidt/httprouter v1.3.0
    github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
    github.com/lib/pq v1.10.9
    github.com/maruel/panicparse/v2 v2.3.1
    github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1
    github.com/maruel/panicparse/v2 v2.5.0
    github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2
    github.com/maxmind/geoipupdate/v6 v6.1.0
    github.com/minio/sha256-simd v1.0.1
    github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75
    github.com/oschwald/geoip2-golang v1.11.0
    github.com/pierrec/lz4/v4 v4.1.21
    github.com/prometheus/client_golang v1.19.1
    github.com/quic-go/quic-go v0.44.0
    github.com/pierrec/lz4/v4 v4.1.22
    github.com/prometheus/client_golang v1.22.0
    github.com/puzpuzpuz/xsync/v3 v3.5.1
    github.com/quic-go/quic-go v0.52.0
    github.com/rabbitmq/amqp091-go v1.10.0
    github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
    github.com/shirou/gopsutil/v3 v3.24.5
    github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2
    github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
    github.com/shirou/gopsutil/v4 v4.25.4
    github.com/syncthing/notify v0.0.0-20250528144937-c7027d4f7465
    github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
    github.com/thejerf/suture/v4 v4.0.5
    github.com/urfave/cli v1.22.15
    github.com/thejerf/suture/v4 v4.0.6
    github.com/urfave/cli v1.22.16
    github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0
    github.com/willabides/kongplete v0.4.0
    go.uber.org/automaxprocs v1.5.3
    golang.org/x/crypto v0.23.0
    golang.org/x/net v0.25.0
    golang.org/x/sys v0.20.0
    golang.org/x/text v0.15.0
    golang.org/x/time v0.5.0
    golang.org/x/tools v0.21.0
    google.golang.org/protobuf v1.34.1
    go.uber.org/automaxprocs v1.6.0
    golang.org/x/crypto v0.38.0
    golang.org/x/net v0.40.0
    golang.org/x/sys v0.33.0
    golang.org/x/text v0.25.0
    golang.org/x/time v0.11.0
    golang.org/x/tools v0.33.0
    google.golang.org/protobuf v1.36.6
    sigs.k8s.io/yaml v1.4.0
)

require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
    github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cenkalti/backoff/v4 v4.3.0 // indirect
    github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
    github.com/coreos/go-semver v0.3.1 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/ebitengine/purego v0.8.3 // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect
    github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
    github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
    github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
    github.com/gofrs/flock v0.8.1 // indirect
    github.com/golang/snappy v0.0.4 // indirect
    github.com/google/pprof v0.0.0-20240528025155-186aa0362fba // indirect
    github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/klauspost/cpuid/v2 v2.2.7 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/nxadm/tail v1.4.11 // indirect
    github.com/onsi/ginkgo/v2 v2.19.0 // indirect
    github.com/oschwald/maxminddb-golang v1.13.0 // indirect
    github.com/onsi/ginkgo/v2 v2.23.4 // indirect
    github.com/oschwald/maxminddb-golang v1.13.1 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/posener/complete v1.2.3 // indirect
    github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
    github.com/prometheus/client_model v0.6.1 // indirect
    github.com/prometheus/common v0.54.0 // indirect
    github.com/prometheus/common v0.62.0 // indirect
    github.com/prometheus/procfs v0.15.1 // indirect
    github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/stretchr/objx v0.5.2 // indirect
    github.com/stretchr/testify v1.9.0 // indirect
    github.com/stretchr/testify v1.10.0 // indirect
    github.com/tklauser/go-sysconf v0.3.14 // indirect
    github.com/tklauser/numcpus v0.9.0 // indirect
    github.com/yusufpapurcu/wmi v1.2.4 // indirect
    go.uber.org/mock v0.4.0 // indirect
    golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc // indirect
    golang.org/x/mod v0.17.0 // indirect
    golang.org/x/sync v0.7.0 // indirect
    go.uber.org/mock v0.5.2 // indirect
    golang.org/x/mod v0.24.0 // indirect
    golang.org/x/sync v0.14.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
)