Mirror of https://github.com/syncthing/syncthing.git, synced 2025-12-24 06:28:10 -05:00

Compare commits: 1 commit (9562cfd5c2)

.github/CODEOWNERS (vendored, new file): 2 changed lines
@@ -0,0 +1,2 @@
/AUTHORS @calmh
/*.md @calmh

.github/ISSUE_TEMPLATE/01-feature.yml (vendored): 1 changed line
@@ -1,7 +1,6 @@
name: Feature request
description: File a new feature request
labels: ["enhancement", "needs-triage"]
type: Feature
body:
- type: textarea

.github/ISSUE_TEMPLATE/02-bug.yml (vendored): 1 changed line
@@ -1,7 +1,6 @@
name: Bug report
description: If you're actually looking for support instead, see "I need help / I have a question".
labels: ["bug", "needs-triage"]
type: Bug
body:
- type: markdown
attributes:

.github/dependabot.yml (vendored): 4 changed lines
@@ -3,11 +3,11 @@ updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: monthly
interval: weekly
open-pull-requests-limit: 10
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: monthly
interval: weekly
open-pull-requests-limit: 10

.github/labeler.yml (vendored): 23 changed lines
@@ -1,23 +0,0 @@
version: 1
labels:
- label: enhancement
title: ^feat\b
- label: bug
title: ^fix\b
- label: documentation
title: ^docs\b
- label: chore
title: ^chore\b
- label: chore
title: ^refactor\b
- label: build
title: ^build\b
- label: dependencies
title: ^build\(deps\)\b

.github/release.yml (vendored): 17 changed lines
@@ -1,17 +0,0 @@
changelog:
exclude:
labels:
- dependencies
categories:
- title: Fixes
labels:
- bug
- title: Features
labels:
- enhancement
- title: Other
labels:
- '*'

.github/workflows/build-infra-dockers.yaml (vendored): 17 changed lines
@@ -7,15 +7,11 @@ on:
- infra-*
env:
GO_VERSION: "~1.24.0"
GO_VERSION: "~1.22.3"
CGO_ENABLED: "0"
BUILD_USER: docker
BUILD_HOST: github.syncthing.net
permissions:
contents: read
packages: write
jobs:
docker-syncthing:
name: Build and push Docker images
@@ -45,13 +41,6 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build binaries
run: |
for arch in arm64 amd64; do
@@ -64,13 +53,13 @@ jobs:
- name: Set Docker tags (all branches)
run: |
tags=docker.io/syncthing/${{ matrix.pkg }}:${{ github.sha }},ghcr.io/syncthing/infra/${{ matrix.pkg }}:${{ github.sha }}
tags=syncthing/${{ matrix.pkg }}:${{ github.sha }}
echo "TAGS=$tags" >> $GITHUB_ENV
- name: Set Docker tags (latest)
if: github.ref == 'refs/heads/infrastructure'
run: |
tags=docker.io/syncthing/${{ matrix.pkg }}:latest,ghcr.io/syncthing/infra/${{ matrix.pkg }}:latest,${{ env.TAGS }}
tags=syncthing/${{ matrix.pkg }}:latest,${{ env.TAGS }}
echo "TAGS=$tags" >> $GITHUB_ENV
- name: Build and push

.github/workflows/build-nightly.yaml (vendored): 18 changed lines
@@ -1,18 +0,0 @@
name: Build Syncthing (Nightly)
on:
schedule:
# Run nightly build at 05:00 UTC
- cron: '00 05 * * *'
workflow_dispatch:
permissions:
contents: write
packages: write
jobs:
build-syncthing:
uses: ./.github/workflows/build-syncthing.yaml
# if we only want nightlies to run for specific users:
# if: contains(fromJSON('["syncthing", "calmh"]'), github.repository_owner)
secrets: inherit

.github/workflows/build-syncthing.yaml (vendored): 408 changed lines
@@ -3,17 +3,16 @@ name: Build Syncthing
on:
pull_request:
push:
branches-ignore:
- release
- release-rc*
workflow_call:
schedule:
# Run nightly build at 05:00 UTC
- cron: '00 05 * * *'
workflow_dispatch:
env:
# The go version to use for builds. We set check-latest to true when
# installing, so we get the latest patch version that matches the
# expression.
GO_VERSION: "~1.24.0"
GO_VERSION: "~1.22.3"
# Optimize compatibility on the slow archictures.
GO386: softfloat
@@ -49,7 +48,7 @@ jobs:
runner: ["windows-latest", "ubuntu-latest", "macos-latest"]
# The oldest version in this list should match what we have in our go.mod.
# Variables don't seem to be supported here, or we could have done something nice.
go: ["~1.23.0", "~1.24.0"]
go: ["~1.21.7", "~1.22.3"]
runs-on: ${{ matrix.runner }}
steps:
- name: Set git to use LF
@@ -84,11 +83,31 @@ jobs:
go run build.go test | go-test-json-to-loki
env:
GOFLAGS: "-json"
LOKI_URL: ${{ secrets.LOKI_URL }}
LOKI_USER: ${{ secrets.LOKI_USER }}
LOKI_URL: ${{ vars.LOKI_URL }}
LOKI_USER: ${{ vars.LOKI_USER }}
LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }}
LOKI_LABELS: "go=${{ matrix.go }},runner=${{ matrix.runner }},repo=${{ github.repository }},ref=${{ github.ref }}"
#
# Meta checks for formatting, copyright, etc
#
correctness:
name: Check correctness
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
cache: false
check-latest: true
- name: Check correctness
run: |
go test -v ./meta
#
# The basic checks job is a virtual one that depends on the matrix tests,
# the correctness checks, and various builds that we always do. This makes
@@ -103,11 +122,11 @@ jobs:
runs-on: ubuntu-latest
needs:
- build-test
- correctness
- package-linux
- package-cross
- package-source
- package-debian
- package-windows
- govulncheck
steps:
- uses: actions/checkout@v4
@@ -118,6 +137,8 @@ jobs:
package-windows:
name: Package for Windows
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
runs-on: windows-latest
steps:
- name: Set git to use LF
@@ -132,7 +153,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/setup-go@v5
with:
@@ -158,74 +178,22 @@ jobs:
- name: Create packages
run: |
$targets = 'syncthing', 'stdiscosrv', 'strelaysrv'
$archs = 'amd64', 'arm', 'arm64', '386'
foreach ($arch in $archs) {
foreach ($tgt in $targets) {
go run build.go -goarch $arch zip $tgt
}
}
go run build.go -goarch amd64 zip
go run build.go -goarch arm zip
go run build.go -goarch arm64 zip
go run build.go -goarch 386 zip
env:
CGO_ENABLED: "0"
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: unsigned-packages-windows
path: "*.zip"
codesign-windows:
name: Codesign for Windows
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
runs-on: windows-latest
needs:
- package-windows
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: unsigned-packages-windows
path: packages
- name: Extract packages
working-directory: packages
run: |
$files = Get-ChildItem "." -Filter *.zip
foreach ($file in $files) {
7z x $file.Name
}
- name: Sign files with Trusted Signing
uses: azure/trusted-signing-action@v0.5.1
with:
azure-tenant-id: ${{ secrets.AZURE_TRUSTED_SIGNING_TENANT_ID }}
azure-client-id: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_ID }}
azure-client-secret: ${{ secrets.AZURE_TRUSTED_SIGNING_CLIENT_SECRET }}
endpoint: ${{ secrets.AZURE_TRUSTED_SIGNING_ENDPOINT }}
trusted-signing-account-name: ${{ secrets.AZURE_TRUSTED_SIGNING_ACCOUNT }}
certificate-profile-name: ${{ secrets.AZURE_TRUSTED_SIGNING_PROFILE }}
files-folder: ${{ github.workspace }}\packages
files-folder-filter: exe
files-folder-recurse: true
file-digest: SHA256
timestamp-rfc3161: http://timestamp.acs.microsoft.com
timestamp-digest: SHA256
- name: Repackage packages
working-directory: packages
run: |
$files = Get-ChildItem "." -Filter *.zip
foreach ($file in $files) {
Remove-Item $file.Name
7z a -tzip $file.Name $file.BaseName
}
CODESIGN_SIGNTOOL: ${{ secrets.CODESIGN_SIGNTOOL }}
CODESIGN_CERTIFICATE_BASE64: ${{ secrets.CODESIGN_CERTIFICATE_BASE64 }}
CODESIGN_CERTIFICATE_PASSWORD: ${{ secrets.CODESIGN_CERTIFICATE_PASSWORD }}
CODESIGN_TIMESTAMP_SERVER: ${{ secrets.CODESIGN_TIMESTAMP_SERVER }}
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-windows
path: "packages/*.zip"
path: syncthing-windows-*.zip
#
# Linux
@@ -238,7 +206,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/setup-go@v5
with:
@@ -262,9 +229,7 @@ jobs:
run: |
archs=$(go tool dist list | grep linux | sed 's#linux/##')
for goarch in $archs ; do
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -goarch "$goarch" tar "$tgt"
done
go run build.go -goarch "$goarch" tar
done
env:
CGO_ENABLED: "0"
@@ -273,9 +238,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: packages-linux
path: |
*.tar.gz
compat.json
path: syncthing-linux-*.tar.gz
#
# macOS
@@ -283,14 +246,13 @@ jobs:
package-macos:
name: Package for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/setup-go@v5
with:
@@ -337,9 +299,7 @@ jobs:
- name: Create package (amd64)
run: |
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -goarch amd64 zip "$tgt"
done
go run build.go -goarch amd64 zip
env:
CGO_ENABLED: "1"
@@ -353,9 +313,7 @@ jobs:
go "\$@"
EOT
chmod 755 xgo.sh
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -gocmd ./xgo.sh -goarch arm64 zip "$tgt"
done
go run build.go -gocmd ./xgo.sh -goarch arm64 zip
env:
CGO_ENABLED: "1"
@@ -379,14 +337,15 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: packages-macos
path: "*.zip"
path: syncthing-*.zip
notarize-macos:
name: Notarize for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
needs:
- package-macos
- basics
runs-on: macos-latest
steps:
- name: Download artifacts
@@ -398,7 +357,7 @@ jobs:
run: |
APPSTORECONNECT_API_KEY_PATH="$RUNNER_TEMP/apikey.p8"
echo "$APPSTORECONNECT_API_KEY" | base64 -d -o "$APPSTORECONNECT_API_KEY_PATH"
for file in *-macos-*.zip ; do
for file in syncthing-macos-*.zip ; do
xcrun notarytool submit \
-k "$APPSTORECONNECT_API_KEY_PATH" \
-d "$APPSTORECONNECT_API_KEY_ID" \
@@ -421,7 +380,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/setup-go@v5
with:
@@ -464,11 +422,9 @@ jobs:
goos="${plat%/*}"
goarch="${plat#*/}"
echo "::group ::$plat"
for tgt in syncthing stdiscosrv strelaysrv ; do
if ! go run build.go -goos "$goos" -goarch "$goarch" tar "$tgt" 2>/dev/null; then
echo "::warning ::Failed to build $tgt for $plat"
fi
done
if ! go run build.go -goos "$goos" -goarch "$goarch" tar 2>/dev/null; then
echo "::warning ::Failed to build for $plat"
fi
echo "::endgroup::"
done
env:
@@ -478,7 +434,7 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: packages-other
path: "*.tar.gz"
path: syncthing-*.tar.gz
#
# Source
@@ -491,7 +447,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/setup-go@v5
with:
@@ -527,10 +482,11 @@ jobs:
sign-for-upgrade:
name: Sign for upgrade
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
environment: signing
needs:
- codesign-windows
- basics
- package-windows
- package-linux
- package-macos
- package-cross
@@ -540,7 +496,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/checkout@v4
with:
@@ -559,7 +514,7 @@ jobs:
- name: Install signing tool
run: |
go install ./cmd/dev/stsigtool
go install ./cmd/stsigtool
- name: Sign archives
run: |
@@ -574,44 +529,30 @@ jobs:
env:
STSIGTOOL_PRIVATE_KEY: ${{ secrets.STSIGTOOL_PRIVATE_KEY }}
- name: Create shasum files
- name: Create and sign .asc files
run: |
sudo apt update
sudo apt -y install gnupg
export SIGNING_KEY="$RUNNER_TEMP/gpg-secret.asc"
echo "$GNUPG_SIGNING_KEY_BASE64" | base64 -d > "$SIGNING_KEY"
gpg --import < "$SIGNING_KEY"
pushd packages
files=(*.tar.gz *.zip)
sha1sum "${files[@]}" > sha1sum.txt
sha256sum "${files[@]}" > sha256sum.txt
sha1sum "${files[@]}" | gpg --clearsign > sha1sum.txt.asc
sha256sum "${files[@]}" | gpg --clearsign > sha256sum.txt.asc
gpg --sign --armour --detach syncthing-source-*.tar.gz
popd
version=$(go run build.go version)
echo "VERSION=$version" >> $GITHUB_ENV
- name: Sign shasum files
uses: docker://ghcr.io/kastelo/ezapt:latest
with:
args:
sign
packages/sha1sum.txt packages/sha256sum.txt
rm -f "$SIGNING_KEY" .gnupg
env:
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}
- name: Sign source
uses: docker://ghcr.io/kastelo/ezapt:latest
with:
args:
sign --detach --ascii
packages/syncthing-source-${{ env.VERSION }}.tar.gz
env:
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}
GNUPG_SIGNING_KEY_BASE64: ${{ secrets.GNUPG_SIGNING_KEY_BASE64 }}
- name: Archive artifacts
uses: actions/upload-artifact@v4
with:
name: packages-signed
path: |
packages/*.tar.gz
packages/*.zip
packages/*.asc
packages/*.json
path: packages/*
#
# Debian
@@ -624,7 +565,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/setup-go@v5
with:
@@ -655,9 +595,7 @@ jobs:
- name: Package for Debian
run: |
for arch in amd64 i386 armhf armel arm64 ; do
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb "$tgt"
done
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb
done
env:
BUILD_USER: debian
@@ -675,9 +613,10 @@ jobs:
publish-nightly:
name: Publish nightly build
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && startsWith(github.ref, 'refs/heads/release-nightly')
environment: release
environment: signing
needs:
- sign-for-upgrade
- notarize-macos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -703,20 +642,19 @@ jobs:
cd packages
"$GITHUB_WORKSPACE/tools/generate-release-json" "$BASE_URL" > nightly.json
env:
BASE_URL: ${{ secrets.NIGHTLY_BASE_URL }}
BASE_URL: https://syncthing.ams3.digitaloceanspaces.com/nightly/
- name: Push artifacts
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_OBJSTORE_TYPE: s3
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
RCLONE_CONFIG_SPACES_TYPE: s3
RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
RCLONE_CONFIG_SPACES_ACL: public-read
with:
args: sync -v --no-update-modtime packages objstore:nightly
args: sync packages spaces:syncthing/nightly
#
# Push release artifacts to Spaces
@@ -724,10 +662,8 @@ jobs:
publish-release-files:
name: Publish release files
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/tags/v'))
environment: release
permissions:
contents: write
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/release'
environment: signing
needs:
- sign-for-upgrade
- package-debian
@@ -736,7 +672,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- name: Download signed packages
uses: actions/download-artifact@v4
@@ -761,145 +696,29 @@ jobs:
version=$(go run build.go version)
echo "VERSION=$version" >> $GITHUB_ENV
- name: Push to object store (${{ env.VERSION }})
- name: Push to Spaces (${{ env.VERSION }})
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_OBJSTORE_TYPE: s3
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
RCLONE_CONFIG_SPACES_TYPE: s3
RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
RCLONE_CONFIG_SPACES_ACL: public-read
with:
args: sync -v --no-update-modtime packages objstore:release/${{ env.VERSION }}
args: sync packages spaces:syncthing/release/${{ env.VERSION }}
- name: Push to object store (latest)
- name: Push to Spaces (latest)
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_OBJSTORE_TYPE: s3
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
RCLONE_CONFIG_SPACES_TYPE: s3
RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
RCLONE_CONFIG_SPACES_ACL: public-read
with:
args: sync -v --no-update-modtime objstore:release/${{ env.VERSION }} objstore:release/latest
- name: Create GitHub releases and push binaries
run: |
maybePrerelease=""
if [[ $VERSION == *-* ]]; then
maybePrerelease="--prerelease"
fi
export GH_PROMPT_DISABLED=1
if ! gh release view --json name "$VERSION" >/dev/null 2>&1 ; then
gh release create "$VERSION" \
$maybePrerelease \
--title "$VERSION" \
--notes-from-tag
fi
gh release upload --clobber "$VERSION" \
packages/*.asc packages/*.json \
packages/syncthing-*.tar.gz \
packages/syncthing-*.zip \
packages/syncthing_*.deb
PKGS=$(pwd)/packages
cd /tmp # gh will not release for repo x while inside repo y
for repo in relaysrv discosrv ; do
export GH_REPO="syncthing/$repo"
if ! gh release view --json name "$VERSION" >/dev/null 2>&1 ; then
gh release create "$VERSION" \
$maybePrerelease \
--title "$VERSION" \
--notes "https://github.com/syncthing/syncthing/releases/tag/$VERSION"
fi
gh release upload --clobber "$VERSION" \
$PKGS/*.asc \
$PKGS/*${repo}*
done
env:
GH_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
#
# Push Debian/APT archive
#
publish-apt:
name: Publish APT
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
environment: release
needs:
- package-debian
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- name: Download packages
uses: actions/download-artifact@v4
with:
name: debian-packages
path: packages
- name: Set version
run: |
version=$(go run build.go version)
echo "Version: $version"
echo "VERSION=$version" >> $GITHUB_ENV
# Decide whether packages should go to stable, candidate or nightly
- name: Prepare packages
run: |
kind=stable
if [[ $VERSION == *-rc.[0-9] ]] ; then
kind=candidate
elif [[ $VERSION == *-* ]] ; then
kind=nightly
fi
echo "Kind: $kind"
mkdir -p packages/syncthing/$kind
mv packages/*.deb packages/syncthing/$kind
- name: Pull archive
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_OBJSTORE_TYPE: s3
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
with:
args: sync objstore:apt/dists dists
- name: Update archive
uses: docker://ghcr.io/kastelo/ezapt:latest
with:
args:
publish
--add packages
--dists dists
env:
EZAPT_KEYRING_BASE64: ${{ secrets.APT_GPG_KEYRING_BASE64 }}
- name: Push archive
uses: docker://docker.io/rclone/rclone:latest
env:
RCLONE_CONFIG_OBJSTORE_TYPE: s3
RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
RCLONE_CONFIG_OBJSTORE_ACL: public-read
with:
args: sync -v --no-update-modtime dists objstore:apt/dists
args: sync spaces:syncthing/release/${{ env.VERSION }} spaces:syncthing/release/latest
#
# Build and push to Docker Hub
@@ -908,11 +727,8 @@ jobs:
docker-syncthing:
name: Build and push Docker images
runs-on: ubuntu-latest
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release-nightly' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/tags/v'))
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || github.ref == 'refs/heads/main' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/heads/release-'))
environment: docker
permissions:
contents: read
packages: write
strategy:
matrix:
pkg:
@@ -933,7 +749,6 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
- uses: actions/setup-go@v5
with:
@@ -973,18 +788,9 @@ jobs:
uses: docker/login-action@v3
if: env.DOCKER_PUSH == 'true'
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
if: env.DOCKER_PUSH == 'true'
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@@ -996,13 +802,13 @@ jobs:
echo Release version, pushing to :latest and version tags
major=${version%.*.*}
minor=${version%.*}
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:$major,ghcr.io/${{ matrix.image }}:$major,docker.io/${{ matrix.image }}:$minor,ghcr.io/${{ matrix.image }}:$minor,docker.io/${{ matrix.image }}:latest,ghcr.io/${{ matrix.image }}:latest
tags=${{ matrix.image }}:$version,${{ matrix.image }}:$major,${{ matrix.image }}:$minor,${{ matrix.image }}:latest
elif [[ $version == *-rc.@([0-9]|[0-9][0-9]) ]] ; then
echo Release candidate, pushing to :rc and version tags
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:rc,ghcr.io/${{ matrix.image }}:rc
echo Release candidate, pushing to :rc
tags=${{ matrix.image }}:rc
else
echo Development version, pushing to :edge
tags=docker.io/${{ matrix.image }}:edge,ghcr.io/${{ matrix.image }}:edge
tags=${{ matrix.image }}:edge
fi
echo "DOCKER_TAGS=$tags" >> $GITHUB_ENV
echo "VERSION=$version" >> $GITHUB_ENV

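The rclone steps in the workflow above configure their remotes entirely through environment variables instead of an rclone.conf file; rclone applies any variable named RCLONE_CONFIG_<REMOTE>_<OPTION> to the remote of that name. A minimal local sketch of the same mechanism (endpoint and credential values here are illustrative assumptions, not the workflow's secrets):

```bash
# Define an "objstore" remote purely from the environment, mirroring the
# RCLONE_CONFIG_OBJSTORE_* block used by the publish jobs above.
export RCLONE_CONFIG_OBJSTORE_TYPE=s3
export RCLONE_CONFIG_OBJSTORE_PROVIDER=DigitalOcean
export RCLONE_CONFIG_OBJSTORE_ENDPOINT=ams3.digitaloceanspaces.com
export RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID="$SPACES_KEY"
export RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY="$SPACES_SECRET"

# List the remote and push a local packages/ directory, as the nightly job does.
rclone lsd objstore:
rclone sync -v --no-update-modtime packages objstore:nightly
```
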
.github/workflows/pr-linters.yaml (vendored): 49 changed lines
@@ -1,49 +0,0 @@
name: Run PR linters
on:
pull_request:
workflow_dispatch:
permissions:
contents: read
pull-requests: read
jobs:
#
# golangci-lint runs a suite of static analysis checks on the code
#
golangci:
runs-on: ubuntu-latest
name: Golangci-lint
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: ensure asset generation
run: go run build.go assets
- name: golangci-lint
uses: golangci/golangci-lint-action@v8
with:
only-new-issues: true
#
# Meta checks for formatting, copyright, etc
#
meta:
name: Meta checks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 'stable'
- run: |
go run build.go assets
go test -v ./meta

.github/workflows/pr-metadata.yaml (vendored): 27 changed lines
@@ -1,27 +0,0 @@
name: PR metadata
on:
pull_request_target:
types:
- opened
- reopened
- edited
- synchronize
permissions:
contents: read
pull-requests: write
jobs:
#
# Set labels on PRs, which are then used to categorise release notes
#
labels:
name: Set labels
runs-on: ubuntu-latest
steps:
- uses: srvaroa/labeler@v1
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

.github/workflows/release-syncthing.yaml (vendored): 60 changed lines
@@ -1,60 +0,0 @@
name: Release Syncthing
on:
push:
branches:
- release
- release-rc*
permissions:
contents: write
jobs:
create-release-tag:
name: Create release tag
runs-on: ubuntu-latest
environment: release
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- uses: actions/setup-go@v5
with:
go-version: stable
- name: Determine version to release
run: |
if [[ "$GITHUB_REF_NAME" == "release" ]] ; then
next=$(go run ./script/next-version.go)
else
next=$(go run ./script/next-version.go --pre)
fi
echo "NEXT=$next" >> $GITHUB_ENV
echo "Next version is $next"
prev=$(git describe --exclude "*-*" --abbrev=0)
echo "PREV=$prev" >> $GITHUB_ENV
echo "Previous version is $prev"
- name: Determine release notes
run: |
go run ./script/relnotes.go --new-ver "$NEXT" --branch "$GITHUB_REF_NAME" --prev-ver "$PREV" > notes.md
env:
GITHUB_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- name: Create and push tag
run: |
git config --global user.name 'Syncthing Release Automation'
git config --global user.email 'release@syncthing.net'
git tag -a -F notes.md --cleanup=whitespace "$NEXT"
git push origin "$NEXT"
- name: Trigger the build
uses: benc-uk/workflow-dispatch@v1
with:
workflow: build-syncthing.yaml
ref: refs/tags/${{ env.NEXT }}
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

.gitignore (vendored): 1 changed line
@@ -18,4 +18,3 @@ deb
/repos
/proto/scripts/protoc-gen-gosyncthing
/gui/next-gen-gui
/compat.json

@@ -1,67 +1,26 @@
version: "2"
linters-settings:
maligned:
suggest-new: true
linters:
default: all
enable-all: true
disable:
- cyclop
- goimports
- depguard
- exhaustive
- exhaustruct
- forbidigo
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- goconst
- gocyclo
- godox
- gomoddirectives
- inamedparam
- interfacebloat
- ireturn
- lll
- maintidx
- mnd
- musttag
- nestif
- nlreturn
- nonamedreturns
- paralleltest
- prealloc
- predeclared
- protogetter
- recvcheck
- revive
- tagalign
- tagliatelle
- testpackage
- usetesting # go 1.24
- varnamelen
- whitespace
- wrapcheck
- gochecknoinits
- gochecknoglobals
- gofmt
- scopelint
- gocyclo
- funlen
- wsl
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- internal/gen
- cmd/dev
- repos
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofumpt
exclusions:
generated: lax
paths:
- internal/gen
- cmd/dev
- repos
- third_party$
- builtin$
- examples$
- gocognit
- godox
service:
golangci-lint-version: 1.21.x
prepare:
- rm -f go.sum # 1.12 -> 1.13 issues with QUIC-go
- GO111MODULE=on go mod vendor
- go run build.go assets

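The same lint configuration can be exercised locally before opening a PR; a rough sketch, assuming golangci-lint is installed and mirroring the asset-generation step the CI jobs run first:

```bash
# Generate the GUI assets the linters expect, then report only issues that are
# new relative to the main branch (the CI action uses only-new-issues: true).
go run build.go assets
golangci-lint run --new-from-rev=origin/main ./...
```
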
.policy.yml: 98 changed lines
@@ -1,98 +0,0 @@
# This is the policy-bot configuration for this repository. It controls
# which approvals are required for any given pull request. The format is
# described at https://github.com/palantir/policy-bot. The syntax of the
# policy can be verified by the bot:
# curl https://pb.syncthing.net/api/validate -X PUT -T .policy.yml
# The policy below is what is required for any pull request.
policy:
approval:
- subject is conventional commit
- project metadata requires maintainer approval
- or:
- is approved by a syncthing contributor
- is a translation or dependency update by a contributor
- is a trivial change by a contributor
# Additionally, contributors can disapprove of a PR
disapproval:
requires:
teams:
- syncthing/contributors
# The rules for the policy are described below.
approval_rules:
# All commits (PRs before squashing) should have a valid conventional
# commit type subject.
- name: subject is conventional commit
requires:
conditions:
title:
matches:
- '^(feat|fix|docs|chore|refactor|build): [a-z].+'
- '^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+'
# Changes to important project metadata and documentation, including this
# policy, require signoff by a maintainer
- name: project metadata requires maintainer approval
if:
changed_files:
paths:
- ^[^/]+\.md
- ^\.policy\.yml
- ^LICENSE
requires:
count: 1
teams:
- syncthing/maintainers
options:
ignore_update_merges: true
allow_contributor: true
# Regular pull requests require approval by an active contributor
- name: is approved by a syncthing contributor
requires:
count: 1
teams:
- syncthing/contributors
options:
ignore_update_merges: true
allow_contributor: true
# Changes to some files (translations, dependencies, compatibility) do not
# require approval if they were proposed by a contributor and have a
# matching commit subject
- name: is a translation or dependency update by a contributor
if:
only_changed_files:
paths:
- ^gui/default/assets/lang/
- ^go\.mod$
- ^go\.sum$
- ^compat\.yaml$
title:
matches:
- '^chore\(gui\):'
- '^build\(deps\):'
- '^build\(compat\):'
has_author_in:
teams:
- syncthing/contributors
# If the change is small and the label "trivial" is added, we accept that
# on trust. These PRs can be audited after the fact as appropriate.
# Features are not trivial.
- name: is a trivial change by a contributor
if:
modified_lines:
total: "< 25"
title:
not_matches:
- '^feat'
has_labels:
- trivial
has_author_in:
teams:
- syncthing/contributors

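To illustrate the title patterns in the "subject is conventional commit" rule above, here are some hypothetical PR titles; the first two satisfy the unscoped and scoped patterns respectively, the last two do not:

```
fix: handle stale device connections    (matches the unscoped pattern)
build(deps): bump golang.org/x/net      (matches the scoped pattern)
Fix stale device connections            (no conventional commit type, rejected)
feat: Add a new dialog                  (summary must start with a lowercase letter, rejected)
```
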
AUTHORS: 25 changed lines
@@ -20,7 +20,6 @@ Alan Pope <alan@popey.com>
Alberto Donato <albertodonato@users.noreply.github.com>
Aleksey Vasenev <margtu-fivt@ya.ru>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alex Ionescu <github@ionescu.sh>
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
Alex Xu <alex.hello71@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
@@ -48,7 +47,6 @@ Arkadiusz Tymiński <gevleeog@gmail.com>
Aroun <login@b-vo.fr>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Artur Zubilewicz <AkaZecik@users.noreply.github.com>
Ashish Bhate <bhate.ashish@gmail.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
Aurélien Rainone <476650+arl@users.noreply.github.com>
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
@@ -98,7 +96,6 @@ Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@u
Daniel Martí (mvdan) <mvdan@mvdan.cc>
Daniel Padrta <64928366+danpadcz@users.noreply.github.com>
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
dashangcun <907225865@qq.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
DeflateAwning <11021263+DeflateAwning@users.noreply.github.com>
@@ -114,7 +111,6 @@ diemade <spamkill@posteo.ch>
digital <didev@dinid.net>
Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
domain <32405309+szu17dmy@users.noreply.github.com>
Domenic Horner <domenic@tgxn.net>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
@@ -145,13 +141,10 @@ greatroar <61184462+greatroar@users.noreply.github.com>
Greg <gco@jazzhaiku.com>
guangwu <guoguangwu@magic-shield.com>
gudvinr <gudvinr@gmail.com>
Gusted <postmaster@gusted.xyz> <williamzijl7@hotmail.com>
Han Boetes <han@boetes.org>
HansK-p <42314815+HansK-p@users.noreply.github.com>
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
Hazem Krimi <me@hazemkrimi.tech>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Hireworks <129852174+hireworksltd@users.noreply.github.com>
Hugo Locurcio <hugo.locurcio@hugo.pro>
Iain Barnett <iainspeed@gmail.com>
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
@@ -195,7 +188,6 @@ Jörg Thalheim <Mic92@users.noreply.github.com>
Jędrzej Kula <kula.jedrek@gmail.com>
K.B.Dharun Krishna <kbdharunkrishna@gmail.com>
Kalle Laine <pahakalle@protonmail.com>
Kapil Sareen <kapilsareen584@gmail.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kebin Liu <lkebin@gmail.com>
Keith Harrison <keithh@protonmail.com>
@@ -217,17 +209,14 @@ Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
Lode Hoste (Zillode) <zillode@zillode.be>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
LSmithx2 <42276854+lsmithx2@users.noreply.github.com>
luchenhan <168071714+luchenhan@users.noreply.github.com>
Lukas Lihotzki <lukas@lihotzki.de>
Luke Hamburg <1992842+luckman212@users.noreply.github.com>
luzpaz <luzpaz@users.noreply.github.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcel Meyer <mm.marcelmeyer@gmail.com>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
marco-m <marco.molteni@laposte.net>
Marcus B Spencer <marcus@marcusspencer.xyz> <marcus@marcusspencer.us>
Marcus Legendre <marcus.legendre@gmail.com>
Mario Majila <mariustshipichik@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au>
@@ -235,7 +224,6 @@ Martchus <martchus@gmx.net>
Martin Polehla <p0l0us@users.noreply.github.com>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Mateusz Ż <thedead4fun@live.com>
mathias4833 <67101597+mathias4833@users.noreply.github.com>
Matic Potočnik <hairyfotr@gmail.com>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
@@ -243,7 +231,6 @@ Matteo Ruina <matteo.ruina@gmail.com>
Maurizio Tomasi <ziotom78@gmail.com>
Max <github@germancoding.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
maxice8 <30738253+maxice8@users.noreply.github.com>
MaximAL <almaximal@ya.ru>
Maxime Thirouin <m@moox.io>
Maximilian <maxi.rostock@outlook.de> <public@complexvector.space>
@@ -281,7 +268,6 @@ Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
Pablo <pbaeyens31+github@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Paul Brit <paulbrit44@gmail.com>
Paul Donald <newtwen+github@gmail.com>
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
Paweł Rozlach <vespian@users.noreply.github.com>
perewa <cavalcante.ten@gmail.com>
@@ -295,9 +281,7 @@ Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Pier Paolo Ramon <ramonpierre@gmail.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
polyfloyd <polyfloyd@users.noreply.github.com>
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
pullmerge <166967364+pullmerge@users.noreply.github.com>
Quentin Hibon <qh.public@yahoo.com>
Rahmi Pruitt <rjpruitt16@gmail.com>
red_led <red-led@users.noreply.github.com>
@@ -316,13 +300,10 @@ Scott Klupfel (kluppy) <kluppy@going2blue.com>
sec65 <106604020+sec65@users.noreply.github.com>
Sergey Mishin (ralder) <ralder@yandex.ru>
Sertonix <83883937+Sertonix@users.noreply.github.com>
Severin von Wnuck-Lipinski <ss7@live.de>
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
Simon Frei (imsodin) <freisim93@gmail.com>
Simon Mwepu <simonmwepu@gmail.com>
Simon Pickup <simon@pickupinfinity.com>
Sly_tom_cat <slytomcat@mail.ru>
Sonu Kumar Saw <31889738+dev-saw99@users.noreply.github.com>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
@@ -330,23 +311,18 @@ Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Sven Bachmann <dev@mcbachmann.de>
Syncthing Automation <automation@syncthing.net>
Syncthing Release Automation <release@syncthing.net>
Sébastien WENSKE <sebastien@wenske.fr>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Terrance <git@terrance.allofti.me>
TheCreeper <TheCreeper@users.noreply.github.com>
Thomas <9749173+uhthomas@users.noreply.github.com>
Thomas Hipp <thomashipp@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tim Nordenfur <tim@gurka.se>
Tobias Frölich <40638719+tobifroe@users.noreply.github.com>
Tobias Klauser <tobias.klauser@gmail.com>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tom Jakubowski <tom@crystae.net>
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
Tommy Thorn <tommy-github-email@thorn.ws>
Tommy van der Vorst <tommy-github@pixelspark.nl> <tommy@pixelspark.nl>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Tyler Kropp <kropptyler@gmail.com>
@@ -359,7 +335,6 @@ Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
villekalliomaki <53118179+villekalliomaki@users.noreply.github.com>
Vladimir Rusinov <vrusinov@google.com> <vladimir.rusinov@gmail.com>
wangguoliang <liangcszzu@163.com>
WangXi <xib1102@icloud.com>
Will Rouesnel <wrouesnel@wrouesnel.com>
William A. Kennington III (wkennington) <william@wkennington.com>
wouter bolsterlee <wouter@bolsterl.ee>

@@ -11,6 +11,14 @@ LABEL org.opencontainers.image.authors="The Syncthing Project" \
EXPOSE 8080
COPY strelaypoolsrv-linux-${TARGETARCH} /bin/strelaypoolsrv
RUN apk add --no-cache ca-certificates su-exec curl
ENV PUID=1000 PGID=1000 MAXMIND_KEY=
ENTRYPOINT ["/bin/strelaypoolsrv", "-listen", ":8080"]
RUN mkdir /var/strelaypoolsrv && chown 1000 /var/strelaypoolsrv
USER 1000
COPY strelaypoolsrv-linux-${TARGETARCH} /bin/strelaypoolsrv
COPY script/strelaypoolsrv-entrypoint.sh /bin/entrypoint.sh
WORKDIR /var/strelaypoolsrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/strelaypoolsrv", "-listen", ":8080"]

@@ -49,11 +49,6 @@ services:
- 22000:22000/udp # QUIC file transfers
- 21027:21027/udp # Receive local discovery broadcasts
restart: unless-stopped
healthcheck:
test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
interval: 1m
timeout: 10s
retries: 3
```

## Discovery
@@ -89,11 +84,6 @@ services:
- /wherever/st-sync:/var/syncthing
network_mode: host
restart: unless-stopped
healthcheck:
test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
interval: 1m
timeout: 10s
retries: 3
```

Be aware that syncthing alone is now in control of what interfaces and ports it

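The healthcheck entries above poll Syncthing's unauthenticated health endpoint. A quick manual check against a running container or local instance might look like this sketch (assuming the GUI/API listens on the default 127.0.0.1:8384):

```bash
# Same probe the compose healthcheck runs; prints a small JSON document
# containing "OK" when the instance is healthy.
curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health
```
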
README.md: 12 changed lines
@@ -82,11 +82,13 @@ build process.

## Signed Releases

Release binaries are GPG signed with the key available from
https://syncthing.net/security/. There is also a built-in automatic
upgrade mechanism (disabled in some distribution channels) which uses a
compiled in ECDSA signature. macOS and Windows binaries are also
code-signed.
As of v0.10.15 and onwards, release binaries are GPG signed with the key
D26E6ED000654A3E, available from https://syncthing.net/security/ and
most key servers.

There is also a built-in automatic upgrade mechanism (disabled in some
distribution channels) which uses a compiled in ECDSA signature. macOS
binaries are also properly code signed.

## Documentation

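As a usage note for the Signed Releases text above, verifying a downloaded release archive against its detached GPG signature might look like the following sketch (the archive name is a hypothetical example):

```bash
# Import the release signing key named in the README, then verify an archive
# against the .asc signature published alongside it.
gpg --recv-keys D26E6ED000654A3E
gpg --verify syncthing-source-v1.27.0.tar.gz.asc syncthing-source-v1.27.0.tar.gz
```
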
buf.gen.yaml: 12 changed lines
@@ -1,12 +0,0 @@
version: v2
managed:
enabled: true
override:
- file_option: go_package_prefix
value: github.com/syncthing/syncthing/internal/gen
plugins:
- remote: buf.build/protocolbuffers/go:v1.35.1
out: .
opt: module=github.com/syncthing/syncthing
inputs:
- directory: proto

buf.yaml: 10 changed lines
@@ -1,10 +0,0 @@
version: v2
modules:
- path: proto
name: github.com/syncthing/syncthing
lint:
use:
- STANDARD
breaking:
use:
- WIRE_JSON

build.go: 309 changed lines
@@ -4,8 +4,8 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
//go:build tools
// +build tools
//go:build ignore
// +build ignore
package main
@@ -15,6 +15,7 @@ import (
"bytes"
"compress/flate"
"compress/gzip"
"encoding/base64"
"encoding/json"
"errors"
"flag"
@@ -32,9 +33,8 @@ import (
"text/template"
"time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
buildpkg "github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/upgrade"
"sigs.k8s.io/yaml"
)
var (
@@ -85,6 +85,7 @@ var targets = map[string]target{
"all": {
// Only valid for the "build" and "install" commands as it lacks all
// the archive creation stuff. buildPkgs gets filled out in init()
tags: []string{"purego"},
},
"syncthing": {
// The default target for "build", "install", "tar", "zip", "deb", etc.
@@ -95,40 +96,41 @@ var targets = map[string]target{
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/syncthing"},
binaryName: "syncthing", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
{src: "README.md", dst: "README.txt", perm: 0o644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "README.md", dst: "README.txt", perm: 0644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
// All files from etc/ and extra/ added automatically in init().
},
systemdService: "syncthing@*.service",
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0o644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0o644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0o644},
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0o644},
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0o644},
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0o644},
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0o644},
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0o644},
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0o644},
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0o644},
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0o644},
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0o644},
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0o644},
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0o644},
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0o644},
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0o644},
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0o644},
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0o644},
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0o644},
{src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0o644},
{src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0o644},
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0644},
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0644},
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0644},
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0644},
{src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0644},
{src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0644},
{src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0644},
{src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0644},
{src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0644},
{src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0644},
},
},
"stdiscosrv": {
@@ -140,22 +142,23 @@ var targets = map[string]target{
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stdiscosrv"},
binaryName: "stdiscosrv", // .exe will be added automatically for Windows builds
archiveFiles: []archiveFile{
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0o644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0644},
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
systemdService: "stdiscosrv.service",
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0o644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0o644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0o644},
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0o644},
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0o644},
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0o644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0644},
|
||||
},
|
||||
tags: []string{"purego"},
|
||||
},
|
||||
"strelaysrv": {
|
||||
name: "strelaysrv",
|
||||
@@ -166,47 +169,61 @@ var targets = map[string]target{
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaysrv"},
|
||||
binaryName: "strelaysrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
systemdService: "strelaysrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0o644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0644},
|
||||
},
|
||||
},
|
||||
"strelaypoolsrv": {
|
||||
name: "strelaypoolsrv",
|
||||
debname: "syncthing-relaypoolsrv",
|
||||
debdeps: []string{"libc6"},
|
||||
description: "Syncthing Relay Pool Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
},
|
||||
"stupgrades": {
|
||||
name: "stupgrades",
|
||||
description: "Syncthing Upgrade Check Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stupgrades"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stupgrades"},
|
||||
binaryName: "stupgrades",
|
||||
},
|
||||
"stcrashreceiver": {
|
||||
name: "stcrashreceiver",
|
||||
description: "Syncthing Crash Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stcrashreceiver"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stcrashreceiver"},
|
||||
binaryName: "stcrashreceiver",
|
||||
},
|
||||
"ursrv": {
|
||||
name: "ursrv",
|
||||
description: "Syncthing Usage Reporting Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/ursrv"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/ursrv"},
|
||||
binaryName: "ursrv",
|
||||
},
|
||||
}
|
||||
@@ -215,11 +232,15 @@ func initTargets() {
all := targets["all"]
pkgs, _ := filepath.Glob("cmd/*")
for _, pkg := range pkgs {
if files, err := filepath.Glob(pkg + "/*.go"); err != nil || len(files) == 0 {
// No go files in the directory
pkg = filepath.Base(pkg)
if strings.HasPrefix(pkg, ".") {
// ignore dotfiles
continue
}
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/%s", pkg))
if noupgrade && pkg == "stupgrades" {
continue
}
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/cmd/%s", pkg))
}
targets["all"] = all

@@ -227,13 +248,13 @@ func initTargets() {
// and "extra" dirs.
syncthingPkg := targets["syncthing"]
for _, file := range listFiles("etc") {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0o644})
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
}
targets["syncthing"] = syncthingPkg
}
@@ -323,14 +344,12 @@ func runCommand(cmd string, target target) {

case "tar":
buildTar(target, tags)
writeCompatJSON()

case "zip":
buildZip(target, tags)
writeCompatJSON()

case "deb":
buildDeb(target, tags)
buildDeb(target)

case "vet":
metalintShort()

@@ -388,6 +407,7 @@ func parseFlags() {
func test(tags []string, pkgs ...string) {
lazyRebuildAssets()

tags = append(tags, "purego")
args := []string{"test", "-tags", strings.Join(tags, " ")}
if long {
timeout = longTimeout

@@ -421,7 +441,7 @@ func bench(tags []string, pkgs ...string) {
func integration(bench bool) {
lazyRebuildAssets()
args := []string{"test", "-v", "-timeout", "60m", "-tags"}
tags := "integration"
tags := "purego,integration"
if bench {
tags += ",benchmark"
}

@@ -609,7 +629,7 @@ func buildZip(target target, tags []string) {
fmt.Println(filename)
}

func buildDeb(target target, tags []string) {
func buildDeb(target target) {
os.RemoveAll("deb")

// "goarch" here is set to whatever the Debian packages expect. We correct

@@ -623,7 +643,7 @@ func buildDeb(target target, tags []string) {
goarch = "arm"
}

build(target, append(tags, "noupgrade"))
build(target, []string{"noupgrade"})

for i := range target.installationFiles {
target.installationFiles[i].src = strings.Replace(target.installationFiles[i].src, "{{binary}}", target.BinaryName(), 1)

@@ -645,9 +665,6 @@ func buildDeb(target target, tags []string) {
// than just 0.14.26. This rectifies that.
debver = strings.Replace(debver, "-", "~", -1)
}
if strings.Contains(debver, "_") {
debver = strings.Replace(debver, "_", "~", -1)
}
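Both sides of this hunk normalize the Debian package version in the same spirit: separators that Go-style version strings use for prerelease suffixes are rewritten to "~", which dpkg sorts before the empty string, so a release candidate upgrades cleanly to the final release. A minimal, self-contained sketch of that idea follows; the helper name is illustrative only and is not part of build.go.

package main

import (
	"fmt"
	"strings"
)

// mangleDebVersion mirrors the replacement done in buildDeb: "-" becomes "~"
// so that dpkg --compare-versions orders 1.27.0~rc.1 before 1.27.0.
func mangleDebVersion(v string) string {
	return strings.ReplaceAll(v, "-", "~")
}

func main() {
	fmt.Println(mangleDebVersion("1.27.0-rc.1")) // 1.27.0~rc.1
	fmt.Println(mangleDebVersion("1.27.0"))      // 1.27.0
}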
args := []string{
|
||||
"-t", "deb",
|
||||
"-s", "dir",
|
||||
@@ -735,7 +752,7 @@ func shouldBuildSyso(dir string) (string, error) {
|
||||
}
|
||||
|
||||
jsonPath := filepath.Join(dir, "versioninfo.json")
|
||||
err = os.WriteFile(jsonPath, bs, 0o644)
|
||||
err = os.WriteFile(jsonPath, bs, 0644)
|
||||
if err != nil {
|
||||
return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
|
||||
}
|
||||
@@ -794,7 +811,7 @@ func copyFile(src, dst string, perm os.FileMode) error {
|
||||
}
|
||||
|
||||
copy:
|
||||
os.MkdirAll(filepath.Dir(dst), 0o777)
|
||||
os.MkdirAll(filepath.Dir(dst), 0777)
|
||||
if err := os.WriteFile(dst, in, perm); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -819,12 +836,12 @@ func listFiles(dir string) []string {
|
||||
|
||||
func rebuildAssets() {
|
||||
os.Setenv("SOURCE_DATE_EPOCH", fmt.Sprint(buildStamp()))
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto")
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto")
|
||||
}
|
||||
|
||||
func lazyRebuildAssets() {
|
||||
shouldRebuild := shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") ||
|
||||
shouldRebuildAssets("cmd/infra/strelaypoolsrv/auto/gui.files.go", "cmd/infra/strelaypoolsrv/gui")
|
||||
shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui")
|
||||
|
||||
if withNextGenGUI {
|
||||
shouldRebuild = buildNextGenGUI() || shouldRebuild
|
||||
@@ -854,7 +871,7 @@ func buildNextGenGUI() bool {
|
||||
for _, src := range listFiles("next-gen-gui/dist") {
|
||||
rel, _ := filepath.Rel("next-gen-gui/dist", src)
|
||||
dst := filepath.Join("gui", rel)
|
||||
if err := copyFile(src, dst, 0o644); err != nil {
|
||||
if err := copyFile(src, dst, 0644); err != nil {
|
||||
fmt.Println("copy:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -910,9 +927,22 @@ func updateDependencies() {
|
||||
}
|
||||
|
||||
func proto() {
|
||||
// buf needs to be installed
|
||||
// https://buf.build/docs/installation/
|
||||
runPrint("buf", "generate")
|
||||
pv := protobufVersion()
|
||||
repo := "https://github.com/gogo/protobuf.git"
|
||||
path := filepath.Join("repos", "protobuf")
|
||||
|
||||
runPrint(goCmd, "install", fmt.Sprintf("github.com/gogo/protobuf/protoc-gen-gogofast@%v", pv))
|
||||
os.MkdirAll("repos", 0755)
|
||||
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
runPrint("git", "clone", repo, path)
|
||||
} else {
|
||||
runPrintInDir(path, "git", "fetch")
|
||||
}
|
||||
runPrintInDir(path, "git", "checkout", pv)
|
||||
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/cmd/stdiscosrv")
|
||||
runPrint(goCmd, "generate", "proto/generate.go")
|
||||
}
|
||||
|
||||
func testmocks() {
|
||||
@@ -1347,7 +1377,10 @@ func zipFile(out string, files []archiveFile) {
|
||||
}
|
||||
|
||||
func codesign(target target) {
|
||||
if goos == "darwin" {
|
||||
switch goos {
|
||||
case "windows":
|
||||
windowsCodesign(target.BinaryName())
|
||||
case "darwin":
|
||||
macosCodesign(target.BinaryName())
|
||||
}
|
||||
}
|
||||
@@ -1371,6 +1404,70 @@ func macosCodesign(file string) {
|
||||
}
|
||||
}
|
||||
|
||||
func windowsCodesign(file string) {
|
||||
st := "signtool.exe"
|
||||
|
||||
if path := os.Getenv("CODESIGN_SIGNTOOL"); path != "" {
|
||||
st = path
|
||||
}
|
||||
|
||||
for i, algo := range []string{"sha1", "sha256"} {
|
||||
args := []string{"sign", "/fd", algo}
|
||||
if f := os.Getenv("CODESIGN_CERTIFICATE_FILE"); f != "" {
|
||||
args = append(args, "/f", f)
|
||||
} else if b := os.Getenv("CODESIGN_CERTIFICATE_BASE64"); b != "" {
|
||||
// Decode the PFX certificate from base64.
|
||||
bs, err := base64.RawStdEncoding.DecodeString(b)
|
||||
if err != nil {
|
||||
log.Println("Codesign: signing failed: decoding base64:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write it to a temporary file
|
||||
f, err := os.CreateTemp("", "codesign-*.pfx")
|
||||
if err != nil {
|
||||
log.Println("Codesign: signing failed: creating temp file:", err)
|
||||
return
|
||||
}
|
||||
_ = f.Chmod(0600) // best effort remove other users' access
|
||||
defer os.Remove(f.Name())
|
||||
if _, err := f.Write(bs); err != nil {
|
||||
log.Println("Codesign: signing failed: writing temp file:", err)
|
||||
return
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
log.Println("Codesign: signing failed: closing temp file:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Use that when signing
|
||||
args = append(args, "/f", f.Name())
|
||||
}
|
||||
if p := os.Getenv("CODESIGN_CERTIFICATE_PASSWORD"); p != "" {
|
||||
args = append(args, "/p", p)
|
||||
}
|
||||
if tr := os.Getenv("CODESIGN_TIMESTAMP_SERVER"); tr != "" {
|
||||
switch algo {
|
||||
case "sha256":
|
||||
args = append(args, "/tr", tr, "/td", algo)
|
||||
default:
|
||||
args = append(args, "/t", tr)
|
||||
}
|
||||
}
|
||||
if i > 0 {
|
||||
args = append(args, "/as")
|
||||
}
|
||||
args = append(args, file)
|
||||
|
||||
bs, err := runError(st, args...)
|
||||
if err != nil {
|
||||
log.Printf("Codesign: signing failed: %v: %s", err, string(bs))
|
||||
return
|
||||
}
|
||||
log.Println("Codesign: successfully signed", file, "using", algo)
|
||||
}
|
||||
}
|
||||
|
||||
func metalint() {
|
||||
lazyRebuildAssets()
|
||||
runPrint(goCmd, "test", "-run", "Metalint", "./meta")
|
||||
@@ -1388,6 +1485,14 @@ func (t target) BinaryName() string {
|
||||
return t.binaryName
|
||||
}
|
||||
|
||||
func protobufVersion() string {
|
||||
bs, err := runError(goCmd, "list", "-f", "{{.Version}}", "-m", "github.com/gogo/protobuf")
|
||||
if err != nil {
|
||||
log.Fatal("Getting protobuf version:", err)
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func currentAndLatestVersions(n int) ([]string, error) {
|
||||
bs, err := runError("git", "tag", "--sort", "taggerdate")
|
||||
if err != nil {
|
||||
@@ -1454,29 +1559,3 @@ func nextPatchVersion(ver string) string {
|
||||
digits[len(digits)-1] = strconv.Itoa(n + 1)
|
||||
return strings.Join(digits, ".")
|
||||
}
|
||||
|
||||
func writeCompatJSON() {
|
||||
bs, err := os.ReadFile("compat.yaml")
|
||||
if err != nil {
|
||||
log.Fatal("Reading compat.yaml:", err)
|
||||
}
|
||||
|
||||
var entries []upgrade.ReleaseCompatibility
|
||||
if err := yaml.Unmarshal(bs, &entries); err != nil {
|
||||
log.Fatal("Parsing compat.yaml:", err)
|
||||
}
|
||||
|
||||
rt := runtime.Version()
|
||||
for _, e := range entries {
|
||||
if !strings.HasPrefix(rt, e.Runtime) {
|
||||
continue
|
||||
}
|
||||
bs, _ := json.MarshalIndent(e, "", " ")
|
||||
if err := os.WriteFile("compat.json", bs, 0o644); err != nil {
|
||||
log.Fatal("Writing compat.json:", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
log.Fatalf("runtime %v not found in compat.yaml", rt)
|
||||
}
|
||||
|
||||
3
build.sh
@@ -23,11 +23,10 @@ case "${1:-default}" in

prerelease)
script authors
script copyrights
build weblate
pushd man ; ./refresh.sh ; popd
git add -A gui man AUTHORS
git commit -m 'chore(gui, man, authors): update docs, translations, and contributors'
git commit -m 'gui, man, authors: Update docs, translations, and contributors'
;;

*)
@@ -1,374 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
type cli struct {
|
||||
Listen string `default:":8080" help:"Listen address"`
|
||||
MetricsListen string `default:":8082" help:"Listen address for metrics"`
|
||||
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
|
||||
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
|
||||
CacheTime time.Duration `default:"15m" help:"Cache time"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
|
||||
Level: slog.LevelInfo,
|
||||
})))
|
||||
|
||||
if err := server(¶ms); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func server(params *cli) error {
|
||||
if params.MetricsListen != "" {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
metricsListen, err := net.Listen("tcp", params.MetricsListen)
|
||||
if err != nil {
|
||||
return fmt.Errorf("metrics: %w", err)
|
||||
}
|
||||
slog.Info("Metrics listener started", "addr", params.MetricsListen)
|
||||
go func() {
|
||||
if err := http.Serve(metricsListen, mux); err != nil {
|
||||
slog.Warn("Metrics server returned", "error", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
cache := &cachedReleases{url: params.URL}
|
||||
if err := cache.Update(context.Background()); err != nil {
|
||||
return fmt.Errorf("initial cache update: %w", err)
|
||||
} else {
|
||||
slog.Info("Initial cache update done")
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range time.NewTicker(params.CacheTime).C {
|
||||
slog.Info("Refreshing cached releases", "url", params.URL)
|
||||
if err := cache.Update(context.Background()); err != nil {
|
||||
slog.Error("Failed to refresh cached releases", "url", params.URL, "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ghRels := &githubReleases{cache: cache}
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/ping", ghRels.servePing)
|
||||
mux.HandleFunc("/meta.json", ghRels.serveReleases)
|
||||
|
||||
for _, fwd := range params.Forward {
|
||||
path, url, ok := strings.Cut(fwd, "->")
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid forward: %q", fwd)
|
||||
}
|
||||
slog.Info("Forwarding", "from", path, "to", url)
|
||||
name := strings.ReplaceAll(path, "/", "_")
|
||||
mux.Handle(path, httpcache.SinglePath(&proxy{name: name, url: url}, params.CacheTime))
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: params.Listen,
|
||||
Handler: mux,
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
}
|
||||
srv.SetKeepAlivesEnabled(false)
|
||||
|
||||
srvListener, err := net.Listen("tcp", params.Listen)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listen: %w", err)
|
||||
}
|
||||
slog.Info("Main listener started", "addr", params.Listen)
|
||||
|
||||
return srv.Serve(srvListener)
|
||||
}
|
||||
|
||||
type githubReleases struct {
|
||||
cache *cachedReleases
|
||||
}
|
||||
|
||||
func (p *githubReleases) servePing(w http.ResponseWriter, req *http.Request) {
|
||||
rels := p.cache.Releases()
|
||||
|
||||
if len(rels) == 0 {
|
||||
http.Error(w, "No releases available", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Syncthing-Num-Releases", strconv.Itoa(len(rels)))
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (p *githubReleases) serveReleases(w http.ResponseWriter, req *http.Request) {
|
||||
rels := p.cache.Releases()
|
||||
|
||||
ua := req.Header.Get("User-Agent")
|
||||
osv := req.Header.Get("Syncthing-Os-Version")
|
||||
if ua != "" && osv != "" {
|
||||
// We should determine the compatibility of the releases.
|
||||
rels = filterForCompabitility(rels, ua, osv)
|
||||
} else {
|
||||
metricFilterCalls.WithLabelValues("no-ua-or-osversion").Inc()
|
||||
}
|
||||
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Vary", "User-Agent, Syncthing-Os-Version")
|
||||
_ = json.NewEncoder(w).Encode(rels)
|
||||
|
||||
metricUpgradeChecks.Inc()
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
name string
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
metricHTTPRequests.WithLabelValues(p.name, "success").Inc()
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
w.Header().Set("Content-Type", ct)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
}
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
if strings.HasPrefix(ct, "application/json") {
|
||||
// Special JSON handling; clean it up a bit.
|
||||
var v interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
} else {
|
||||
_, _ = io.Copy(w, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// filterForLatest returns the latest stable and prerelease only. If the
// stable version is newer (comes first in the list) there is no need to go
// looking for a prerelease at all.
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
var filtered []upgrade.Release
havePre := make(map[string]bool)
haveStable := make(map[string]bool)
for _, rel := range rels {
major, _, _ := strings.Cut(rel.Tag, ".")
if !rel.Prerelease && !haveStable[major] {
// Remember the first non-pre for each major
filtered = append(filtered, rel)
haveStable[major] = true
continue
}
if rel.Prerelease && !havePre[major] && !haveStable[major] {
// We remember the first prerelease we find, unless we've
// already found a non-pre of the same major.
filtered = append(filtered, rel)
havePre[major] = true
}
}
return filtered
}
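To make the bookkeeping above concrete, here is a small, self-contained sketch of the same idea: keep the first stable release per major series and, only if no stable of that major came first, the first prerelease. The trimmed-down release type and the sample versions are illustrative stand-ins for upgrade.Release, not values from the server.

package main

import (
	"fmt"
	"strings"
)

// rel is a stand-in for upgrade.Release with only the fields the filter needs.
type rel struct {
	Tag        string
	Prerelease bool
}

// latestPerMajor mirrors filterForLatest: input is ordered newest first.
func latestPerMajor(rels []rel) []rel {
	var out []rel
	havePre := map[string]bool{}
	haveStable := map[string]bool{}
	for _, r := range rels {
		major, _, _ := strings.Cut(r.Tag, ".")
		switch {
		case !r.Prerelease && !haveStable[major]:
			out = append(out, r)
			haveStable[major] = true
		case r.Prerelease && !havePre[major] && !haveStable[major]:
			out = append(out, r)
			havePre[major] = true
		}
	}
	return out
}

func main() {
	in := []rel{
		{"v1.28.0-rc.2", true},
		{"v1.27.1", false},
		{"v1.27.0", false},
		{"v1.27.0-rc.1", true},
	}
	fmt.Println(latestPerMajor(in)) // [{v1.28.0-rc.2 true} {v1.27.1 false}]
}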
|
||||
var userAgentOSArchExp = regexp.MustCompile(`^syncthing.*\(.+ (\w+)-(\w+)\)$`)
|
||||
|
||||
func filterForCompabitility(rels []upgrade.Release, ua, osv string) []upgrade.Release {
|
||||
osArch := userAgentOSArchExp.FindStringSubmatch(ua)
|
||||
if len(osArch) != 3 {
|
||||
metricFilterCalls.WithLabelValues("bad-os-arch").Inc()
|
||||
return rels
|
||||
}
|
||||
os := osArch[1]
|
||||
|
||||
var filtered []upgrade.Release
|
||||
for _, rel := range rels {
|
||||
if rel.Compatibility == nil {
|
||||
// No requirements means it's compatible with everything.
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
|
||||
req, ok := rel.Compatibility.Requirements[os]
|
||||
if !ok {
|
||||
// No entry for the current OS means it's compatible.
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
|
||||
if upgrade.CompareVersions(osv, req) >= 0 {
|
||||
filtered = append(filtered, rel)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(filtered) != len(rels) {
|
||||
metricFilterCalls.WithLabelValues("filtered").Inc()
|
||||
} else {
|
||||
metricFilterCalls.WithLabelValues("unchanged").Inc()
|
||||
}
|
||||
|
||||
return filtered
|
||||
}
|
||||
|
||||
type cachedReleases struct {
|
||||
url string
|
||||
mut sync.RWMutex
|
||||
current []upgrade.Release
|
||||
latestRel, latestPre string
|
||||
}
|
||||
|
||||
func (c *cachedReleases) Releases() []upgrade.Release {
|
||||
c.mut.RLock()
|
||||
defer c.mut.RUnlock()
|
||||
return c.current
|
||||
}
|
||||
|
||||
func (c *cachedReleases) Update(ctx context.Context) error {
|
||||
rels, err := fetchGithubReleases(ctx, c.url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
latestRel, latestPre := "", ""
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease && latestRel == "" {
|
||||
latestRel = rel.Tag
|
||||
}
|
||||
if rel.Prerelease && latestPre == "" {
|
||||
latestPre = rel.Tag
|
||||
}
|
||||
if latestRel != "" && latestPre != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mut.Lock()
|
||||
c.current = rels
|
||||
if latestRel != c.latestRel || latestPre != c.latestPre {
|
||||
metricLatestReleaseInfo.DeleteLabelValues(c.latestRel, c.latestPre)
|
||||
metricLatestReleaseInfo.WithLabelValues(latestRel, latestPre).Set(1)
|
||||
c.latestRel = latestRel
|
||||
c.latestPre = latestPre
|
||||
}
|
||||
c.mut.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func fetchGithubReleases(ctx context.Context, url string) ([]upgrade.Release, error) {
|
||||
req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
var rels []upgrade.Release
|
||||
if err := json.NewDecoder(resp.Body).Decode(&rels); err != nil {
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
|
||||
return nil, err
|
||||
}
|
||||
metricHTTPRequests.WithLabelValues("github-releases", "success").Inc()
|
||||
|
||||
// Move the URL used for browser downloads to the URL field, and remove
|
||||
// the browser URL field. This avoids going via the GitHub API for
|
||||
// downloads, since Syncthing uses the URL field.
|
||||
for _, rel := range rels {
|
||||
for j, asset := range rel.Assets {
|
||||
rel.Assets[j].URL = asset.BrowserURL
|
||||
rel.Assets[j].BrowserURL = ""
|
||||
}
|
||||
}
|
||||
|
||||
addReleaseCompatibility(ctx, rels)
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
return rels, nil
|
||||
}
|
||||
|
||||
func addReleaseCompatibility(ctx context.Context, rels []upgrade.Release) {
|
||||
for i := range rels {
|
||||
rel := &rels[i]
|
||||
for i, asset := range rel.Assets {
|
||||
if asset.Name != "compat.json" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Load compat.json into the Compatibility field
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, asset.URL, nil)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
break
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
break
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
|
||||
resp.Body.Close()
|
||||
break
|
||||
}
|
||||
_ = json.NewDecoder(io.LimitReader(resp.Body, 10<<10)).Decode(&rel.Compatibility)
|
||||
metricHTTPRequests.WithLabelValues("compat-json", "success").Inc()
|
||||
resp.Body.Close()
|
||||
|
||||
// Remove compat.json from the asset list since it's been processed
|
||||
rel.Assets = append(rel.Assets[:i], rel.Assets[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
metricUpgradeChecks = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "metadata_requests",
|
||||
})
|
||||
metricFilterCalls = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "filter_calls",
|
||||
}, []string{"result"})
|
||||
metricHTTPRequests = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "http_requests",
|
||||
}, []string{"target", "result"})
|
||||
metricLatestReleaseInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "upgrade",
|
||||
Name: "latest_release_info",
|
||||
Help: "Release information",
|
||||
}, []string{"latest_release", "latest_pre"})
|
||||
)
|
||||
@@ -1,46 +0,0 @@
|
||||
// Copyright (C) 2023 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "incoming_reports_total",
|
||||
}, []string{"result"})
|
||||
metricsCollectsTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collects_total",
|
||||
})
|
||||
metricsCollectSecondsTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collect_seconds_total",
|
||||
})
|
||||
metricsCollectSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "collect_seconds_last",
|
||||
})
|
||||
metricsWriteSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv_v2",
|
||||
Name: "write_seconds_last",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
metricReportsTotal.WithLabelValues("fail")
|
||||
metricReportsTotal.WithLabelValues("replace")
|
||||
metricReportsTotal.WithLabelValues("accept")
|
||||
}
|
||||
@@ -1,314 +0,0 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/syncthing/syncthing/lib/ur/contract"
|
||||
)
|
||||
|
||||
const namePrefix = "syncthing_usage_"
|
||||
|
||||
type metricsSet struct {
|
||||
srv *server
|
||||
|
||||
gauges map[string]prometheus.Gauge
|
||||
gaugeVecs map[string]*prometheus.GaugeVec
|
||||
gaugeVecLabels map[string][]string
|
||||
summaries map[string]*metricSummary
|
||||
|
||||
collectMut sync.Mutex
|
||||
collectCutoff time.Duration
|
||||
}
|
||||
|
||||
func newMetricsSet(srv *server) *metricsSet {
|
||||
s := &metricsSet{
|
||||
srv: srv,
|
||||
gauges: make(map[string]prometheus.Gauge),
|
||||
gaugeVecs: make(map[string]*prometheus.GaugeVec),
|
||||
gaugeVecLabels: make(map[string][]string),
|
||||
summaries: make(map[string]*metricSummary),
|
||||
collectCutoff: -24 * time.Hour,
|
||||
}
|
||||
|
||||
var initForType func(reflect.Type)
|
||||
initForType = func(t reflect.Type) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
field := t.Field(i)
|
||||
if field.Type.Kind() == reflect.Struct {
|
||||
initForType(field.Type)
|
||||
continue
|
||||
}
|
||||
name, typ, label := fieldNameTypeLabel(field)
|
||||
sname, labels := nameConstLabels(name)
|
||||
switch typ {
|
||||
case "gauge":
|
||||
s.gauges[name] = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: namePrefix + sname,
|
||||
ConstLabels: labels,
|
||||
})
|
||||
case "summary":
|
||||
s.summaries[name] = newMetricSummary(namePrefix+sname, nil, labels)
|
||||
case "gaugeVec":
|
||||
s.gaugeVecLabels[name] = append(s.gaugeVecLabels[name], label)
|
||||
case "summaryVec":
|
||||
s.summaries[name] = newMetricSummary(namePrefix+sname, []string{label}, labels)
|
||||
}
|
||||
}
|
||||
}
|
||||
initForType(reflect.ValueOf(contract.Report{}).Type())
|
||||
|
||||
for name, labels := range s.gaugeVecLabels {
|
||||
s.gaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: namePrefix + name,
|
||||
}, labels)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func fieldNameTypeLabel(rf reflect.StructField) (string, string, string) {
metric := rf.Tag.Get("metric")
name, typ, ok := strings.Cut(metric, ",")
if !ok {
return "", "", ""
}
gv, label, ok := strings.Cut(typ, ":")
if ok {
typ = gv
}
return name, typ, label
}

func nameConstLabels(name string) (string, prometheus.Labels) {
if name == "-" {
return "", nil
}
name, labels, ok := strings.Cut(name, "{")
if !ok {
return name, nil
}
lls := strings.Split(labels[:len(labels)-1], ",")
m := make(map[string]string)
for _, l := range lls {
k, v, _ := strings.Cut(l, "=")
m[k] = v
}
return name, m
}
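The two helpers above define the mini-grammar of the `metric` struct tag: `name{constKey=constValue,...},type[:labelName]`. A short, self-contained sketch of reading such tags via reflection follows; the example struct and its tag values are made up for illustration and are not fields of contract.Report.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// exampleReport is a hypothetical struct using the same tag grammar that
// fieldNameTypeLabel and nameConstLabels parse.
type exampleReport struct {
	NumFolders int    `metric:"num_folders,summary"`
	OS         string `metric:"reports_total{source=usage},gaugeVec:os"`
}

func main() {
	t := reflect.TypeOf(exampleReport{})
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("metric")
		name, typ, _ := strings.Cut(tag, ",")     // split metric name from type
		typ, label, _ := strings.Cut(typ, ":")    // optional label for vec types
		base, consts, _ := strings.Cut(name, "{") // optional const labels
		consts = strings.TrimSuffix(consts, "}")
		fmt.Println(base, typ, label, consts)
	}
}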
|
||||
func (s *metricsSet) addReport(r *contract.Report) {
|
||||
gaugeVecs := make(map[string][]string)
|
||||
s.addReportStruct(reflect.ValueOf(r).Elem(), gaugeVecs)
|
||||
for name, lv := range gaugeVecs {
|
||||
s.gaugeVecs[name].WithLabelValues(lv...).Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) addReportStruct(v reflect.Value, gaugeVecs map[string][]string) {
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if field.Kind() == reflect.Struct {
|
||||
s.addReportStruct(field, gaugeVecs)
|
||||
continue
|
||||
}
|
||||
|
||||
name, typ, label := fieldNameTypeLabel(t.Field(i))
|
||||
switch typ {
|
||||
case "gauge":
|
||||
switch v := field.Interface().(type) {
|
||||
case int:
|
||||
s.gauges[name].Add(float64(v))
|
||||
case string:
|
||||
s.gaugeVecs[name].WithLabelValues(v).Add(1)
|
||||
case bool:
|
||||
if v {
|
||||
s.gauges[name].Add(1)
|
||||
}
|
||||
}
|
||||
case "gaugeVec":
|
||||
var labelValue string
|
||||
switch v := field.Interface().(type) {
|
||||
case string:
|
||||
labelValue = v
|
||||
case int:
|
||||
labelValue = strconv.Itoa(v)
|
||||
case map[string]int:
|
||||
for k, v := range v {
|
||||
labelValue = k
|
||||
field.SetInt(int64(v))
|
||||
break
|
||||
}
|
||||
}
|
||||
if _, ok := gaugeVecs[name]; !ok {
|
||||
gaugeVecs[name] = make([]string, len(s.gaugeVecLabels[name]))
|
||||
}
|
||||
for i, l := range s.gaugeVecLabels[name] {
|
||||
if l == label {
|
||||
gaugeVecs[name][i] = labelValue
|
||||
break
|
||||
}
|
||||
}
|
||||
case "summary", "summaryVec":
|
||||
switch v := field.Interface().(type) {
|
||||
case int:
|
||||
s.summaries[name].Observe("", float64(v))
|
||||
case float64:
|
||||
s.summaries[name].Observe("", v)
|
||||
case []int:
|
||||
for _, v := range v {
|
||||
s.summaries[name].Observe("", float64(v))
|
||||
}
|
||||
case map[string]int:
|
||||
for k, v := range v {
|
||||
if k == "" {
|
||||
// avoid empty string labels as those are the sign
|
||||
// of a non-vec summary
|
||||
k = "unknown"
|
||||
}
|
||||
s.summaries[name].Observe(k, float64(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) Describe(c chan<- *prometheus.Desc) {
|
||||
for _, g := range s.gauges {
|
||||
g.Describe(c)
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Describe(c)
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Describe(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *metricsSet) Collect(c chan<- prometheus.Metric) {
|
||||
s.collectMut.Lock()
|
||||
defer s.collectMut.Unlock()
|
||||
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
dur := time.Since(t0).Seconds()
|
||||
metricsCollectSecondsLast.Set(dur)
|
||||
metricsCollectSecondsTotal.Add(dur)
|
||||
metricsCollectsTotal.Inc()
|
||||
}()
|
||||
|
||||
for _, g := range s.gauges {
|
||||
g.Set(0)
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Reset()
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Reset()
|
||||
}
|
||||
|
||||
cutoff := time.Now().Add(s.collectCutoff)
|
||||
s.srv.reports.Range(func(key string, r *contract.Report) bool {
|
||||
if s.collectCutoff < 0 && r.Received.Before(cutoff) {
|
||||
s.srv.reports.Delete(key)
|
||||
return true
|
||||
}
|
||||
s.addReport(r)
|
||||
return true
|
||||
})
|
||||
|
||||
for _, g := range s.gauges {
|
||||
c <- g
|
||||
}
|
||||
for _, g := range s.gaugeVecs {
|
||||
g.Collect(c)
|
||||
}
|
||||
for _, g := range s.summaries {
|
||||
g.Collect(c)
|
||||
}
|
||||
}
|
||||
|
||||
type metricSummary struct {
|
||||
name string
|
||||
values map[string][]float64
|
||||
zeroes map[string]int
|
||||
|
||||
qDesc *prometheus.Desc
|
||||
countDesc *prometheus.Desc
|
||||
sumDesc *prometheus.Desc
|
||||
zDesc *prometheus.Desc
|
||||
}
|
||||
|
||||
func newMetricSummary(name string, labels []string, constLabels prometheus.Labels) *metricSummary {
|
||||
return &metricSummary{
|
||||
name: name,
|
||||
values: make(map[string][]float64),
|
||||
zeroes: make(map[string]int),
|
||||
qDesc: prometheus.NewDesc(name, "", append(labels, "quantile"), constLabels),
|
||||
countDesc: prometheus.NewDesc(name+"_nonzero_count", "", labels, constLabels),
|
||||
sumDesc: prometheus.NewDesc(name+"_sum", "", labels, constLabels),
|
||||
zDesc: prometheus.NewDesc(name+"_zero_count", "", labels, constLabels),
|
||||
}
|
||||
}
|
||||
|
||||
func (q *metricSummary) Observe(labelValue string, v float64) {
|
||||
if v == 0 {
|
||||
q.zeroes[labelValue]++
|
||||
return
|
||||
}
|
||||
q.values[labelValue] = append(q.values[labelValue], v)
|
||||
}
|
||||
|
||||
func (q *metricSummary) Describe(c chan<- *prometheus.Desc) {
|
||||
c <- q.qDesc
|
||||
c <- q.countDesc
|
||||
c <- q.sumDesc
|
||||
c <- q.zDesc
|
||||
}
|
||||
|
||||
func (q *metricSummary) Collect(c chan<- prometheus.Metric) {
|
||||
for lv, vs := range q.values {
|
||||
var labelVals []string
|
||||
if lv != "" {
|
||||
labelVals = []string{lv}
|
||||
}
|
||||
|
||||
c <- prometheus.MustNewConstMetric(q.countDesc, prometheus.GaugeValue, float64(len(vs)), labelVals...)
|
||||
c <- prometheus.MustNewConstMetric(q.zDesc, prometheus.GaugeValue, float64(q.zeroes[lv]), labelVals...)
|
||||
|
||||
var sum float64
|
||||
for _, v := range vs {
|
||||
sum += v
|
||||
}
|
||||
c <- prometheus.MustNewConstMetric(q.sumDesc, prometheus.GaugeValue, sum, labelVals...)
|
||||
|
||||
if len(vs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
slices.Sort(vs)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[0], append(labelVals, "0")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*5/100], append(labelVals, "0.05")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)/2], append(labelVals, "0.5")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*9/10], append(labelVals, "0.9")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*95/100], append(labelVals, "0.95")...)
|
||||
c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)-1], append(labelVals, "1")...)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *metricSummary) Reset() {
|
||||
clear(q.values)
|
||||
clear(q.zeroes)
|
||||
}
|
||||
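The summary type above computes quantiles the simple way: on every scrape it sorts the raw observations and indexes into the sorted slice at fixed fractions, rather than keeping a streaming estimator. A minimal standalone sketch of that quantile-by-indexing step (not the metricSummary API itself):

package main

import (
	"fmt"
	"slices"
)

// quantiles sorts a copy of vs and picks values at the same fixed fractions
// the collector emits (0, 0.05, 0.5, 0.9, 0.95, 1).
func quantiles(vs []float64) map[string]float64 {
	if len(vs) == 0 {
		return nil
	}
	vs = slices.Clone(vs)
	slices.Sort(vs)
	return map[string]float64{
		"0":    vs[0],
		"0.05": vs[len(vs)*5/100],
		"0.5":  vs[len(vs)/2],
		"0.9":  vs[len(vs)*9/10],
		"0.95": vs[len(vs)*95/100],
		"1":    vs[len(vs)-1],
	}
}

func main() {
	fmt.Println(quantiles([]float64{5, 1, 9, 3, 7}))
}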
@@ -1,422 +0,0 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/syncthing/syncthing/internal/blob"
|
||||
"github.com/syncthing/syncthing/internal/blob/azureblob"
|
||||
"github.com/syncthing/syncthing/internal/blob/s3"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/geoip"
|
||||
"github.com/syncthing/syncthing/lib/ur/contract"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
Listen string `env:"UR_LISTEN" help:"Usage reporting & metrics endpoint listen address" default:"0.0.0.0:8080"`
|
||||
ListenInternal string `env:"UR_LISTEN_INTERNAL" help:"Internal metrics endpoint listen address" default:"0.0.0.0:8082"`
|
||||
GeoIPLicenseKey string `env:"UR_GEOIP_LICENSE_KEY"`
|
||||
GeoIPAccountID int `env:"UR_GEOIP_ACCOUNT_ID"`
|
||||
DumpFile string `env:"UR_DUMP_FILE" default:"reports.jsons.gz"`
|
||||
DumpInterval time.Duration `env:"UR_DUMP_INTERVAL" default:"5m"`
|
||||
|
||||
S3Endpoint string `name:"s3-endpoint" env:"UR_S3_ENDPOINT"`
|
||||
S3Region string `name:"s3-region" env:"UR_S3_REGION"`
|
||||
S3Bucket string `name:"s3-bucket" env:"UR_S3_BUCKET"`
|
||||
S3AccessKeyID string `name:"s3-access-key-id" env:"UR_S3_ACCESS_KEY_ID"`
|
||||
S3SecretKey string `name:"s3-secret-key" env:"UR_S3_SECRET_KEY"`
|
||||
|
||||
AzureBlobAccount string `name:"azure-blob-account" env:"UR_AZUREBLOB_ACCOUNT"`
|
||||
AzureBlobKey string `name:"azure-blob-key" env:"UR_AZUREBLOB_KEY"`
|
||||
AzureBlobContainer string `name:"azure-blob-container" env:"UR_AZUREBLOB_CONTAINER"`
|
||||
}
|
||||
|
||||
var (
|
||||
compilerRe = regexp.MustCompile(`\(([A-Za-z0-9()., -]+) \w+-\w+(?:| android| default)\) ([\w@.-]+)`)
|
||||
knownDistributions = []distributionMatch{
|
||||
// Maps well known builders to the official distribution method that
|
||||
// they represent
|
||||
|
||||
{regexp.MustCompile(`\steamcity@build\.syncthing\.net`), "GitHub"},
|
||||
{regexp.MustCompile(`\sjenkins@build\.syncthing\.net`), "GitHub"},
|
||||
{regexp.MustCompile(`\sbuilder@github\.syncthing\.net`), "GitHub"},
|
||||
|
||||
{regexp.MustCompile(`\sdeb@build\.syncthing\.net`), "APT"},
|
||||
{regexp.MustCompile(`\sdebian@github\.syncthing\.net`), "APT"},
|
||||
|
||||
{regexp.MustCompile(`\sdocker@syncthing\.net`), "Docker Hub"},
|
||||
{regexp.MustCompile(`\sdocker@build.syncthing\.net`), "Docker Hub"},
|
||||
{regexp.MustCompile(`\sdocker@github.syncthing\.net`), "Docker Hub"},
|
||||
|
||||
{regexp.MustCompile(`\sandroid-builder@github\.syncthing\.net`), "Google Play"},
|
||||
{regexp.MustCompile(`\sandroid-.*teamcity@build\.syncthing\.net`), "Google Play"},
|
||||
|
||||
{regexp.MustCompile(`\sandroid-.*vagrant@basebox-stretch64`), "F-Droid"},
|
||||
{regexp.MustCompile(`\svagrant@bullseye`), "F-Droid"},
|
||||
{regexp.MustCompile(`\svagrant@bookworm`), "F-Droid"},
|
||||
|
||||
{regexp.MustCompile(`Anwender@NET2017`), "Syncthing-Fork (3rd party)"},
|
||||
|
||||
{regexp.MustCompile(`\sbuilduser@(archlinux|svetlemodry)`), "Arch (3rd party)"},
|
||||
{regexp.MustCompile(`\ssyncthing@archlinux`), "Arch (3rd party)"},
|
||||
{regexp.MustCompile(`@debian`), "Debian (3rd party)"},
|
||||
{regexp.MustCompile(`@fedora`), "Fedora (3rd party)"},
|
||||
{regexp.MustCompile(`@openSUSE`), "openSUSE (3rd party)"},
|
||||
{regexp.MustCompile(`\sbrew@`), "Homebrew (3rd party)"},
|
||||
{regexp.MustCompile(`\sroot@buildkitsandbox`), "LinuxServer.io (3rd party)"},
|
||||
{regexp.MustCompile(`\sports@freebsd`), "FreeBSD (3rd party)"},
|
||||
{regexp.MustCompile(`\snix@nix`), "Nix (3rd party)"},
|
||||
{regexp.MustCompile(`.`), "Others"},
|
||||
}
|
||||
)
|
||||
|
||||
type distributionMatch struct {
|
||||
matcher *regexp.Regexp
|
||||
distribution string
|
||||
}
|
||||
|
||||
func (cli *CLI) Run() error {
|
||||
slog.Info("Starting", "version", build.Version)
|
||||
|
||||
// Listening
|
||||
|
||||
urListener, err := net.Listen("tcp", cli.Listen)
|
||||
if err != nil {
|
||||
slog.Error("Failed to listen (usage reports)", "error", err)
|
||||
return err
|
||||
}
|
||||
slog.Info("Listening (usage reports)", "address", urListener.Addr())
|
||||
|
||||
internalListener, err := net.Listen("tcp", cli.ListenInternal)
|
||||
if err != nil {
|
||||
slog.Error("Failed to listen (internal)", "error", err)
|
||||
return err
|
||||
}
|
||||
slog.Info("Listening (internal)", "address", internalListener.Addr())
|
||||
|
||||
var geo *geoip.Provider
|
||||
if cli.GeoIPAccountID != 0 && cli.GeoIPLicenseKey != "" {
|
||||
geo, err = geoip.NewGeoLite2CityProvider(context.Background(), cli.GeoIPAccountID, cli.GeoIPLicenseKey, os.TempDir())
|
||||
if err != nil {
|
||||
slog.Error("Failed to load GeoIP", "error", err)
|
||||
return err
|
||||
}
|
||||
go geo.Serve(context.TODO())
|
||||
}
|
||||
|
||||
// Blob storage
|
||||
|
||||
var blobs blob.Store
|
||||
if cli.S3Endpoint != "" {
|
||||
blobs, err = s3.NewSession(cli.S3Endpoint, cli.S3Region, cli.S3Bucket, cli.S3AccessKeyID, cli.S3SecretKey)
|
||||
if err != nil {
|
||||
slog.Error("Failed to create S3 session", "error", err)
|
||||
return err
|
||||
}
|
||||
} else if cli.AzureBlobAccount != "" {
|
||||
blobs, err = azureblob.NewBlobStore(cli.AzureBlobAccount, cli.AzureBlobKey, cli.AzureBlobContainer)
|
||||
if err != nil {
|
||||
slog.Error("Failed to create Azure blob store", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(cli.DumpFile); err != nil && blobs != nil {
|
||||
if err := cli.downloadDumpFile(blobs); err != nil {
|
||||
slog.Error("Failed to download dump file", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// server
|
||||
|
||||
srv := &server{
|
||||
geo: geo,
|
||||
reports: xsync.NewMapOf[string, *contract.Report](),
|
||||
}
|
||||
|
||||
if fd, err := os.Open(cli.DumpFile); err == nil {
|
||||
gr, err := gzip.NewReader(fd)
|
||||
if err == nil {
|
||||
srv.load(gr)
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range time.Tick(cli.DumpInterval) {
|
||||
if err := cli.saveDumpFile(srv, blobs); err != nil {
|
||||
slog.Error("Failed to write dump file", "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// The internal metrics endpoint just serves metrics about what the
|
||||
// server is doing.
|
||||
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
internalSrv := http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
}
|
||||
go internalSrv.Serve(internalListener)
|
||||
|
||||
// New external metrics endpoint accepts reports from clients and serves
|
||||
// aggregated usage reporting metrics.
|
||||
|
||||
ms := newMetricsSet(srv)
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(ms)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
|
||||
mux.HandleFunc("/newdata", srv.handleNewData)
|
||||
mux.HandleFunc("/ping", srv.handlePing)
|
||||
|
||||
metricsSrv := http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 15 * time.Second,
|
||||
Handler: mux,
|
||||
}
|
||||
|
||||
slog.Info("Ready to serve")
|
||||
return metricsSrv.Serve(urListener)
|
||||
}
|
||||
|
||||
func (cli *CLI) downloadDumpFile(blobs blob.Store) error {
|
||||
latestKey, err := blobs.LatestKey(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("list latest S3 key: %w", err)
|
||||
}
|
||||
fd, err := os.Create(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create dump file: %w", err)
|
||||
}
|
||||
if err := blobs.Download(context.Background(), latestKey, fd); err != nil {
|
||||
_ = fd.Close()
|
||||
return fmt.Errorf("download dump file: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("close dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file downloaded", "key", latestKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *CLI) saveDumpFile(srv *server, blobs blob.Store) error {
|
||||
fd, err := os.Create(cli.DumpFile + ".tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating dump file: %w", err)
|
||||
}
|
||||
gw := gzip.NewWriter(fd)
|
||||
if err := srv.save(gw); err != nil {
|
||||
return fmt.Errorf("saving dump file: %w", err)
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
fd.Close()
|
||||
return fmt.Errorf("closing gzip writer: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("closing dump file: %w", err)
|
||||
}
|
||||
if err := os.Rename(cli.DumpFile+".tmp", cli.DumpFile); err != nil {
|
||||
return fmt.Errorf("renaming dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file saved")
|
||||
|
||||
if blobs != nil {
|
||||
key := fmt.Sprintf("reports-%s.jsons.gz", time.Now().UTC().Format("2006-01-02"))
|
||||
fd, err := os.Open(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening dump file: %w", err)
|
||||
}
|
||||
if err := blobs.Upload(context.Background(), key, fd); err != nil {
|
||||
return fmt.Errorf("uploading dump file: %w", err)
|
||||
}
|
||||
_ = fd.Close()
|
||||
slog.Info("Dump file uploaded")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type server struct {
|
||||
geo *geoip.Provider
|
||||
reports *xsync.MapOf[string, *contract.Report]
|
||||
}
|
||||
|
||||
func (s *server) handlePing(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (s *server) handleNewData(w http.ResponseWriter, r *http.Request) {
|
||||
result := "fail"
|
||||
defer func() {
|
||||
// result is "accept" (new report), "replace" (existing report) or
|
||||
// "fail"
|
||||
metricReportsTotal.WithLabelValues(result).Inc()
|
||||
}()
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
addr := r.Header.Get("X-Forwarded-For")
|
||||
if addr != "" {
|
||||
addr = strings.Split(addr, ", ")[0]
|
||||
} else {
|
||||
addr = r.RemoteAddr
|
||||
}
|
||||
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
|
||||
log := slog.With("addr", addr)
|
||||
|
||||
if net.ParseIP(addr) == nil {
|
||||
addr = ""
|
||||
}
|
||||
|
||||
var rep contract.Report
|
||||
|
||||
lr := &io.LimitedReader{R: r.Body, N: 40 * 1024}
|
||||
bs, _ := io.ReadAll(lr)
|
||||
if err := json.Unmarshal(bs, &rep); err != nil {
|
||||
log.Error("Failed to decode JSON", "error", err)
|
||||
http.Error(w, "JSON Decode Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
rep.Received = time.Now()
|
||||
rep.Date = rep.Received.UTC().Format("20060102")
|
||||
rep.Address = addr
|
||||
|
||||
if err := rep.Validate(); err != nil {
|
||||
log.Error("Failed to validate report", "error", err)
|
||||
http.Error(w, "Validation Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if s.addReport(&rep) {
|
||||
result = "replace"
|
||||
} else {
|
||||
result = "accept"
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) addReport(rep *contract.Report) bool {
|
||||
if s.geo != nil {
|
||||
if ip := net.ParseIP(rep.Address); ip != nil {
|
||||
if city, err := s.geo.City(ip); err == nil {
|
||||
rep.Country = city.Country.Names["en"]
|
||||
rep.CountryCode = city.Country.IsoCode
|
||||
}
|
||||
}
|
||||
}
|
||||
if rep.Country == "" {
|
||||
rep.Country = "Unknown"
|
||||
}
|
||||
if rep.CountryCode == "" {
|
||||
rep.CountryCode = "ZZ"
|
||||
}
|
||||
|
||||
rep.Version = transformVersion(rep.Version)
|
||||
if strings.Contains(rep.Version, ".") {
|
||||
split := strings.SplitN(rep.Version, ".", 3)
|
||||
if len(split) == 3 {
|
||||
rep.MajorVersion = strings.Join(split[:2], ".")
|
||||
}
|
||||
}
|
||||
rep.OS, rep.Arch, _ = strings.Cut(rep.Platform, "-")
|
||||
|
||||
if m := compilerRe.FindStringSubmatch(rep.LongVersion); len(m) == 3 {
|
||||
rep.Compiler = m[1]
|
||||
rep.Builder = m[2]
|
||||
}
|
||||
for _, d := range knownDistributions {
|
||||
if d.matcher.MatchString(rep.LongVersion) {
|
||||
rep.Distribution = d.distribution
|
||||
break
|
||||
}
|
||||
}
|
||||
rep.DistDist = rep.Distribution
|
||||
rep.DistOS = rep.OS
|
||||
rep.DistArch = rep.Arch
|
||||
|
||||
_, loaded := s.reports.LoadAndStore(rep.UniqueID, rep)
|
||||
return loaded
|
||||
}
|
||||
|
||||
func (s *server) save(w io.Writer) error {
|
||||
bw := bufio.NewWriter(w)
|
||||
enc := json.NewEncoder(bw)
|
||||
var err error
|
||||
s.reports.Range(func(k string, v *contract.Report) bool {
|
||||
err = enc.Encode(v)
|
||||
return err == nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bw.Flush()
|
||||
}
|
||||
|
||||
func (s *server) load(r io.Reader) {
|
||||
dec := json.NewDecoder(r)
|
||||
s.reports.Clear()
|
||||
for {
|
||||
var rep contract.Report
|
||||
if err := dec.Decode(&rep); errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if err != nil {
|
||||
slog.Error("Failed to load record", "error", err)
|
||||
break
|
||||
}
|
||||
s.addReport(&rep)
|
||||
}
|
||||
slog.Info("Loaded reports", "count", s.reports.Size())
|
||||
}
|
||||
|
||||
var (
|
||||
plusRe = regexp.MustCompile(`(\+.*|[.-]dev\..*)$`)
|
||||
plusStr = "-dev"
|
||||
)
|
||||
|
||||
// transformVersion returns a version number formatted correctly, with all
|
||||
// development versions aggregated into one.
|
||||
func transformVersion(v string) string {
|
||||
if v == "unknown-dev" {
|
||||
return v
|
||||
}
|
||||
if !strings.HasPrefix(v, "v") {
|
||||
v = "v" + v
|
||||
}
|
||||
v = plusRe.ReplaceAllString(v, plusStr)
|
||||
|
||||
return v
|
||||
}
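A few concrete input/output pairs, per my reading of plusRe above (illustrative, not taken from the test suite):

// transformVersion("1.27.3")                  => "v1.27.3"          (prefix added)
// transformVersion("v1.27.3+17-gabcdef0")     => "v1.27.3-dev"      ("+…" suffix collapsed)
// transformVersion("v1.28.0-rc.2.dev.5.gfff") => "v1.28.0-rc.2-dev" (".dev.…" suffix collapsed)
// transformVersion("unknown-dev")             => "unknown-dev"      (passed through unchanged)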
|
||||
@@ -7,7 +7,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -17,6 +16,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -8,7 +8,6 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"io"
|
||||
@@ -16,7 +15,7 @@ import (
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -178,8 +177,8 @@ func (d *diskStore) inventory() error {
|
||||
})
|
||||
return nil
|
||||
})
|
||||
slices.SortFunc(d.currentFiles, func(a, b currentFile) int {
|
||||
return cmp.Compare(a.mtime, b.mtime)
|
||||
sort.Slice(d.currentFiles, func(i, j int) bool {
|
||||
return d.currentFiles[i].mtime < d.currentFiles[j].mtime
|
||||
})
|
||||
var oldest time.Duration
|
||||
if len(d.currentFiles) > 0 {
|
||||
@@ -14,7 +14,6 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -22,29 +21,25 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
raven "github.com/getsentry/raven-go"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
"github.com/syncthing/syncthing/lib/ur"
|
||||
)
|
||||
|
||||
const maxRequestSize = 1 << 20 // 1 MiB
|
||||
|
||||
type cli struct {
|
||||
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
|
||||
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
|
||||
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
|
||||
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
|
||||
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
|
||||
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
|
||||
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
|
||||
MetricsListen string `help:"HTTP listen address for metrics" default:":8081" env:"METRICS_LISTEN_ADDRESS"`
|
||||
IngorePatterns string `help:"File containing ignore patterns (regexp)" env:"IGNORE_PATTERNS" type:"existingfile"`
|
||||
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
|
||||
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
|
||||
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
|
||||
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
|
||||
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
|
||||
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
|
||||
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -67,38 +62,19 @@ func main() {
|
||||
}
|
||||
go ss.Serve(context.Background())
|
||||
|
||||
var ip *ignorePatterns
|
||||
if params.IngorePatterns != "" {
|
||||
var err error
|
||||
ip, err = loadIgnorePatterns(params.IngorePatterns)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load ignore patterns: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
cr := &crashReceiver{
|
||||
store: ds,
|
||||
sentry: ss,
|
||||
ignore: ip,
|
||||
}
|
||||
|
||||
mux.Handle("/", cr)
|
||||
mux.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
|
||||
w.Write([]byte("OK"))
|
||||
})
|
||||
|
||||
if params.MetricsListen != "" {
|
||||
mmux := http.NewServeMux()
|
||||
mmux.Handle("/metrics", promhttp.Handler())
|
||||
go func() {
|
||||
if err := http.ListenAndServe(params.MetricsListen, mmux); err != nil {
|
||||
log.Fatalln("HTTP serve metrics:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
|
||||
if params.DSN != "" {
|
||||
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports"), ip))
|
||||
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports")))
|
||||
}
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
@@ -107,7 +83,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func handleFailureFn(dsn, failureDir string, ignore *ignorePatterns) func(w http.ResponseWriter, req *http.Request) {
|
||||
func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
result := "failure"
|
||||
defer func() {
|
||||
@@ -122,11 +98,6 @@ func handleFailureFn(dsn, failureDir string, ignore *ignorePatterns) func(w http
|
||||
return
|
||||
}
|
||||
|
||||
if ignore.match(bs) {
|
||||
result = "ignored"
|
||||
return
|
||||
}
|
||||
|
||||
var reports []ur.FailureReport
|
||||
err = json.Unmarshal(bs, &reports)
|
||||
if err != nil {
|
||||
@@ -139,7 +110,7 @@ func handleFailureFn(dsn, failureDir string, ignore *ignorePatterns) func(w http
|
||||
return
|
||||
}
|
||||
|
||||
version, err := build.ParseVersion(reports[0].Version)
|
||||
version, err := parseVersion(reports[0].Version)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 400)
|
||||
return
|
||||
@@ -187,42 +158,3 @@ func saveFailureWithGoroutines(data ur.FailureData, failureDir string) (string,
|
||||
}
|
||||
return reportServer + path, nil
|
||||
}
|
||||
|
||||
type ignorePatterns struct {
|
||||
patterns []*regexp.Regexp
|
||||
}
|
||||
|
||||
func loadIgnorePatterns(path string) (*ignorePatterns, error) {
|
||||
bs, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var patterns []*regexp.Regexp
|
||||
for _, line := range strings.Split(string(bs), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
re, err := regexp.Compile(line)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
patterns = append(patterns, re)
|
||||
}
|
||||
|
||||
log.Printf("Loaded %d ignore patterns", len(patterns))
|
||||
return &ignorePatterns{patterns: patterns}, nil
|
||||
}
|
||||
|
||||
func (i *ignorePatterns) match(report []byte) bool {
|
||||
if i == nil {
|
||||
return false
|
||||
}
|
||||
for _, re := range i.patterns {
|
||||
if re.Match(report) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
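A minimal usage sketch for the pattern file consumed above; the file path and the example patterns are invented for illustration, one regular expression per non-empty line:

// ignore.txt (hypothetical) might contain:
//
//	panic: leveldb: closed
//	^runtime: out of memory
//
// filterReport reports whether a received failure report matches one of
// the loaded patterns and should therefore be dropped.
func filterReport(report []byte) (bool, error) {
	ip, err := loadIgnorePatterns("ignore.txt")
	if err != nil {
		return false, err
	}
	return ip.match(report), nil
}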
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
|
||||
raven "github.com/getsentry/raven-go"
|
||||
"github.com/maruel/panicparse/v2/stack"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
)
|
||||
|
||||
const reportServer = "https://crash.syncthing.net/report/"
|
||||
@@ -106,7 +105,7 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
|
||||
return nil, errors.New("no first line")
|
||||
}
|
||||
|
||||
version, err := build.ParseVersion(string(parts[0]))
|
||||
version, err := parseVersion(string(parts[0]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -144,12 +143,12 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
|
||||
}
|
||||
|
||||
// Lock the source code loader to the version we are processing here.
|
||||
if version.Commit != "" {
|
||||
if version.commit != "" {
|
||||
// We have a commit hash, so we know exactly which source to use
|
||||
loader.LockWithVersion(version.Commit)
|
||||
} else if strings.HasPrefix(version.Tag, "v") {
|
||||
loader.LockWithVersion(version.commit)
|
||||
} else if strings.HasPrefix(version.tag, "v") {
|
||||
// Let's hope the tag is close enough
|
||||
loader.LockWithVersion(version.Tag)
|
||||
loader.LockWithVersion(version.tag)
|
||||
} else {
|
||||
// Last resort
|
||||
loader.LockWithVersion("main")
|
||||
@@ -216,26 +215,106 @@ func crashReportFingerprint(message string) []string {
|
||||
return []string{"{{ default }}", message}
|
||||
}
|
||||
|
||||
func packet(version build.VersionParts, reportType string) *raven.Packet {
|
||||
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
|
||||
// or, somewhere along the way the "+" in the version tag disappeared:
|
||||
// syncthing v1.23.7-dev.26.gdf7b56ae.dirty-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]
|
||||
var (
|
||||
longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
|
||||
gitExtraRE = regexp.MustCompile(`\.\d+\.g[0-9a-f]+`) // ".1.g6aaae618"
|
||||
gitExtraSepRE = regexp.MustCompile(`[.-]`) // dot or dash
|
||||
)
|
||||
|
||||
type version struct {
|
||||
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
|
||||
tag string // "v1.1.4-rc.1"
|
||||
commit string // "6aaae618", blank when absent
|
||||
codename string // "Erbium Earthworm"
|
||||
runtime string // "go1.12.5"
|
||||
goos string // "darwin"
|
||||
goarch string // "amd64"
|
||||
builder string // "jb@kvin.kastelo.net"
|
||||
extra []string // "foo", "bar"
|
||||
}
|
||||
|
||||
func (v version) environment() string {
|
||||
if v.commit != "" {
|
||||
return "Development"
|
||||
}
|
||||
if strings.Contains(v.tag, "-rc.") {
|
||||
return "Candidate"
|
||||
}
|
||||
if strings.Contains(v.tag, "-") {
|
||||
return "Beta"
|
||||
}
|
||||
return "Stable"
|
||||
}
|
||||
|
||||
func parseVersion(line string) (version, error) {
|
||||
m := longVersionRE.FindStringSubmatch(line)
|
||||
if len(m) == 0 {
|
||||
return version{}, errors.New("unintelligible version string")
|
||||
}
|
||||
|
||||
v := version{
|
||||
version: m[1],
|
||||
codename: m[2],
|
||||
runtime: m[3],
|
||||
goos: m[4],
|
||||
goarch: m[5],
|
||||
builder: m[6],
|
||||
}
|
||||
|
||||
// Split the version tag into tag and commit. This is old style
|
||||
// v1.2.3-something.4+11-g12345678 or newer with just dots
|
||||
// v1.2.3-something.4.11.g12345678 or v1.2.3-dev.11.g12345678.
|
||||
parts := []string{v.version}
|
||||
if strings.Contains(v.version, "+") {
|
||||
parts = strings.Split(v.version, "+")
|
||||
} else {
|
||||
idxs := gitExtraRE.FindStringIndex(v.version)
|
||||
if len(idxs) > 0 {
|
||||
parts = []string{v.version[:idxs[0]], v.version[idxs[0]+1:]}
|
||||
}
|
||||
}
|
||||
v.tag = parts[0]
|
||||
if len(parts) > 1 {
|
||||
fields := gitExtraSepRE.Split(parts[1], -1)
|
||||
if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
|
||||
v.commit = fields[1][1:]
|
||||
}
|
||||
}
|
||||
|
||||
if len(m) >= 8 && m[7] != "" {
|
||||
tags := strings.Split(m[7], ",")
|
||||
for i := range tags {
|
||||
tags[i] = strings.TrimSpace(tags[i])
|
||||
}
|
||||
v.extra = tags
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func packet(version version, reportType string) *raven.Packet {
|
||||
pkt := &raven.Packet{
|
||||
Platform: "go",
|
||||
Release: version.Tag,
|
||||
Environment: version.Environment(),
|
||||
Release: version.tag,
|
||||
Environment: version.environment(),
|
||||
Tags: raven.Tags{
|
||||
raven.Tag{Key: "version", Value: version.Version},
|
||||
raven.Tag{Key: "tag", Value: version.Tag},
|
||||
raven.Tag{Key: "codename", Value: version.Codename},
|
||||
raven.Tag{Key: "runtime", Value: version.Runtime},
|
||||
raven.Tag{Key: "goos", Value: version.GOOS},
|
||||
raven.Tag{Key: "goarch", Value: version.GOARCH},
|
||||
raven.Tag{Key: "builder", Value: version.Builder},
|
||||
raven.Tag{Key: "version", Value: version.version},
|
||||
raven.Tag{Key: "tag", Value: version.tag},
|
||||
raven.Tag{Key: "codename", Value: version.codename},
|
||||
raven.Tag{Key: "runtime", Value: version.runtime},
|
||||
raven.Tag{Key: "goos", Value: version.goos},
|
||||
raven.Tag{Key: "goarch", Value: version.goarch},
|
||||
raven.Tag{Key: "builder", Value: version.builder},
|
||||
raven.Tag{Key: "report_type", Value: reportType},
|
||||
},
|
||||
}
|
||||
if version.Commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.Commit})
|
||||
if version.commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
|
||||
}
|
||||
for _, tag := range version.Extra {
|
||||
for _, tag := range version.extra {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
|
||||
}
|
||||
return pkt
|
||||
@@ -12,6 +12,66 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseVersion(t *testing.T) {
|
||||
cases := []struct {
|
||||
longVersion string
|
||||
parsed version
|
||||
}{
|
||||
{
|
||||
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC`,
|
||||
parsed: version{
|
||||
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
|
||||
tag: "v1.1.4-rc.1",
|
||||
commit: "6aaae618",
|
||||
codename: "Erbium Earthworm",
|
||||
runtime: "go1.12.5",
|
||||
goos: "darwin",
|
||||
goarch: "amd64",
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
},
|
||||
},
|
||||
{
|
||||
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
|
||||
parsed: version{
|
||||
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
|
||||
tag: "v1.1.4-rc.1",
|
||||
commit: "6aaae618",
|
||||
codename: "Erbium Earthworm",
|
||||
runtime: "go1.12.5",
|
||||
goos: "darwin",
|
||||
goarch: "amd64",
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
extra: []string{"foo", "bar"},
|
||||
},
|
||||
},
|
||||
{
|
||||
longVersion: `syncthing v1.23.7-dev.26.gdf7b56ae-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]`,
|
||||
parsed: version{
|
||||
version: "v1.23.7-dev.26.gdf7b56ae-stversionextra",
|
||||
tag: "v1.23.7-dev",
|
||||
commit: "df7b56ae",
|
||||
codename: "Fermium Flea",
|
||||
runtime: "go1.20.5",
|
||||
goos: "darwin",
|
||||
goarch: "arm64",
|
||||
builder: "jb@ok.kastelo.net",
|
||||
extra: []string{"Some Wrapper", "purego", "stnoupgrade"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
v, err := parseVersion(tc.longVersion)
|
||||
if err != nil {
|
||||
t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
|
||||
continue
|
||||
}
|
||||
if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
|
||||
t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseReport(t *testing.T) {
|
||||
bs, err := os.ReadFile("_testdata/panic.log")
|
||||
if err != nil {
|
||||
@@ -71,6 +71,7 @@ func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]b
|
||||
|
||||
url := urlPrefix + l.version + filename[idx:]
|
||||
resp, err := l.client.Get(url)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Loading source:", err)
|
||||
return nil, 0
|
||||
@@ -12,16 +12,11 @@ import (
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type crashReceiver struct {
|
||||
store *diskStore
|
||||
sentry *sentryService
|
||||
ignore *ignorePatterns
|
||||
|
||||
ignoredMut sync.RWMutex
|
||||
ignored map[string]struct{}
|
||||
}
|
||||
|
||||
func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
@@ -69,12 +64,6 @@ func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http
|
||||
// serveHead responds to HEAD requests by checking if the named report
|
||||
// already exists in the system.
|
||||
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) {
|
||||
r.ignoredMut.RLock()
|
||||
_, ignored := r.ignored[reportID]
|
||||
r.ignoredMut.RUnlock()
|
||||
if ignored {
|
||||
return // found
|
||||
}
|
||||
if !r.store.Exists(reportID) {
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
}
|
||||
@@ -87,15 +76,6 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
|
||||
metricCrashReportsTotal.WithLabelValues(result).Inc()
|
||||
}()
|
||||
|
||||
r.ignoredMut.RLock()
|
||||
_, ignored := r.ignored[reportID]
|
||||
r.ignoredMut.RUnlock()
|
||||
if ignored {
|
||||
result = "ignored_cached"
|
||||
io.Copy(io.Discard, req.Body)
|
||||
return // found
|
||||
}
|
||||
|
||||
// Read at most maxRequestSize of report data.
|
||||
log.Println("Receiving report", reportID)
|
||||
lr := io.LimitReader(req.Body, maxRequestSize)
|
||||
@@ -106,17 +86,6 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
|
||||
return
|
||||
}
|
||||
|
||||
if r.ignore.match(bs) {
|
||||
r.ignoredMut.Lock()
|
||||
if r.ignored == nil {
|
||||
r.ignored = make(map[string]struct{})
|
||||
}
|
||||
r.ignored[reportID] = struct{}{}
|
||||
r.ignoredMut.Unlock()
|
||||
result = "ignored"
|
||||
return
|
||||
}
|
||||
|
||||
result = "success"
|
||||
|
||||
// Store the report
|
||||
@@ -9,13 +9,14 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
// userIDFor returns a string we can use as the user ID for the purpose of
|
||||
@@ -52,5 +53,5 @@ func compressAndWrite(bs []byte, fullPath string) error {
|
||||
gw.Close()
|
||||
|
||||
// Create an output file with the compressed report
|
||||
return os.WriteFile(fullPath, buf.Bytes(), 0o644)
|
||||
return os.WriteFile(fullPath, buf.Bytes(), 0644)
|
||||
}
|
||||
@@ -15,9 +15,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discoproto"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/beacon"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
@@ -78,21 +75,20 @@ func recv(bc beacon.Interface) {
|
||||
continue
|
||||
}
|
||||
|
||||
var ann discoproto.Announce
|
||||
proto.Unmarshal(data[4:], &ann)
|
||||
var ann discover.Announce
|
||||
ann.Unmarshal(data[4:])
|
||||
|
||||
id, _ := protocol.DeviceIDFromBytes(ann.Id)
|
||||
if id == myID {
|
||||
if ann.ID == myID {
|
||||
// This is one of our own fake packets, don't print it.
|
||||
continue
|
||||
}
|
||||
|
||||
// Print announcement details for the first packet from a given
|
||||
// device ID and source address, or if -all was given.
|
||||
key := id.String() + src.String()
|
||||
key := ann.ID.String() + src.String()
|
||||
if all || !seen[key] {
|
||||
log.Printf("Announcement from %v\n", src)
|
||||
log.Printf(" %v at %s\n", id, strings.Join(ann.Addresses, ", "))
|
||||
log.Printf(" %v at %s\n", ann.ID, strings.Join(ann.Addresses, ", "))
|
||||
seen[key] = true
|
||||
}
|
||||
}
|
||||
@@ -100,11 +96,11 @@ func recv(bc beacon.Interface) {
|
||||
|
||||
// sends fake discovery announcements once every second
|
||||
func send(bc beacon.Interface) {
|
||||
ann := &discoproto.Announce{
|
||||
Id: myID[:],
|
||||
ann := discover.Announce{
|
||||
ID: myID,
|
||||
Addresses: []string{"tcp://fake.example.com:12345"},
|
||||
}
|
||||
bs, _ := proto.Marshal(ann)
|
||||
bs, _ := ann.Marshal()
|
||||
|
||||
for {
|
||||
bc.Send(bs)
|
||||
@@ -1,261 +0,0 @@
|
||||
// Copyright (C) 2024 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/thejerf/suture/v4"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/internal/protoutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type amqpReplicator struct {
|
||||
suture.Service
|
||||
broker string
|
||||
sender *amqpSender
|
||||
receiver *amqpReceiver
|
||||
outbox chan *discosrv.ReplicationRecord
|
||||
}
|
||||
|
||||
func newAMQPReplicator(broker, clientID string, db database) *amqpReplicator {
|
||||
svc := suture.New("amqpReplicator", suture.Spec{PassThroughPanics: true})
|
||||
|
||||
sender := &amqpSender{
|
||||
broker: broker,
|
||||
clientID: clientID,
|
||||
outbox: make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
svc.Add(sender)
|
||||
|
||||
receiver := &amqpReceiver{
|
||||
broker: broker,
|
||||
clientID: clientID,
|
||||
db: db,
|
||||
}
|
||||
svc.Add(receiver)
|
||||
|
||||
return &amqpReplicator{
|
||||
Service: svc,
|
||||
broker: broker,
|
||||
sender: sender,
|
||||
receiver: receiver,
|
||||
outbox: make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *amqpReplicator) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
|
||||
s.sender.send(key, ps, seen)
|
||||
}
|
||||
|
||||
type amqpSender struct {
|
||||
broker string
|
||||
clientID string
|
||||
outbox chan *discosrv.ReplicationRecord
|
||||
}
|
||||
|
||||
func (s *amqpSender) Serve(ctx context.Context) error {
|
||||
conn, ch, err := amqpChannel(s.broker)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ch.Close()
|
||||
defer conn.Close()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
select {
|
||||
case rec := <-s.outbox:
|
||||
size := proto.Size(rec)
|
||||
if len(buf) < size {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
|
||||
n, err := protoutil.MarshalTo(buf, rec)
|
||||
if err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication marshal: %w", err)
|
||||
}
|
||||
|
||||
err = ch.PublishWithContext(ctx,
|
||||
"discovery", // exchange
|
||||
"", // routing key
|
||||
false, // mandatory
|
||||
false, // immediate
|
||||
amqp.Publishing{
|
||||
ContentType: "application/protobuf",
|
||||
Body: buf[:n],
|
||||
AppId: s.clientID,
|
||||
})
|
||||
if err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication publish: %w", err)
|
||||
}
|
||||
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *amqpSender) String() string {
|
||||
return fmt.Sprintf("amqpSender(%q)", s.broker)
|
||||
}
|
||||
|
||||
func (s *amqpSender) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
|
||||
item := &discosrv.ReplicationRecord{
|
||||
Key: key[:],
|
||||
Addresses: ps,
|
||||
Seen: seen,
|
||||
}
|
||||
|
||||
// The send should never block. The outbox is suitably buffered for at
|
||||
// least a few seconds of stalls, which shouldn't happen in practice.
|
||||
select {
|
||||
case s.outbox <- item:
|
||||
default:
|
||||
replicationSendsTotal.WithLabelValues("drop").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
type amqpReceiver struct {
|
||||
broker string
|
||||
clientID string
|
||||
db database
|
||||
}
|
||||
|
||||
func (s *amqpReceiver) Serve(ctx context.Context) error {
|
||||
conn, ch, err := amqpChannel(s.broker)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ch.Close()
|
||||
defer conn.Close()
|
||||
|
||||
msgs, err := amqpConsume(ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg, ok := <-msgs:
|
||||
if !ok {
|
||||
return fmt.Errorf("subscription closed: %w", io.EOF)
|
||||
}
|
||||
|
||||
// ignore messages from ourself
|
||||
if msg.AppId == s.clientID {
|
||||
continue
|
||||
}
|
||||
|
||||
var rec discosrv.ReplicationRecord
|
||||
if err := proto.Unmarshal(msg.Body, &rec); err != nil {
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return fmt.Errorf("replication unmarshal: %w", err)
|
||||
}
|
||||
id, err := protocol.DeviceIDFromBytes(rec.Key)
|
||||
if err != nil {
|
||||
id, err = protocol.DeviceIDFromString(string(rec.Key))
|
||||
}
|
||||
if err != nil {
|
||||
log.Println("Replication device ID:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.db.merge(&id, rec.Addresses, rec.Seen); err != nil {
|
||||
return fmt.Errorf("replication database merge: %w", err)
|
||||
}
|
||||
|
||||
replicationRecvsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *amqpReceiver) String() string {
|
||||
return fmt.Sprintf("amqpReceiver(%q)", s.broker)
|
||||
}
|
||||
|
||||
func amqpChannel(dst string) (*amqp.Connection, *amqp.Channel, error) {
|
||||
conn, err := amqp.Dial(dst)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("AMQP dial: %w", err)
|
||||
}
|
||||
|
||||
ch, err := conn.Channel()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("AMQP channel: %w", err)
|
||||
}
|
||||
|
||||
err = ch.ExchangeDeclare(
|
||||
"discovery", // name
|
||||
"fanout", // type
|
||||
false, // durable
|
||||
false, // auto-deleted
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
nil, // arguments
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("AMQP declare exchange: %w", err)
|
||||
}
|
||||
|
||||
return conn, ch, nil
|
||||
}
|
||||
|
||||
func amqpConsume(ch *amqp.Channel) (<-chan amqp.Delivery, error) {
|
||||
q, err := ch.QueueDeclare(
|
||||
"", // name
|
||||
false, // durable
|
||||
false, // delete when unused
|
||||
true, // exclusive
|
||||
false, // no-wait
|
||||
nil, // arguments
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AMQP declare queue: %w", err)
|
||||
}
|
||||
|
||||
err = ch.QueueBind(
|
||||
q.Name, // queue name
|
||||
"", // routing key
|
||||
"discovery", // exchange
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AMQP bind queue: %w", err)
|
||||
}
|
||||
|
||||
msgs, err := ch.Consume(
|
||||
q.Name, // queue
|
||||
"", // consumer
|
||||
true, // auto-ack
|
||||
false, // exclusive
|
||||
false, // no-local
|
||||
false, // no-wait
|
||||
nil, // args
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AMQP consume: %w", err)
|
||||
}
|
||||
|
||||
return msgs, nil
|
||||
}
|
||||
@@ -22,13 +22,12 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/stringutil"
|
||||
)
|
||||
@@ -40,20 +39,15 @@ type announcement struct {
|
||||
}
|
||||
|
||||
type apiSrv struct {
|
||||
addr string
|
||||
cert tls.Certificate
|
||||
db database
|
||||
listener net.Listener
|
||||
repl replicator // optional
|
||||
useHTTP bool
|
||||
compression bool
|
||||
gzipWriters sync.Pool
|
||||
seenTracker *retryAfterTracker
|
||||
notSeenTracker *retryAfterTracker
|
||||
}
|
||||
addr string
|
||||
cert tls.Certificate
|
||||
db database
|
||||
listener net.Listener
|
||||
repl replicator // optional
|
||||
useHTTP bool
|
||||
|
||||
type replicator interface {
|
||||
send(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64)
|
||||
mapsMut sync.Mutex
|
||||
misses map[string]int32
|
||||
}
|
||||
|
||||
type requestID int64
|
||||
@@ -66,30 +60,18 @@ type contextKey int
|
||||
|
||||
const idKey contextKey = iota
|
||||
|
||||
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP, compression bool, desiredNotFoundRate float64) *apiSrv {
|
||||
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool) *apiSrv {
|
||||
return &apiSrv{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
db: db,
|
||||
repl: repl,
|
||||
useHTTP: useHTTP,
|
||||
compression: compression,
|
||||
seenTracker: &retryAfterTracker{
|
||||
name: "seenTracker",
|
||||
bucketStarts: time.Now(),
|
||||
desiredRate: desiredNotFoundRate / 2,
|
||||
currentDelay: notFoundRetryUnknownMinSeconds,
|
||||
},
|
||||
notSeenTracker: &retryAfterTracker{
|
||||
name: "notSeenTracker",
|
||||
bucketStarts: time.Now(),
|
||||
desiredRate: desiredNotFoundRate / 2,
|
||||
currentDelay: notFoundRetryUnknownMaxSeconds / 2,
|
||||
},
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
db: db,
|
||||
repl: repl,
|
||||
useHTTP: useHTTP,
|
||||
misses: make(map[string]int32),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Serve(ctx context.Context) error {
|
||||
func (s *apiSrv) Serve(_ context.Context) error {
|
||||
if s.useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
@@ -120,15 +102,8 @@ func (s *apiSrv) Serve(ctx context.Context) error {
|
||||
ReadTimeout: httpReadTimeout,
|
||||
WriteTimeout: httpWriteTimeout,
|
||||
MaxHeaderBytes: httpMaxHeaderBytes,
|
||||
ErrorLog: log.New(io.Discard, "", 0),
|
||||
}
|
||||
if !debug {
|
||||
srv.ErrorLog = log.New(io.Discard, "", 0)
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
srv.Shutdown(context.Background())
|
||||
}()
|
||||
|
||||
err := srv.Serve(s.listener)
|
||||
if err != nil {
|
||||
@@ -198,7 +173,7 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "bad device param:", err)
|
||||
log.Println(reqID, "bad device param")
|
||||
}
|
||||
lookupRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
@@ -206,7 +181,8 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
rec, err := s.db.get(&deviceID)
|
||||
key := deviceID.String()
|
||||
rec, err := s.db.get(key)
|
||||
if err != nil {
|
||||
// some sort of internal error
|
||||
lookupRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
@@ -216,14 +192,28 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
}
|
||||
|
||||
if len(rec.Addresses) == 0 {
|
||||
var afterS int
|
||||
if rec.Seen == 0 {
|
||||
afterS = s.notSeenTracker.retryAfterS()
|
||||
lookupRequestsTotal.WithLabelValues("not_found_ever").Inc()
|
||||
lookupRequestsTotal.WithLabelValues("not_found").Inc()
|
||||
|
||||
s.mapsMut.Lock()
|
||||
misses := s.misses[key]
|
||||
if misses < rec.Misses {
|
||||
misses = rec.Misses + 1
|
||||
} else {
|
||||
afterS = s.seenTracker.retryAfterS()
|
||||
lookupRequestsTotal.WithLabelValues("not_found_recent").Inc()
|
||||
misses++
|
||||
}
|
||||
s.misses[key] = misses
|
||||
s.mapsMut.Unlock()
|
||||
|
||||
if misses%notFoundMissesWriteInterval == 0 {
|
||||
rec.Misses = misses
|
||||
rec.Missed = time.Now().UnixNano()
|
||||
rec.Addresses = nil
|
||||
// rec.Seen retained from get
|
||||
s.db.put(key, rec)
|
||||
}
|
||||
|
||||
afterS := notFoundRetryAfterSeconds(int(misses))
|
||||
retryAfterHistogram.Observe(float64(afterS))
|
||||
w.Header().Set("Retry-After", strconv.Itoa(afterS))
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
@@ -235,16 +225,10 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
|
||||
var bw io.Writer = w
|
||||
|
||||
// Use compression if the client asks for it
|
||||
if s.compression && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
|
||||
gw, ok := s.gzipWriters.Get().(*gzip.Writer)
|
||||
if ok {
|
||||
gw.Reset(w)
|
||||
} else {
|
||||
gw = gzip.NewWriter(w)
|
||||
}
|
||||
if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
gw := gzip.NewWriter(bw)
|
||||
defer gw.Close()
|
||||
defer s.gzipWriters.Put(gw)
|
||||
bw = gw
|
||||
}
|
||||
|
||||
@@ -283,9 +267,6 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
|
||||
addresses := fixupAddresses(remoteAddr, ann.Addresses)
|
||||
if len(addresses) == 0 {
|
||||
if debug {
|
||||
log.Println(reqID, "no addresses")
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
@@ -293,9 +274,6 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
}
|
||||
|
||||
if err := s.handleAnnounce(deviceID, addresses); err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "handle:", err)
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
@@ -306,9 +284,6 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
|
||||
|
||||
w.Header().Set("Reannounce-After", reannounceAfterString())
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
if debug {
|
||||
log.Println(reqID, "announced", deviceID, addresses)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Stop() {
|
||||
@@ -316,31 +291,29 @@ func (s *apiSrv) Stop() {
|
||||
}
|
||||
|
||||
func (s *apiSrv) handleAnnounce(deviceID protocol.DeviceID, addresses []string) error {
|
||||
key := deviceID.String()
|
||||
now := time.Now()
|
||||
expire := now.Add(addressExpiryTime).UnixNano()
|
||||
|
||||
dbAddrs := make([]DatabaseAddress, len(addresses))
|
||||
for i := range addresses {
|
||||
dbAddrs[i].Address = addresses[i]
|
||||
dbAddrs[i].Expires = expire
|
||||
}
|
||||
|
||||
// The address slice must always be sorted for database merges to work
|
||||
// properly.
|
||||
slices.Sort(addresses)
|
||||
addresses = slices.Compact(addresses)
|
||||
|
||||
dbAddrs := make([]*discosrv.DatabaseAddress, len(addresses))
|
||||
for i := range addresses {
|
||||
dbAddrs[i] = &discosrv.DatabaseAddress{
|
||||
Address: addresses[i],
|
||||
Expires: expire,
|
||||
}
|
||||
}
|
||||
sort.Sort(databaseAddressOrder(dbAddrs))
|
||||
|
||||
seen := now.UnixNano()
|
||||
if s.repl != nil {
|
||||
s.repl.send(&deviceID, dbAddrs, seen)
|
||||
s.repl.send(key, dbAddrs, seen)
|
||||
}
|
||||
return s.db.merge(&deviceID, dbAddrs, seen)
|
||||
return s.db.merge(key, dbAddrs, seen)
|
||||
}
|
||||
|
||||
func handlePing(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
w.WriteHeader(204)
|
||||
}
|
||||
|
||||
func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
@@ -386,7 +359,7 @@ func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
}
|
||||
|
||||
bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr})
|
||||
} else if cert := req.Header.Get("X-Forwarded-Tls-Client-Cert"); cert != "" {
|
||||
} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
|
||||
// Traefik 2 passtlsclientcert
|
||||
//
|
||||
// The certificate is in PEM format, maybe with URL encoding
|
||||
@@ -394,36 +367,19 @@ func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
// statements. We need to decode, reinstate the newlines every 64
|
||||
// character and add statements for the PEM decoder
|
||||
|
||||
if strings.Contains(cert, "%") {
|
||||
if unesc, err := url.QueryUnescape(cert); err == nil {
|
||||
cert = unesc
|
||||
if strings.Contains(hdr, "%") {
|
||||
if unesc, err := url.QueryUnescape(hdr); err == nil {
|
||||
hdr = unesc
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
header = "-----BEGIN CERTIFICATE-----"
|
||||
footer = "-----END CERTIFICATE-----"
|
||||
)
|
||||
|
||||
var b bytes.Buffer
|
||||
b.Grow(len(header) + 1 + len(cert) + len(cert)/64 + 1 + len(footer) + 1)
|
||||
|
||||
b.WriteString(header)
|
||||
b.WriteByte('\n')
|
||||
|
||||
for i := 0; i < len(cert); i += 64 {
|
||||
end := i + 64
|
||||
if end > len(cert) {
|
||||
end = len(cert)
|
||||
}
|
||||
b.WriteString(cert[i:end])
|
||||
b.WriteByte('\n')
|
||||
for i := 64; i < len(hdr); i += 65 {
|
||||
hdr = hdr[:i] + "\n" + hdr[i:]
|
||||
}
|
||||
|
||||
b.WriteString(footer)
|
||||
b.WriteByte('\n')
|
||||
|
||||
bs = b.Bytes()
|
||||
hdr = "-----BEGIN CERTIFICATE-----\n" + hdr
|
||||
hdr += "\n-----END CERTIFICATE-----\n"
|
||||
bs = []byte(hdr)
|
||||
}
|
||||
|
||||
if bs == nil {
|
||||
@@ -488,6 +444,7 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
|
||||
// remote is nil, unable to determine host IP
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If zero port was specified, use remote port.
|
||||
@@ -525,7 +482,7 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func addressStrs(dbAddrs []*discosrv.DatabaseAddress) []string {
|
||||
func addressStrs(dbAddrs []DatabaseAddress) []string {
|
||||
res := make([]string, len(dbAddrs))
|
||||
for i, a := range dbAddrs {
|
||||
res[i] = a.Address
|
||||
@@ -537,44 +494,15 @@ func errorRetryAfterString() string {
|
||||
return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds))
|
||||
}
|
||||
|
||||
func notFoundRetryAfterSeconds(misses int) int {
|
||||
retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses
|
||||
if retryAfterS > notFoundRetryMaxSeconds {
|
||||
retryAfterS = notFoundRetryMaxSeconds
|
||||
}
|
||||
retryAfterS += rand.Intn(notFoundRetryFuzzSeconds)
|
||||
return retryAfterS
|
||||
}
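With the constants defined elsewhere in this package (the values below are assumptions purely for illustration), the Retry-After value grows linearly with the per-device miss count and is capped:

// Assuming notFoundRetryMinSeconds = 60, notFoundRetryIncSeconds = 10,
// notFoundRetryMaxSeconds = 3600 and notFoundRetryFuzzSeconds = 30:
//
//	misses = 0   -> 60s   plus up to 30s of fuzz
//	misses = 12  -> 180s  plus up to 30s of fuzz
//	misses = 500 -> capped at 3600s, plus fuzz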
|
||||
|
||||
func reannounceAfterString() string {
|
||||
return strconv.Itoa(reannounceAfterSeconds + rand.Intn(reannounzeFuzzSeconds))
|
||||
}
|
||||
|
||||
type retryAfterTracker struct {
|
||||
name string
|
||||
desiredRate float64 // requests per second
|
||||
|
||||
mut sync.Mutex
|
||||
lastCount int // requests in the last bucket
|
||||
curCount int // requests in the current bucket
|
||||
bucketStarts time.Time // start of the current bucket
|
||||
currentDelay int // current delay in seconds
|
||||
}
|
||||
|
||||
func (t *retryAfterTracker) retryAfterS() int {
|
||||
now := time.Now()
|
||||
t.mut.Lock()
|
||||
if durS := now.Sub(t.bucketStarts).Seconds(); durS > float64(t.currentDelay) {
|
||||
t.bucketStarts = now
|
||||
t.lastCount = t.curCount
|
||||
lastRate := float64(t.lastCount) / durS
|
||||
|
||||
switch {
|
||||
case t.currentDelay > notFoundRetryUnknownMinSeconds &&
|
||||
lastRate < 0.75*t.desiredRate:
|
||||
t.currentDelay = max(8*t.currentDelay/10, notFoundRetryUnknownMinSeconds)
|
||||
case t.currentDelay < notFoundRetryUnknownMaxSeconds &&
|
||||
lastRate > 1.25*t.desiredRate:
|
||||
t.currentDelay = min(3*t.currentDelay/2, notFoundRetryUnknownMaxSeconds)
|
||||
}
|
||||
|
||||
t.curCount = 0
|
||||
}
|
||||
if t.curCount == 0 {
|
||||
retryAfterLevel.WithLabelValues(t.name).Set(float64(t.currentDelay))
|
||||
}
|
||||
t.curCount++
|
||||
t.mut.Unlock()
|
||||
return t.currentDelay + rand.Intn(t.currentDelay/4)
|
||||
}
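The replacement tracker above tunes the delay to hit a target rate of not-found responses instead of tracking per-device misses: when a bucket closes, the delay shrinks by about 20% if the observed rate fell below 0.75× the desired rate and grows by 50% if it exceeded 1.25×, clamped to the unknown-device bounds. A compressed sketch of just that adjustment rule, with the bounds passed in as parameters since their values are not shown here:

// adjustDelay applies the same scaling rule as retryAfterS, in isolation.
func adjustDelay(currentDelay int, lastRate, desiredRate float64, minS, maxS int) int {
	switch {
	case currentDelay > minS && lastRate < 0.75*desiredRate:
		currentDelay = max(8*currentDelay/10, minS)
	case currentDelay < maxS && lastRate > 1.25*desiredRate:
		currentDelay = min(3*currentDelay/2, maxS)
	}
	return currentDelay
}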
|
||||
|
||||
@@ -7,20 +7,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
)
|
||||
|
||||
func TestFixupAddresses(t *testing.T) {
|
||||
@@ -105,79 +94,3 @@ func addr(host string, port int) *net.TCPAddr {
|
||||
Port: port,
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAPIRequests(b *testing.B) {
|
||||
db := newInMemoryStore(b.TempDir(), 0, nil)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
go db.Serve(ctx)
|
||||
api := newAPISrv("127.0.0.1:0", tls.Certificate{}, db, nil, true, true, 1000)
|
||||
srv := httptest.NewServer(http.HandlerFunc(api.handler))
|
||||
|
||||
kf := b.TempDir() + "/cert"
|
||||
crt, err := tlsutil.NewCertificate(kf+".crt", kf+".key", "localhost", 7)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
certBs, err := os.ReadFile(kf + ".crt")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
certBs = regexp.MustCompile(`---[^\n]+---\n`).ReplaceAll(certBs, nil)
|
||||
certString := string(strings.ReplaceAll(string(certBs), "\n", " "))
|
||||
|
||||
devID := protocol.NewDeviceID(crt.Certificate[0])
|
||||
devIDString := devID.String()
|
||||
|
||||
b.Run("Announce", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(`{"addresses":["tcp://10.10.10.10:42000"]}`))
|
||||
req.Header.Set("X-Forwarded-Tls-Client-Cert", certString)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("Lookup", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("LookupNoCompression", func(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
url := srv.URL + "/v2/?device=" + devIDString
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
req.Header.Set("Accept-Encoding", "identity") // disable compression
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
b.Fatalf("unexpected status %s", resp.Status)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,31 +4,23 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../proto/scripts/protofmt.go database.proto
|
||||
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"cmp"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"net"
|
||||
"net/url"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/blob"
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/internal/protoutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/sliceutil"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type clock interface {
|
||||
@@ -42,401 +34,380 @@ func (defaultClock) Now() time.Time {
|
||||
}
|
||||
|
||||
type database interface {
|
||||
put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error
|
||||
merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error
|
||||
get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error)
|
||||
put(key string, rec DatabaseRecord) error
|
||||
merge(key string, addrs []DatabaseAddress, seen int64) error
|
||||
get(key string) (DatabaseRecord, error)
|
||||
}
|
||||
|
||||
type inMemoryStore struct {
|
||||
m *xsync.MapOf[protocol.DeviceID, *discosrv.DatabaseRecord]
|
||||
dir string
|
||||
flushInterval time.Duration
|
||||
blobs blob.Store
|
||||
objKey string
|
||||
clock clock
|
||||
type levelDBStore struct {
|
||||
db *leveldb.DB
|
||||
inbox chan func()
|
||||
clock clock
|
||||
marshalBuf []byte
|
||||
}
|
||||
|
||||
func newInMemoryStore(dir string, flushInterval time.Duration, blobs blob.Store) *inMemoryStore {
|
||||
hn, err := os.Hostname()
|
||||
func newLevelDBStore(dir string) (*levelDBStore, error) {
|
||||
db, err := leveldb.OpenFile(dir, levelDBOptions)
|
||||
if err != nil {
|
||||
hn = rand.String(8)
|
||||
}
|
||||
s := &inMemoryStore{
|
||||
m: xsync.NewMapOf[protocol.DeviceID, *discosrv.DatabaseRecord](),
|
||||
dir: dir,
|
||||
flushInterval: flushInterval,
|
||||
blobs: blobs,
|
||||
objKey: hn + ".db",
|
||||
clock: defaultClock{},
|
||||
}
|
||||
nr, err := s.read()
|
||||
if os.IsNotExist(err) && blobs != nil {
|
||||
// Try to read from blob storage
|
||||
latestKey, cerr := blobs.LatestKey(context.Background())
|
||||
if cerr != nil {
|
||||
log.Println("Error finding database from blob storage:", cerr)
|
||||
return s
|
||||
}
|
||||
fd, cerr := os.Create(path.Join(s.dir, "records.db"))
|
||||
if cerr != nil {
|
||||
log.Println("Error creating database file:", cerr)
|
||||
return s
|
||||
}
|
||||
if cerr := blobs.Download(context.Background(), latestKey, fd); cerr != nil {
|
||||
log.Printf("Error downloading database from blob storage: %v", cerr)
|
||||
}
|
||||
_ = fd.Close()
|
||||
nr, err = s.read()
|
||||
return nil, err
|
||||
}
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newMemoryLevelDBStore() (*levelDBStore, error) {
|
||||
db, err := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
if err != nil {
|
||||
log.Println("Error reading database:", err)
|
||||
return nil, err
|
||||
}
|
||||
log.Printf("Read %d records from database", nr)
|
||||
s.expireAndCalculateStatistics()
|
||||
return s
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error {
|
||||
func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
|
||||
t0 := time.Now()
|
||||
s.m.Store(*key, rec)
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
|
||||
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
|
||||
return nil
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
rc := make(chan error)
|
||||
|
||||
s.inbox <- func() {
|
||||
size := rec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
}
|
||||
n, _ := rec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
}
|
||||
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error {
|
||||
func (s *levelDBStore) merge(key string, addrs []DatabaseAddress, seen int64) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
newRec := &discosrv.DatabaseRecord{
|
||||
rc := make(chan error)
|
||||
newRec := DatabaseRecord{
|
||||
Addresses: addrs,
|
||||
Seen: seen,
|
||||
}
|
||||
|
||||
if oldRec, ok := s.m.Load(*key); ok {
|
||||
newRec = merge(oldRec, newRec)
|
||||
s.inbox <- func() {
|
||||
// grab the existing record
|
||||
oldRec, err := s.get(key)
|
||||
if err != nil {
|
||||
// "not found" is not an error from get, so this is serious
|
||||
// stuff only
|
||||
rc <- err
|
||||
return
|
||||
}
|
||||
newRec = merge(newRec, oldRec)
|
||||
|
||||
// We replicate s.put() functionality here ourselves instead of
|
||||
// calling it because we want to serialize our get above together
|
||||
// with the put in the same function.
|
||||
size := newRec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
}
|
||||
n, _ := newRec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
}
|
||||
s.m.Store(*key, newRec)
|
||||
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
|
||||
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
|
||||
}
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error) {
|
||||
func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
rec, ok := s.m.Load(*key)
|
||||
if !ok {
|
||||
keyBs := []byte(key)
|
||||
val, err := s.db.Get(keyBs, nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
|
||||
return &discosrv.DatabaseRecord{}, nil
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
|
||||
return DatabaseRecord{}, err
|
||||
}
|
||||
|
||||
rec.Addresses = expire(rec.Addresses, s.clock.Now())
|
||||
var rec DatabaseRecord
|
||||
|
||||
if err := rec.Unmarshal(val); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResUnmarshalError).Inc()
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
|
||||
rec.Addresses = expire(rec.Addresses, s.clock.Now().UnixNano())
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) Serve(ctx context.Context) error {
|
||||
if s.flushInterval <= 0 {
|
||||
<-ctx.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
t := time.NewTimer(s.flushInterval)
|
||||
func (s *levelDBStore) Serve(ctx context.Context) error {
|
||||
t := time.NewTimer(0)
|
||||
defer t.Stop()
|
||||
defer s.db.Close()
|
||||
|
||||
// Start the statistics serve routine. It will exit with us when
|
||||
// statisticsTrigger is closed.
|
||||
statisticsTrigger := make(chan struct{})
|
||||
statisticsDone := make(chan struct{})
|
||||
go s.statisticsServe(statisticsTrigger, statisticsDone)
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case fn := <-s.inbox:
|
||||
// Run function in serialized order.
|
||||
fn()
|
||||
|
||||
case <-t.C:
|
||||
log.Println("Calculating statistics")
|
||||
s.expireAndCalculateStatistics()
|
||||
log.Println("Flushing database")
|
||||
if err := s.write(); err != nil {
|
||||
log.Println("Error writing database:", err)
|
||||
}
|
||||
log.Println("Finished flushing database")
|
||||
t.Reset(s.flushInterval)
|
||||
// Trigger the statistics routine to do its thing in the
|
||||
// background.
|
||||
statisticsTrigger <- struct{}{}
|
||||
|
||||
case <-statisticsDone:
|
||||
// The statistics routine is done with one iteration, schedule
|
||||
// the next.
|
||||
t.Reset(databaseStatisticsInterval)
|
||||
|
||||
case <-ctx.Done():
|
||||
// We're done.
|
||||
close(statisticsTrigger)
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
return s.write()
|
||||
}
|
||||
|
||||
func (s *inMemoryStore) expireAndCalculateStatistics() {
|
||||
now := s.clock.Now()
|
||||
cutoff24h := now.Add(-24 * time.Hour).UnixNano()
|
||||
cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
current, currentIPv4, currentIPv6, currentIPv6GUA, last24h, last1w := 0, 0, 0, 0, 0, 0
|
||||
|
||||
n := 0
|
||||
s.m.Range(func(key protocol.DeviceID, rec *discosrv.DatabaseRecord) bool {
|
||||
if n%1000 == 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
n++
|
||||
|
||||
addresses := expire(rec.Addresses, now)
|
||||
if len(addresses) == 0 {
|
||||
rec.Addresses = nil
|
||||
s.m.Store(key, rec)
|
||||
} else if len(addresses) != len(rec.Addresses) {
|
||||
rec.Addresses = addresses
|
||||
s.m.Store(key, rec)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(rec.Addresses) > 0:
|
||||
current++
|
||||
seenIPv4, seenIPv6, seenIPv6GUA := false, false, false
|
||||
for _, addr := range rec.Addresses {
|
||||
// We do fast and loose matching on strings here instead of
|
||||
// parsing the address and the IP and doing "proper" checks,
|
||||
// to keep things fast and generate less garbage.
|
||||
if strings.Contains(addr.Address, "[") {
|
||||
seenIPv6 = true
|
||||
if strings.Contains(addr.Address, "[2") {
|
||||
seenIPv6GUA = true
|
||||
}
|
||||
} else {
|
||||
seenIPv4 = true
|
||||
}
|
||||
if seenIPv4 && seenIPv6 && seenIPv6GUA {
|
||||
break
|
||||
}
|
||||
}
|
||||
if seenIPv4 {
|
||||
currentIPv4++
|
||||
}
|
||||
if seenIPv6 {
|
||||
currentIPv6++
|
||||
}
|
||||
if seenIPv6GUA {
|
||||
currentIPv6GUA++
|
||||
}
|
||||
case rec.Seen > cutoff24h:
|
||||
last24h++
|
||||
case rec.Seen > cutoff1w:
|
||||
last1w++
|
||||
default:
|
||||
// drop the record if it's older than a week
|
||||
s.m.Delete(key)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
databaseKeys.WithLabelValues("current").Set(float64(current))
|
||||
databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
|
||||
databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
|
||||
databaseKeys.WithLabelValues("currentIPv6GUA").Set(float64(currentIPv6GUA))
|
||||
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
|
||||
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
|
||||
databaseStatisticsSeconds.Set(time.Since(now).Seconds())
|
||||
}
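The statistics pass above classifies addresses with cheap string checks ("[" for a bracketed IPv6 host, "[2" for a global unicast prefix) rather than parsing each URL, as the inline comment explains. A standalone sketch of that heuristic; the function name is illustrative:

package main

import "strings"

// classify reports whether an address string looks like IPv4, IPv6, and
// IPv6 GUA, using the same loose matching as the statistics pass.
func classify(addr string) (ipv4, ipv6, ipv6GUA bool) {
	if strings.Contains(addr, "[") {
		ipv6 = true
		if strings.Contains(addr, "[2") {
			ipv6GUA = true
		}
		return
	}
	ipv4 = true
	return
}
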
|
||||
|
||||
func (s *inMemoryStore) write() (err error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
if err == nil {
|
||||
databaseWriteSeconds.Set(time.Since(t0).Seconds())
|
||||
databaseLastWritten.Set(float64(t0.Unix()))
|
||||
}
|
||||
}()
|
||||
|
||||
dbf := path.Join(s.dir, "records.db")
|
||||
fd, err := os.Create(dbf + ".tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bw := bufio.NewWriter(fd)
|
||||
|
||||
var buf []byte
|
||||
var rangeErr error
|
||||
now := s.clock.Now()
|
||||
cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
n := 0
|
||||
s.m.Range(func(key protocol.DeviceID, value *discosrv.DatabaseRecord) bool {
|
||||
if n%1000 == 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
n++
|
||||
|
||||
if value.Seen < cutoff1w {
|
||||
// drop the record if it's older than a week
|
||||
return true
|
||||
}
|
||||
rec := &discosrv.ReplicationRecord{
|
||||
Key: key[:],
|
||||
Addresses: value.Addresses,
|
||||
Seen: value.Seen,
|
||||
}
|
||||
s := proto.Size(rec)
|
||||
if s+4 > len(buf) {
|
||||
buf = make([]byte, s+4)
|
||||
}
|
||||
n, err := protoutil.MarshalTo(buf[4:], rec)
|
||||
if err != nil {
|
||||
rangeErr = err
|
||||
return false
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
if _, err := bw.Write(buf[:n+4]); err != nil {
|
||||
rangeErr = err
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if rangeErr != nil {
|
||||
_ = fd.Close()
|
||||
return rangeErr
|
||||
}
|
||||
|
||||
if err := bw.Flush(); err != nil {
|
||||
_ = fd.Close()
|
||||
return err
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Rename(dbf+".tmp", dbf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Upload to blob storage
|
||||
if s.blobs != nil {
|
||||
fd, err = os.Open(dbf)
|
||||
if err != nil {
|
||||
log.Printf("Error uploading database to blob storage: %v", err)
|
||||
return nil
|
||||
}
|
||||
defer fd.Close()
|
||||
if err := s.blobs.Upload(context.Background(), s.objKey, fd); err != nil {
|
||||
log.Printf("Error uploading database to blob storage: %v", err)
|
||||
}
|
||||
log.Println("Finished uploading database")
|
||||
}
|
||||
// Also wait for statisticsServe to return
|
||||
<-statisticsDone
|
||||
|
||||
return nil
|
||||
}
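The write path above frames each record as a 4-byte big-endian length followed by the marshalled protobuf, writes everything to a temporary file, and renames it into place only after a successful flush and close. A minimal sketch of the framing and atomic replace, assuming already-marshalled record bytes:

package main

import (
	"bufio"
	"encoding/binary"
	"os"
	"path/filepath"
)

// writeFramed writes each record as a 4-byte big-endian length prefix
// followed by its bytes into dir/records.db, going through a .tmp file
// that is renamed over the target once fully written.
func writeFramed(dir string, records [][]byte) error {
	dbf := filepath.Join(dir, "records.db")
	fd, err := os.Create(dbf + ".tmp")
	if err != nil {
		return err
	}
	bw := bufio.NewWriter(fd)
	var hdr [4]byte
	for _, rec := range records {
		binary.BigEndian.PutUint32(hdr[:], uint32(len(rec)))
		if _, err := bw.Write(hdr[:]); err != nil {
			fd.Close()
			return err
		}
		if _, err := bw.Write(rec); err != nil {
			fd.Close()
			return err
		}
	}
	if err := bw.Flush(); err != nil {
		fd.Close()
		return err
	}
	if err := fd.Close(); err != nil {
		return err
	}
	return os.Rename(dbf+".tmp", dbf)
}

The rename is what makes the flush crash-safe: readers either see the previous complete database file or the new one, never a partial write.
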
|
||||
|
||||
func (s *inMemoryStore) read() (int, error) {
|
||||
fd, err := os.Open(path.Join(s.dir, "records.db"))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer fd.Close()
|
||||
func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
|
||||
defer close(done)
|
||||
|
||||
br := bufio.NewReader(fd)
|
||||
var buf []byte
|
||||
nr := 0
|
||||
for {
|
||||
var n uint32
|
||||
if err := binary.Read(br, binary.BigEndian, &n); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
for range trigger {
|
||||
t0 := time.Now()
|
||||
nowNanos := t0.UnixNano()
|
||||
cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
|
||||
cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
|
||||
current, currentIPv4, currentIPv6, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0, 0, 0
|
||||
|
||||
iter := s.db.NewIterator(&util.Range{}, nil)
|
||||
for iter.Next() {
|
||||
// Attempt to unmarshal the record and count the
|
||||
// failure if there's something wrong with it.
|
||||
var rec DatabaseRecord
|
||||
if err := rec.Unmarshal(iter.Value()); err != nil {
|
||||
errors++
|
||||
continue
|
||||
}
|
||||
|
||||
// If there are addresses that have not expired it's a current
|
||||
// record, otherwise account it based on when it was last seen
|
||||
// (last 24 hours or last week) or finally as inactive.
|
||||
addrs := expire(rec.Addresses, nowNanos)
|
||||
switch {
|
||||
case len(addrs) > 0:
|
||||
current++
|
||||
seenIPv4, seenIPv6 := false, false
|
||||
for _, addr := range addrs {
|
||||
uri, err := url.Parse(addr.Address)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
host, _, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil && ip.To4() != nil {
|
||||
seenIPv4 = true
|
||||
} else if ip != nil {
|
||||
seenIPv6 = true
|
||||
}
|
||||
if seenIPv4 && seenIPv6 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if seenIPv4 {
|
||||
currentIPv4++
|
||||
}
|
||||
if seenIPv6 {
|
||||
currentIPv6++
|
||||
}
|
||||
case rec.Seen > cutoff24h:
|
||||
last24h++
|
||||
case rec.Seen > cutoff1w:
|
||||
last1w++
|
||||
case rec.Seen > cutoff2Mon:
|
||||
inactive++
|
||||
case rec.Missed < cutoff2Mon:
|
||||
// It hasn't been seen lately and we haven't recorded
|
||||
// someone asking for this device in a long time either;
|
||||
// delete the record.
|
||||
if err := s.db.Delete(iter.Key(), nil); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResSuccess).Inc()
|
||||
}
|
||||
default:
|
||||
inactive++
|
||||
}
|
||||
return nr, err
|
||||
}
|
||||
if int(n) > len(buf) {
|
||||
buf = make([]byte, n)
|
||||
}
|
||||
if _, err := io.ReadFull(br, buf[:n]); err != nil {
|
||||
return nr, err
|
||||
}
|
||||
rec := &discosrv.ReplicationRecord{}
|
||||
if err := proto.Unmarshal(buf[:n], rec); err != nil {
|
||||
return nr, err
|
||||
}
|
||||
key, err := protocol.DeviceIDFromBytes(rec.Key)
|
||||
if err != nil {
|
||||
key, err = protocol.DeviceIDFromString(string(rec.Key))
|
||||
}
|
||||
if err != nil {
|
||||
log.Println("Bad device ID:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
slices.SortFunc(rec.Addresses, Cmp)
|
||||
rec.Addresses = slices.CompactFunc(rec.Addresses, Equal)
|
||||
s.m.Store(key, &discosrv.DatabaseRecord{
|
||||
Addresses: expire(rec.Addresses, s.clock.Now()),
|
||||
Seen: rec.Seen,
|
||||
})
|
||||
nr++
|
||||
iter.Release()
|
||||
|
||||
databaseKeys.WithLabelValues("current").Set(float64(current))
|
||||
databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
|
||||
databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
|
||||
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
|
||||
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
|
||||
databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
|
||||
databaseKeys.WithLabelValues("error").Set(float64(errors))
|
||||
databaseStatisticsSeconds.Set(time.Since(t0).Seconds())
|
||||
|
||||
// Signal that we are done and can be scheduled again.
|
||||
done <- struct{}{}
|
||||
}
|
||||
return nr, nil
|
||||
}
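The statistics goroutine above is driven by a trigger channel and reports each completed pass on a done channel, so the main loop never runs two statistics passes at once. A compact sketch of that handshake with a placeholder work function:

package main

// statisticsServe runs one pass of work per value received on trigger and
// signals completion on done; closing trigger makes it exit, after which
// done is closed as well.
func statisticsServe(trigger <-chan struct{}, done chan<- struct{}, work func()) {
	defer close(done)
	for range trigger {
		work()
		done <- struct{}{}
	}
}

The owning loop sends on trigger after each flush, waits for one value on done before rescheduling the statistics timer, and closes trigger at shutdown; the final receive then observes the close of done.
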
|
||||
|
||||
// merge returns the merged result of the two database records a and b. The
|
||||
// result is the union of the two address sets, with the newer expiry time
|
||||
// chosen for any duplicates. The address list in a is overwritten and
|
||||
// reused for the result.
|
||||
func merge(a, b *discosrv.DatabaseRecord) *discosrv.DatabaseRecord {
|
||||
// chosen for any duplicates.
|
||||
func merge(a, b DatabaseRecord) DatabaseRecord {
|
||||
// Both lists must be sorted for this to work.
|
||||
if !sort.IsSorted(databaseAddressOrder(a.Addresses)) {
|
||||
log.Println("Warning: bug: addresses not correctly sorted in merge")
|
||||
a.Addresses = sortedAddressCopy(a.Addresses)
|
||||
}
|
||||
if !sort.IsSorted(databaseAddressOrder(b.Addresses)) {
|
||||
// no warning because this is the side we read from disk and it may
|
||||
// legitimately predate correct sorting.
|
||||
b.Addresses = sortedAddressCopy(b.Addresses)
|
||||
}
|
||||
|
||||
a.Seen = max(a.Seen, b.Seen)
|
||||
res := DatabaseRecord{
|
||||
Addresses: make([]DatabaseAddress, 0, len(a.Addresses)+len(b.Addresses)),
|
||||
Seen: a.Seen,
|
||||
}
|
||||
if b.Seen > a.Seen {
|
||||
res.Seen = b.Seen
|
||||
}
|
||||
|
||||
aIdx := 0
|
||||
bIdx := 0
|
||||
for aIdx < len(a.Addresses) && bIdx < len(b.Addresses) {
|
||||
switch cmp.Compare(a.Addresses[aIdx].Address, b.Addresses[bIdx].Address) {
|
||||
case 0:
|
||||
// a == b, choose the newer expiry time
|
||||
a.Addresses[aIdx].Expires = max(a.Addresses[aIdx].Expires, b.Addresses[bIdx].Expires)
|
||||
aAddrs := a.Addresses
|
||||
bAddrs := b.Addresses
|
||||
loop:
|
||||
for {
|
||||
switch {
|
||||
case aIdx == len(aAddrs) && bIdx == len(bAddrs):
|
||||
// both lists are exhausted, we are done
|
||||
break loop
|
||||
|
||||
case aIdx == len(aAddrs):
|
||||
// a is exhausted, pick from b and continue
|
||||
res.Addresses = append(res.Addresses, bAddrs[bIdx])
|
||||
bIdx++
|
||||
continue
|
||||
|
||||
case bIdx == len(bAddrs):
|
||||
// b is exhausted, pick from a and continue
|
||||
res.Addresses = append(res.Addresses, aAddrs[aIdx])
|
||||
aIdx++
|
||||
continue
|
||||
}
|
||||
|
||||
// We have values left on both sides.
|
||||
aVal := aAddrs[aIdx]
|
||||
bVal := bAddrs[bIdx]
|
||||
|
||||
switch {
|
||||
case aVal.Address == bVal.Address:
|
||||
// update for same address, pick newer
|
||||
if aVal.Expires > bVal.Expires {
|
||||
res.Addresses = append(res.Addresses, aVal)
|
||||
} else {
|
||||
res.Addresses = append(res.Addresses, bVal)
|
||||
}
|
||||
aIdx++
|
||||
bIdx++
|
||||
case -1:
|
||||
// a < b, keep a and move on
|
||||
|
||||
case aVal.Address < bVal.Address:
|
||||
// a is smallest, pick it and continue
|
||||
res.Addresses = append(res.Addresses, aVal)
|
||||
aIdx++
|
||||
case 1:
|
||||
// a > b, insert b before a
|
||||
a.Addresses = append(a.Addresses[:aIdx], append([]*discosrv.DatabaseAddress{b.Addresses[bIdx]}, a.Addresses[aIdx:]...)...)
|
||||
|
||||
default:
|
||||
// b is smallest, pick it and continue
|
||||
res.Addresses = append(res.Addresses, bVal)
|
||||
bIdx++
|
||||
}
|
||||
}
|
||||
if bIdx < len(b.Addresses) {
|
||||
a.Addresses = append(a.Addresses, b.Addresses[bIdx:]...)
|
||||
}
|
||||
|
||||
return a
|
||||
return res
|
||||
}
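Both merge variants above walk two sorted address lists with an index into each, emitting the lexicographically smaller address and, on a tie, the newer expiry. A standalone sketch of that two-pointer merge over a simplified address type:

package main

type addr struct {
	Address string
	Expires int64
}

// mergeSorted merges two address lists that are sorted by Address,
// keeping the larger Expires value when the same address appears in both.
func mergeSorted(a, b []addr) []addr {
	res := make([]addr, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].Address == b[j].Address:
			best := a[i]
			if b[j].Expires > best.Expires {
				best = b[j]
			}
			res = append(res, best)
			i++
			j++
		case a[i].Address < b[j].Address:
			res = append(res, a[i])
			i++
		default:
			res = append(res, b[j])
			j++
		}
	}
	res = append(res, a[i:]...)
	return append(res, b[j:]...)
}
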
|
||||
|
||||
// expire returns the list of addresses after removing expired entries.
// Expiration happens in place, so the slice given as the parameter is
// destroyed. Internal order is preserved.
func expire(addrs []*discosrv.DatabaseAddress, now time.Time) []*discosrv.DatabaseAddress {
cutoff := now.UnixNano()
naddrs := addrs[:0]
for i := range addrs {
if i > 0 && addrs[i].Address == addrs[i-1].Address {
// Skip duplicates
// destroyed. Internal order is not preserved.
func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
i := 0
for i < len(addrs) {
if addrs[i].Expires < now {
addrs = sliceutil.RemoveAndZero(addrs, i)
continue
}
if addrs[i].Expires >= cutoff {
naddrs = append(naddrs, addrs[i])
}
i++
}
if len(naddrs) == 0 {
return nil
}
return naddrs
return addrs
}
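A short usage sketch of the newer expire signature, reusing the values from the test cases further down (addresses expiring at 5 and 15 against a cutoff of 10); the helper function name is illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/syncthing/syncthing/internal/gen/discosrv"
)

func exampleExpire() {
	addrs := []*discosrv.DatabaseAddress{
		{Address: "a", Expires: 5},
		{Address: "b", Expires: 15},
	}
	addrs = expire(addrs, time.Unix(0, 10))
	fmt.Println(addrs) // only "b" remains
}
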
|
||||
|
||||
func Cmp(d, other *discosrv.DatabaseAddress) (n int) {
if c := cmp.Compare(d.Address, other.Address); c != 0 {
return c
}
return cmp.Compare(d.Expires, other.Expires)
func sortedAddressCopy(addrs []DatabaseAddress) []DatabaseAddress {
sorted := make([]DatabaseAddress, len(addrs))
copy(sorted, addrs)
sort.Sort(databaseAddressOrder(sorted))
return sorted
}

func Equal(d, other *discosrv.DatabaseAddress) bool {
return d.Address == other.Address
type databaseAddressOrder []DatabaseAddress

func (s databaseAddressOrder) Less(a, b int) bool {
return s[a].Address < s[b].Address
}

func (s databaseAddressOrder) Swap(a, b int) {
s[a], s[b] = s[b], s[a]
}

func (s databaseAddressOrder) Len() int {
return len(s)
}
|
||||
|
||||
847
cmd/stdiscosrv/database.pb.go
Normal file
@@ -0,0 +1,847 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type DatabaseRecord struct {
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"`
|
||||
Misses int32 `protobuf:"varint,2,opt,name=misses,proto3" json:"misses,omitempty"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
Missed int64 `protobuf:"varint,4,opt,name=missed,proto3" json:"missed,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Reset() { *m = DatabaseRecord{} }
|
||||
func (m *DatabaseRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseRecord) ProtoMessage() {}
|
||||
func (*DatabaseRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{0}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseRecord.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseRecord proto.InternalMessageInfo
|
||||
|
||||
type ReplicationRecord struct {
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Reset() { *m = ReplicationRecord{} }
|
||||
func (m *ReplicationRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReplicationRecord) ProtoMessage() {}
|
||||
func (*ReplicationRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{1}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ReplicationRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReplicationRecord.Merge(m, src)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReplicationRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReplicationRecord proto.InternalMessageInfo
|
||||
|
||||
type DatabaseAddress struct {
|
||||
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
|
||||
Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Reset() { *m = DatabaseAddress{} }
|
||||
func (m *DatabaseAddress) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseAddress) ProtoMessage() {}
|
||||
func (*DatabaseAddress) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{2}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseAddress.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseAddress.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseAddress.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseAddress proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*DatabaseRecord)(nil), "main.DatabaseRecord")
|
||||
proto.RegisterType((*ReplicationRecord)(nil), "main.ReplicationRecord")
|
||||
proto.RegisterType((*DatabaseAddress)(nil), "main.DatabaseAddress")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("database.proto", fileDescriptor_b90fe3356ea5df07) }
|
||||
|
||||
var fileDescriptor_b90fe3356ea5df07 = []byte{
|
||||
// 270 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4a, 0xc4, 0x30,
|
||||
0x18, 0x85, 0x9b, 0x49, 0x1d, 0x99, 0x08, 0xa3, 0x06, 0x94, 0x20, 0x12, 0x4b, 0xdd, 0x74, 0xd5,
|
||||
0x01, 0x5d, 0xb9, 0x74, 0xd0, 0x0b, 0xe4, 0x06, 0xe9, 0xe4, 0x77, 0x08, 0x3a, 0x4d, 0x49, 0x2a,
|
||||
0xe8, 0x29, 0xf4, 0x58, 0x5d, 0xce, 0xd2, 0x95, 0x68, 0x7b, 0x11, 0x69, 0x26, 0x55, 0x14, 0x37,
|
||||
0xb3, 0x7b, 0xdf, 0xff, 0xbf, 0x97, 0xbc, 0x84, 0x4c, 0x95, 0xac, 0x65, 0x21, 0x1d, 0xe4, 0x95,
|
||||
0x35, 0xb5, 0xa1, 0xf1, 0x4a, 0xea, 0xf2, 0xe4, 0xdc, 0x42, 0x65, 0xdc, 0xcc, 0x8f, 0x8a, 0xc7,
|
||||
0xbb, 0xd9, 0xd2, 0x2c, 0x8d, 0x07, 0xaf, 0x36, 0xd6, 0xf4, 0x05, 0x91, 0xe9, 0x4d, 0x48, 0x0b,
|
||||
0x58, 0x18, 0xab, 0xe8, 0x15, 0x99, 0x48, 0xa5, 0x2c, 0x38, 0x07, 0x8e, 0xa1, 0x04, 0x67, 0x7b,
|
||||
0x17, 0x47, 0x79, 0x7f, 0x62, 0x3e, 0x18, 0xaf, 0x37, 0xeb, 0x79, 0xdc, 0xbc, 0x9f, 0x45, 0xe2,
|
||||
0xc7, 0x4d, 0x8f, 0xc9, 0x78, 0xa5, 0x7d, 0x6e, 0x94, 0xa0, 0x6c, 0x47, 0x04, 0xa2, 0x94, 0xc4,
|
||||
0x0e, 0xa0, 0x64, 0x38, 0x41, 0x19, 0x16, 0x5e, 0x7f, 0x7b, 0x15, 0x8b, 0xfd, 0x34, 0x50, 0x5a,
|
||||
0x93, 0x43, 0x01, 0xd5, 0x83, 0x5e, 0xc8, 0x5a, 0x9b, 0x32, 0x74, 0x3a, 0x20, 0xf8, 0x1e, 0x9e,
|
||||
0x19, 0x4a, 0x50, 0x36, 0x11, 0xbd, 0xfc, 0xdd, 0x72, 0xb4, 0x55, 0xcb, 0x7f, 0xda, 0xa4, 0xb7,
|
||||
0x64, 0xff, 0x4f, 0x8e, 0x32, 0xb2, 0x1b, 0x32, 0xe1, 0xde, 0x01, 0xfb, 0x0d, 0x3c, 0x55, 0xda,
|
||||
0x86, 0x77, 0x62, 0x31, 0xe0, 0xfc, 0xb4, 0xf9, 0xe4, 0x51, 0xd3, 0x72, 0xb4, 0x6e, 0x39, 0xfa,
|
||||
0x68, 0x39, 0x7a, 0xed, 0x78, 0xb4, 0xee, 0x78, 0xf4, 0xd6, 0xf1, 0xa8, 0x18, 0xfb, 0x3f, 0xbf,
|
||||
0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xa2, 0xf6, 0x1e, 0xb0, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Missed != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Missed))
|
||||
i--
|
||||
dAtA[i] = 0x20
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Misses))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
i -= len(m.Key)
|
||||
copy(dAtA[i:], m.Key)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Key)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Expires != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Expires))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Address) > 0 {
|
||||
i -= len(m.Address)
|
||||
copy(dAtA[i:], m.Address)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Address)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintDatabase(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovDatabase(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *DatabaseRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Misses))
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
if m.Missed != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Missed))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Address)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if m.Expires != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Expires))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDatabase(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozDatabase(x uint64) (n int) {
|
||||
return sovDatabase(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Misses", wireType)
|
||||
}
|
||||
m.Misses = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Misses |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Missed", wireType)
|
||||
}
|
||||
m.Missed = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Missed |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Address = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType)
|
||||
}
|
||||
m.Expires = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Expires |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDatabase(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupDatabase
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDatabase = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDatabase = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupDatabase = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
||||
36
cmd/stdiscosrv/database.proto
Normal file
@@ -0,0 +1,36 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

syntax = "proto3";

package main;

import "repos/protobuf/gogoproto/gogo.proto";

option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_unkeyed_all) = false;
option (gogoproto.goproto_unrecognized_all) = false;
option (gogoproto.goproto_sizecache_all) = false;

message DatabaseRecord {
repeated DatabaseAddress addresses = 1 [(gogoproto.nullable) = false];
int32 misses = 2; // Number of lookups* without hits
int64 seen = 3; // Unix nanos, last device announce
int64 missed = 4; // Unix nanos, last* failed lookup
}

// *) Not every lookup results in a write, so may not be completely accurate

message ReplicationRecord {
string key = 1;
repeated DatabaseAddress addresses = 2 [(gogoproto.nullable) = false];
int64 seen = 3; // Unix nanos, last device announce
}

message DatabaseAddress {
string address = 1;
int64 expires = 2; // Unix nanos
}

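Given these messages, a replication record round-trips through the Marshal and Unmarshal methods of the generated gogo types shown in database.pb.go above (package main); the field values here are illustrative:

package main

import "log"

func exampleReplicationRoundTrip() {
	rec := ReplicationRecord{
		Key:  "device-id-goes-here",
		Seen: 1234567890,
		Addresses: []DatabaseAddress{
			{Address: "tcp://1.2.3.4:5", Expires: 1234567890},
		},
	}
	bs, err := rec.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	var back ReplicationRecord
	if err := back.Unmarshal(bs); err != nil {
		log.Fatal(err)
	}
	log.Println(back.Key, len(back.Addresses))
}
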
@@ -11,26 +11,29 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/discosrv"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func TestDatabaseGetSet(t *testing.T) {
|
||||
db := newInMemoryStore(t.TempDir(), 0, nil)
|
||||
db, err := newMemoryLevelDBStore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go db.Serve(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Check missing record
|
||||
|
||||
rec, err := db.get(&protocol.EmptyDeviceID)
|
||||
rec, err := db.get("abcd")
|
||||
if err != nil {
|
||||
t.Error("not found should not be an error")
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Error("addresses should be empty")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Error("missing should be zero")
|
||||
}
|
||||
|
||||
// Set up a clock
|
||||
|
||||
@@ -40,16 +43,16 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
// Put a record
|
||||
|
||||
rec.Addresses = []*discosrv.DatabaseAddress{
|
||||
rec.Addresses = []DatabaseAddress{
|
||||
{Address: "tcp://1.2.3.4:5", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.put(&protocol.EmptyDeviceID, rec); err != nil {
|
||||
if err := db.put("abcd", rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
rec, err = db.get("abcd")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -66,16 +69,16 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
tc.wind(30 * time.Second)
|
||||
|
||||
addrs := []*discosrv.DatabaseAddress{
|
||||
addrs := []DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge(&protocol.EmptyDeviceID, addrs, tc.Now().UnixNano()); err != nil {
|
||||
if err := db.merge("abcd", addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
rec, err = db.get("abcd")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -98,7 +101,7 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get(&protocol.EmptyDeviceID)
|
||||
rec, err = db.get("abcd")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -111,18 +114,40 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
t.Error("incorrect address")
|
||||
}
|
||||
|
||||
// Set an address
|
||||
// Put a record with misses
|
||||
|
||||
addrs = []*discosrv.DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge(&protocol.GlobalDeviceID, addrs, tc.Now().UnixNano()); err != nil {
|
||||
rec = DatabaseRecord{Misses: 42, Missed: tc.Now().UnixNano()}
|
||||
if err := db.put("efgh", rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get(&protocol.GlobalDeviceID)
|
||||
rec, err = db.get("efgh")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have no addresses")
|
||||
}
|
||||
if rec.Misses != 42 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("incorrect misses")
|
||||
}
|
||||
|
||||
// Set an address
|
||||
|
||||
addrs = []DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge("efgh", addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("efgh")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -130,126 +155,48 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have one address")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("should have no misses")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter(t *testing.T) {
|
||||
// all cases are expired with t=10
|
||||
cases := []struct {
|
||||
a []*discosrv.DatabaseAddress
|
||||
b []*discosrv.DatabaseAddress
|
||||
a []DatabaseAddress
|
||||
b []DatabaseAddress
|
||||
}{
|
||||
{
|
||||
a: nil,
|
||||
b: nil,
|
||||
},
|
||||
{
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
|
||||
b: []*discosrv.DatabaseAddress{},
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
|
||||
b: []DatabaseAddress{},
|
||||
},
|
||||
{
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
|
||||
b: []*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
|
||||
b: []DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
res := expire(tc.a, time.Unix(0, 10))
|
||||
res := expire(tc.a, 10)
|
||||
if fmt.Sprint(res) != fmt.Sprint(tc.b) {
|
||||
t.Errorf("Incorrect result %v, expected %v", res, tc.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
cases := []struct {
|
||||
a, b, res []*discosrv.DatabaseAddress
|
||||
}{
|
||||
{nil, nil, nil},
|
||||
{
|
||||
nil,
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
},
|
||||
{
|
||||
nil,
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
|
||||
},
|
||||
{
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "d", Expires: 10}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
|
||||
[]*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}, {Address: "d", Expires: 10}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
rec := merge(&discosrv.DatabaseRecord{Addresses: tc.a}, &discosrv.DatabaseRecord{Addresses: tc.b})
|
||||
if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
|
||||
t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
|
||||
}
|
||||
rec = merge(&discosrv.DatabaseRecord{Addresses: tc.b}, &discosrv.DatabaseRecord{Addresses: tc.a})
|
||||
if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
|
||||
t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMergeEqual(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
ar := []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}}
|
||||
br := []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 10}}
|
||||
res := merge(&discosrv.DatabaseRecord{Addresses: ar}, &discosrv.DatabaseRecord{Addresses: br})
|
||||
if len(res.Addresses) != 2 {
|
||||
b.Fatal("wrong length")
|
||||
}
|
||||
if res.Addresses[0].Address != "a" || res.Addresses[1].Address != "b" {
|
||||
b.Fatal("wrong address")
|
||||
}
|
||||
if res.Addresses[0].Expires != 15 || res.Addresses[1].Expires != 15 {
|
||||
b.Fatal("wrong expiry")
|
||||
}
|
||||
}
|
||||
b.ReportAllocs() // should be zero per operation
|
||||
}
|
||||
|
||||
type testClock struct {
|
||||
now time.Time
|
||||
}
|
||||
|
||||
@@ -9,26 +9,22 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/thejerf/suture/v4"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/blob"
|
||||
"github.com/syncthing/syncthing/internal/blob/azureblob"
|
||||
"github.com/syncthing/syncthing/internal/blob/s3"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/thejerf/suture/v4"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -42,12 +38,17 @@ const (
|
||||
errorRetryAfterSeconds = 1500
|
||||
errorRetryFuzzSeconds = 300
|
||||
|
||||
// Retry for not found is notFoundRetrySeenSeconds for records we have
|
||||
// seen an announcement for (but it's not active right now) and
|
||||
// notFoundRetryUnknownSeconds for records we have never seen (or not
|
||||
// seen within the last week).
|
||||
notFoundRetryUnknownMinSeconds = 60
|
||||
notFoundRetryUnknownMaxSeconds = 3600
|
||||
// Retry for not found is minSeconds + failures * incSeconds +
|
||||
// random(fuzz), where failures is the number of consecutive lookups
|
||||
// with no answer, up to maxSeconds. The fuzz is applied after capping
|
||||
// to maxSeconds.
|
||||
notFoundRetryMinSeconds = 60
|
||||
notFoundRetryMaxSeconds = 3540
|
||||
notFoundRetryIncSeconds = 10
|
||||
notFoundRetryFuzzSeconds = 60
|
||||
|
||||
// How often (in requests) we serialize the missed counter to database.
|
||||
notFoundMissesWriteInterval = 10
|
||||
|
||||
httpReadTimeout = 5 * time.Second
|
||||
httpWriteTimeout = 5 * time.Second
|
||||
@@ -57,123 +58,164 @@ const (
|
||||
replicationOutboxSize = 10000
|
||||
)
|
||||
|
||||
var debug = false
|
||||
|
||||
type CLI struct {
|
||||
Cert string `group:"Listen" help:"Certificate file" default:"./cert.pem" env:"DISCOVERY_CERT_FILE"`
|
||||
Key string `group:"Listen" help:"Key file" default:"./key.pem" env:"DISCOVERY_KEY_FILE"`
|
||||
HTTP bool `group:"Listen" help:"Listen on HTTP (behind an HTTPS proxy)" env:"DISCOVERY_HTTP"`
|
||||
Compression bool `group:"Listen" help:"Enable GZIP compression of responses" env:"DISCOVERY_COMPRESSION"`
|
||||
Listen string `group:"Listen" help:"Listen address" default:":8443" env:"DISCOVERY_LISTEN"`
|
||||
MetricsListen string `group:"Listen" help:"Metrics listen address" env:"DISCOVERY_METRICS_LISTEN"`
|
||||
DesiredNotFoundRate float64 `group:"Listen" help:"Desired maximum rate of not-found replies (/s)" default:"1000"`
|
||||
|
||||
DBDir string `group:"Database" help:"Database directory" default:"." env:"DISCOVERY_DB_DIR"`
|
||||
DBFlushInterval time.Duration `group:"Database" help:"Interval between database flushes" default:"5m" env:"DISCOVERY_DB_FLUSH_INTERVAL"`
|
||||
|
||||
DBS3Endpoint string `name:"db-s3-endpoint" group:"Database (S3 backup)" hidden:"true" help:"S3 endpoint for database" env:"DISCOVERY_DB_S3_ENDPOINT"`
|
||||
DBS3Region string `name:"db-s3-region" group:"Database (S3 backup)" hidden:"true" help:"S3 region for database" env:"DISCOVERY_DB_S3_REGION"`
|
||||
DBS3Bucket string `name:"db-s3-bucket" group:"Database (S3 backup)" hidden:"true" help:"S3 bucket for database" env:"DISCOVERY_DB_S3_BUCKET"`
|
||||
DBS3AccessKeyID string `name:"db-s3-access-key-id" group:"Database (S3 backup)" hidden:"true" help:"S3 access key ID for database" env:"DISCOVERY_DB_S3_ACCESS_KEY_ID"`
|
||||
DBS3SecretKey string `name:"db-s3-secret-key" group:"Database (S3 backup)" hidden:"true" help:"S3 secret key for database" env:"DISCOVERY_DB_S3_SECRET_KEY"`
|
||||
|
||||
DBAzureBlobAccount string `name:"db-azure-blob-account" env:"DISCOVERY_DB_AZUREBLOB_ACCOUNT"`
|
||||
DBAzureBlobKey string `name:"db-azure-blob-key" env:"DISCOVERY_DB_AZUREBLOB_KEY"`
|
||||
DBAzureBlobContainer string `name:"db-azure-blob-container" env:"DISCOVERY_DB_AZUREBLOB_CONTAINER"`
|
||||
|
||||
AMQPAddress string `group:"AMQP replication" hidden:"true" help:"Address to AMQP broker" env:"DISCOVERY_AMQP_ADDRESS"`
|
||||
|
||||
Debug bool `short:"d" help:"Print debug output" env:"DISCOVERY_DEBUG"`
|
||||
Version bool `short:"v" help:"Print version and exit"`
|
||||
// These options make the database a little more optimized for writes, at
|
||||
// the expense of some memory usage and risk of losing writes in a (system)
|
||||
// crash.
|
||||
var levelDBOptions = &opt.Options{
|
||||
NoSync: true,
|
||||
WriteBuffer: 32 << 20, // default 4<<20
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.SetOutput(os.Stdout)
|
||||
var debug = false
|
||||
|
||||
var cli CLI
|
||||
kong.Parse(&cli)
|
||||
debug = cli.Debug
|
||||
func main() {
|
||||
var listen string
|
||||
var dir string
|
||||
var metricsListen string
|
||||
var replicationListen string
|
||||
var replicationPeers string
|
||||
var certFile string
|
||||
var keyFile string
|
||||
var replCertFile string
|
||||
var replKeyFile string
|
||||
var useHTTP bool
|
||||
var largeDB bool
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.StringVar(&certFile, "cert", "./cert.pem", "Certificate file")
|
||||
flag.StringVar(&keyFile, "key", "./key.pem", "Key file")
|
||||
flag.StringVar(&dir, "db-dir", "./discovery.db", "Database directory")
|
||||
flag.BoolVar(&debug, "debug", false, "Print debug output")
|
||||
flag.BoolVar(&useHTTP, "http", false, "Listen on HTTP (behind an HTTPS proxy)")
|
||||
flag.StringVar(&listen, "listen", ":8443", "Listen address")
|
||||
flag.StringVar(&metricsListen, "metrics-listen", "", "Metrics listen address")
|
||||
flag.StringVar(&replicationPeers, "replicate", "", "Replication peers, id@address, comma separated")
|
||||
flag.StringVar(&replicationListen, "replication-listen", ":19200", "Replication listen address")
|
||||
flag.StringVar(&replCertFile, "replication-cert", "", "Certificate file for replication")
|
||||
flag.StringVar(&replKeyFile, "replication-key", "", "Key file for replication")
|
||||
flag.BoolVar(&largeDB, "large-db", false, "Use larger database settings")
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
|
||||
log.Println(build.LongVersionFor("stdiscosrv"))
|
||||
if cli.Version {
|
||||
if *showVersion {
|
||||
return
|
||||
}
|
||||
|
||||
buildInfo.WithLabelValues(build.Version, runtime.Version(), build.User, build.Date.UTC().Format("2006-01-02T15:04:05Z")).Set(1)
|
||||
|
||||
var cert tls.Certificate
|
||||
if !cli.HTTP {
|
||||
var err error
|
||||
cert, err = tls.LoadX509KeyPair(cli.Cert, cli.Key)
|
||||
if os.IsNotExist(err) {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(cli.Cert, cli.Key, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Fatalln("Failed to load keypair:", err)
|
||||
if largeDB {
|
||||
levelDBOptions.BlockCacheCapacity = 64 << 20
|
||||
levelDBOptions.BlockSize = 64 << 10
|
||||
levelDBOptions.CompactionTableSize = 16 << 20
|
||||
levelDBOptions.CompactionTableSizeMultiplier = 2.0
|
||||
levelDBOptions.WriteBuffer = 64 << 20
|
||||
levelDBOptions.CompactionL0Trigger = 8
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if os.IsNotExist(err) {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Fatalln("Failed to load keypair:", err)
|
||||
}
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
|
||||
replCert := cert
|
||||
if replCertFile != "" && replKeyFile != "" {
|
||||
replCert, err = tls.LoadX509KeyPair(replCertFile, replKeyFile)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to load replication keypair:", err)
|
||||
}
|
||||
}
|
||||
replDevID := protocol.NewDeviceID(replCert.Certificate[0])
|
||||
log.Println("Replication device ID is", replDevID)
|
||||
|
||||
// Parse the replication specs, if any.
|
||||
var allowedReplicationPeers []protocol.DeviceID
|
||||
var replicationDestinations []string
|
||||
parts := strings.Split(replicationPeers, ",")
|
||||
for _, part := range parts {
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
fields := strings.Split(part, "@")
|
||||
switch len(fields) {
|
||||
case 2:
|
||||
// This is an id@address specification. Grab the address for the
|
||||
// destination list. Try to resolve it once to catch obvious
|
||||
// syntax errors here rather than having the sender service fail
|
||||
// repeatedly later.
|
||||
_, err := net.ResolveTCPAddr("tcp", fields[1])
|
||||
if err != nil {
|
||||
log.Fatalln("Resolving address:", err)
|
||||
}
|
||||
replicationDestinations = append(replicationDestinations, fields[1])
|
||||
fallthrough // N.B.
|
||||
|
||||
case 1:
|
||||
// The first part is always a device ID.
|
||||
id, err := protocol.DeviceIDFromString(fields[0])
|
||||
if err != nil {
|
||||
log.Fatalln("Parsing device ID:", err)
|
||||
}
|
||||
if id == protocol.EmptyDeviceID {
|
||||
log.Fatalf("Missing device ID for peer in %q", part)
|
||||
}
|
||||
allowedReplicationPeers = append(allowedReplicationPeers, id)
|
||||
|
||||
default:
|
||||
log.Fatalln("Unrecognized replication spec:", part)
|
||||
}
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
}
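In other words, the -replicate flag takes a comma separated list where each element is either a bare device ID (allow an incoming peer) or id@address (additionally send outgoing updates to that address); the fallthrough above makes the address form imply the ID form. A minimal, hypothetical sketch of the same split using strings.Cut rather than the Split/switch above; the device ID and address are placeholders:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Both accepted forms; the ID and address below are placeholders.
	for _, spec := range []string{
		"P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2@10.0.0.2:19200",
		"P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2",
	} {
		id, addr, hasAddr := strings.Cut(spec, "@")
		fmt.Println("allowed peer:", id, "destination:", addr, "outgoing:", hasAddr)
	}
}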
|
||||
|
||||
// Root of the service tree.
|
||||
main := suture.New("main", suture.Spec{
|
||||
PassThroughPanics: true,
|
||||
Timeout: 2 * time.Minute,
|
||||
})
|
||||
|
||||
// If configured, use blob storage for database backups.
|
||||
var blobs blob.Store
|
||||
var err error
|
||||
if cli.DBS3Endpoint != "" {
|
||||
blobs, err = s3.NewSession(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
|
||||
} else if cli.DBAzureBlobAccount != "" {
|
||||
blobs, err = azureblob.NewBlobStore(cli.DBAzureBlobAccount, cli.DBAzureBlobKey, cli.DBAzureBlobContainer)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create blob store: %v", err)
|
||||
}
|
||||
|
||||
// Start the database.
|
||||
db := newInMemoryStore(cli.DBDir, cli.DBFlushInterval, blobs)
|
||||
db, err := newLevelDBStore(dir)
|
||||
if err != nil {
|
||||
log.Fatalln("Open database:", err)
|
||||
}
|
||||
main.Add(db)
|
||||
|
||||
// If we have an AMQP broker for replication, start that
|
||||
var repl replicator
|
||||
if cli.AMQPAddress != "" {
|
||||
clientID := rand.String(10)
|
||||
kr := newAMQPReplicator(cli.AMQPAddress, clientID, db)
|
||||
main.Add(kr)
|
||||
repl = kr
|
||||
// Start any replication senders.
|
||||
var repl replicationMultiplexer
|
||||
for _, dst := range replicationDestinations {
|
||||
rs := newReplicationSender(dst, replCert, allowedReplicationPeers)
|
||||
main.Add(rs)
|
||||
repl = append(repl, rs)
|
||||
}
|
||||
|
||||
// If we have replication configured, start the replication listener.
|
||||
if len(allowedReplicationPeers) > 0 {
|
||||
rl := newReplicationListener(replicationListen, replCert, allowedReplicationPeers, db)
|
||||
main.Add(rl)
|
||||
}
|
||||
|
||||
// Start the main API server.
|
||||
qs := newAPISrv(cli.Listen, cert, db, repl, cli.HTTP, cli.Compression, cli.DesiredNotFoundRate)
|
||||
qs := newAPISrv(listen, cert, db, repl, useHTTP)
|
||||
main.Add(qs)
|
||||
|
||||
// If we have a metrics port configured, start a metrics handler.
|
||||
if cli.MetricsListen != "" {
|
||||
if metricsListen != "" {
|
||||
go func() {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(cli.MetricsListen, mux))
|
||||
log.Fatal(http.ListenAndServe(metricsListen, mux))
|
||||
}()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Cancel on signal
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, os.Interrupt)
|
||||
go func() {
|
||||
sig := <-signalChan
|
||||
log.Printf("Received signal %s; shutting down", sig)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
// Engage!
|
||||
main.Serve(ctx)
|
||||
main.Serve(context.Background())
|
||||
}
|
||||
|
||||
324
cmd/stdiscosrv/replication.go
Normal file
@@ -0,0 +1,324 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
"context"
"crypto/tls"
"encoding/binary"
"fmt"
io "io"
"log"
"net"
"time"

"github.com/syncthing/syncthing/lib/protocol"
)

const (
replicationReadTimeout = time.Minute
replicationWriteTimeout = 30 * time.Second
replicationHeartbeatInterval = time.Second * 30
)

type replicator interface {
send(key string, addrs []DatabaseAddress, seen int64)
}

// A replicationSender tries to connect to the remote address and provides
// the peer with a feed of replication updates.
|
||||
type replicationSender struct {
|
||||
dst string
|
||||
cert tls.Certificate // our certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
outbox chan ReplicationRecord
|
||||
}
|
||||
|
||||
func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protocol.DeviceID) *replicationSender {
|
||||
return &replicationSender{
|
||||
dst: dst,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
outbox: make(chan ReplicationRecord, replicationOutboxSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *replicationSender) Serve(ctx context.Context) error {
|
||||
// Sleep a little at startup. Peers often restart at the same time, and
|
||||
// this avoids the service failing and entering backoff state
|
||||
// unnecessarily, while also reducing the reconnect rate to something
|
||||
// reasonable by default.
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{s.cert},
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
// Dial the TLS connection.
|
||||
conn, err := tls.Dial("tcp", s.dst, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
// The replication stream is not especially latency sensitive, but it is
|
||||
// quite a lot of data in small writes. Make it more efficient.
|
||||
if tcpc, ok := conn.NetConn().(*net.TCPConn); ok {
|
||||
_ = tcpc.SetNoDelay(false)
|
||||
}
|
||||
|
||||
// Get the other side device ID.
|
||||
remoteID, err := deviceID(conn)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify it's in the set of allowed device IDs.
|
||||
if !deviceIDIn(remoteID, s.allowedIDs) {
|
||||
log.Println("Replication connect: unexpected device ID:", remoteID)
|
||||
return err
|
||||
}
|
||||
|
||||
heartBeatTicker := time.NewTicker(replicationHeartbeatInterval)
|
||||
defer heartBeatTicker.Stop()
|
||||
|
||||
// Send records.
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
select {
|
||||
case <-heartBeatTicker.C:
|
||||
if len(s.outbox) > 0 {
|
||||
// No need to send heartbeats if there are events/previous
|
||||
// heartbeats to send, they will keep the connection alive.
|
||||
continue
|
||||
}
|
||||
// Empty replication message is the heartbeat:
|
||||
s.outbox <- ReplicationRecord{}
|
||||
|
||||
case rec := <-s.outbox:
|
||||
// Buffer must hold record plus four bytes for size
|
||||
size := rec.Size()
|
||||
if len(buf) < size+4 {
|
||||
buf = make([]byte, size+4)
|
||||
}
|
||||
|
||||
// Record comes after the four bytes size
|
||||
n, err := rec.MarshalTo(buf[4:])
|
||||
if err != nil {
|
||||
// odd to get an error here, but we haven't sent anything
|
||||
// yet so it's not fatal
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication marshal:", err)
|
||||
continue
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
|
||||
// Send
|
||||
conn.SetWriteDeadline(time.Now().Add(replicationWriteTimeout))
|
||||
if _, err := conn.Write(buf[:4+n]); err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication write:", err)
|
||||
// Yes, we are losing the replication event here.
|
||||
return err
|
||||
}
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *replicationSender) String() string {
|
||||
return fmt.Sprintf("replicationSender(%q)", s.dst)
|
||||
}
|
||||
|
||||
func (s *replicationSender) send(key string, ps []DatabaseAddress, _ int64) {
|
||||
item := ReplicationRecord{
|
||||
Key: key,
|
||||
Addresses: ps,
|
||||
}
|
||||
|
||||
// The send should never block. The inbox is suitably buffered for at
|
||||
// least a few seconds of stalls, which shouldn't happen in practice.
|
||||
select {
|
||||
case s.outbox <- item:
|
||||
default:
|
||||
replicationSendsTotal.WithLabelValues("drop").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// a replicationMultiplexer sends to multiple replicators
|
||||
type replicationMultiplexer []replicator
|
||||
|
||||
func (m replicationMultiplexer) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
for _, s := range m {
|
||||
// each send is nonblocking
|
||||
s.send(key, ps, seen)
|
||||
}
|
||||
}
|
||||
|
||||
// replicationListener accepts incoming connections and reads replication
|
||||
// items from them. Incoming items are applied to the KV store.
|
||||
type replicationListener struct {
|
||||
addr string
|
||||
cert tls.Certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
db database
|
||||
}
|
||||
|
||||
func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []protocol.DeviceID, db database) *replicationListener {
|
||||
return &replicationListener{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) Serve(ctx context.Context) error {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{l.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
lst, err := tls.Listen("tcp", l.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication listen:", err)
|
||||
return err
|
||||
}
|
||||
defer lst.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// Accept a connection
|
||||
conn, err := lst.Accept()
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Figure out the other side device ID
|
||||
remoteID, err := deviceID(conn.(*tls.Conn))
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify it is in the set of allowed device IDs
|
||||
if !deviceIDIn(remoteID, l.allowedIDs) {
|
||||
log.Println("Replication accept: unexpected device ID:", remoteID)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
go l.handle(ctx, conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) String() string {
|
||||
return fmt.Sprintf("replicationListener(%q)", l.addr)
|
||||
}
|
||||
|
||||
func (l *replicationListener) handle(ctx context.Context, conn net.Conn) {
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
conn.SetReadDeadline(time.Now().Add(replicationReadTimeout))
|
||||
|
||||
// First four bytes are the size
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
log.Println("Replication read size:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Read the rest of the record
|
||||
size := int(binary.BigEndian.Uint32(buf[:4]))
|
||||
if len(buf) < size {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
|
||||
if size == 0 {
|
||||
// Heartbeat, ignore
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:size]); err != nil {
|
||||
log.Println("Replication read record:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Unmarshal
|
||||
var rec ReplicationRecord
|
||||
if err := rec.Unmarshal(buf[:size]); err != nil {
|
||||
log.Println("Replication unmarshal:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
// Store
|
||||
l.db.merge(rec.Key, rec.Addresses, rec.Seen)
|
||||
replicationRecvsTotal.WithLabelValues("success").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func deviceID(conn *tls.Conn) (protocol.DeviceID, error) {
|
||||
// Handshake may not be complete on the server side yet, which we need
|
||||
// to get the client certificate.
|
||||
if !conn.ConnectionState().HandshakeComplete {
|
||||
if err := conn.Handshake(); err != nil {
|
||||
return protocol.DeviceID{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// We expect exactly one certificate.
|
||||
certs := conn.ConnectionState().PeerCertificates
|
||||
if len(certs) != 1 {
|
||||
return protocol.DeviceID{}, fmt.Errorf("unexpected number of certificates (%d != 1)", len(certs))
|
||||
}
|
||||
|
||||
return protocol.NewDeviceID(certs[0].Raw), nil
|
||||
}
|
||||
|
||||
func deviceIDIn(id protocol.DeviceID, ids []protocol.DeviceID) bool {
|
||||
for _, candidate := range ids {
|
||||
if id == candidate {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
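The framing used by the sender and listener above is a four byte big endian length prefix followed by the marshalled ReplicationRecord, with a zero length frame serving as the heartbeat. A self-contained sketch of that framing over an in-memory buffer; the payload is an arbitrary byte slice standing in for a marshalled record:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame prefixes the payload with its length as a big-endian uint32.
// An empty payload (size 0) plays the role of the heartbeat message.
func writeFrame(w io.Writer, payload []byte) error {
	var size [4]byte
	binary.BigEndian.PutUint32(size[:], uint32(len(payload)))
	if _, err := w.Write(size[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrame reads one size-prefixed frame; a nil payload means heartbeat.
func readFrame(r io.Reader) ([]byte, error) {
	var size [4]byte
	if _, err := io.ReadFull(r, size[:]); err != nil {
		return nil, err
	}
	n := binary.BigEndian.Uint32(size[:])
	if n == 0 {
		return nil, nil // heartbeat, nothing further to read
	}
	buf := make([]byte, n)
	_, err := io.ReadFull(r, buf)
	return buf, err
}

func main() {
	var conn bytes.Buffer
	_ = writeFrame(&conn, []byte("marshalled record goes here"))
	_ = writeFrame(&conn, nil) // heartbeat
	for {
		payload, err := readFrame(&conn)
		if err != nil {
			break // io.EOF once the buffer is drained
		}
		fmt.Printf("frame: %q (heartbeat: %v)\n", payload, payload == nil)
	}
}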
|
||||
@@ -7,7 +7,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -96,28 +99,13 @@ var (
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
}, []string{"operation"})
|
||||
|
||||
databaseWriteSeconds = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_write_seconds",
|
||||
Help: "Time spent writing the database.",
|
||||
})
|
||||
databaseLastWritten = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_last_written",
|
||||
Help: "Timestamp of the last successful database write.",
|
||||
})
|
||||
|
||||
retryAfterLevel = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "retry_after_seconds",
|
||||
Help: "Retry-After header value in seconds.",
|
||||
}, []string{"name"})
|
||||
retryAfterHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "retry_after_seconds",
|
||||
Help: "Retry-After header value in seconds.",
|
||||
Buckets: prometheus.ExponentialBuckets(60, 2, 7), // 60, 120, 240, 480, 960, 1920, 3840
|
||||
})
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -138,6 +126,16 @@ func init() {
|
||||
replicationSendsTotal, replicationRecvsTotal,
|
||||
databaseKeys, databaseStatisticsSeconds,
|
||||
databaseOperations, databaseOperationSeconds,
|
||||
databaseWriteSeconds, databaseLastWritten,
|
||||
retryAfterLevel)
|
||||
retryAfterHistogram)
|
||||
|
||||
processCollectorOpts := collectors.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_discovery",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
},
|
||||
}
|
||||
|
||||
prometheus.MustRegister(
|
||||
collectors.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ to NAT or firewall issues.
|
||||
|
||||
There is very little reason why you'd want to run this yourself, as
|
||||
`relaypoolsrv` is just used for announcement and lookup of public relay
|
||||
servers. If you are looking to set up a private or a public relay, please
|
||||
servers. If you are looking to setup a private or a public relay, please
|
||||
check the documentation for
|
||||
[relaysrv](https://github.com/syncthing/relaysrv), which also explains how
|
||||
to join the default public pool.
|
||||
@@ -21,3 +21,4 @@ See `relaypoolsrv -help` for configuration options.
|
||||
|
||||
[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).
|
||||
|
||||
[lib/pq](https://github.com/lib/pq)</a>, Copyright (C) 2011-2013 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany.
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../../../script/genassets.go -o gui.files.go ../gui
|
||||
//go:generate go run ../../../script/genassets.go -o gui.files.go ../gui
|
||||
|
||||
// Package auto contains auto generated files for web assets.
|
||||
package auto
|
||||
@@ -259,7 +259,7 @@
|
||||
return a.value > b.value ? 1 : -1;
|
||||
}
|
||||
|
||||
$http.get("/endpoint/full").then(function(response) {
|
||||
$http.get("/endpoint").then(function(response) {
|
||||
$scope.relays = response.data.relays;
|
||||
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
@@ -338,7 +338,7 @@
|
||||
relay.showMarker = function() {
|
||||
relay.marker.openPopup();
|
||||
}
|
||||
|
||||
|
||||
relay.hideMarker = function() {
|
||||
relay.marker.closePopup();
|
||||
}
|
||||
@@ -347,7 +347,7 @@
|
||||
|
||||
function addCircleToMap(relay) {
|
||||
console.log(relay.location.latitude)
|
||||
L.circle([relay.location.latitude, relay.location.longitude],
|
||||
L.circle([relay.location.latitude, relay.location.longitude],
|
||||
{
|
||||
radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000,
|
||||
color: "FF0000",
|
||||
@@ -21,13 +21,13 @@ import (
|
||||
"time"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru/v2"
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/lib/assets"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/geoip"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
@@ -51,10 +51,6 @@ type relay struct {
|
||||
StatsRetrieved time.Time `json:"statsRetrieved"`
|
||||
}
|
||||
|
||||
type relayShort struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
type stats struct {
|
||||
StartTime time.Time `json:"startTime"`
|
||||
UptimeSeconds int `json:"uptimeSeconds"`
|
||||
@@ -99,19 +95,16 @@ var (
|
||||
testCert tls.Certificate
|
||||
knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
|
||||
listen = ":80"
|
||||
metricsListen = ":8081"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
statsRefresh = time.Minute
|
||||
requestQueueLen = 64
|
||||
requestProcessors = 8
|
||||
geoipLicenseKey = os.Getenv("GEOIP_LICENSE_KEY")
|
||||
geoipAccountID, _ = strconv.Atoi(os.Getenv("GEOIP_ACCOUNT_ID"))
|
||||
maxRelaysReturned = 100
|
||||
|
||||
requests chan request
|
||||
|
||||
@@ -131,46 +124,40 @@ func main() {
|
||||
log.SetFlags(log.Lshortfile)
|
||||
|
||||
flag.StringVar(&listen, "listen", listen, "Listen address")
|
||||
flag.StringVar(&metricsListen, "metrics-listen", metricsListen, "Metrics listen address")
|
||||
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
|
||||
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
|
||||
flag.StringVar(&knownRelaysFile, "known-relays", knownRelaysFile, "Path to list of current relays")
|
||||
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
|
||||
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
flag.DurationVar(&statsRefresh, "stats-refresh", statsRefresh, "Interval at which to refresh relay stats")
|
||||
flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
|
||||
flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")
|
||||
flag.StringVar(&geoipLicenseKey, "geoip-license-key", geoipLicenseKey, "License key for GeoIP database")
|
||||
flag.IntVar(&maxRelaysReturned, "max-relays-returned", maxRelaysReturned, "Maximum number of relays returned for a normal endpoint query")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
requests = make(chan request, requestQueueLen)
|
||||
geoip, err := geoip.NewGeoLite2CityProvider(context.Background(), geoipAccountID, geoipLicenseKey, os.TempDir())
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to create GeoIP provider:", err)
|
||||
}
|
||||
go geoip.Serve(context.TODO())
|
||||
|
||||
var listener net.Listener
|
||||
var err error
|
||||
|
||||
if permRelaysFile != "" {
|
||||
permanentRelays = loadRelays(permRelaysFile, geoip)
|
||||
permanentRelays = loadRelays(permRelaysFile)
|
||||
}
|
||||
|
||||
testCert = createTestCertificate()
|
||||
|
||||
for i := 0; i < requestProcessors; i++ {
|
||||
go requestProcessor(geoip)
|
||||
go requestProcessor()
|
||||
}
|
||||
|
||||
// Load relays from cache in the background.
|
||||
// Load them in a serial fashion to make sure any genuine requests
|
||||
// are not dropped.
|
||||
go func() {
|
||||
for _, relay := range loadRelays(knownRelaysFile, geoip) {
|
||||
for _, relay := range loadRelays(knownRelaysFile) {
|
||||
resultChan := make(chan result)
|
||||
requests <- request{relay, resultChan, nil}
|
||||
result := <-resultChan
|
||||
@@ -226,40 +213,15 @@ func main() {
|
||||
log.Fatalln("listen:", err)
|
||||
}
|
||||
|
||||
if metricsListen != "" {
|
||||
mmux := http.NewServeMux()
|
||||
mmux.HandleFunc("/metrics", handleMetrics)
|
||||
go func() {
|
||||
if err := http.ListenAndServe(metricsListen, mmux); err != nil {
|
||||
log.Fatalln("HTTP serve metrics:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
getMux := http.NewServeMux()
|
||||
getMux.HandleFunc("/", handleAssets)
|
||||
getMux.HandleFunc("/endpoint", withAPIMetrics(handleEndpointShort))
|
||||
getMux.HandleFunc("/endpoint/full", withAPIMetrics(handleEndpointFull))
|
||||
|
||||
postMux := http.NewServeMux()
|
||||
postMux.HandleFunc("/endpoint", withAPIMetrics(handleRegister))
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case http.MethodGet, http.MethodHead, http.MethodOptions:
|
||||
getMux.ServeHTTP(w, r)
|
||||
case http.MethodPost:
|
||||
postMux.ServeHTTP(w, r)
|
||||
default:
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
})
|
||||
handler := http.NewServeMux()
|
||||
handler.HandleFunc("/", handleAssets)
|
||||
handler.Handle("/endpoint", httpcache.SinglePath(http.HandlerFunc(handleRequest), 15*time.Second))
|
||||
handler.HandleFunc("/metrics", handleMetrics)
|
||||
|
||||
srv := http.Server{
|
||||
Handler: handler,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
}
|
||||
srv.SetKeepAlivesEnabled(false)
|
||||
|
||||
err = srv.Serve(listener)
|
||||
if err != nil {
|
||||
@@ -293,24 +255,39 @@ func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
assets.Serve(w, r, as)
|
||||
}
|
||||
|
||||
func withAPIMetrics(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
||||
w = NewLoggingResponseWriter(w)
|
||||
defer func() {
|
||||
timer.ObserveDuration()
|
||||
lw := w.(*loggingResponseWriter)
|
||||
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
next(w, r)
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
||||
|
||||
w = NewLoggingResponseWriter(w)
|
||||
defer func() {
|
||||
timer.ObserveDuration()
|
||||
lw := w.(*loggingResponseWriter)
|
||||
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
|
||||
if ipHeader != "" {
|
||||
hdr := r.Header.Get(ipHeader)
|
||||
fields := strings.Split(hdr, ",")
|
||||
if len(fields) > 0 {
|
||||
r.RemoteAddr = strings.TrimSpace(fields[len(fields)-1])
|
||||
}
|
||||
}
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
handlePostRequest(w, r)
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Unhandled HTTP method", r.Method)
|
||||
}
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
// handleEndpointFull returns the relay list with full metadata and
|
||||
// statistics. Large, and expensive.
|
||||
func handleEndpointFull(rw http.ResponseWriter, r *http.Request) {
|
||||
func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
rw.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
|
||||
mut.RLock()
|
||||
relays := make([]*relay, len(permanentRelays)+len(knownRelays))
|
||||
@@ -318,42 +295,17 @@ func handleEndpointFull(rw http.ResponseWriter, r *http.Request) {
|
||||
copy(relays[n:], knownRelays)
|
||||
mut.RUnlock()
|
||||
|
||||
// Shuffle
|
||||
rand.Shuffle(relays)
|
||||
|
||||
_ = json.NewEncoder(rw).Encode(map[string][]*relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
// handleEndpointShort returns the relay list with only the URL.
|
||||
func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
rw.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
|
||||
mut.RLock()
|
||||
relays := make([]relayShort, 0, len(permanentRelays)+len(knownRelays))
|
||||
for _, r := range append(permanentRelays, knownRelays...) {
|
||||
relays = append(relays, relayShort{URL: slimURL(r.URL)})
|
||||
}
|
||||
mut.RUnlock()
|
||||
if len(relays) > maxRelaysReturned {
|
||||
rand.Shuffle(relays)
|
||||
relays = relays[:maxRelaysReturned]
|
||||
}
|
||||
|
||||
_ = json.NewEncoder(rw).Encode(map[string][]relayShort{
|
||||
"relays": relays,
|
||||
})
|
||||
}
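So the short endpoint answers with a document of the shape {"relays": [{"url": ...}]}, each URL reduced by slimURL to keep only the id query parameter. A small sketch producing the same shape; the relay URL is a placeholder taken from the test data further down:

package main

import (
	"encoding/json"
	"os"
)

type relayShort struct {
	URL string `json:"url"`
}

func main() {
	// Placeholder relay address; real entries come from the registered relays.
	resp := map[string][]relayShort{
		"relays": {
			{URL: "relay://192.0.2.42:22067/?id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M"},
		},
	}
	// Prints: {"relays":[{"url":"relay://192.0.2.42:22067/?id=..."}]}
	_ = json.NewEncoder(os.Stdout).Encode(resp)
}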
|
||||
|
||||
func handleRegister(w http.ResponseWriter, r *http.Request) {
|
||||
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
// Get the IP address of the client
|
||||
rhost := r.RemoteAddr
|
||||
if ipHeader != "" {
|
||||
hdr := r.Header.Get(ipHeader)
|
||||
fields := strings.Split(hdr, ",")
|
||||
if len(fields) > 0 {
|
||||
rhost = strings.TrimSpace(fields[len(fields)-1])
|
||||
}
|
||||
}
|
||||
if host, _, err := net.SplitHostPort(rhost); err == nil {
|
||||
rhost = host
|
||||
}
|
||||
@@ -473,19 +425,19 @@ func handleRegister(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func requestProcessor(geoip *geoip.Provider) {
|
||||
func requestProcessor() {
|
||||
for request := range requests {
|
||||
if request.queueTimer != nil {
|
||||
request.queueTimer.ObserveDuration()
|
||||
}
|
||||
|
||||
timer := prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("test"))
|
||||
handleRelayTest(request, geoip)
|
||||
handleRelayTest(request)
|
||||
timer.ObserveDuration()
|
||||
}
|
||||
}
|
||||
|
||||
func handleRelayTest(request request, geoip *geoip.Provider) {
|
||||
func handleRelayTest(request request) {
|
||||
if debug {
|
||||
log.Println("Request for", request.relay)
|
||||
}
|
||||
@@ -498,7 +450,7 @@ func handleRelayTest(request request, geoip *geoip.Provider) {
|
||||
}
|
||||
|
||||
stats := fetchStats(request.relay)
|
||||
location := getLocation(request.relay.uri.Host, geoip)
|
||||
location := getLocation(request.relay.uri.Host)
|
||||
|
||||
mut.Lock()
|
||||
if stats != nil {
|
||||
@@ -571,7 +523,7 @@ func evict(relay *relay) func() {
|
||||
}
|
||||
}
|
||||
|
||||
func loadRelays(file string, geoip *geoip.Provider) []*relay {
|
||||
func loadRelays(file string) []*relay {
|
||||
content, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
log.Println("Failed to load relays: " + err.Error())
|
||||
@@ -595,7 +547,7 @@ func loadRelays(file string, geoip *geoip.Provider) []*relay {
|
||||
|
||||
relays = append(relays, &relay{
|
||||
URL: line,
|
||||
Location: getLocation(uri.Host, geoip),
|
||||
Location: getLocation(uri.Host),
|
||||
uri: uri,
|
||||
})
|
||||
if debug {
|
||||
@@ -628,16 +580,21 @@ func createTestCertificate() tls.Certificate {
|
||||
return cert
|
||||
}
|
||||
|
||||
func getLocation(host string, geoip *geoip.Provider) location {
|
||||
func getLocation(host string) location {
|
||||
timer := prometheus.NewTimer(locationLookupSeconds)
|
||||
defer timer.ObserveDuration()
|
||||
db, err := geoip2.Open(geoipPath)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
addr, err := net.ResolveTCPAddr("tcp", host)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
|
||||
city, err := geoip.City(addr.IP)
|
||||
city, err := db.City(addr.IP)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
@@ -703,16 +660,3 @@ func (b *errorTracker) IsBlocked(host string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func slimURL(u string) string {
|
||||
p, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return u
|
||||
}
|
||||
newQuery := url.Values{}
|
||||
if id := p.Query().Get("id"); id != "" {
|
||||
newQuery.Set("id", id)
|
||||
}
|
||||
p.RawQuery = newQuery.Encode()
|
||||
return p.String()
|
||||
}
|
||||
@@ -42,7 +42,7 @@ func TestHandleGetRequest(t *testing.T) {
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
w.Body = new(bytes.Buffer)
|
||||
handleEndpointFull(w, httptest.NewRequest("GET", "/", nil))
|
||||
handleGetRequest(w, httptest.NewRequest("GET", "/", nil))
|
||||
|
||||
result := make(map[string][]*relay)
|
||||
err := json.NewDecoder(w.Body).Decode(&result)
|
||||
@@ -92,18 +92,3 @@ func TestCanonicalizeQueryValues(t *testing.T) {
|
||||
t.Errorf("expected %q, got %q", exp, str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlimURL(t *testing.T) {
|
||||
cases := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{"http://example.com/", "http://example.com/"},
|
||||
{"relay://192.0.2.42:22067/?globalLimitBps=0&id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M&networkTimeout=2m0s&pingInterval=1m0s&providedBy=Test&sessionLimitBps=0&statusAddr=%3A22070", "relay://192.0.2.42:22067/?id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M"},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if got := slimURL(c.in); got != c.out {
|
||||
t.Errorf("expected %q, got %q", c.out, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,12 +6,27 @@ import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
|
||||
func init() {
|
||||
processCollectorOpts := collectors.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_relaypoolsrv",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
},
|
||||
}
|
||||
|
||||
prometheus.MustRegister(
|
||||
collectors.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
statusClient = http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
@@ -12,8 +12,9 @@ import (
|
||||
"time"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -184,7 +185,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config, token strin
|
||||
continue
|
||||
}
|
||||
// requestedPeer is the server, id is the client
|
||||
ses := newSession(requestedPeer, id, sessionLimitBps, globalLimiter)
|
||||
ses := newSession(requestedPeer, id, sessionLimiter, globalLimiter)
|
||||
|
||||
go ses.Serve()
|
||||
|
||||
|
||||
@@ -19,8 +19,6 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -32,6 +30,7 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
_ "github.com/syncthing/syncthing/lib/upnp"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -51,6 +50,7 @@ var (
|
||||
globalLimitBps int
|
||||
overLimit atomic.Bool
|
||||
descriptorLimit int64
|
||||
sessionLimiter *rate.Limiter
|
||||
globalLimiter *rate.Limiter
|
||||
networkBufferSize int
|
||||
|
||||
@@ -227,6 +227,9 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
if sessionLimitBps > 0 {
|
||||
sessionLimiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
|
||||
}
|
||||
if globalLimitBps > 0 {
|
||||
globalLimiter = rate.NewLimiter(rate.Limit(globalLimitBps), 2*globalLimitBps)
|
||||
}
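Both limiters are plain token buckets from golang.org/x/time/rate, created with the configured rate and a burst of twice that. The sketch below shows the general way such limiters cap proxied throughput, waiting for permission before each chunk is forwarded; it is an illustration only, not the actual makeRateLimitFunc used by the session code:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// limit blocks until every non-nil limiter allows another n bytes.
func limit(ctx context.Context, n int, limiters ...*rate.Limiter) error {
	for _, l := range limiters {
		if l == nil {
			continue
		}
		// n must not exceed the limiter burst (2*limit in the server above).
		if err := l.WaitN(ctx, n); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	session := rate.NewLimiter(rate.Limit(1024), 2048) // 1 KiB/s, 2 KiB burst
	start := time.Now()
	for i := 0; i < 4; i++ {
		_ = limit(context.Background(), 1024, session, nil) // nil: no global limit
	}
	fmt.Println("forwarded 4 KiB in", time.Since(start).Round(time.Second))
}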
|
||||
|
||||
@@ -27,7 +27,7 @@ var (
|
||||
bytesProxied atomic.Int64
|
||||
)
|
||||
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionLimitBps int, globalRateLimit *rate.Limiter) *session {
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
|
||||
serverkey := make([]byte, 32)
|
||||
_, err := rand.Read(serverkey)
|
||||
if err != nil {
|
||||
@@ -40,17 +40,12 @@ func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionLimitBps i
|
||||
return nil
|
||||
}
|
||||
|
||||
var sessionRateLimit *rate.Limiter
|
||||
if sessionLimitBps > 0 {
|
||||
sessionRateLimit = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
|
||||
}
|
||||
ses := &session{
|
||||
serverkey: serverkey,
|
||||
serverid: serverid,
|
||||
clientkey: clientkey,
|
||||
clientid: clientid,
|
||||
rateLimit: makeRateLimitFunc(sessionRateLimit, globalRateLimit),
|
||||
limiter: sessionRateLimit,
|
||||
connsChan: make(chan net.Conn),
|
||||
conns: make([]net.Conn, 0, 2),
|
||||
}
|
||||
@@ -73,6 +68,7 @@ func findSession(key string) *session {
|
||||
ses, ok := pendingSessions[key]
|
||||
if !ok {
|
||||
return nil
|
||||
|
||||
}
|
||||
delete(pendingSessions, key)
|
||||
return ses
|
||||
@@ -114,7 +110,6 @@ type session struct {
|
||||
clientid syncthingprotocol.DeviceID
|
||||
|
||||
rateLimit func(bytes int)
|
||||
limiter *rate.Limiter
|
||||
|
||||
connsChan chan net.Conn
|
||||
conns []net.Conn
|
||||
|
||||
149
cmd/stupgrades/main.go
Normal file
@@ -0,0 +1,149 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"sort"
"strings"
"time"

"github.com/alecthomas/kong"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/httpcache"
"github.com/syncthing/syncthing/lib/upgrade"
)

type cli struct {
Listen string `default:":8080" help:"Listen address"`
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
CacheTime time.Duration `default:"15m" help:"Cache time"`
}
|
||||
|
||||
func main() {
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
if err := server(¶ms); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func server(params *cli) error {
|
||||
http.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime))
|
||||
|
||||
for _, fwd := range params.Forward {
|
||||
path, url, ok := strings.Cut(fwd, "->")
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid forward: %q", fwd)
|
||||
}
|
||||
http.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
|
||||
}
|
||||
|
||||
return http.ListenAndServe(params.Listen, nil)
|
||||
}
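Each -f/--forward spec is split on the literal "->" into a local path and an upstream URL, as server does above with strings.Cut. A tiny sketch of that parsing; the upstream URL is a made-up placeholder:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical forward spec; the upstream URL is a placeholder.
	fwd := "/nightly.json->https://upgrades.example.com/meta.json"
	path, url, ok := strings.Cut(fwd, "->")
	if !ok {
		fmt.Println("invalid forward:", fwd)
		return
	}
	fmt.Printf("serve %s by proxying %s\n", path, url)
}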
|
||||
|
||||
type githubReleases struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *githubReleases) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
rels := upgrade.FetchLatestReleases(p.url, "")
|
||||
if rels == nil {
|
||||
http.Error(w, "no releases", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
// Move the URL used for browser downloads to the URL field, and remove
|
||||
// the browser URL field. This avoids going via the GitHub API for
|
||||
// downloads, since Syncthing uses the URL field.
|
||||
for _, rel := range rels {
|
||||
for j, asset := range rel.Assets {
|
||||
rel.Assets[j].URL = asset.BrowserURL
|
||||
rel.Assets[j].BrowserURL = ""
|
||||
}
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_ = json.NewEncoder(buf).Encode(rels)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
w.Header().Set("Content-Type", ct)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
}
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
if strings.HasPrefix(ct, "application/json") {
|
||||
// Special JSON handling; clean it up a bit.
|
||||
var v interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
} else {
|
||||
_, _ = io.Copy(w, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
// filterForLatest returns the latest stable and prerelease only. If the
|
||||
// stable version is newer (comes first in the list) there is no need to go
|
||||
// looking for a prerelease at all.
|
||||
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
|
||||
var filtered []upgrade.Release
|
||||
var havePre bool
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease {
|
||||
// We found a stable version, we're good now.
|
||||
filtered = append(filtered, rel)
|
||||
break
|
||||
}
|
||||
if rel.Prerelease && !havePre {
|
||||
// We remember the first prerelease we find.
|
||||
filtered = append(filtered, rel)
|
||||
havePre = true
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
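Applied to a newest-first release list, this keeps at most one prerelease (only when it comes before, i.e. is newer than, the newest stable release) and always stops at the first stable release. A self-contained sketch of the same selection rule on a local release type, so nothing is assumed about the upgrade package API:

package main

import "fmt"

type release struct {
	Tag        string
	Prerelease bool
}

// latest mirrors the filterForLatest logic: newest-first input, keep the
// first prerelease seen, stop once a stable release is found.
func latest(rels []release) []release {
	var out []release
	havePre := false
	for _, r := range rels {
		if !r.Prerelease {
			out = append(out, r)
			break
		}
		if !havePre {
			out = append(out, r)
			havePre = true
		}
	}
	return out
}

func main() {
	rels := []release{
		{Tag: "v2.0.0-rc.1", Prerelease: true},
		{Tag: "v1.9.9"},
		{Tag: "v1.9.8"},
	}
	fmt.Println(latest(rels)) // [{v2.0.0-rc.1 true} {v1.9.9 false}]
}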
|
||||
@@ -7,7 +7,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -15,6 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -11,10 +11,6 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/internal/gen/dbproto"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
@@ -37,19 +33,19 @@ func indexDump() error {
|
||||
name := nulString(key[1+4+4:])
|
||||
fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
|
||||
|
||||
var f bep.FileInfo
|
||||
err := proto.Unmarshal(it.Value(), &f)
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" V:%v\n", &f)
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv dbproto.VersionList
|
||||
proto.Unmarshal(it.Value(), &flv)
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, &flv)
|
||||
var flv db.VersionList
|
||||
flv.Unmarshal(it.Value())
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
@@ -98,11 +94,11 @@ func indexDump() error {
|
||||
case db.KeyTypeFolderMeta:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
fmt.Printf("[foldermeta] F:%d", folder)
|
||||
var cs dbproto.CountsSet
|
||||
if err := proto.Unmarshal(it.Value(), &cs); err != nil {
|
||||
var cs db.CountsSet
|
||||
if err := cs.Unmarshal(it.Value()); err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", &cs)
|
||||
fmt.Printf(" V:%v\n", cs)
|
||||
}
|
||||
|
||||
case db.KeyTypeMiscData:
|
||||
@@ -129,20 +125,20 @@ func indexDump() error {
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
fmt.Printf("[version] H:%x", key[1:])
|
||||
var v bep.Vector
|
||||
err := proto.Unmarshal(it.Value(), &v)
|
||||
var v protocol.Vector
|
||||
err := v.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", &v)
|
||||
fmt.Printf(" V:%v\n", v)
|
||||
}
|
||||
|
||||
case db.KeyTypePendingFolder:
|
||||
device := binary.BigEndian.Uint32(key[1:])
|
||||
folder := string(key[5:])
|
||||
var of dbproto.ObservedFolder
|
||||
proto.Unmarshal(it.Value(), &of)
|
||||
fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, &of)
|
||||
var of db.ObservedFolder
|
||||
of.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, of)
|
||||
|
||||
case db.KeyTypePendingDevice:
|
||||
device := "<invalid>"
|
||||
@@ -150,9 +146,9 @@ func indexDump() error {
|
||||
if err == nil {
|
||||
device = dev.String()
|
||||
}
|
||||
var od dbproto.ObservedDevice
|
||||
proto.Unmarshal(it.Value(), &od)
|
||||
fmt.Printf("[pendingDevice] D:%v V:%v\n", device, &od)
|
||||
var od db.ObservedDevice
|
||||
od.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingDevice] D:%v V:%v\n", device, od)
|
||||
|
||||
default:
|
||||
fmt.Printf("[??? %d]\n %x\n %x\n", key[0], key, it.Value())
|
||||
|
||||
@@ -7,10 +7,9 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
)
|
||||
@@ -78,8 +77,8 @@ func indexDumpSize() error {
|
||||
elems = append(elems, ele)
|
||||
}
|
||||
|
||||
slices.SortFunc(elems, func(a, b sizedElement) int {
|
||||
return cmp.Compare(b.size, a.size)
|
||||
sort.Slice(elems, func(i, j int) bool {
|
||||
return elems[i].size > elems[j].size
|
||||
})
|
||||
for _, ele := range elems {
|
||||
fmt.Println(ele.key, ele.size)
|
||||
|
||||
@@ -8,16 +8,11 @@ package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/internal/gen/dbproto"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
@@ -47,12 +42,12 @@ func indexCheck() (err error) {
|
||||
folders := make(map[uint32]string)
|
||||
devices := make(map[uint32]string)
|
||||
deviceToIDs := make(map[string]uint32)
|
||||
fileInfos := make(map[fileInfoKey]*bep.FileInfo)
|
||||
globals := make(map[globalKey]*dbproto.VersionList)
|
||||
fileInfos := make(map[fileInfoKey]protocol.FileInfo)
|
||||
globals := make(map[globalKey]db.VersionList)
|
||||
sequences := make(map[sequenceKey]string)
|
||||
needs := make(map[globalKey]struct{})
|
||||
blocklists := make(map[string]struct{})
|
||||
versions := make(map[string]*bep.Vector)
|
||||
versions := make(map[string]protocol.Vector)
|
||||
usedBlocklists := make(map[string]struct{})
|
||||
usedVersions := make(map[string]struct{})
|
||||
var localDeviceKey uint32
|
||||
@@ -79,26 +74,26 @@ func indexCheck() (err error) {
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
|
||||
var f bep.FileInfo
|
||||
err := proto.Unmarshal(it.Value(), &f)
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
fmt.Println("Unable to unmarshal FileInfo:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
fileInfos[fileInfoKey{folder, device, name}] = &f
|
||||
fileInfos[fileInfoKey{folder, device, name}] = f
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv dbproto.VersionList
|
||||
if err := proto.Unmarshal(it.Value(), &flv); err != nil {
|
||||
var flv db.VersionList
|
||||
if err := flv.Unmarshal(it.Value()); err != nil {
|
||||
fmt.Println("Unable to unmarshal VersionList:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
globals[globalKey{folder, name}] = &flv
|
||||
globals[globalKey{folder, name}] = flv
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
@@ -129,13 +124,13 @@ func indexCheck() (err error) {
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
hash := string(key[1:])
|
||||
var v bep.Vector
|
||||
if err := proto.Unmarshal(it.Value(), &v); err != nil {
|
||||
var v protocol.Vector
|
||||
if err := v.Unmarshal(it.Value()); err != nil {
|
||||
fmt.Println("Unable to unmarshal Vector:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
versions[hash] = &v
|
||||
versions[hash] = v
|
||||
}
|
||||
}
|
||||
|
||||
@@ -208,11 +203,11 @@ func indexCheck() (err error) {
|
||||
|
||||
// Aggregate the ranges of missing sequence entries, print them
|
||||
|
||||
slices.SortFunc(missingSeq, func(a, b sequenceKey) int {
|
||||
if a.folder != b.folder {
|
||||
return cmp.Compare(a.folder, b.folder)
|
||||
sort.Slice(missingSeq, func(a, b int) bool {
|
||||
if missingSeq[a].folder != missingSeq[b].folder {
|
||||
return missingSeq[a].folder < missingSeq[b].folder
|
||||
}
|
||||
return cmp.Compare(a.sequence, b.sequence)
|
||||
return missingSeq[a].sequence < missingSeq[b].sequence
|
||||
})
|
||||
|
||||
var folder uint32
|
||||
@@ -253,27 +248,25 @@ func indexCheck() (err error) {
|
||||
if fi.VersionHash != nil {
|
||||
fiv = versions[string(fi.VersionHash)]
|
||||
}
|
||||
if !protocol.VectorFromWire(fiv).Equal(version) {
|
||||
if !fiv.Equal(version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, version, fi.Version)
|
||||
success = false
|
||||
}
|
||||
ffi := protocol.FileInfoFromDB(fi)
|
||||
if ffi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, ffi.IsInvalid())
|
||||
if fi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, fi.IsInvalid())
|
||||
success = false
|
||||
}
|
||||
if ffi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, ffi.IsDeleted())
|
||||
if fi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, fi.IsDeleted())
|
||||
success = false
|
||||
}
|
||||
}
|
||||
for i, fv := range vl.Versions {
|
||||
ver := protocol.VectorFromWire(fv.Version)
|
||||
for i, fv := range vl.RawVersions {
|
||||
for _, device := range fv.Devices {
|
||||
checkGlobal(i, device, ver, false, fv.Deleted)
|
||||
checkGlobal(i, device, fv.Version, false, fv.Deleted)
|
||||
}
|
||||
for _, device := range fv.InvalidDevices {
|
||||
checkGlobal(i, device, ver, true, fv.Deleted)
|
||||
checkGlobal(i, device, fv.Version, true, fv.Deleted)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -283,10 +276,10 @@ func indexCheck() (err error) {
|
||||
if needsLocally(vl) {
|
||||
_, ok := needs[gk]
|
||||
if !ok {
|
||||
fv, _ := vlGetGlobal(vl)
|
||||
devB, _ := fvFirstDevice(fv)
|
||||
fv, _ := vl.GetGlobal()
|
||||
devB, _ := fv.FirstDevice()
|
||||
dev := deviceToIDs[string(devB)]
|
||||
fi := protocol.FileInfoFromDB(fileInfos[fileInfoKey{gk.folder, dev, gk.name}])
|
||||
fi := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
if !fi.IsDeleted() && !fi.IsIgnored() {
|
||||
fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
|
||||
}
|
||||
@@ -352,84 +345,11 @@ func indexCheck() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func needsLocally(vl *dbproto.VersionList) bool {
|
||||
gfv, gok := vlGetGlobal(vl)
|
||||
func needsLocally(vl db.VersionList) bool {
|
||||
gfv, gok := vl.GetGlobal()
|
||||
if !gok { // That's weird, but we hardly need something non-existent
|
||||
return false
|
||||
}
|
||||
fv, ok := vlGet(vl, protocol.LocalDeviceID[:])
|
||||
return db.Need(gfv, ok, protocol.VectorFromWire(fv.Version))
|
||||
}
|
||||
|
||||
// Get returns a FileVersion that contains the given device and whether it has
|
||||
// been found at all.
|
||||
func vlGet(vl *dbproto.VersionList, device []byte) (*dbproto.FileVersion, bool) {
|
||||
_, i, _, ok := vlFindDevice(vl, device)
|
||||
if !ok {
|
||||
return &dbproto.FileVersion{}, false
|
||||
}
|
||||
return vl.Versions[i], true
|
||||
}
|
||||
|
||||
// GetGlobal returns the current global FileVersion. The returned FileVersion
|
||||
// may be invalid, if all FileVersions are invalid. Returns false only if
|
||||
// VersionList is empty.
|
||||
func vlGetGlobal(vl *dbproto.VersionList) (*dbproto.FileVersion, bool) {
|
||||
i := vlFindGlobal(vl)
|
||||
if i == -1 {
|
||||
return nil, false
|
||||
}
|
||||
return vl.Versions[i], true
|
||||
}
|
||||
|
||||
// findGlobal returns the first version that isn't invalid, or if all versions are
|
||||
// invalid just the first version (i.e. 0) or -1, if there's no versions at all.
|
||||
func vlFindGlobal(vl *dbproto.VersionList) int {
|
||||
for i := range vl.Versions {
|
||||
if !fvIsInvalid(vl.Versions[i]) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
if len(vl.Versions) == 0 {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// findDevice returns whether the device is in InvalidVersions or Versions and
|
||||
// in InvalidDevices or Devices (true for invalid), the positions in the version
|
||||
// and device slices and whether it has been found at all.
|
||||
func vlFindDevice(vl *dbproto.VersionList, device []byte) (bool, int, int, bool) {
|
||||
for i, v := range vl.Versions {
|
||||
if j := deviceIndex(v.Devices, device); j != -1 {
|
||||
return false, i, j, true
|
||||
}
|
||||
if j := deviceIndex(v.InvalidDevices, device); j != -1 {
|
||||
return true, i, j, true
|
||||
}
|
||||
}
|
||||
return false, -1, -1, false
|
||||
}
|
||||
|
||||
func deviceIndex(devices [][]byte, device []byte) int {
|
||||
for i, dev := range devices {
|
||||
if bytes.Equal(device, dev) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func fvFirstDevice(fv *dbproto.FileVersion) ([]byte, bool) {
|
||||
if len(fv.Devices) != 0 {
|
||||
return fv.Devices[0], true
|
||||
}
|
||||
if len(fv.InvalidDevices) != 0 {
|
||||
return fv.InvalidDevices[0], true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func fvIsInvalid(fv *dbproto.FileVersion) bool {
|
||||
return fv == nil || len(fv.Devices) == 0
|
||||
fv, ok := vl.Get(protocol.LocalDeviceID[:])
|
||||
return db.Need(gfv, ok, fv.Version)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/kballard/go-shellquote"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -67,7 +67,7 @@ func (*stdinCommand) Run() error {
|
||||
fmt.Println("Reading commands from stdin...", args)
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shellquote.Split(scanner.Text())
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing input: %w", err)
|
||||
}
|
||||
|
||||
@@ -9,14 +9,15 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -37,9 +38,7 @@ func uploadPanicLogs(ctx context.Context, urlBase, dir string) {
|
||||
return
|
||||
}
|
||||
|
||||
slices.SortFunc(files, func(a, b string) int {
|
||||
return strings.Compare(b, a)
|
||||
})
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(files)))
|
||||
for _, file := range files {
|
||||
if strings.Contains(file, ".reported.") {
|
||||
// We've already sent this file. It'll be cleaned out at some
|
||||
|
||||
@@ -10,4 +10,6 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
)
|
||||
|
||||
var l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
var (
|
||||
l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
)
|
||||
|
||||
@@ -17,9 +17,6 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
@@ -283,11 +280,10 @@ func loadEncryptedFileInfo(fd fs.File) (*protocol.FileInfo, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encFi bep.FileInfo
|
||||
if err := proto.Unmarshal(trailer, &encFi); err != nil {
|
||||
var encFi protocol.FileInfo
|
||||
if err := encFi.Unmarshal(trailer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi := protocol.FileInfoFromWire(&encFi)
|
||||
|
||||
return &fi, nil
|
||||
return &encFi, nil
|
||||
}
|
||||
|
||||
@@ -23,14 +23,14 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime/pprof"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/gofrs/flock"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/thejerf/suture/v4"
|
||||
"github.com/willabides/kongplete"
|
||||
|
||||
@@ -38,10 +38,10 @@ import (
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/decrypt"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/generate"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/dialer"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
@@ -92,6 +92,11 @@ above.
|
||||
STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STHASHING Select the SHA256 hashing package to use. Possible values
|
||||
are "standard" for the Go standard library implementation,
|
||||
"minio" for the github.com/minio/sha256-simd implementation,
|
||||
and blank (the default) for auto detection.
|
||||
|
||||
STVERSIONEXTRA Add extra information to the version string in logs and the
|
||||
version line in the GUI. Can be set to the name of a wrapper
|
||||
or tool controlling syncthing to communicate this to the end
|
||||
@@ -349,12 +354,10 @@ func (options serveOptions) Run() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure that our config and data directories exist.
|
||||
for _, loc := range []locations.BaseDirEnum{locations.ConfigBaseDir, locations.DataBaseDir} {
|
||||
if err := syncthing.EnsureDir(locations.GetBaseDir(loc), 0o700); err != nil {
|
||||
l.Warnln("Failed to ensure directory exists:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
// Ensure that our home directory exists.
|
||||
if err := syncthing.EnsureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0o700); err != nil {
|
||||
l.Warnln("Failure on home directory:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
if options.UpgradeTo != "" {
|
||||
@@ -378,18 +381,15 @@ func (options serveOptions) Run() error {
|
||||
if options.Upgrade {
|
||||
release, err := checkUpgrade()
|
||||
if err == nil {
|
||||
lf := flock.New(locations.Get(locations.LockFile))
|
||||
locked, err := lf.TryLock()
|
||||
// Use leveldb database locks to protect against concurrent upgrades
|
||||
var ldb backend.Backend
|
||||
ldb, err = syncthing.OpenDBBackend(locations.Get(locations.Database), config.TuningAuto)
|
||||
if err != nil {
|
||||
l.Warnln("Upgrade:", err)
|
||||
os.Exit(1)
|
||||
} else if locked {
|
||||
err = upgradeViaRest()
|
||||
} else {
|
||||
_ = ldb.Close()
|
||||
err = upgrade.To(release)
|
||||
}
|
||||
_ = lf.Unlock()
|
||||
_ = os.Remove(locations.Get(locations.LockFile))
|
||||
}
|
||||
if err != nil {
|
||||
l.Warnln("Upgrade:", err)
|
||||
@@ -443,7 +443,7 @@ func debugFacilities() string {
|
||||
maxLen = len(name)
|
||||
}
|
||||
}
|
||||
slices.Sort(names)
|
||||
sort.Strings(names)
|
||||
|
||||
// Format the choices
|
||||
b := new(bytes.Buffer)
|
||||
@@ -549,17 +549,6 @@ func syncthingMain(options serveOptions) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Ensure we are the only running instance
|
||||
lf := flock.New(locations.Get(locations.LockFile))
|
||||
locked, err := lf.TryLock()
|
||||
if err != nil {
|
||||
l.Warnln("Failed to acquire lock:", err)
|
||||
os.Exit(1)
|
||||
} else if !locked {
|
||||
l.Warnln("Failed to acquire lock: is another Syncthing instance already running?")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@@ -578,7 +567,6 @@ func syncthingMain(options serveOptions) {
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
earlyService.Add(cfgWrapper)
|
||||
config.RegisterInfoMetrics(cfgWrapper)
|
||||
|
||||
// Candidate builds should auto upgrade. Make sure the option is set,
|
||||
// unless we are in a build where it's disabled or the STNOUPGRADE
|
||||
@@ -641,21 +629,9 @@ func syncthingMain(options serveOptions) {
|
||||
DBRecheckInterval: options.DebugDBRecheckInterval,
|
||||
DBIndirectGCInterval: options.DebugDBIndirectGCInterval,
|
||||
}
|
||||
|
||||
if options.Audit || cfgWrapper.Options().AuditEnabled {
|
||||
l.Infoln("Auditing is enabled.")
|
||||
|
||||
auditFile := cfgWrapper.Options().AuditFile
|
||||
|
||||
// Ignore config option if command-line option is set
|
||||
if options.AuditFile != "" {
|
||||
l.Debugln("Using the audit file from the command-line parameter.")
|
||||
auditFile = options.AuditFile
|
||||
}
|
||||
|
||||
appOpts.AuditWriter = auditWriter(auditFile)
|
||||
if options.Audit {
|
||||
appOpts.AuditWriter = auditWriter(options.AuditFile)
|
||||
}
|
||||
|
||||
if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil {
|
||||
appOpts.DBRecheckInterval = dur
|
||||
}
|
||||
@@ -709,10 +685,6 @@ func syncthingMain(options serveOptions) {
|
||||
pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
// Best effort remove lockfile, doesn't matter if it succeeds
|
||||
_ = lf.Unlock()
|
||||
_ = os.Remove(locations.Get(locations.LockFile))
|
||||
|
||||
os.Exit(int(status))
|
||||
}
|
||||
|
||||
@@ -863,10 +835,6 @@ func initialAutoUpgradeCheck(misc *db.NamespacedKV) (upgrade.Release, error) {
|
||||
if err != nil {
|
||||
return upgrade.Release{}, err
|
||||
}
|
||||
if upgrade.CompareVersions(release.Tag, build.Version) == upgrade.MajorNewer {
|
||||
return upgrade.Release{}, errors.New("higher major version")
|
||||
}
|
||||
|
||||
if lastVersion, ok, err := misc.String(upgradeVersionKey); err == nil && ok && lastVersion == release.Tag {
|
||||
// Only check time if we try to upgrade to the same release.
|
||||
if lastTime, ok, err := misc.Time(upgradeTimeKey); err == nil && ok && time.Since(lastTime) < upgradeRetryInterval {
|
||||
|
||||
@@ -64,7 +64,7 @@ func monitorMain(options serveOptions) {
|
||||
fileDst, err = open(logFile)
|
||||
}
|
||||
if err != nil {
|
||||
l.Warnln("Failed to set up logging to file, proceeding with logging to stdout only:", err)
|
||||
l.Warnln("Failed to setup logging to file, proceeding with logging to stdout only:", err)
|
||||
} else {
|
||||
if build.IsWindows {
|
||||
// Translate line breaks to Windows standard
|
||||
@@ -238,18 +238,35 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
|
||||
return
|
||||
}
|
||||
|
||||
dst.Write([]byte(line))
|
||||
if panicFd == nil {
|
||||
dst.Write([]byte(line))
|
||||
|
||||
if panicFd == nil && (strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:")) {
|
||||
panicFd, err = os.Create(locations.GetTimestamped(locations.PanicLog))
|
||||
if err != nil {
|
||||
l.Warnln("Create panic log:", err)
|
||||
continue
|
||||
if strings.Contains(line, "SIGILL") {
|
||||
l.Warnln(`
|
||||
*******************************************************************************
|
||||
* Crash due to illegal instruction detected. This is most likely due to a CPU *
|
||||
* incompatibility with the high performance hashing package. Switching to the *
|
||||
* standard hashing package instead. Please report this issue at: *
|
||||
* *
|
||||
* https://github.com/syncthing/syncthing/issues *
|
||||
* *
|
||||
* Include the details of your CPU. *
|
||||
*******************************************************************************
|
||||
`)
|
||||
os.Setenv("STHASHING", "standard")
|
||||
return
|
||||
}
|
||||
|
||||
l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
|
||||
if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
|
||||
l.Warnln(`
|
||||
if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
|
||||
panicFd, err = os.Create(locations.GetTimestamped(locations.PanicLog))
|
||||
if err != nil {
|
||||
l.Warnln("Create panic log:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
|
||||
if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
|
||||
l.Warnln(`
|
||||
*********************************************************************************
|
||||
* Crash due to corrupt database. *
|
||||
* *
|
||||
@@ -262,20 +279,21 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
|
||||
* https://docs.syncthing.net/users/faq.html#my-syncthing-database-is-corrupt *
|
||||
*********************************************************************************
|
||||
`)
|
||||
} else {
|
||||
l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
|
||||
l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
|
||||
}
|
||||
} else {
|
||||
l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
|
||||
l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
|
||||
}
|
||||
|
||||
stdoutMut.Lock()
|
||||
for _, line := range stdoutFirstLines {
|
||||
panicFd.WriteString(line)
|
||||
stdoutMut.Lock()
|
||||
for _, line := range stdoutFirstLines {
|
||||
panicFd.WriteString(line)
|
||||
}
|
||||
panicFd.WriteString("...\n")
|
||||
for _, line := range stdoutLastLines {
|
||||
panicFd.WriteString(line)
|
||||
}
|
||||
stdoutMut.Unlock()
|
||||
}
|
||||
panicFd.WriteString("...\n")
|
||||
for _, line := range stdoutLastLines {
|
||||
panicFd.WriteString(line)
|
||||
}
|
||||
stdoutMut.Unlock()
|
||||
|
||||
panicFd.WriteString("Panic at " + time.Now().Format(time.RFC3339) + "\n")
|
||||
}
|
||||
|
||||
@@ -140,7 +140,7 @@ func checkNotExist(t *testing.T, name string) {
|
||||
func TestAutoClosedFile(t *testing.T) {
|
||||
os.RemoveAll("_autoclose")
|
||||
defer os.RemoveAll("_autoclose")
|
||||
os.Mkdir("_autoclose", 0o755)
|
||||
os.Mkdir("_autoclose", 0755)
|
||||
file := filepath.FromSlash("_autoclose/tmp")
|
||||
data := []byte("hello, world\n")
|
||||
|
||||
|
||||
226
cmd/ursrv/aggregate/aggregate.go
Normal file
@@ -0,0 +1,226 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package aggregate
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
DBConn string `env:"UR_DB_URL" default:"postgres://user:password@localhost/ur?sslmode=disable"`
|
||||
}
|
||||
|
||||
func (cli *CLI) Run() error {
|
||||
log.SetFlags(log.Ltime | log.Ldate)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
db, err := sql.Open("postgres", cli.DBConn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("database: %w", err)
|
||||
}
|
||||
err = setupDB(db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("database: %w", err)
|
||||
}
|
||||
|
||||
for {
|
||||
runAggregation(db)
|
||||
// Sleep until one minute past next midnight
|
||||
sleepUntilNext(24*time.Hour, 1*time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
func runAggregation(db *sql.DB) {
|
||||
since := maxIndexedDay(db, "VersionSummary")
|
||||
log.Println("Aggregating VersionSummary data since", since)
|
||||
rows, err := aggregateVersionSummary(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
since = maxIndexedDay(db, "Performance")
|
||||
log.Println("Aggregating Performance data since", since)
|
||||
rows, err = aggregatePerformance(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
since = maxIndexedDay(db, "BlockStats")
|
||||
log.Println("Aggregating BlockStats data since", since)
|
||||
rows, err = aggregateBlockStats(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
}
|
||||
|
||||
func sleepUntilNext(intv, margin time.Duration) {
|
||||
now := time.Now().UTC()
|
||||
next := now.Truncate(intv).Add(intv).Add(margin)
|
||||
log.Println("Sleeping until", next)
|
||||
time.Sleep(next.Sub(now))
|
||||
}
|
||||
|
||||
func setupDB(db *sql.DB) error {
|
||||
_, err := db.Exec(`CREATE TABLE IF NOT EXISTS VersionSummary (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
Version VARCHAR(8) NOT NULL,
|
||||
Count INTEGER NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Performance (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
TotFiles INTEGER NOT NULL,
|
||||
TotMiB INTEGER NOT NULL,
|
||||
SHA256Perf DOUBLE PRECISION NOT NULL,
|
||||
MemorySize INTEGER NOT NULL,
|
||||
MemoryUsageMiB INTEGER NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS BlockStats (
|
||||
Day TIMESTAMP NOT NULL,
|
||||
Reports INTEGER NOT NULL,
|
||||
Total BIGINT NOT NULL,
|
||||
Renamed BIGINT NOT NULL,
|
||||
Reused BIGINT NOT NULL,
|
||||
Pulled BIGINT NOT NULL,
|
||||
CopyOrigin BIGINT NOT NULL,
|
||||
CopyOriginShifted BIGINT NOT NULL,
|
||||
CopyElsewhere BIGINT NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var t string
|
||||
|
||||
row := db.QueryRow(`SELECT 'UniqueDayVersionIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'VersionDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'BlockStatsDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, _ = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func maxIndexedDay(db *sql.DB, table string) time.Time {
|
||||
var t time.Time
|
||||
row := db.QueryRow("SELECT MAX(DATE_TRUNC('day', Day)) FROM " + table)
|
||||
err := row.Scan(&t)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO VersionSummary (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
SUBSTRING(Report->>'version' FROM '^v\d.\d+') AS Ver,
|
||||
COUNT(*) AS Count
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND Report->>'version' like 'v_.%'
|
||||
GROUP BY Day, Ver
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
|
||||
func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO Performance (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
AVG((Report->>'totFiles')::numeric) As TotFiles,
|
||||
AVG((Report->>'totMiB')::numeric) As TotMiB,
|
||||
AVG((Report->>'sha256Perf')::numeric) As SHA256Perf,
|
||||
AVG((Report->>'memorySize')::numeric) As MemorySize,
|
||||
AVG((Report->>'memoryUsageMiB')::numeric) As MemoryUsageMiB
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND Report->>'version' like 'v_.%'
|
||||
/* Some custom implementations reported bytes when we expect megabytes; cap at a petabyte */
|
||||
AND (Report->>'memorySize')::numeric < 1073741824
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
|
||||
func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
|
||||
// Filter out anything prior to 0.14.41 as that has sum aggregations which
|
||||
// made no sense.
|
||||
res, err := db.Exec(`INSERT INTO BlockStats (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
COUNT(1) As Reports,
|
||||
SUM((Report->'blockStats'->>'total')::numeric)::bigint AS Total,
|
||||
SUM((Report->'blockStats'->>'renamed')::numeric)::bigint AS Renamed,
|
||||
SUM((Report->'blockStats'->>'reused')::numeric)::bigint AS Reused,
|
||||
SUM((Report->'blockStats'->>'pulled')::numeric)::bigint AS Pulled,
|
||||
SUM((Report->'blockStats'->>'copyOrigin')::numeric)::bigint AS CopyOrigin,
|
||||
SUM((Report->'blockStats'->>'copyOriginShifted')::numeric)::bigint AS CopyOriginShifted,
|
||||
SUM((Report->'blockStats'->>'copyElsewhere')::numeric)::bigint AS CopyElsewhere
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
Received > $1
|
||||
AND Received < DATE_TRUNC('day', NOW())
|
||||
AND (Report->>'urVersion')::numeric >= 3
|
||||
AND Report->>'version' like 'v_.%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.40%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.39%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.38%'
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return res.RowsAffected()
|
||||
}
|
||||
@@ -8,22 +8,22 @@ package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/syncthing/syncthing/cmd/infra/ursrv/serve"
|
||||
"github.com/syncthing/syncthing/cmd/ursrv/aggregate"
|
||||
"github.com/syncthing/syncthing/cmd/ursrv/serve"
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
Serve serve.CLI `cmd:"" default:""`
|
||||
Serve serve.CLI `cmd:"" default:""`
|
||||
Aggregate aggregate.CLI `cmd:""`
|
||||
}
|
||||
|
||||
func main() {
|
||||
slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
|
||||
Level: slog.LevelInfo,
|
||||
})))
|
||||
log.SetFlags(log.Ltime | log.Ldate | log.Lshortfile)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
var cli CLI
|
||||
ctx := kong.Parse(&cli)
|
||||
276
cmd/ursrv/serve/analytics.go
Normal file
@@ -0,0 +1,276 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type analytic struct {
|
||||
Key string
|
||||
Count int
|
||||
Percentage float64
|
||||
Items []analytic `json:",omitempty"`
|
||||
}
|
||||
|
||||
type analyticList []analytic
|
||||
|
||||
func (l analyticList) Less(a, b int) bool {
|
||||
if l[a].Key == "Others" {
|
||||
return false
|
||||
}
|
||||
if l[b].Key == "Others" {
|
||||
return true
|
||||
}
|
||||
return l[b].Count < l[a].Count // inverse
|
||||
}
|
||||
|
||||
func (l analyticList) Swap(a, b int) {
|
||||
l[a], l[b] = l[b], l[a]
|
||||
}
|
||||
|
||||
func (l analyticList) Len() int {
|
||||
return len(l)
|
||||
}
|
||||
|
||||
// Returns a list of frequency analytics for a given list of strings.
|
||||
func analyticsFor(ss []string, cutoff int) []analytic {
|
||||
m := make(map[string]int)
|
||||
t := 0
|
||||
for _, s := range ss {
|
||||
m[s]++
|
||||
t++
|
||||
}
|
||||
|
||||
l := make([]analytic, 0, len(m))
|
||||
for k, c := range m {
|
||||
l = append(l, analytic{
|
||||
Key: k,
|
||||
Count: c,
|
||||
Percentage: 100 * float64(c) / float64(t),
|
||||
})
|
||||
}
|
||||
|
||||
sort.Sort(analyticList(l))
|
||||
|
||||
if cutoff > 0 && len(l) > cutoff {
|
||||
c := 0
|
||||
for _, i := range l[cutoff:] {
|
||||
c += i.Count
|
||||
}
|
||||
l = append(l[:cutoff], analytic{
|
||||
Key: "Others",
|
||||
Count: c,
|
||||
Percentage: 100 * float64(c) / float64(t),
|
||||
})
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// Find the points at which certain penetration levels are met
|
||||
func penetrationLevels(as []analytic, points []float64) []analytic {
|
||||
sort.Slice(as, func(a, b int) bool {
|
||||
return versionLess(as[b].Key, as[a].Key)
|
||||
})
|
||||
|
||||
var res []analytic
|
||||
|
||||
idx := 0
|
||||
sum := 0.0
|
||||
for _, a := range as {
|
||||
sum += a.Percentage
|
||||
if sum >= points[idx] {
|
||||
a.Count = int(points[idx])
|
||||
a.Percentage = sum
|
||||
res = append(res, a)
|
||||
idx++
|
||||
if idx == len(points) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForInts(data []int) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Ints(data)
|
||||
res[0] = float64(data[int(float64(len(data))*0.05)])
|
||||
res[1] = float64(data[len(data)/2])
|
||||
res[2] = float64(data[int(float64(len(data))*0.95)])
|
||||
res[3] = float64(data[len(data)-1])
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForInt64s(data []int64) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Slice(data, func(a, b int) bool {
|
||||
return data[a] < data[b]
|
||||
})
|
||||
|
||||
res[0] = float64(data[int(float64(len(data))*0.05)])
|
||||
res[1] = float64(data[len(data)/2])
|
||||
res[2] = float64(data[int(float64(len(data))*0.95)])
|
||||
res[3] = float64(data[len(data)-1])
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForFloats(data []float64) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
sort.Float64s(data)
|
||||
res[0] = data[int(float64(len(data))*0.05)]
|
||||
res[1] = data[len(data)/2]
|
||||
res[2] = data[int(float64(len(data))*0.95)]
|
||||
res[3] = data[len(data)-1]
|
||||
return res
|
||||
}
|
||||
|
||||
func group(by func(string) string, as []analytic, perGroup int, otherPct float64) []analytic {
|
||||
var res []analytic
|
||||
|
||||
next:
|
||||
for _, a := range as {
|
||||
group := by(a.Key)
|
||||
for i := range res {
|
||||
if res[i].Key == group {
|
||||
res[i].Count += a.Count
|
||||
res[i].Percentage += a.Percentage
|
||||
if len(res[i].Items) < perGroup {
|
||||
res[i].Items = append(res[i].Items, a)
|
||||
}
|
||||
continue next
|
||||
}
|
||||
}
|
||||
res = append(res, analytic{
|
||||
Key: group,
|
||||
Count: a.Count,
|
||||
Percentage: a.Percentage,
|
||||
Items: []analytic{a},
|
||||
})
|
||||
}
|
||||
|
||||
sort.Sort(analyticList(res))
|
||||
|
||||
if otherPct > 0 {
|
||||
// Groups with less than otherPct go into "Other"
|
||||
other := analytic{
|
||||
Key: "Other",
|
||||
}
|
||||
for i := 0; i < len(res); i++ {
|
||||
if res[i].Percentage < otherPct || res[i].Key == "Other" {
|
||||
other.Count += res[i].Count
|
||||
other.Percentage += res[i].Percentage
|
||||
res = append(res[:i], res[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if other.Count > 0 {
|
||||
res = append(res, other)
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func byVersion(s string) string {
|
||||
parts := strings.Split(s, ".")
|
||||
if len(parts) >= 2 {
|
||||
return strings.Join(parts[:2], ".")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func byPlatform(s string) string {
|
||||
parts := strings.Split(s, "-")
|
||||
if len(parts) >= 2 {
|
||||
return parts[0]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var numericGoVersion = regexp.MustCompile(`^go[0-9]\.[0-9]+`)
|
||||
|
||||
func byCompiler(s string) string {
|
||||
if m := numericGoVersion.FindString(s); m != "" {
|
||||
return m
|
||||
}
|
||||
return "Other"
|
||||
}
|
||||
|
||||
func versionLess(a, b string) bool {
|
||||
arel, apre := versionParts(a)
|
||||
brel, bpre := versionParts(b)
|
||||
|
||||
minlen := len(arel)
|
||||
if l := len(brel); l < minlen {
|
||||
minlen = l
|
||||
}
|
||||
|
||||
for i := 0; i < minlen; i++ {
|
||||
if arel[i] != brel[i] {
|
||||
return arel[i] < brel[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Longer version is newer, when the preceding parts are equal
|
||||
if len(arel) != len(brel) {
|
||||
return len(arel) < len(brel)
|
||||
}
|
||||
|
||||
if apre != bpre {
|
||||
// "(+dev)" versions are ahead
|
||||
if apre == plusStr {
|
||||
return false
|
||||
}
|
||||
if bpre == plusStr {
|
||||
return true
|
||||
}
|
||||
return apre < bpre
|
||||
}
|
||||
|
||||
// don't actually care how the prerelease stuff compares for our purposes
|
||||
return false
|
||||
}
|
||||
|
||||
// Split a version as returned from transformVersion into parts.
|
||||
// "1.2.3-beta.2" -> []int{1, 2, 3}, "beta.2"}
|
||||
func versionParts(v string) ([]int, string) {
|
||||
parts := strings.SplitN(v[1:], " ", 2) // " (+dev)" versions
|
||||
if len(parts) == 1 {
|
||||
parts = strings.SplitN(parts[0], "-", 2) // "-rc.1" type versions
|
||||
}
|
||||
fields := strings.Split(parts[0], ".")
|
||||
|
||||
release := make([]int, len(fields))
|
||||
for i, s := range fields {
|
||||
v, _ := strconv.Atoi(s)
|
||||
release[i] = v
|
||||
}
|
||||
|
||||
var prerelease string
|
||||
if len(parts) > 1 {
|
||||
prerelease = parts[1]
|
||||
}
|
||||
|
||||
return release, prerelease
|
||||
}
|
||||
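For orientation, here is a minimal sketch (not part of the diff) of how the frequency and grouping helpers added above fit together. It assumes it lives in the same serve package; the sample data and the exampleVersionBreakdown name are made up, while analyticsFor, group, byVersion and the analytic struct come from the code above.

package serve

import "fmt"

// exampleVersionBreakdown is a hypothetical illustration: count reported
// version strings, group them by minor version, and print the shares.
func exampleVersionBreakdown() {
	reported := []string{"v1.27.2", "v1.27.3", "v1.27.3", "v1.26.1", "v0.14.52"}

	// Frequency analysis, keeping at most 10 distinct keys before "Others".
	perVersion := analyticsFor(reported, 10)

	// Collapse v1.27.2, v1.27.3, ... into a "v1.27" group, keeping up to
	// 5 items per group and folding groups under 1% into "Other".
	grouped := group(byVersion, perVersion, 5, 1)

	for _, a := range grouped {
		fmt.Printf("%-8s %3d devices (%.1f%%)\n", a.Key, a.Count, a.Percentage)
	}
}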
131
cmd/ursrv/serve/formatting.go
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type NumberType int
|
||||
|
||||
const (
|
||||
NumberMetric NumberType = iota
|
||||
NumberBinary
|
||||
NumberDuration
|
||||
)
|
||||
|
||||
func number(ntype NumberType, v float64) string {
|
||||
switch ntype {
|
||||
case NumberMetric:
|
||||
return metric(v)
|
||||
case NumberDuration:
|
||||
return duration(v)
|
||||
case NumberBinary:
|
||||
return binary(v)
|
||||
default:
|
||||
return metric(v)
|
||||
}
|
||||
}
|
||||
|
||||
type suffix struct {
|
||||
Suffix string
|
||||
Multiplier float64
|
||||
}
|
||||
|
||||
var metricSuffixes = []suffix{
|
||||
{"G", 1e9},
|
||||
{"M", 1e6},
|
||||
{"k", 1e3},
|
||||
}
|
||||
|
||||
var binarySuffixes = []suffix{
|
||||
{"Gi", 1 << 30},
|
||||
{"Mi", 1 << 20},
|
||||
{"Ki", 1 << 10},
|
||||
}
|
||||
|
||||
var durationSuffix = []suffix{
|
||||
{"year", 365 * 24 * 60 * 60},
|
||||
{"month", 30 * 24 * 60 * 60},
|
||||
{"day", 24 * 60 * 60},
|
||||
{"hour", 60 * 60},
|
||||
{"minute", 60},
|
||||
{"second", 1},
|
||||
}
|
||||
|
||||
func metric(v float64) string {
|
||||
return withSuffix(v, metricSuffixes, false)
|
||||
}
|
||||
|
||||
func binary(v float64) string {
|
||||
return withSuffix(v, binarySuffixes, false)
|
||||
}
|
||||
|
||||
func duration(v float64) string {
|
||||
return withSuffix(v, durationSuffix, true)
|
||||
}
|
||||
|
||||
func withSuffix(v float64, ps []suffix, pluralize bool) string {
|
||||
for _, p := range ps {
|
||||
if v >= p.Multiplier {
|
||||
suffix := p.Suffix
|
||||
if pluralize && v/p.Multiplier != 1.0 {
|
||||
suffix += "s"
|
||||
}
|
||||
// If the number only has decimal zeroes, strip them off.
|
||||
num := strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v/p.Multiplier), "0"), ".")
|
||||
return fmt.Sprintf("%s %s", num, suffix)
|
||||
}
|
||||
}
|
||||
return strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v), "0"), ".")
|
||||
}
|
||||
|
||||
// commatize returns a number with sep as thousands separators. Handles
|
||||
// integers and plain floats.
|
||||
func commatize(sep, s string) string {
|
||||
// If no dot, don't do anything.
|
||||
if !strings.ContainsRune(s, '.') {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
fs := strings.SplitN(s, ".", 2)
|
||||
|
||||
l := len(fs[0])
|
||||
for i := range fs[0] {
|
||||
b.Write([]byte{s[i]})
|
||||
if i < l-1 && (l-i)%3 == 1 {
|
||||
b.WriteString(sep)
|
||||
}
|
||||
}
|
||||
|
||||
if len(fs) > 1 && len(fs[1]) > 0 {
|
||||
b.WriteString(".")
|
||||
b.WriteString(fs[1])
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func proportion(m map[string]int, count int) float64 {
|
||||
total := 0
|
||||
isMax := true
|
||||
for _, n := range m {
|
||||
total += n
|
||||
if n > count {
|
||||
isMax = false
|
||||
}
|
||||
}
|
||||
pct := (100 * float64(count)) / float64(total)
|
||||
// To avoid rounding errors in the template pushing the total past 100% and breaking
|
||||
// the progress bars.
|
||||
if isMax && len(m) > 1 && count != total {
|
||||
pct -= 0.01
|
||||
}
|
||||
return pct
|
||||
}
|
||||
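As a rough usage sketch (again assuming the same package; the sample values, the expected outputs in the comments and the exampleFormatting name are illustrative only), the formatting helpers above combine like this:

package serve

import "fmt"

// exampleFormatting shows the three suffix styles plus thousands separation.
func exampleFormatting() {
	fmt.Println(number(NumberMetric, 1500000)) // "1.5 M"
	fmt.Println(number(NumberBinary, 3*1<<30)) // "3 Gi"
	fmt.Println(number(NumberDuration, 5400))  // "1.5 hours"

	// commatize only touches strings that contain a decimal point.
	fmt.Println(commatize(" ", "1234567.89")) // "1 234 567.89"
}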
26
cmd/ursrv/serve/metrics.go
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright (C) 2023 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "ursrv",
|
||||
Name: "reports_total",
|
||||
}, []string{"version"})
|
||||
|
||||
func init() {
|
||||
metricReportsTotal.WithLabelValues("fail")
|
||||
metricReportsTotal.WithLabelValues("duplicate")
|
||||
metricReportsTotal.WithLabelValues("v1")
|
||||
metricReportsTotal.WithLabelValues("v2")
|
||||
metricReportsTotal.WithLabelValues("v3")
|
||||
}
|
||||
1135
cmd/ursrv/serve/serve.go
Normal file
File diff suppressed because it is too large
BIN
cmd/ursrv/serve/static/assets/img/favicon.png
Normal file
Binary file not shown.
(new image, 4.8 KiB)
7
cmd/ursrv/serve/static/bootstrap/css/bootstrap-theme.min.css
vendored
Executable file
File diff suppressed because one or more lines are too long
7
cmd/ursrv/serve/static/bootstrap/css/bootstrap.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
7
cmd/ursrv/serve/static/bootstrap/js/bootstrap.min.js
vendored
Executable file
File diff suppressed because one or more lines are too long
BIN
cmd/ursrv/serve/static/fonts/glyphicons-halflings-regular.eot
Executable file
Binary file not shown.
BIN
cmd/ursrv/serve/static/fonts/glyphicons-halflings-regular.svg
Executable file
Binary file not shown.
(new image, 61 KiB)
BIN
cmd/ursrv/serve/static/fonts/glyphicons-halflings-regular.ttf
Executable file
Binary file not shown.
BIN
cmd/ursrv/serve/static/fonts/glyphicons-halflings-regular.woff
Executable file
Binary file not shown.
623
cmd/ursrv/serve/static/index.html
Normal file
@@ -0,0 +1,623 @@
|
||||
<!DOCTYPE html>
|
||||
<!--
|
||||
Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
Use of this source code is governed by an MIT-style license that can be
|
||||
found in the LICENSE file.
|
||||
-->
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta name="description" content="">
|
||||
<meta name="author" content="">
|
||||
<link rel="shortcut icon" href="static/assets/img/favicon.png">
|
||||
|
||||
<title>Syncthing Usage Reports</title>
|
||||
<link href="static/bootstrap/css/bootstrap.min.css" rel="stylesheet">
|
||||
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
|
||||
<script type="text/javascript" src="static/bootstrap/js/bootstrap.min.js"></script>
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.css">
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/heatmapjs@2.0.2/heatmap.min.js"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/leaflet-heatmap@1.0.0/leaflet-heatmap.js"></script>
|
||||
|
||||
<style type="text/css">
|
||||
body {
|
||||
margin: 40px;
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
||||
}
|
||||
tr.main td {
|
||||
font-weight: bold;
|
||||
}
|
||||
tr.child td.first {
|
||||
padding-left: 2em;
|
||||
}
|
||||
.progress-bar {
|
||||
overflow:hidden;
|
||||
white-space:nowrap;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
</style>
|
||||
<script type="text/javascript"
|
||||
src='https://www.google.com/jsapi?autoload={
|
||||
"modules":[{
|
||||
"name":"visualization",
|
||||
"version":"1",
|
||||
"packages":["corechart"]
|
||||
}]
|
||||
}'></script>
|
||||
|
||||
<script type="text/javascript">
|
||||
google.setOnLoadCallback(drawVersionChart);
|
||||
google.setOnLoadCallback(drawBlockStatsChart);
|
||||
google.setOnLoadCallback(drawPerformanceCharts);
|
||||
|
||||
function drawVersionChart() {
|
||||
// Summary version chart for versions that at some point in the chart
|
||||
// reach 250 devices. This filters out versions that are old and
|
||||
// uninteresting yet linger forever with like four users.
|
||||
var jsonData = $.ajax({url: "summary.json?min=250", dataType:"json", async: false}).responseText;
|
||||
var rows = JSON.parse(jsonData);
|
||||
|
||||
var data = new google.visualization.DataTable();
|
||||
data.addColumn('date', 'Day');
|
||||
for (var i = 1; i < rows[0].length; i++){
|
||||
data.addColumn('number', rows[0][i]);
|
||||
}
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
rows[i][0] = new Date(rows[i][0]);
|
||||
data.addRow(rows[i]);
|
||||
};
|
||||
|
||||
var options = {
|
||||
legend: { position: 'bottom', alignment: 'center' },
|
||||
isStacked: true,
|
||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
||||
};
|
||||
|
||||
var chart = new google.visualization.AreaChart(document.getElementById('versionChart'));
|
||||
chart.draw(data, options);
|
||||
}
|
||||
|
||||
function formatGibibytes(gibibytes, decimals) {
|
||||
if(gibibytes == 0) return '0 GiB';
|
||||
var k = 1024,
|
||||
dm = decimals || 2,
|
||||
sizes = ['GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'],
|
||||
i = Math.floor(Math.log(gibibytes) / Math.log(k));
|
||||
if (i < 0) {
|
||||
sizes = 'MiB';
|
||||
} else {
|
||||
sizes = sizes[i];
|
||||
}
|
||||
return parseFloat((gibibytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes;
|
||||
}
|
||||
|
||||
|
||||
function drawBlockStatsChart() {
|
||||
var jsonData = $.ajax({url: "blockstats.json", dataType:"json", async: false}).responseText;
|
||||
var rows = JSON.parse(jsonData);
|
||||
|
||||
var data = new google.visualization.DataTable();
|
||||
data.addColumn('date', 'Day');
|
||||
for (var i = 1; i < rows[0].length; i++){
|
||||
data.addColumn('number', rows[0][i]);
|
||||
}
|
||||
|
||||
var totals = [0, 0, 0, 0, 0, 0];
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
rows[i][0] = new Date(rows[i][0]);
|
||||
for (var j = 2; j < rows[i].length; j++) {
|
||||
totals[j-2] += rows[i][j];
|
||||
}
|
||||
data.addRow(rows[i]);
|
||||
};
|
||||
|
||||
var totalTotals = totals.reduce(function(a, b) { return a + b; }, 0);
|
||||
|
||||
if (totalTotals > 0) {
|
||||
var content = "<table class='table'>\n"
|
||||
for (var j = 2; j < rows[0].length; j++) {
|
||||
content += "<tr><td><b>" + rows[0][j].replace(' (GiB)', '') + "</b></td><td>" + formatGibibytes(totals[j-2].toFixed(2)) + " (" + ((100*totals[j-2])/totalTotals).toFixed(2) +"%)</td></tr>\n";
|
||||
}
|
||||
content += "</table>";
|
||||
document.getElementById("data-to-date").innerHTML = content;
|
||||
} else {
|
||||
// No data, hide it.
|
||||
document.getElementById("block-stats").outerHTML = "";
|
||||
return;
|
||||
}
|
||||
|
||||
var options = {
|
||||
focusTarget: 'category',
|
||||
vAxes: {0: {}, 1: {}},
|
||||
series: {0: {type: 'line', targetAxisIndex:1}},
|
||||
isStacked: true,
|
||||
legend: {position: 'none'},
|
||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
||||
};
|
||||
|
||||
var chart = new google.visualization.AreaChart(document.getElementById('blockStatsChart'));
|
||||
chart.draw(data, options);
|
||||
}
|
||||
|
||||
function drawPerformanceCharts() {
|
||||
var jsonData = $.ajax({url: "/performance.json", dataType:"json", async: false}).responseText;
|
||||
var rows = JSON.parse(jsonData);
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
rows[i][0] = new Date(rows[i][0]);
|
||||
}
|
||||
|
||||
drawChart(rows, 1, 'Total Number of Files', 'totFilesChart', 1e6, 1);
|
||||
drawChart(rows, 2, 'Total Folder Size (GiB)', 'totMiBChart', 1e6, 1024);
|
||||
drawChart(rows, 3, 'Hash Performance (MiB/s)', 'hashPerfChart', 1000, 1);
|
||||
drawChart(rows, 4, 'System RAM Size (GiB)', 'memSizeChart', 1e6, 1024);
|
||||
drawChart(rows, 5, 'Memory Usage (MiB)', 'memUsageChart', 250, 1);
|
||||
}
|
||||
|
||||
function drawChart(rows, index, title, id, cutoff, divisor) {
|
||||
var data = new google.visualization.DataTable();
|
||||
data.addColumn('date', 'Day');
|
||||
data.addColumn('number', title);
|
||||
|
||||
var row;
|
||||
for (var i = 1; i < rows.length; i++){
|
||||
row = [rows[i][0], rows[i][index] / divisor];
|
||||
if (row[1] > cutoff) {
|
||||
row[1] = null;
|
||||
}
|
||||
data.addRow(row);
|
||||
}
|
||||
|
||||
var options = {
|
||||
legend: { position: 'bottom', alignment: 'center' },
|
||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
||||
vAxes: {0: {minValue: 0}},
|
||||
};
|
||||
|
||||
var chart = new google.visualization.LineChart(document.getElementById(id));
|
||||
chart.draw(data, options);
|
||||
}
|
||||
|
||||
var locations = [];
|
||||
{{range $location, $weight := .locations}}
|
||||
locations.push({lat:{{- $location.Latitude -}},lng:{{- $location.Longitude -}},count:Math.min(100, {{- $weight -}})});
|
||||
{{- end}}
|
||||
|
||||
function drawHeatMap() {
|
||||
if (locations.length == 0) {
|
||||
return;
|
||||
}
|
||||
var testData = {
|
||||
data: locations
|
||||
};
|
||||
|
||||
var baseLayer = L.tileLayer(
|
||||
'https://tile.openstreetmap.org/{z}/{x}/{y}.png',{
|
||||
attribution: '...',
|
||||
maxZoom: 18
|
||||
}
|
||||
);
|
||||
var cfg = {
|
||||
"radius": 1,
|
||||
"minOpacity": .25,
|
||||
"maxOpacity": .8,
|
||||
"scaleRadius": true,
|
||||
"useLocalExtrema": true,
|
||||
latField: 'lat',
|
||||
lngField: 'lng',
|
||||
valueField: 'count',
|
||||
gradient: {
|
||||
'.1': 'cyan',
|
||||
'.8': 'blue',
|
||||
'.95': 'red'
|
||||
}
|
||||
};
|
||||
var heatmapLayer = new HeatmapOverlay(cfg);
|
||||
|
||||
var map = new L.Map('map', {
|
||||
center: new L.LatLng(25, 0),
|
||||
zoom: 1,
|
||||
layers: [baseLayer, heatmapLayer]
|
||||
});
|
||||
heatmapLayer.setData(testData);
|
||||
}
|
||||
</script>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="container">
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h1>Syncthing Usage Data</h1>
|
||||
|
||||
<h4 id="active-users">Active Users per Day and Version</h4>
|
||||
<p>
|
||||
This is the total number of unique users with reporting enabled, per day. Area color represents the major version.
|
||||
</p>
|
||||
<div class="img-thumbnail" id="versionChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<div id="block-stats">
|
||||
<h4>Data Transfers per Day</h4>
|
||||
<p>
|
||||
This is the total data transferred per day. It also shows how much data was saved (not transferred) by each of the methods Syncthing uses.
|
||||
</p>
|
||||
<div class="img-thumbnail" id="blockStatsChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
<h4 id="totals-to-date">Totals to date</h4>
|
||||
<p id="data-to-date">
|
||||
No data
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<h4 id="metrics">Usage Metrics</h4>
|
||||
<p>
|
||||
This is the aggregated usage report data for the last 24 hours. Data based on <b>{{.nodes}}</b> devices that have reported in.
|
||||
</p>
|
||||
|
||||
{{if .locations}}
|
||||
<div class="img-thumbnail" id="map" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
<p class="text-muted">
|
||||
Heatmap max intensity is capped at 100 reports within a location.
|
||||
</p>
|
||||
<div class="panel panel-default">
|
||||
<div class="panel-heading">
|
||||
<h4 class="panel-title">
|
||||
<a data-toggle="collapse" href="#collapseTwo">Break down per country</a>
|
||||
</h4>
|
||||
</div>
|
||||
<div id="collapseTwo" class="panel-collapse collapse">
|
||||
<div class="panel-body">
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<tbody>
|
||||
{{range .countries | slice 2 1}}
|
||||
<tr>
|
||||
<td style="width: 45%">{{.Key}}</td>
|
||||
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
||||
<td style="width: 5%" class="text-right">{{.Count}}</td>
|
||||
<td>
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<tbody>
|
||||
{{range .countries | slice 2 2}}
|
||||
<tr>
|
||||
<td style="width: 45%">{{.Key}}</td>
|
||||
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
||||
<td style="width: 5%" class="text-right">{{.Count}}</td>
|
||||
<td>
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{end}}
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th colspan="4" class="text-center">
|
||||
<a href="https://en.wikipedia.org/wiki/Percentile">Percentile</a>
|
||||
</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th class="text-right">5%</th>
|
||||
<th class="text-right">50%</th>
|
||||
<th class="text-right">95%</th>
|
||||
<th class="text-right">100%</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .categories}}
|
||||
<tr>
|
||||
<td>{{.Descr}}</td>
|
||||
<td class="text-right">{{index .Values 0 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
<td class="text-right">{{index .Values 1 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
<td class="text-right">{{index .Values 2 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
<td class="text-right">{{index .Values 3 | number .Type | commatize " "}}{{.Unit}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Version</th><th class="text-right">Devices</th><th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .versions}}
|
||||
{{if gt .Percentage 0.1}}
|
||||
<tr class="main">
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{range .Items}}
|
||||
{{if gt .Percentage 0.1}}
|
||||
<tr class="child">
|
||||
<td class="first">{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Penetration Level</th>
|
||||
<th>Version</th>
|
||||
<th class="text-right">Actual</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .versionPenetrations}}
|
||||
<tr>
|
||||
<td>{{.Count}}%</td>
|
||||
<td>≥ {{.Key}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Platform</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .platforms}}
|
||||
<tr class="main">
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{range .Items}}
|
||||
<tr class="child">
|
||||
<td class="first">{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="row">
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Compiler</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .compilers}}
|
||||
<tr class="main">
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{range .Items}}
|
||||
{{if or (gt .Percentage 0.1) (eq .Key "Others")}}
|
||||
<tr class="child">
|
||||
<td class="first">{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Distribution Channel</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .distributions}}
|
||||
<tr>
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Builder</th>
|
||||
<th class="text-right">Devices</th>
|
||||
<th class="text-right">Share</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range .builders}}
|
||||
<tr>
|
||||
<td>{{.Key}}</td>
|
||||
<td class="text-right">{{.Count}}</td>
|
||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h4 id="features">Feature Usage</h4>
|
||||
<p>
|
||||
The following lists feature usage. Some features are counted once per report, others are summed over units within a report (e.g. devices with static addresses among all known devices).
|
||||
Currently there are <b>{{.versionNodes.v2}}</b> devices reporting for version 2 and <b>{{.versionNodes.v3}}</b> for version 3.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="row">
|
||||
{{$i := counter}}
|
||||
{{range $featureName := .featureOrder}}
|
||||
{{$featureValues := index $.features $featureName }}
|
||||
{{if $i.DrawTwoDivider}}
|
||||
</div>
|
||||
<div class="row">
|
||||
{{end}}
|
||||
{{ $i.Increment }}
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead><tr>
|
||||
<th>{{$featureName}} Features</th><th colspan="2" class="text-center">Usage</th>
|
||||
</tr></thead>
|
||||
<tbody>
|
||||
{{range $featureValues}}
|
||||
<tr>
|
||||
<td style="width: 50%">{{.Key}} ({{.Version}})</td>
|
||||
<td style="width: 10%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
||||
<td style="width: 40%" {{if lt .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}>
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px" {{if ge .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}></div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h4 id="features">Feature Group Usage</h4>
|
||||
<p>
|
||||
The following lists feature usage groups, which might include multiple occurrences of a feature use per report.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
{{$i := counter}}
|
||||
{{range $featureName := .featureOrder}}
|
||||
{{$featureValues := index $.featureGroups $featureName }}
|
||||
{{if $i.DrawTwoDivider}}
|
||||
</div>
|
||||
<div class="row">
|
||||
{{end}}
|
||||
{{ $i.Increment }}
|
||||
<div class="col-md-6">
|
||||
<table class="table table-striped">
|
||||
<thead><tr>
|
||||
<th>{{$featureName}} Group Features</th><th colspan="2" class="text-center">Usage</th>
|
||||
</tr></thead>
|
||||
<tbody>
|
||||
{{range $featureValues}}
|
||||
{{$counts := .Counts}}
|
||||
<tr>
|
||||
<td style="width: 50%">
|
||||
<div data-toggle="tooltip" title='{{range $key, $value := .Counts}}{{$key}} ({{$value | proportion $counts | printf "%.02f"}}% - {{$value}})</br>{{end}}'>
|
||||
{{.Key}} ({{.Version}})
|
||||
</div>
|
||||
</td>
|
||||
<td style="width: 50%">
|
||||
<div class="progress" role="progressbar" style="width: 100%">
|
||||
{{$j := counter}}
|
||||
{{range $key, $value := .Counts}}
|
||||
{{with $valuePct := $value | proportion $counts}}
|
||||
<div class="progress-bar {{ $j.Current | progressBarClassByIndex }}" style='width: {{$valuePct | printf "%.02f"}}%' data-toggle="tooltip" title='{{$key}} ({{$valuePct | printf "%.02f"}}% - {{$value}})'>
|
||||
{{if ge $valuePct 30.0}}{{$key}}{{end}}
|
||||
</div>
|
||||
{{end}}
|
||||
{{ $j.Increment }}
|
||||
{{end}}
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h1 id="performance-charts">Historical Performance Data</h1>
|
||||
<p>These charts are all the average of the corresponding metric, for the entire population of a given day.</p>
|
||||
|
||||
<h4 id="hash-performance">Hash Performance (MiB/s)</h4>
|
||||
<div class="img-thumbnail" id="hashPerfChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="memory-usage">Memory Usage (MiB)</h4>
|
||||
<div class="img-thumbnail" id="memUsageChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="total-files">Total Number of Files</h4>
|
||||
<div class="img-thumbnail" id="totFilesChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="total-size">Total Folder Size (GiB)</h4>
|
||||
<div class="img-thumbnail" id="totMiBChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
|
||||
<h4 id="system-ram">System RAM Size (GiB)</h4>
|
||||
<div class="img-thumbnail" id="memSizeChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<hr>
|
||||
<p>
|
||||
<a href="https://github.com/syncthing/syncthing/tree/main/cmd/ursrv">Source code</a>.
|
||||
This product includes GeoLite2 data created by MaxMind, available from
|
||||
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
|
||||
</p>
|
||||
<script type="text/javascript">
|
||||
$('[data-toggle="tooltip"]').tooltip({html:true});
|
||||
drawHeatMap();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
34
compat.yaml
@@ -1,34 +0,0 @@
|
||||
- runtime: go1.21
|
||||
requirements:
|
||||
# See https://en.wikipedia.org/wiki/MacOS_version_history#Releases
|
||||
#
|
||||
# macOS 10.15 (Catalina) per https://go.dev/doc/go1.22#darwin
|
||||
darwin: "19"
|
||||
# Per https://go.dev/doc/go1.23#linux
|
||||
linux: "2.6.32"
|
||||
# Windows 10's initial release was 10.0.10240.16405, per
|
||||
# https://learn.microsoft.com/en-us/windows/release-health/release-information
|
||||
# and Windows 11's initial release was 10.0.22000.194 per
|
||||
# https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information
|
||||
#
|
||||
# Windows 10/Windows Server 2016 per https://go.dev/doc/go1.21#windows
|
||||
windows: "10.0"
|
||||
|
||||
- runtime: go1.22
|
||||
requirements:
|
||||
darwin: "19"
|
||||
linux: "2.6.32"
|
||||
windows: "10.0"
|
||||
|
||||
- runtime: go1.23
|
||||
requirements:
|
||||
# macOS 11 (Big Sur) per https://tip.golang.org/doc/go1.23#darwin
|
||||
darwin: "20"
|
||||
linux: "2.6.32"
|
||||
windows: "10.0"
|
||||
|
||||
- runtime: go1.24
|
||||
requirements:
|
||||
darwin: "20"
|
||||
linux: "3.2"
|
||||
windows: "10.0"
|
||||
@@ -2,7 +2,7 @@
|
||||
Name=Start Syncthing
|
||||
GenericName=File synchronization
|
||||
Comment=Starts the main syncthing process in the background.
|
||||
Exec=syncthing serve --no-browser --logfile=default
|
||||
Exec=/usr/bin/syncthing serve --no-browser --logfile=default
|
||||
Icon=syncthing
|
||||
Terminal=false
|
||||
Type=Application
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Name=Syncthing Web UI
|
||||
GenericName=File synchronization UI
|
||||
Comment=Opens Syncthing's Web UI in the default browser (Syncthing must already be started).
|
||||
Exec=syncthing --browser-only
|
||||
Exec=/usr/bin/syncthing --browser-only
|
||||
Icon=syncthing
|
||||
Terminal=false
|
||||
Type=Application
|
||||
|
||||
Some files were not shown because too many files have changed in this diff