Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | f3a793ce91 |  |
.codecov.yml (20 lines changed)
@@ -1,20 +0,0 @@
comment: false

coverage:
  range: "40...100"
  precision: 1
  status:
    patch:
      default:
        informational: true
    project:
      default:
        informational: true

github_checks:
  annotations: false

ignore:
  - "**.pb.go"
  - "**_mocked.go"
  - "**/mocks/*"
.deepsource.toml
@@ -1,12 +0,0 @@
version = 1

exclude_patterns = ["**/*.pb.go"]
test_patterns = ["**/*_test.go"]

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_paths = ["github.com/syncthing/syncthing"]
build_tags = ["noassets"]
.gitattributes (8 lines changed, vendored)
@@ -1,8 +0,0 @@
# Text files use LF line endings in this repository
* text=auto

# Except the dependencies, which we leave alone
vendor/** -text=auto

# Diffs on these files are meaningless
*.svg -diff
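For reference, attribute rules like these can be verified per path with `git check-attr`; a small sketch (the paths are illustrative, not taken from the repository):

```bash
# Ask git which attribute values apply to given paths under the rules above.
git check-attr text diff -- gui/index.html vendor/some/dep.go assets/logo.svg
```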
.github/CODEOWNERS (2 lines changed, vendored)
@@ -1,2 +0,0 @@
/AUTHORS @calmh
/*.md @calmh
.github/FUNDING.yml (11 lines changed, vendored)
@@ -1,11 +0,0 @@
github: syncthing
custom: "https://syncthing.net/donations/"

# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username
# ko_fi: # Replace with a single Ko-fi username
# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
# liberapay: # Replace with a single Liberapay username
# issuehunt: # Replace with a single IssueHunt username
# otechie: # Replace with a single Otechie username
.github/ISSUE_TEMPLATE/01-feature.md (13 lines changed, vendored)
@@ -1,13 +0,0 @@
---
name: Feature request
about: If you're just not sure how to do something, see "ask a question".
labels: enhancement, needs-triage
---

### Include required information

Please be sure to include at least:

- what problem your new feature would solve
- how or why you think it is generally useful (i.e., not just for you)
- what alternatives or workarounds you considered
.github/ISSUE_TEMPLATE/02-bug.md (23 lines changed, vendored)
@@ -1,23 +0,0 @@
---
name: Bug report
about: If you're actually looking for support, see "ask a question".
labels: bug, needs-triage
---

### Does your log mention database corruption?

If your Syncthing log reports panics because of database corruption it is
most likely a fault with your system's storage or memory. Affected log
entries will contain lines starting with `panic: leveldb`. You will need to
delete the index database to clear this, by running `syncthing
-reset-database`.

### Include required information

Please be sure to include at least:

- which version of Syncthing and what operating system you are using
- browser and version, if applicable
- what happened,
- what you expected to happen instead, and
- any steps to reproduce the problem.
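The recovery step the template refers to, as a minimal shell sketch. How Syncthing is stopped and started varies by install, so the systemd user unit below is an assumption; only the `-reset-database` flag comes from the template text:

```bash
# Stop Syncthing, drop the index database, start again. The index is
# rebuilt by rescanning the synced folders on the next start.
systemctl --user stop syncthing    # assumption: systemd user service
syncthing -reset-database          # deletes the index database
systemctl --user start syncthing
```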
.github/ISSUE_TEMPLATE/config.yml (8 lines changed, vendored)
@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: I need help / I have a question
    url: https://forum.syncthing.net/
    about: Ask questions, get support, and discuss with other community members.
  - name: Android issues
    url: https://github.com/syncthing/syncthing-android/issues/
    about: The Android app has its own issue tracker.
.github/PULL_REQUEST_TEMPLATE.md (27 lines changed, vendored)
@@ -1,27 +0,0 @@
### Purpose

Describe the purpose of this change. If there is an existing issue that is
resolved by this pull request, ensure that the commit subject is on the form
`Some short description (fixes #1234)` where 1234 is the issue number.

### Testing

Describe what testing has been done, and how the reviewer can test the change
if new tests are not included.

### Screenshots

If this is a GUI change, include screenshots of the change. If not, please
feel free to just delete this section.

### Documentation

If this is a user visible change (including API and protocol changes), add a link here
to the corresponding pull request on https://github.com/syncthing/docs or describe
the documentation changes necessary.

## Authorship

Your name and email will be added automatically to the AUTHORS file
based on the commit metadata.
.github/SECURITY.md (10 lines changed, vendored)
@@ -1,10 +0,0 @@
## Reporting a Vulnerability

If you believe that you've found a Syncthing-related security vulnerability,
please report it by sending email to the address security@syncthing.net. The
[PGP key for security@syncthing.net
(B683AD7B76CAB013)](https://syncthing.net/security-key.txt) can be used to
send encrypted mail or to verify responses received from that address.

You can read more about Syncthing security at
https://syncthing.net/security/.
.github/dependabot.yml (13 lines changed, vendored)
@@ -1,13 +0,0 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: weekly
    open-pull-requests-limit: 10

  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: weekly
    open-pull-requests-limit: 10
.github/workflows/build-infra-dockers.yaml (54 lines changed, vendored)
@@ -1,54 +0,0 @@
name: Build Infrastructure Images

on:
  push:
    branches:
      - infrastructure

env:
  GO_VERSION: "~1.21.1"
  CGO_ENABLED: "0"
  BUILD_USER: docker
  BUILD_HOST: github.syncthing.net

jobs:

  docker-syncthing:
    name: Build and push Docker images
    runs-on: ubuntu-latest
    environment: docker
    strategy:
      matrix:
        pkg:
          - stcrashreceiver
          - strelaypoolsrv
          - stupgrades
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build binaries
        run: |
          for arch in arm64 amd64; do
            go run build.go -goos linux -goarch "$arch" build ${{ matrix.pkg }}
            mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-"$arch"
          done

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile.${{ matrix.pkg }}
          platforms: linux/amd64,linux/arm64
          push: true
          tags: syncthing/${{ matrix.pkg }}:latest,syncthing/${{ matrix.pkg }}:${{ github.sha }}
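For local debugging, the "Build binaries" loop above can be reproduced outside Actions; a sketch assuming a repository checkout, with the package name as an example:

```bash
# Cross-compile one of the infrastructure binaries for both architectures,
# naming the outputs the way the Dockerfiles expect them.
pkg=stupgrades   # example; any of the matrix pkg values works
for arch in arm64 amd64; do
    CGO_ENABLED=0 go run build.go -goos linux -goarch "$arch" build "$pkg"
    mv "$pkg" "$pkg-linux-$arch"
done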
.github/workflows/build-syncthing.yaml (795 lines changed, vendored)
@@ -1,795 +0,0 @@
name: Build Syncthing

on:
  pull_request:
  push:
  schedule:
    # Run nightly build at 05:00 UTC
    - cron: '00 05 * * *'
  workflow_dispatch:

env:
  # The go version to use for builds. We set check-latest to true when
  # installing, so we get the latest patch version that matches the
  # expression.
  GO_VERSION: "~1.21.1"

  # Optimize compatibility on the slow architectures.
  GO386: softfloat
  GOARM: "5"
  GOMIPS: softfloat

  # Avoid hilarious amounts of obscuring log output when running tests.
  LOGGER_DISCARD: "1"

  # Our build metadata
  BUILD_USER: builder
  BUILD_HOST: github.syncthing.net

# A note on actions and third party code... The actions under actions/ (like
# `uses: actions/checkout`) are maintained by GitHub, and we need to trust
# GitHub to maintain their code and infrastructure or we're in deep shit in
# general. The same doesn't necessarily apply to other actions authors, so
# some care needs to be taken when adding steps, especially in the paths
# that lead up to code being packaged and signed.

jobs:

  #
  # Tests for all platforms. Runs a matrix build on Windows, Linux and Mac,
  # with the list of expected supported Go versions (current, previous).
  #

  build-test:
    name: Build and test
    strategy:
      fail-fast: false
      matrix:
        runner: ["windows-latest", "ubuntu-latest", "macos-latest"]
        # The oldest version in this list should match what we have in our go.mod.
        # Variables don't seem to be supported here, or we could have done something nice.
        go: ["1.20", "1.21"]
    runs-on: ${{ matrix.runner }}
    steps:
      - name: Set git to use LF
        if: matrix.runner == 'windows-latest'
        # Without this, the Windows checkout will happen with CRLF line
        # endings, which is fine for the source code but messes up tests
        # that depend on data on disk being as expected. Ideally, those
        # tests should be fixed, but not today.
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf

      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go }}
          cache: true
          check-latest: true

      - name: Build
        run: |
          go run build.go

      - name: Install go-test-json-to-loki
        run: |
          go install calmh.dev/go-test-json-to-loki@latest

      - name: Test
        run: |
          go run build.go test | go-test-json-to-loki
        env:
          GOFLAGS: "-json"
          LOKI_URL: ${{ vars.LOKI_URL }}
          LOKI_USER: ${{ vars.LOKI_USER }}
          LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }}
          LOKI_LABELS: "go=${{ matrix.go }},runner=${{ matrix.runner }},repo=${{ github.repository }},ref=${{ github.ref }}"

  #
  # Meta checks for formatting, copyright, etc
  #

  correctness:
    name: Check correctness
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - name: Check correctness
        run: |
          go test -v ./meta

  #
  # The basic checks job is a virtual one that depends on the matrix tests,
  # the correctness checks, and various builds that we always do. This makes
  # it easy to have the PR process have a single test as a gatekeeper for
  # merging, instead of having to add all the matrix tests and update them
  # each time the version changes. (The top level test is not available for
  # choosing there, only the matrix "children".)
  #

  basics:
    name: Basic checks passed
    runs-on: ubuntu-latest
    needs:
      - build-test
      - correctness
      - package-linux
      - package-cross
      - package-source
      - package-debian
      - govulncheck
    steps:
      - uses: actions/checkout@v4

  #
  # Windows
  #

  package-windows:
    name: Package for Windows
    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
    environment: signing
    runs-on: windows-latest
    steps:
      - name: Set git to use LF
        # Without this, the checkout will happen with CRLF line endings,
        # which is fine for the source code but messes up tests that depend
        # on data on disk being as expected. Ideally, those tests should be
        # fixed, but not today.
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf

      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - uses: actions/cache@v3
        with:
          path: |
            ~\AppData\Local\go-build
            ~\go\pkg\mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}

      - name: Install dependencies
        run: |
          go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0

      - name: Create packages
        run: |
          go run build.go -goarch amd64 zip
          go run build.go -goarch arm zip
          go run build.go -goarch arm64 zip
          go run build.go -goarch 386 zip
        env:
          CGO_ENABLED: "0"
          CODESIGN_SIGNTOOL: ${{ secrets.CODESIGN_SIGNTOOL }}
          CODESIGN_CERTIFICATE_BASE64: ${{ secrets.CODESIGN_CERTIFICATE_BASE64 }}
          CODESIGN_CERTIFICATE_PASSWORD: ${{ secrets.CODESIGN_CERTIFICATE_PASSWORD }}
          CODESIGN_TIMESTAMP_SERVER: ${{ secrets.CODESIGN_TIMESTAMP_SERVER }}

      - name: Archive artifacts
        uses: actions/upload-artifact@v3
        with:
          name: packages-windows
          path: syncthing-windows-*.zip

  #
  # Linux
  #

  package-linux:
    name: Package for Linux
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}

      - name: Create packages
        run: |
          archs=$(go tool dist list | grep linux | sed 's#linux/##')
          for goarch in $archs ; do
            go run build.go -goarch "$goarch" tar
          done
        env:
          CGO_ENABLED: "0"

      - name: Archive artifacts
        uses: actions/upload-artifact@v3
        with:
          name: packages-linux
          path: syncthing-linux-*.tar.gz

  #
  # macOS
  #

  package-macos:
    name: Package for macOS
    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
    environment: signing
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}

      - name: Import signing certificate
        run: |
          # Set up a run-specific keychain, making it available for the
          # `codesign` tool.
          umask 066
          KEYCHAIN_PATH=$RUNNER_TEMP/codesign.keychain
          KEYCHAIN_PASSWORD=$(uuidgen)
          security create-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH"
          security default-keychain -s "$KEYCHAIN_PATH"
          security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH"
          security set-keychain-settings -lut 21600 "$KEYCHAIN_PATH"

          # Import the certificate
          CERTIFICATE_PATH=$RUNNER_TEMP/codesign.p12
          echo "$DEVELOPER_ID_CERTIFICATE_BASE64" | base64 -d -o "$CERTIFICATE_PATH"
          security import "$CERTIFICATE_PATH" -k "$KEYCHAIN_PATH" -P "$DEVELOPER_ID_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/productsign
          security set-key-partition-list -S apple-tool:,apple: -s -k actions "$KEYCHAIN_PATH"

          # Set the codesign identity for following steps
          echo "CODESIGN_IDENTITY=$CODESIGN_IDENTITY" >> $GITHUB_ENV
        env:
          DEVELOPER_ID_CERTIFICATE_BASE64: ${{ secrets.DEVELOPER_ID_CERTIFICATE_BASE64 }}
          DEVELOPER_ID_CERTIFICATE_PASSWORD: ${{ secrets.DEVELOPER_ID_CERTIFICATE_PASSWORD }}
          CODESIGN_IDENTITY: ${{ secrets.CODESIGN_IDENTITY }}

      - name: Create package (amd64)
        run: |
          go run build.go -goarch amd64 zip
        env:
          CGO_ENABLED: "1"

      - name: Create package (arm64 cross)
        run: |
          cat <<EOT > xgo.sh
          #!/bin/bash
          CGO_ENABLED=1 \
          CGO_CFLAGS="-target arm64-apple-macos10.15" \
          CGO_LDFLAGS="-target arm64-apple-macos10.15" \
          go "\$@"
          EOT
          chmod 755 xgo.sh
          go run build.go -gocmd ./xgo.sh -goarch arm64 zip
        env:
          CGO_ENABLED: "1"

      - name: Create package (universal)
        run: |
          rm -rf _tmp
          mkdir _tmp
          pushd _tmp

          unzip ../syncthing-macos-amd64-*.zip
          unzip ../syncthing-macos-arm64-*.zip
          lipo -create syncthing-macos-amd64-*/syncthing syncthing-macos-arm64-*/syncthing -o syncthing

          amd64=(syncthing-macos-amd64-*)
          universal="${amd64/amd64/universal}"
          mv "$amd64" "$universal"
          mv syncthing "$universal"
          zip -r "../$universal.zip" "$universal"

      - name: Archive artifacts
        uses: actions/upload-artifact@v3
        with:
          name: packages-macos
          path: syncthing-*.zip

  notarize-macos:
    name: Notarize for macOS
    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
    environment: signing
    needs:
      - package-macos
      - basics
    runs-on: macos-latest
    steps:
      - name: Download artifacts
        uses: actions/download-artifact@v3
        with:
          name: packages-macos

      - name: Notarize binaries
        run: |
          APPSTORECONNECT_API_KEY_PATH="$RUNNER_TEMP/apikey.p8"
          echo "$APPSTORECONNECT_API_KEY" | base64 -d -o "$APPSTORECONNECT_API_KEY_PATH"
          for file in syncthing-macos-*.zip ; do
            xcrun notarytool submit \
              -k "$APPSTORECONNECT_API_KEY_PATH" \
              -d "$APPSTORECONNECT_API_KEY_ID" \
              -i "$APPSTORECONNECT_API_KEY_ISSUER" \
              $file
          done
        env:
          APPSTORECONNECT_API_KEY: ${{ secrets.APPSTORECONNECT_API_KEY }}
          APPSTORECONNECT_API_KEY_ID: ${{ secrets.APPSTORECONNECT_API_KEY_ID }}
          APPSTORECONNECT_API_KEY_ISSUER: ${{ secrets.APPSTORECONNECT_API_KEY_ISSUER }}

  #
  # Cross compile other unixes
  #

  package-cross:
    name: Package cross compiled
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-cross-${{ hashFiles('**/go.sum') }}

      - name: Create packages
        run: |
          platforms=$(go tool dist list \
            | grep -v aix/ppc64 \
            | grep -v android/ \
            | grep -v darwin/ \
            | grep -v ios/ \
            | grep -v js/ \
            | grep -v linux/ \
            | grep -v nacl/ \
            | grep -v plan9/ \
            | grep -v windows/ \
            | grep -v /wasm \
          )

          # Build for each platform with errors silenced, because we expect
          # some oddball platforms to fail. This avoids a bunch of errors in
          # the GitHub Actions output, instead summarizing each build
          # failure as a warning.
          for plat in $platforms; do
            goos="${plat%/*}"
            goarch="${plat#*/}"
            echo "::group ::$plat"
            if ! go run build.go -goos "$goos" -goarch "$goarch" tar 2>/dev/null; then
              echo "::warning ::Failed to build for $plat"
            fi
            echo "::endgroup::"
          done
        env:
          CGO_ENABLED: "0"

      - name: Archive artifacts
        uses: actions/upload-artifact@v3
        with:
          name: packages-other
          path: syncthing-*.tar.gz

  #
  # Source
  #

  package-source:
    name: Package source code
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - name: Package source
        run: |
          version=$(go run build.go version)
          echo "$version" > RELEASE

          go mod vendor
          go run build.go assets

          cd ..

          tar c -z -f "syncthing-source-$version.tar.gz" \
            --exclude .git \
            syncthing

          mv "syncthing-source-$version.tar.gz" syncthing

      - name: Archive artifacts
        uses: actions/upload-artifact@v3
        with:
          name: packages-source
          path: syncthing-source-*.tar.gz

  #
  # Sign binaries for auto upgrade, generate ASC signature files
  #

  sign-for-upgrade:
    name: Sign for upgrade
    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-'))
    environment: signing
    needs:
      - basics
      - package-windows
      - package-linux
      - package-macos
      - package-cross
      - package-source
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/checkout@v4
        with:
          repository: syncthing/release-tools
          path: tools
          fetch-depth: 0

      - name: Download artifacts
        uses: actions/download-artifact@v3

      - name: Install signing tool
        run: |
          go install ./cmd/stsigtool

      - name: Sign archives
        run: |
          export PRIVATE_KEY="$RUNNER_TEMP/privkey.pem"
          export PATH="$PATH:$(go env GOPATH)/bin"
          echo "$STSIGTOOL_PRIVATE_KEY" | base64 -d > "$PRIVATE_KEY"
          mkdir packages
          mv packages-*/* packages
          pushd packages
          "$GITHUB_WORKSPACE/tools/sign-only"
          rm -f "$PRIVATE_KEY"
        env:
          STSIGTOOL_PRIVATE_KEY: ${{ secrets.STSIGTOOL_PRIVATE_KEY }}

      - name: Create and sign .asc files
        run: |
          sudo apt update
          sudo apt -y install gnupg

          export SIGNING_KEY="$RUNNER_TEMP/gpg-secret.asc"
          echo "$GNUPG_SIGNING_KEY_BASE64" | base64 -d > "$SIGNING_KEY"
          gpg --import < "$SIGNING_KEY"

          pushd packages
          files=(*.tar.gz *.zip)
          sha1sum "${files[@]}" | gpg --clearsign > sha1sum.txt.asc
          sha256sum "${files[@]}" | gpg --clearsign > sha256sum.txt.asc
          gpg --sign --armour --detach syncthing-source-*.tar.gz
          popd
          rm -f "$SIGNING_KEY" .gnupg
        env:
          GNUPG_SIGNING_KEY_BASE64: ${{ secrets.GNUPG_SIGNING_KEY_BASE64 }}

      - name: Archive artifacts
        uses: actions/upload-artifact@v3
        with:
          name: packages-signed
          path: packages/*

  #
  # Debian
  #

  package-debian:
    name: Package for Debian
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: '3.0'

      - name: Install fpm
        run: |
          gem install fpm

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-debian-${{ hashFiles('**/go.sum') }}

      - name: Package for Debian
        run: |
          for arch in amd64 i386 armhf armel arm64 ; do
            go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb
          done
        env:
          BUILD_USER: debian

      - name: Archive artifacts
        uses: actions/upload-artifact@v3
        with:
          name: debian-packages
          path: "*.deb"

  #
  # Nightlies
  #

  publish-nightly:
    name: Publish nightly build
    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && startsWith(github.ref, 'refs/heads/release-nightly')
    environment: signing
    needs:
      - sign-for-upgrade
      - notarize-macos
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          repository: syncthing/release-tools
          path: tools
          fetch-depth: 0

      - name: Download artifacts
        uses: actions/download-artifact@v3
        with:
          name: packages-signed
          path: packages

      - name: Create release json
        run: |
          cd packages
          "$GITHUB_WORKSPACE/tools/generate-release-json" "$BASE_URL" > nightly.json
        env:
          BASE_URL: https://syncthing.ams3.digitaloceanspaces.com/nightly/

      - name: Push artifacts
        uses: docker://docker.io/rclone/rclone:latest
        env:
          RCLONE_CONFIG_SPACES_TYPE: s3
          RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
          RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
          RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
          RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
          RCLONE_CONFIG_SPACES_ACL: public-read
        with:
          args: sync packages spaces:syncthing/nightly

  #
  # Push release artifacts to Spaces
  #

  publish-release-files:
    name: Publish release files
    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/release'
    environment: signing
    needs:
      - sign-for-upgrade
      - package-debian
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Download signed packages
        uses: actions/download-artifact@v3
        with:
          name: packages-signed
          path: packages

      - name: Download debian packages
        uses: actions/download-artifact@v3
        with:
          name: debian-packages
          path: packages

      - name: Set version
        run: |
          version=$(go run build.go version)
          echo "VERSION=$version" >> $GITHUB_ENV

      - name: Push to Spaces (${{ env.VERSION }})
        uses: docker://docker.io/rclone/rclone:latest
        env:
          RCLONE_CONFIG_SPACES_TYPE: s3
          RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
          RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
          RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
          RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
          RCLONE_CONFIG_SPACES_ACL: public-read
        with:
          args: sync packages spaces:syncthing/release/${{ env.VERSION }}

      - name: Push to Spaces (latest)
        uses: docker://docker.io/rclone/rclone:latest
        env:
          RCLONE_CONFIG_SPACES_TYPE: s3
          RCLONE_CONFIG_SPACES_PROVIDER: DigitalOcean
          RCLONE_CONFIG_SPACES_ACCESS_KEY_ID: ${{ secrets.SPACES_KEY }}
          RCLONE_CONFIG_SPACES_SECRET_ACCESS_KEY: ${{ secrets.SPACES_SECRET }}
          RCLONE_CONFIG_SPACES_ENDPOINT: ams3.digitaloceanspaces.com
          RCLONE_CONFIG_SPACES_ACL: public-read
        with:
          args: sync spaces:syncthing/release/${{ env.VERSION }} spaces:syncthing/release/latest

  #
  # Build and push to Docker Hub
  #

  docker-syncthing:
    name: Build and push Docker images
    runs-on: ubuntu-latest
    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release-'))
    environment: docker
    strategy:
      matrix:
        pkg:
          - syncthing
          - strelaysrv
          - stdiscosrv
        include:
          - pkg: syncthing
            dockerfile: Dockerfile
            image: syncthing/syncthing
          - pkg: strelaysrv
            dockerfile: Dockerfile.strelaysrv
            image: syncthing/relaysrv
          - pkg: stdiscosrv
            dockerfile: Dockerfile.stdiscosrv
            image: syncthing/discosrv
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-docker-${{ matrix.pkg }}-${{ hashFiles('**/go.sum') }}

      - name: Build binaries
        run: |
          for arch in amd64 arm64 arm; do
            go run build.go -goos linux -goarch "$arch" -no-upgrade build ${{ matrix.pkg }}
            mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-"$arch"
          done
        env:
          CGO_ENABLED: "0"
          BUILD_USER: docker

      - name: Check if we will be able to push images
        run: |
          if [[ "${{ secrets.DOCKERHUB_TOKEN }}" != "" ]]; then
            echo "DOCKER_PUSH=true" >> $GITHUB_ENV;
          fi

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        if: env.DOCKER_PUSH == 'true'
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Set version tags
        run: |
          version=$(go run build.go version)
          version=${version#v}
          if [[ $version == @([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]) ]] ; then
            echo Release version, pushing to :latest and version tags
            major=${version%.*.*}
            minor=${version%.*}
            tags=${{ matrix.image }}:$version,${{ matrix.image }}:$major,${{ matrix.image }}:$minor,${{ matrix.image }}:latest
          elif [[ $version == *-rc.@([0-9]|[0-9][0-9]) ]] ; then
            echo Release candidate, pushing to :rc
            tags=${{ matrix.image }}:rc
          else
            echo Development version, pushing to :edge
            tags=${{ matrix.image }}:edge
          fi
          echo "DOCKER_TAGS=$tags" >> $GITHUB_ENV

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ${{ matrix.dockerfile }}
          platforms: linux/amd64,linux/arm64,linux/arm/7
          push: ${{ env.DOCKER_PUSH == 'true' }}
          tags: ${{ env.DOCKER_TAGS }}

  #
  # Check for known vulnerabilities in Go dependencies
  #

  govulncheck:
    runs-on: ubuntu-latest
    name: Run govulncheck
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
          cache: false
          check-latest: true

      - name: run govulncheck
        run: |
          go run build.go assets
          go install golang.org/x/vuln/cmd/govulncheck@latest
          govulncheck ./...
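Two of the techniques in this workflow are worth pulling out. First, the macOS universal package: the same binary is built once per architecture and the results are merged with `lipo` into a single executable that runs natively on both Intel and Apple silicon. A condensed sketch of the "Create package (universal)" step (macOS only; the file names are shortened for illustration):

```bash
# Merge an amd64 and an arm64 build of the same binary into one fat binary.
lipo -create syncthing-amd64 syncthing-arm64 -o syncthing
lipo -archs syncthing    # prints: x86_64 arm64
```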
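Second, the Docker tag selection in "Set version tags" is plain bash pattern matching: inside `[[ ... == ... ]]` the right-hand side is treated as an extglob pattern, so `@([0-9]|[0-9][0-9])` matches a one- or two-digit version component. A standalone sketch of the same logic, with the image name as a placeholder:

```bash
# Classify a version string into release / rc / edge tags, as the
# workflow step above does. IMAGE is a placeholder value.
docker_tags() {
    local version=${1#v} IMAGE=syncthing/syncthing
    if [[ $version == @([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]) ]]; then
        # Full release: tag major, major.minor, exact version, and latest.
        echo "$IMAGE:$version,$IMAGE:${version%.*.*},$IMAGE:${version%.*},$IMAGE:latest"
    elif [[ $version == *-rc.@([0-9]|[0-9][0-9]) ]]; then
        echo "$IMAGE:rc"
    else
        echo "$IMAGE:edge"
    fi
}

docker_tags v1.25.0   # -> syncthing/syncthing:1.25.0,...:1,...:1.25,...:latest
```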
.github/workflows/trigger-nightly.yaml (21 lines changed, vendored)
@@ -1,21 +0,0 @@
name: Trigger nightly build & release
on:
  workflow_dispatch:
  schedule:
    # Run nightly build at 01:00 UTC
    - cron: '00 01 * * *'

jobs:

  trigger-nightly:
    runs-on: ubuntu-latest
    name: Push to release-nightly to trigger build
    steps:
      - uses: actions/checkout@v4
        with:
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
          fetch-depth: 0

      - run: |
          git push origin main:release-nightly
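The single push step relies on git's refspec form; for reference:

```bash
# git push <remote> <src>:<dst> pushes the local ref <src> to the remote
# ref <dst>. Here the tip of main lands on release-nightly, and the build
# workflow (which watches release-nightly branches) reacts to that update.
git push origin main:release-nightly
```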
.github/workflows/update-docs-translations.yaml (28 lines changed, vendored)
@@ -1,28 +0,0 @@
name: Update translations and documentation
on:
  workflow_dispatch:
  schedule:
    - cron: '42 3 * * 1'

jobs:

  update_transifex_docs:
    runs-on: ubuntu-latest
    name: Update translations and documentation
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
      - uses: actions/setup-go@v4
        with:
          go-version: ^1.19.6
      - run: |
          set -euo pipefail
          git config --global user.name 'Syncthing Release Automation'
          git config --global user.email 'release@syncthing.net'
          bash build.sh translate
          bash build.sh prerelease
          git push
        env:
          WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }}
.gitignore (21 lines changed, vendored)
@@ -1,20 +1,3 @@
/syncthing
/stdiscosrv
syncthing
*.tar.gz
*.zip
*.asc
*.deb
*.exe
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
RELEASE
deb
*.bz2
/repos
/proto/scripts/protoc-gen-gosyncthing
/gui/next-gen-gui
dist
.golangci.yml
@@ -1,26 +0,0 @@
linters-settings:
  maligned:
    suggest-new: true

linters:
  enable-all: true
  disable:
    - goimports
    - depguard
    - lll
    - gochecknoinits
    - gochecknoglobals
    - gofmt
    - scopelint
    - gocyclo
    - funlen
    - wsl
    - gocognit
    - godox

service:
  golangci-lint-version: 1.21.x
  prepare:
    - rm -f go.sum # 1.12 -> 1.13 issues with QUIC-go
    - GO111MODULE=on go mod vendor
    - go run build.go assets
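The `service:` block targets the hosted golangci service, but the same config file also drives the CLI. A local invocation sketch, mirroring the `prepare` steps:

```bash
# Generate embedded assets first, as the prepare steps do, then lint.
go run build.go assets
golangci-lint run ./...
```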
.yamlfmt (4 lines changed)
@@ -1,4 +0,0 @@
line_ending: lf
formatter:
  type: basic
  retain_line_breaks: true
AUTHORS (329 lines changed)
@@ -1,329 +0,0 @@
# This is the official list of Syncthing authors for copyright purposes.
#
# THIS FILE IS MOSTLY AUTO GENERATED. IF YOU'VE MADE A COMMIT TO THE
# REPOSITORY YOU WILL BE ADDED HERE AUTOMATICALLY WITHOUT THE NEED FOR
# ANY MANUAL ACTION.
#
# That said, you are welcome to correct your name or add a nickname / GitHub
# user name as appropriate. The format is:
#
#     Name Name Name (nickname) <email1@example.com> <email2@example.com>
#
# The in-GUI authors list is periodically automatically updated from the
# contents of this file.
#

Aaron Bieber (qbit) <qbit@deftly.net>
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com> <adam@proactiveservices.co.uk>
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
Alan Pope <alan@popey.com>
Alberto Donato <albertodonato@users.noreply.github.com>
Aleksey Vasenev <margtu-fivt@ya.ru>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
Alex Xu <alex.hello71@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexander Seiler <seileralex@gmail.com>
Alexandre Alves <alexandrealvesdb.contact@gmail.com>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Aman Gupta <aman@tmm1.net>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
Andreas Sommer <andreas.sommer87@googlemail.com>
andresvia <andres.via@gmail.com>
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
Andrew Meyer <andrewm.bpi@gmail.com>
Andrew Rabert (nvllsvm) <ar@nullsum.net> <6550543+nvllsvm@users.noreply.github.com>
Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github.com>
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
andyleap <andyleap@gmail.com>
Anjan Momi <anjan@momi.ca>
Anthony Goeckner <agoeckner@users.noreply.github.com>
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
Antony Male (canton7) <antony.male@gmail.com>
Anur <anurnomeru@163.com>
Aranjedeath <Aranjedeath@users.noreply.github.com>
Arkadiusz Tymiński <gevleeog@gmail.com>
Aroun <login@b-vo.fr>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Artur Zubilewicz <AkaZecik@users.noreply.github.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
Aurélien Rainone <476650+arl@users.noreply.github.com>
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com>
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Shepherd (benshep) <bjashepherd@gmail.com>
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
Benedikt Heine (bebehei) <bebe@bebehei.de>
Benedikt Morbach <benedikt.morbach@googlemail.com>
Benjamin Nater <17193640+bn4t@users.noreply.github.com>
Benno Fünfstück <benno.fuenfstueck@gmail.com>
Benny Ng (tpng) <benny.tpng@gmail.com>
boomsquared <54829195+boomsquared@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn>
Boris Rybalkin <ribalkin@gmail.com>
Brandon Philips (philips) <brandon@ifup.org>
Brendan Long (brendanlong) <self@brendanlong.com>
Brian R. Becker (brbecker) <brbecker@gmail.com>
bt90 <btom1990@googlemail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Carsten Hagemann (carstenhag) <moter8@gmail.com> <carsten@chagemann.de>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
Cedric Staniewski (xduugu) <cedric@gmx.ca>
chenrui <rui@meetup.com>
Chih-Hsuan Yen <yan12125@gmail.com> <1937689+yan12125@users.noreply.github.com>
Choongkyu <choongkyu.kim+gh@gmail.com> <vapidlyrapid+gh@gmail.com>
Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy>
Chris Tonkinson <chris@masterbran.ch>
Christian Kujau <ckujau@users.noreply.github.com>
Christian Prescott <me@christianprescott.com>
chucic <chucic@seznam.cz>
Colin Kennedy (moshen) <moshen.colin@gmail.com>
Cromefire_ <tim.l@nghorst.net> <26320625+cromefire@users.noreply.github.com>
cui fliter <imcusg@gmail.com>
Cyprien Devillez <cypx@users.noreply.github.com>
d-volution <49024624+d-volution@users.noreply.github.com>
Dale Visser <dale.visser@live.com>
Dan <benda.daniel@gmail.com>
Daniel Barczyk <46358936+DanielBarczyk@users.noreply.github.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc>
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
DeflateAwning <11021263+DeflateAwning@users.noreply.github.com>
Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io>
dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com> <27856297+dependabot-preview[bot]@users.noreply.github.com>
dependabot[bot] <dependabot[bot]@users.noreply.github.com> <49699333+dependabot[bot]@users.noreply.github.com>
derekriemer <derek.riemer@colorado.edu>
desbma <desbma@users.noreply.github.com>
Devon G. Redekopp <devon@redekopp.com>
digital <didev@dinid.net>
Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
Domenic Horner <domenic@tgxn.net>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Elliot Huffman <thelich2@gmail.com>
Emil Hessman (ceh) <emil@hessman.se>
Emil Lundberg <emil@emlun.se>
Eng Zer Jun <engzerjun@gmail.com>
entity0xfe <109791748+entity0xfe@users.noreply.github.com> <entity0xfe@my.domain>
Eric Lesiuta <elesiuta@gmail.com>
Eric P <eric@kastelo.net>
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Evan Spensley <94762716+0evan@users.noreply.github.com>
Evgeny Kuznetsov <evgeny@kuznetsov.md>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix <53702818+f-eliks@users.noreply.github.com>
Felix Ableitner (Nutomic) <me@nutomic.com>
Felix Lampe <mail@flampe.de>
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
Frank Isemann (fti7) <frank@isemann.name>
Gahl Saraf <saraf.gahl@gmail.com> <gahl@raftt.io>
georgespatton <georgespatton@users.noreply.github.com>
ghjklw <malo@jaffre.info>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Gleb Sinyavskiy <zhulik.gleb@gmail.com>
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
greatroar <61184462+greatroar@users.noreply.github.com>
Greg <gco@jazzhaiku.com>
guangwu <guoguangwu@magic-shield.com>
Han Boetes <han@boetes.org>
HansK-p <42314815+HansK-p@users.noreply.github.com>
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Hugo Locurcio <hugo.locurcio@hugo.pro>
Iain Barnett <iainspeed@gmail.com>
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
ignacy123 <ignacy.buczek@onet.pl>
Ikko Ashimine <eltociear@gmail.com>
Ilya Brin <464157+ilyabrin@users.noreply.github.com>
Iskander Sharipov (Alex) <quasilyte@gmail.com>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jack Croft <jccroft1@users.noreply.github.com>
Jacob <jyundt@gmail.com>
Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
James O'Beirne <wild-github@au92.org>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
janost <janost@tuta.io>
Jaroslav Lichtblau <svetlemodry@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
jaseg <githubaccount@jaseg.net>
Jauder Ho <jauderho@users.noreply.github.com>
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
Jaya Kumar <jaya.kumar@ict.nl>
Jeffery To <jeffery.to@gmail.com>
jelle van der Waa <jelle@vdwaa.nl>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
Jerry Jacobs (xor-gate) <jerry.jacobs@xor-gate.org> <xor-gate@users.noreply.github.com>
Jesse Lucas <jesse@jesselucas.com>
Jochen Voss (seehuhn) <voss@seehuhn.de>
Johan Andersson <j@i19.se>
Johan Vromans (sciurius) <jvromans@squirrel.nl>
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
Jonas Thelemann <e-mail@jonas-thelemann.de>
Jonathan <artback@protonmail.com> <jonagn@gmail.com>
Jonathan Cross <jcross@gmail.com>
Jonta <359397+Jonta@users.noreply.github.com>
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
jtagcat <git-514635f7@jtag.cat> <git-12dbd862@jtag.cat>
Jörg Thalheim <Mic92@users.noreply.github.com>
Jędrzej Kula <kula.jedrek@gmail.com>
K.B.Dharun Krishna <kbdharunkrishna@gmail.com>
Kalle Laine <pahakalle@protonmail.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kebin Liu <lkebin@gmail.com>
Keith Harrison <keithh@protonmail.com>
Keith Turner <kturner@apache.org>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>
Kevin Bushiri (keevBush) <keevbush@gmail.com> <36192217+keevBush@users.noreply.github.com>
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
klemens <ka7@github.com>
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Lars Lehtonen <lars.lehtonen@gmail.com>
Laurent Arnoud <laurent@spkdev.net>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Leo Arias (elopio) <yo@elopio.net>
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
Lode Hoste (Zillode) <zillode@zillode.be>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
LSmithx2 <42276854+lsmithx2@users.noreply.github.com>
Lukas Lihotzki <lukas@lihotzki.de>
luzpaz <luzpaz@users.noreply.github.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
marco-m <marco.molteni@laposte.net>
Marcus Legendre <marcus.legendre@gmail.com>
Mario Majila <mariustshipichik@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au>
Martchus <martchus@gmx.net>
Martin Polehla <p0l0us@users.noreply.github.com>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Mateusz Ż <thedead4fun@live.com>
Matic Potočnik <hairyfotr@gmail.com>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
Matteo Ruina <matteo.ruina@gmail.com>
Maurizio Tomasi <ziotom78@gmail.com>
Max <github@germancoding.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
MaximAL <almaximal@ya.ru>
Maxime Thirouin <m@moox.io>
Maximilian <maxi.rostock@outlook.de>
mclang <1721600+mclang@users.noreply.github.com>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Michael Rienstra <mrienstra@gmail.com>
Michael Tilli (pyfisch) <pyfisch@gmail.com>
MichaIng <micha@dietpi.com>
Migelo <miha@filetki.si>
Mike Boone <mike@boonedocks.net>
MikeLund <MikeLund@users.noreply.github.com>
MikolajTwarog <43782609+MikolajTwarog@users.noreply.github.com>
Mingxuan Lin <gdlmx@users.noreply.github.com>
mv1005 <49659413+mv1005@users.noreply.github.com>
Nate Morrison (nrm21) <natemorrison@gmail.com>
Naveen <172697+naveensrinivasan@users.noreply.github.com>
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
Nick Busey <NickBusey@users.noreply.github.com>
Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com>
Nicolas Braud-Santoni <nicolas@braud-santoni.eu>
Nicolas Perraut <n.perraut@gmail.com>
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
Nils Jakobi (thunderstorm99) <jakobi.nils@gmail.com>
NinoM4ster <ninom4ster@gmail.com>
Nitroretro <43112364+Nitroretro@users.noreply.github.com>
NoLooseEnds <jon.koslung@gmail.com>
Oliver Freyermuth <o.freyermuth@googlemail.com>
orangekame3 <miya.org.0309@gmail.com>
otbutz <tbutz@optitool.de>
Otiel <Otiel@users.noreply.github.com>
overkill <22098433+0verk1ll@users.noreply.github.com>
Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
Pablo <pbaeyens31+github@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Paul Brit <paulbrit44@gmail.com>
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
Paweł Rozlach <vespian@users.noreply.github.com>
perewa <cavalcante.ten@gmail.com>
Peter Badida <KeyWeeUsr@users.noreply.github.com>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Hoeg (peterhoeg) <peter@speartail.com>
Peter Marquardt (wwwutz) <wwwutz@gmail.com> <wwwutz@googlemail.com>
Phani Rithvij <phanirithvij2000@gmail.com>
Phil Davis <phil.davis@inf.org>
Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Pier Paolo Ramon <ramonpierre@gmail.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
Quentin Hibon <qh.public@yahoo.com>
Rahmi Pruitt <rjpruitt16@gmail.com>
red_led <red-led@users.noreply.github.com>
Richard Hartmann <RichiH@users.noreply.github.com>
Robert Carosi (nov1n) <robert@carosi.nl>
Roberto Santalla <roobre@users.noreply.github.com>
Robin Schoonover <robin@cornhooves.org>
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
Ross Smith II (rasa) <ross@smithii.com>
rubenbe <github-com-00ff86@vandamme.email>
Ruslan Yevdokymov <38809160+ruslanye@users.noreply.github.com>
Ryan Qian <i@bitbili.net>
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
Scott Klupfel (kluppy) <kluppy@going2blue.com>
sec65 <106604020+sec65@users.noreply.github.com>
Sergey Mishin (ralder) <ralder@yandex.ru>
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
Simon Frei (imsodin) <freisim93@gmail.com>
Simon Mwepu <simonmwepu@gmail.com>
Sly_tom_cat <slytomcat@mail.ru>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Syncthing Automation <automation@syncthing.net>
Syncthing Release Automation <release@syncthing.net>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Thomas Hipp <thomashipp@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tobias Klauser <tobias.klauser@gmail.com>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tom Jakubowski <tom@crystae.net>
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
Tommy Thorn <tommy-github-email@thorn.ws>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Tyler Kropp <kropptyler@gmail.com>
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
Victor Buinsky (buinsky) <vix_booja@tut.by>
Vik <63919734+ViktorOn@users.noreply.github.com>
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
villekalliomaki <53118179+villekalliomaki@users.noreply.github.com>
Vladimir Rusinov <vrusinov@google.com> <vladimir.rusinov@gmail.com>
wangguoliang <liangcszzu@163.com>
Will Rouesnel <wrouesnel@wrouesnel.com>
William A. Kennington III (wkennington) <william@wkennington.com>
wouter bolsterlee <wouter@bolsterl.ee>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
xarx00 <xarx00@users.noreply.github.com>
Xavier O. (damajor) <damajor@gmail.com>
xjtdy888 (xjtdy888) <xjtdy888@163.com>
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
佛跳墙 <daoquan@qq.com>
落心 <luoxin.ttt@gmail.com>
CONDUCT.md (73 lines changed)
@@ -1,73 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at security@syncthing.net. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org
@@ -1,68 +1,22 @@
## Reporting Bugs

Please file bugs in the [GitHub Issue
Tracker](https://github.com/syncthing/syncthing/issues). Include at
least the following:

- What happened

- What did you expect to happen instead of what *did* happen, if it's
  not crazy obvious

- What operating system, operating system version and version of
  Syncthing you are running

- The same for other connected devices, where relevant

- Screenshot if the issue concerns something visible in the GUI

- Console log entries, where possible and relevant

If you're not sure whether something is relevant, erring on the side of
too much information will never get you yelled at. :)

## Contributing Translations

All translations are done via
[Weblate](https://hosted.weblate.org/projects/syncthing/). If you wish
to contribute to a translation, just head over there and sign up.
Before every release, the language resources are updated from the
latest info on Weblate.

Note that the previously used service at
[Transifex](https://www.transifex.com/projects/p/syncthing/) is being
retired and we kindly ask you to sign up on Weblate for continued
involvement.

## Contributing Code

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](https://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.

## Contributing Documentation

Updates to the [documentation site](https://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).

## Licensing

All contributions are made available under the same license as the already
existing material being contributed to. For most of the project and unless
otherwise stated this means MPLv2, but there are exceptions:

- Certain commands (under cmd/...) may have a separate license, indicated by
  the presence of a LICENSE file in the corresponding directory.

- The documentation (man/...) is licensed under the Creative Commons
  Attribution 4.0 International License.

- Projects under vendor/... are copyright by and licensed from their
  respective original authors. Contributions should be made to the original
  project, not here.

Regardless of the license in effect, you retain the copyright to your
contribution.

Please do contribute!

## Building

[See the wiki](https://github.com/calmh/syncthing/wiki/Building)

## Tests

Yes please!

## Style

`go fmt`

## Documentation

[Hack it here](https://github.com/calmh/syncthing/wiki)

## License

MIT
49 Dockerfile
@@ -1,49 +0,0 @@
ARG GOVERSION=latest

#
# Maybe build Syncthing. This is a bit ugly as we can't make an entire
# section of the Dockerfile conditional, so we end up always pulling the
# golang image as builder. Then we check if the executable we need already
# exists (pre-built), otherwise we build it.
#

FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
RUN if [ ! -f syncthing-linux-$TARGETARCH ] ; then \
      go run build.go -no-upgrade build syncthing ; \
      mv syncthing syncthing-linux-$TARGETARCH ; \
    fi

#
# The rest of the Dockerfile uses the binary from the builder, prebuilt or
# not.
#

FROM alpine
ARG TARGETARCH

EXPOSE 8384 22000/tcp 22000/udp 21027/udp

VOLUME ["/var/syncthing"]

RUN apk add --no-cache ca-certificates curl libcap su-exec tzdata

COPY --from=builder /src/syncthing-linux-$TARGETARCH /bin/syncthing
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh

ENV PUID=1000 PGID=1000 HOME=/var/syncthing

HEALTHCHECK --interval=1m --timeout=10s \
  CMD curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1

ENV STGUIADDRESS=0.0.0.0:8384
ENV STHOMEDIR=/var/syncthing/config
RUN chmod 755 /bin/entrypoint.sh
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing"]
@@ -1,9 +0,0 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION

# FPM to build Debian packages
RUN apt-get update && apt-get install -y --no-install-recommends \
    locales rubygems ruby-dev build-essential git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && gem install fpm
@@ -1,8 +0,0 @@
FROM alpine
ARG TARGETARCH

EXPOSE 8080

COPY stcrashreceiver-linux-${TARGETARCH} /bin/stcrashreceiver

ENTRYPOINT [ "/bin/stcrashreceiver" ]
@@ -1,34 +0,0 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
RUN if [ ! -f stdiscosrv-linux-$TARGETARCH ] ; then \
      go run build.go -no-upgrade build stdiscosrv ; \
      mv stdiscosrv stdiscosrv-linux-$TARGETARCH ; \
    fi

FROM alpine
ARG TARGETARCH

EXPOSE 19200 8443

VOLUME ["/var/stdiscosrv"]

RUN apk add --no-cache ca-certificates su-exec

COPY --from=builder /src/stdiscosrv-linux-$TARGETARCH /bin/stdiscosrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh

ENV PUID=1000 PGID=1000 HOME=/var/stdiscosrv

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z localhost 8443 || exit 1

WORKDIR /var/stdiscosrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/stdiscosrv"]
@@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH

EXPOSE 8080

RUN apk add --no-cache ca-certificates su-exec curl
ENV PUID=1000 PGID=1000 MAXMIND_KEY=

RUN mkdir /var/strelaypoolsrv && chown 1000 /var/strelaypoolsrv
USER 1000

COPY strelaypoolsrv-linux-${TARGETARCH} /bin/strelaypoolsrv
COPY script/strelaypoolsrv-entrypoint.sh /bin/entrypoint.sh

WORKDIR /var/strelaypoolsrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/strelaypoolsrv", "-listen", ":8080"]
@@ -1,34 +0,0 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
RUN if [ ! -f strelaysrv-linux-$TARGETARCH ] ; then \
      go run build.go -no-upgrade build strelaysrv ; \
      mv strelaysrv strelaysrv-linux-$TARGETARCH ; \
    fi

FROM alpine
ARG TARGETARCH

EXPOSE 22067 22070

VOLUME ["/var/strelaysrv"]

RUN apk add --no-cache ca-certificates su-exec

COPY --from=builder /src/strelaysrv-linux-$TARGETARCH /bin/strelaysrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh

ENV PUID=1000 PGID=1000 HOME=/var/strelaysrv

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z localhost 22067 || exit 1

WORKDIR /var/strelaysrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/strelaysrv"]

@@ -1,8 +0,0 @@
FROM alpine
ARG TARGETARCH

EXPOSE 8080

COPY stupgrades-linux-${TARGETARCH} /bin/stupgrades

ENTRYPOINT [ "/bin/stupgrades" ]
83 GOALS.md
@@ -1,83 +0,0 @@
# The Syncthing Goals

Syncthing is a **continuous file synchronization program**. It synchronizes
files between two or more computers. We strive to fulfill the goals below.
The goals are listed in order of importance, the most important one being
the first.

> "Syncing files" here is precise. It means we specifically exclude things
> that are not files - calendar items, instant messages, and so on. If those
> are in fact stored as files on disk, they can of course be synced as
> files.

Syncthing should be:

### 1. Safe From Data Loss

Protecting the user's data is paramount. We take every reasonable precaution
to avoid corrupting the user's files.

> This is the overriding goal, without which synchronizing files becomes
> pointless. This means that we do not make unsafe trade-offs for the sake
> of performance or, in some cases, even usability.

### 2. Secure Against Attackers

Again, protecting the user's data is paramount. Regardless of our other
goals, we must never allow the user's data to be susceptible to eavesdropping
or modification by unauthorized parties.

> This should be understood in context. It is not necessarily reasonable to
> expect Syncthing to be resistant against well equipped state level
> attackers. We will, however, do our best. Note also that this is different
> from anonymity, which is not, currently, a goal.

### 3. Easy to Use

Syncthing should be approachable, understandable, and inclusive.

> Complex concepts and maths form the base of Syncthing's functionality.
> This should nonetheless be abstracted or hidden to a degree where
> Syncthing is usable by the general public.

### 4. Automatic

User interaction should be required only when absolutely necessary.

> Specifically this means that changes to files are picked up without
> prompting, conflicts are resolved without prompting, and connections are
> maintained without prompting. We only prompt the user when it is required
> to fulfill one of the (overriding) Secure, Safe or Easy goals.

### 5. Universally Available

Syncthing should run on every common computer. We are mindful that the
latest technology is not always available to every individual.

> Computers include desktops, laptops, servers, virtual machines, small
> general purpose computers such as Raspberry Pis and, *where possible*,
> tablets and phones. NAS appliances, toasters, cars, firearms, thermostats,
> and so on may include computing capabilities but it is not our goal for
> Syncthing to run smoothly on these devices.

### 6. For Individuals

Syncthing is primarily about empowering the individual user with safe,
secure, and easy to use file synchronization.

> We acknowledge that it's also useful in an enterprise setting and include
> functionality to support that. If this is in conflict with the
> requirements of the individual, those will however take priority.

### 7. Everything Else

There are many things we care about that don't make it on to the list. It is
fine to optimize for these values as well, as long as they are not in
conflict with the stated goals above.

> For example, performance is a thing we care about. We just don't care more
> about it than safety, security, etc. Maintainability of the code base and
> providing entertainment value for the maintainers are also things that
> matter. It is understood that there are aspects of Syncthing that are
> suboptimal or even in opposition with the goals above. However, we
> continuously strive to align Syncthing more and more with these goals.
392 LICENSE
@@ -1,373 +1,19 @@
Mozilla Public License Version 2.0
==================================

1. Definitions
--------------

1.1. "Contributor"
    means each individual or legal entity that creates, contributes to
    the creation of, or owns Covered Software.

1.2. "Contributor Version"
    means the combination of the Contributions of others (if any) used
    by a Contributor and that particular Contributor's Contribution.

1.3. "Contribution"
    means Covered Software of a particular Contributor.

1.4. "Covered Software"
    means Source Code Form to which the initial Contributor has attached
    the notice in Exhibit A, the Executable Form of such Source Code
    Form, and Modifications of such Source Code Form, in each case
    including portions thereof.

1.5. "Incompatible With Secondary Licenses"
    means

    (a) that the initial Contributor has attached the notice described
        in Exhibit B to the Covered Software; or

    (b) that the Covered Software was made available under the terms of
        version 1.1 or earlier of the License, but not also under the
        terms of a Secondary License.

1.6. "Executable Form"
    means any form of the work other than Source Code Form.

1.7. "Larger Work"
    means a work that combines Covered Software with other material, in
    a separate file or files, that is not Covered Software.

1.8. "License"
    means this document.

1.9. "Licensable"
    means having the right to grant, to the maximum extent possible,
    whether at the time of the initial grant or subsequently, any and
    all of the rights conveyed by this License.

1.10. "Modifications"
    means any of the following:

    (a) any file in Source Code Form that results from an addition to,
        deletion from, or modification of the contents of Covered
        Software; or

    (b) any new file in Source Code Form that contains any Covered
        Software.

1.11. "Patent Claims" of a Contributor
    means any patent claim(s), including without limitation, method,
    process, and apparatus claims, in any patent Licensable by such
    Contributor that would be infringed, but for the grant of the
    License, by the making, using, selling, offering for sale, having
    made, import, or transfer of either its Contributions or its
    Contributor Version.

1.12. "Secondary License"
    means either the GNU General Public License, Version 2.0, the GNU
    Lesser General Public License, Version 2.1, the GNU Affero General
    Public License, Version 3.0, or any later versions of those
    licenses.

1.13. "Source Code Form"
    means the form of the work preferred for making modifications.

1.14. "You" (or "Your")
    means an individual or a legal entity exercising rights under this
    License. For legal entities, "You" includes any entity that
    controls, is controlled by, or is under common control with You. For
    purposes of this definition, "control" means (a) the power, direct
    or indirect, to cause the direction or management of such entity,
    whether by contract or otherwise, or (b) ownership of more than
    fifty percent (50%) of the outstanding shares or beneficial
    ownership of such entity.

2. License Grants and Conditions
--------------------------------

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

(a) under intellectual property rights (other than patent or trademark)
    Licensable by such Contributor to use, reproduce, make available,
    modify, display, perform, distribute, and otherwise exploit its
    Contributions, either on an unmodified basis, with Modifications, or
    as part of a Larger Work; and

(b) under Patent Claims of such Contributor to make, use, sell, offer
    for sale, have made, import, and otherwise transfer either its
    Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

(a) for any code that a Contributor has removed from Covered Software;
    or

(b) for infringements caused by: (i) Your and any other third party's
    modifications of Covered Software, or (ii) the combination of its
    Contributions with other software (except as part of its Contributor
    Version); or

(c) under Patent Claims infringed by Covered Software in the absence of
    its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.

3. Responsibilities
-------------------

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

(a) such Covered Software must also be made available in Source Code
    Form, as described in Section 3.1, and You must inform recipients of
    the Executable Form how they can obtain a copy of such Source Code
    Form by reasonable means in a timely manner, at a charge no more
    than the cost of distribution to the recipient; and

(b) You may distribute such Executable Form under the terms of this
    License, or sublicense it under different terms, provided that the
    license for the Executable Form does not attempt to limit or alter
    the recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------

If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.

5. Termination
--------------

5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.

************************************************************************
*                                                                      *
*  6. Disclaimer of Warranty                                           *
*  -------------------------                                           *
*                                                                      *
*  Covered Software is provided under this License on an "as is"       *
*  basis, without warranty of any kind, either expressed, implied, or  *
*  statutory, including, without limitation, warranties that the       *
*  Covered Software is free of defects, merchantable, fit for a        *
*  particular purpose or non-infringing. The entire risk as to the     *
*  quality and performance of the Covered Software is with You.        *
*  Should any Covered Software prove defective in any respect, You     *
*  (not any Contributor) assume the cost of any necessary servicing,   *
*  repair, or correction. This disclaimer of warranty constitutes an   *
*  essential part of this License. No use of any Covered Software is   *
*  authorized under this License except under this disclaimer.         *
*                                                                      *
************************************************************************

************************************************************************
*                                                                      *
*  7. Limitation of Liability                                          *
*  --------------------------                                          *
*                                                                      *
*  Under no circumstances and under no legal theory, whether tort      *
*  (including negligence), contract, or otherwise, shall any           *
*  Contributor, or anyone who distributes Covered Software as          *
*  permitted above, be liable to You for any direct, indirect,         *
*  special, incidental, or consequential damages of any character      *
*  including, without limitation, damages for lost profits, loss of    *
*  goodwill, work stoppage, computer failure or malfunction, or any    *
*  and all other commercial damages or losses, even if such party      *
*  shall have been informed of the possibility of such damages. This   *
*  limitation of liability shall not apply to liability for death or   *
*  personal injury resulting from such party's negligence to the       *
*  extent applicable law prohibits such limitation. Some               *
*  jurisdictions do not allow the exclusion or limitation of           *
*  incidental or consequential damages, so this exclusion and          *
*  limitation may not apply to You.                                    *
*                                                                      *
************************************************************************

8. Litigation
-------------

Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.

9. Miscellaneous
----------------

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.

10. Versions of the License
---------------------------

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

  This Source Code Form is subject to the terms of the Mozilla Public
  License, v. 2.0. If a copy of the MPL was not distributed with this
  file, You can obtain one at https://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

  This Source Code Form is "Incompatible With Secondary Licenses", as
  defined by the Mozilla Public License, v. 2.0.

Copyright (C) 2013 Jakob Borg

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

- The above copyright notice and this permission notice shall be included in
  all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
109 README-Docker.md
@@ -1,109 +0,0 @@
# Docker Container for Syncthing

Use the Dockerfile in this repo, or pull the `syncthing/syncthing` image
from Docker Hub.

Use the `/var/syncthing` volume to have the synchronized files available on
the host. You can add more folders and map them as you prefer.

Note that Syncthing runs as UID 1000 and GID 1000 by default. These may be
altered with the `PUID` and `PGID` environment variables. In addition, the
name of the Syncthing instance can optionally be defined by using the
`--hostname=syncthing` parameter.

To grant Syncthing additional capabilities without running as root, use the
`PCAP` environment variable with the same syntax as that for `setcap(8)`.
For example, `PCAP=cap_chown,cap_fowner+ep`.

## Example Usage

**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run -p 8384:8384 -p 22000:22000/tcp -p 22000:22000/udp -p 21027:21027/udp \
    -v /wherever/st-sync:/var/syncthing \
    --hostname=my-syncthing \
    syncthing/syncthing:latest
```

**Docker compose**
```yml
---
version: "3"
services:
  syncthing:
    image: syncthing/syncthing
    container_name: syncthing
    hostname: my-syncthing
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /wherever/st-sync:/var/syncthing
    ports:
      - 8384:8384 # Web UI
      - 22000:22000/tcp # TCP file transfers
      - 22000:22000/udp # QUIC file transfers
      - 21027:21027/udp # Receive local discovery broadcasts
    restart: unless-stopped
```

## Discovery

Note that Docker's default network mode prevents local IP addresses from
being discovered, as Syncthing is only able to see the internal IP of the
container on the `172.17.0.0/16` subnet. This will result in poor transfer
rates if local device addresses are not manually configured.

It is therefore advisable to use the [host network mode](https://docs.docker.com/network/host/) instead:

**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run --network=host \
    -v /wherever/st-sync:/var/syncthing \
    syncthing/syncthing:latest
```

**Docker compose**
```yml
---
version: "3"
services:
  syncthing:
    image: syncthing/syncthing
    container_name: syncthing
    hostname: my-syncthing
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /wherever/st-sync:/var/syncthing
    network_mode: host
    restart: unless-stopped
```

Be aware that Syncthing alone is then in control of which interfaces and
ports it listens on. You can edit the Syncthing configuration to change the
defaults if there are conflicts.

## GUI Security

By default Syncthing inside the Docker image listens on 0.0.0.0:8384 to
allow GUI connections via the Docker proxy. This is set by the
`STGUIADDRESS` environment variable in the Dockerfile, as it differs from
what Syncthing would otherwise use by default. This means you should set up
authentication in the GUI, like for any other externally reachable Syncthing
instance. If you do not require the GUI, or you use host networking, you can
unset the `STGUIADDRESS` variable to have Syncthing fall back to listening
on 127.0.0.1:

```
$ docker pull syncthing/syncthing
$ docker run -e STGUIADDRESS= \
    -v /wherever/st-sync:/var/syncthing \
    syncthing/syncthing:latest
```

With the environment variable unset, Syncthing will follow what is set in
the configuration file / GUI settings dialog.
137 README.md
@@ -1,119 +1,38 @@
[![Syncthing][14]][15]

---

[](https://www.mozilla.org/MPL/2.0/)
[](https://bestpractices.coreinfrastructure.org/projects/88)
[](https://goreportcard.com/report/github.com/syncthing/syncthing)

## Goals

Syncthing is a **continuous file synchronization program**. It synchronizes
files between two or more computers. We strive to fulfill the goals below.
The goals are listed in order of importance, the most important ones first.
This is the summary version of the goal list - for more
commentary, see the full [Goals document][13].

Syncthing should be:

1. **Safe From Data Loss**

   Protecting the user's data is paramount. We take every reasonable
   precaution to avoid corrupting the user's files.

2. **Secure Against Attackers**

   Again, protecting the user's data is paramount. Regardless of our other
   goals, we must never allow the user's data to be susceptible to
   eavesdropping or modification by unauthorized parties.

3. **Easy to Use**

   Syncthing should be approachable, understandable, and inclusive.

4. **Automatic**

   User interaction should be required only when absolutely necessary.

5. **Universally Available**

   Syncthing should run on every common computer. We are mindful that the
   latest technology is not always available to every individual.

6. **For Individuals**

   Syncthing is primarily about empowering the individual user with safe,
   secure, and easy to use file synchronization.

7. **Everything Else**

   There are many things we care about that don't make it on to the list. It
   is fine to optimize for these values, as long as they are not in conflict
   with the stated goals above.

## Getting Started

Take a look at the [getting started guide][2].

There are a few examples for keeping Syncthing running in the background
on your system in [the etc directory][3]. There are also several [GUI
implementations][11] for Windows, Mac, and Linux.

## Docker

To run Syncthing in Docker, see [the Docker README][16].

## Vote on features/bugs

We'd like to encourage you to [vote][12] on issues that matter to you.
This helps the team understand what are the biggest pain points for our
users, and could potentially influence what is being worked on next.

## Getting in Touch

The first and best point of contact is the [Forum][8].
If you've found something that is clearly a
bug, feel free to report it in the [GitHub issue tracker][10].

If you believe that you’ve found a Syncthing-related security vulnerability,
please report it by emailing security@syncthing.net. Do not report it in the
Forum or issue tracker.

## Building

Building Syncthing from source is easy. After extracting the source bundle
from a release or checking out git, you just need to run `go run build.go`
and the binaries are created in `./bin`. There's [a guide][5] with more
details on the build process.

## Signed Releases

As of v0.10.15 and onwards, release binaries are GPG signed with the key
D26E6ED000654A3E, available from https://syncthing.net/security.html and
most key servers.

There is also a built-in automatic upgrade mechanism (disabled in some
distribution channels) which uses a compiled-in ECDSA signature. macOS
binaries are also properly code signed.

## Documentation

Please see the Syncthing [documentation site][6] [[source]][17].

All code is licensed under the [MPLv2 License][7].

[1]: https://docs.syncthing.net/specs/bep-v1.html
[2]: https://docs.syncthing.net/intro/getting-started.html
[3]: https://github.com/syncthing/syncthing/blob/main/etc
[5]: https://docs.syncthing.net/dev/building.html
[6]: https://docs.syncthing.net/
[7]: https://github.com/syncthing/syncthing/blob/main/LICENSE
[8]: https://forum.syncthing.net/
[10]: https://github.com/syncthing/syncthing/issues
[11]: https://docs.syncthing.net/users/contrib.html#gui-wrappers
[12]: https://www.bountysource.com/teams/syncthing/issues
[13]: https://github.com/syncthing/syncthing/blob/main/GOALS.md
[14]: assets/logo-text-128.png
[15]: https://syncthing.net/
[16]: https://github.com/syncthing/syncthing/blob/main/README-Docker.md
[17]: https://github.com/syncthing/docs

syncthing
=========

This is `syncthing`, an open BitTorrent Sync alternative. It is
currently far from ready for mass consumption, but it is a usable proof
of concept and tech demo. The following are the project goals:

1. Define an open, secure, language neutral protocol usable for
   efficient synchronization of a file repository between an arbitrary
   number of nodes. This is the [Block Exchange
   Protocol](https://github.com/calmh/syncthing/blob/master/protocol/PROTOCOL.md)
   (BEP).

2. Provide the reference implementation to demonstrate the usability of
   said protocol. This is the `syncthing` utility.

The two are evolving together; the protocol is not to be considered
stable until syncthing 1.0 is released, at which point it is locked down
for incompatible changes.

Syncthing does not use the BitTorrent protocol. The reasons for this are
1) we don't know if BitTorrent Sync does either, so there's nothing to
be compatible with, 2) BitTorrent includes a lot of functionality for
making sure large swarms of selfish agents behave and somehow work
towards a common goal. Here we have a much smaller swarm of cooperative
agents and a simpler approach will suffice.

Documentation
=============

The syncthing documentation is kept on the
[GitHub Wiki](https://github.com/calmh/syncthing/wiki).

License
=======

MIT
(Changed image assets; binary previews omitted.)

1263 assets/logo.ai
BIN assets/logo.ico
BIN assets/logo.pdf
BIN assets/st-logo.pxm Normal file
6 auto/gui.files.go Normal file

74 blocks.go Normal file
@@ -0,0 +1,74 @@
package main

import (
	"bytes"
	"crypto/sha256"
	"io"
)

type Block struct {
	Offset int64
	Size   uint32
	Hash   []byte
}

// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int) ([]Block, error) {
	var blocks []Block
	var offset int64
	for {
		lr := &io.LimitedReader{R: r, N: int64(blocksize)}
		hf := sha256.New()
		n, err := io.Copy(hf, lr)
		if err != nil {
			return nil, err
		}

		if n == 0 {
			break
		}

		b := Block{
			Offset: offset,
			Size:   uint32(n),
			Hash:   hf.Sum(nil),
		}
		blocks = append(blocks, b)
		offset += int64(n)
	}

	if len(blocks) == 0 {
		// Empty file: represented as a single zero-length block whose
		// hash is the SHA-256 of empty input (e3b0c442...).
		blocks = append(blocks, Block{
			Offset: 0,
			Size:   0,
			Hash:   []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
		})
	}

	return blocks, nil
}

// BlockDiff returns lists of common and missing (to transform src into tgt)
// blocks. Both block lists must have been created with the same block size.
func BlockDiff(src, tgt []Block) (have, need []Block) {
	if len(tgt) == 0 && len(src) != 0 {
		return nil, nil
	}

	if len(tgt) != 0 && len(src) == 0 {
		// Copy the entire file
		return nil, tgt
	}

	for i := range tgt {
		if i >= len(src) || !bytes.Equal(tgt[i].Hash, src[i].Hash) {
			// Copy differing block
			need = append(need, tgt[i])
		} else {
			have = append(have, tgt[i])
		}
	}

	return have, need
}
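Together, `Blocks` and `BlockDiff` implement the core rsync-like idea: hash
fixed-size blocks of both versions, then transfer only the blocks whose hashes
differ. The following testable example (a sketch, not part of the commit;
it would live in a `_test.go` file in the same package) shows how the two
compose:

```go
package main

import (
	"fmt"
	"strings"
)

// ExampleBlockDiff hashes two 8-byte strings in 3-byte blocks and asks
// which target blocks are missing from the source. Only the first block
// ("con" vs "can") differs, so a single 3-byte block needs transferring.
func ExampleBlockDiff() {
	src, _ := Blocks(strings.NewReader("contents"), 3)
	tgt, _ := Blocks(strings.NewReader("cantents"), 3)

	have, need := BlockDiff(src, tgt)
	fmt.Println(len(have), "blocks reusable,", len(need), "to fetch")
	for _, b := range need {
		fmt.Printf("need block at offset %d, size %d\n", b.Offset, b.Size)
	}
	// Output:
	// 2 blocks reusable, 1 to fetch
	// need block at offset 0, size 3
}
```

Note the degenerate cases: since `Blocks` always returns at least one block
(an empty input yields a single zero-length block), the `len(tgt) == 0`
branch in `BlockDiff` only triggers for block lists not produced by `Blocks`.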
116 blocks_test.go Normal file
@@ -0,0 +1,116 @@
package main

import (
	"bytes"
	"fmt"
	"testing"
)

var blocksTestData = []struct {
	data      []byte
	blocksize int
	hash      []string
}{
	{[]byte(""), 1024, []string{
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
	{[]byte("contents"), 1024, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 9, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 8, []string{
		"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
	{[]byte("contents"), 7, []string{
		"ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
		"043a718774c572bd8a25adbeb1bfcd5c0256ae11cecf9f9c3f925d0e52beaf89"},
	},
	{[]byte("contents"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
	},
	{[]byte("conconts"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
	},
	{[]byte("contenten"), 3, []string{
		"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
		"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3"},
	},
}

func TestBlocks(t *testing.T) {
	for _, test := range blocksTestData {
		buf := bytes.NewBuffer(test.data)
		blocks, err := Blocks(buf, test.blocksize)

		if err != nil {
			t.Fatal(err)
		}

		if l := len(blocks); l != len(test.hash) {
			t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
		} else {
			i := 0
			for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
				if blocks[i].Offset != off {
					t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
				}

				bs := test.blocksize
				if rem := len(test.data) - int(off); bs > rem {
					bs = rem
				}
				if int(blocks[i].Size) != bs {
					t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
				}
				if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
					t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
				}

				i++
			}
		}
	}
}

var diffTestData = []struct {
	a string
	b string
	s int
	d []Block
}{
	{"contents", "contents", 1024, []Block{}},
	{"", "", 1024, []Block{}},
	{"contents", "contents", 3, []Block{}},
	{"contents", "cantents", 3, []Block{{0, 3, nil}}},
	{"contents", "contants", 3, []Block{{3, 3, nil}}},
	{"contents", "cantants", 3, []Block{{0, 3, nil}, {3, 3, nil}}},
	{"contents", "", 3, []Block{{0, 0, nil}}},
	{"", "contents", 3, []Block{{0, 3, nil}, {3, 3, nil}, {6, 2, nil}}},
	{"con", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []Block{{3, 1, nil}}},
	{"cont", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
}

func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := Blocks(bytes.NewBufferString(test.a), test.s)
		b, _ := Blocks(bytes.NewBufferString(test.b), test.s)
		_, d := BlockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		} else {
			for j := range test.d {
				if d[j].Offset != test.d[j].Offset {
					t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
				}
				if d[j].Size != test.d[j].Size {
					t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
				}
			}
		}
	}
}
45 buffers/buffers.go Normal file
@@ -0,0 +1,45 @@
package buffers

const (
	largeMin = 1024
)

var (
	smallBuffers = make(chan []byte, 32)
	largeBuffers = make(chan []byte, 32)
)

// Get returns a byte slice of at least the requested size, reusing a
// pooled buffer when one large enough is available.
func Get(size int) []byte {
	var ch = largeBuffers
	if size < largeMin {
		ch = smallBuffers
	}

	var buf []byte
	select {
	case buf = <-ch:
	default:
	}

	if len(buf) < size {
		return make([]byte, size)
	}
	return buf[:size]
}

// Put returns a buffer to the appropriate pool. If the pool is full the
// buffer is dropped and left for the garbage collector.
func Put(buf []byte) {
	buf = buf[:cap(buf)]
	if len(buf) == 0 {
		return
	}

	var ch = largeBuffers
	if len(buf) < largeMin {
		ch = smallBuffers
	}

	select {
	case ch <- buf:
	default:
	}
}
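As an aside, here is a minimal sketch (not part of the commit) of how this
pool is meant to be used; the import path is an assumption based on the
repository location of the era:

```go
package main

import (
	"fmt"

	// Import path assumed from the repository location of the era.
	"github.com/calmh/syncthing/buffers"
)

func main() {
	buf := buffers.Get(2048) // at least 2048 bytes, drawn from the large pool
	copy(buf, "payload")
	// ... use buf for I/O ...
	buffers.Put(buf) // hand it back; dropped silently if the pool is full

	next := buffers.Get(1500) // typically reuses the same backing array, resliced
	fmt.Println(len(next))    // 1500
}
```

Both `Get` and `Put` are non-blocking thanks to the `select` with a `default`
case: a full pool drops the buffer, an empty pool falls back to `make`. Modern
Go code would typically reach for `sync.Pool` here, but this channel-based
version predates it.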
20 build.ps1
@@ -1,20 +0,0 @@
function build {
    go run build.go @args
}

$cmd, $rest = $args
switch ($cmd) {
    "test" {
        $env:LOGGER_DISCARD=1
        build test
    }

    "bench" {
        $env:LOGGER_DISCARD=1
        build bench
    }

    default {
        build @rest
    }
}
76 build.sh
@@ -1,35 +1,51 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'

script() {
    name="$1"
    shift
    go run "script/$name.go" "$@"
}

build() {
    go run build.go "$@"
}

case "${1:-default}" in
    test)
        LOGGER_DISCARD=1 build test
        ;;

    bench)
        LOGGER_DISCARD=1 build bench
        ;;

    prerelease)
        script authors
        build weblate
        pushd man ; ./refresh.sh ; popd
        git add -A gui man AUTHORS
        git commit -m 'gui, man, authors: Update docs, translations, and contributors'
        ;;

    *)
        build "$@"
        ;;
esac

#!/bin/bash

export COPYFILE_DISABLE=true

version=$(git describe --always)
buildDir=dist

if [[ $fast != yes ]] ; then
    go get -d
    go test ./...
fi

if [[ -z $1 ]] ; then
    go build -ldflags "-X main.Version $version"
elif [[ $1 == "embed" ]] ; then
    embedder auto gui > auto/gui.files.go \
        && go build -ldflags "-X main.Version $version"
elif [[ $1 == "tar" ]] ; then
    go build -ldflags "-X main.Version $version" \
        && mkdir syncthing-dist \
        && cp syncthing README.md LICENSE syncthing-dist \
        && tar zcvf syncthing-dist.tar.gz syncthing-dist \
        && rm -rf syncthing-dist
elif [[ $1 == "all" ]] ; then
    rm -rf "$buildDir"
    mkdir -p "$buildDir" || exit 1

    export GOARM=7
    for os in darwin-amd64 linux-386 linux-amd64 linux-arm freebsd-386 freebsd-amd64 windows-386 windows-amd64 ; do
        echo "$os"
        export name="syncthing-$os"
        export GOOS=${os%-*}
        export GOARCH=${os#*-}
        go build -ldflags "-X main.Version $version"
        mkdir -p "$name"
        cp README.md LICENSE "$name"
        case $GOOS in
            windows)
                cp syncthing.exe "$buildDir/$name.exe"
                mv syncthing.exe "$name"
                zip -qr "$buildDir/$name.zip" "$name"
                ;;
            *)
                cp syncthing "$buildDir/$name"
                mv syncthing "$name"
                tar zcf "$buildDir/$name.tar.gz" "$name"
                ;;
        esac
        rm -r "$name"
    done
fi
1 cmd/.gitignore vendored
@@ -1 +0,0 @@
!syncthing
@@ -1,163 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
log.Println(compareDirectories(flag.Args()...))
|
||||
}
|
||||
|
||||
// Compare a number of directories. Returns nil if the contents are identical,
|
||||
// otherwise an error describing the first found difference.
|
||||
func compareDirectories(dirs ...string) error {
|
||||
chans := make([]chan fileInfo, len(dirs))
|
||||
for i := range chans {
|
||||
chans[i] = make(chan fileInfo)
|
||||
}
|
||||
errcs := make([]chan error, len(dirs))
|
||||
abort := make(chan struct{})
|
||||
|
||||
for i := range dirs {
|
||||
errcs[i] = startWalker(dirs[i], chans[i], abort)
|
||||
}
|
||||
|
||||
res := make([]fileInfo, len(dirs))
|
||||
for {
|
||||
numDone := 0
|
||||
for i := range chans {
|
||||
fi, ok := <-chans[i]
|
||||
if !ok {
|
||||
err, hasError := <-errcs[i]
|
||||
if hasError {
|
||||
close(abort)
|
||||
return err
|
||||
}
|
||||
numDone++
|
||||
}
|
||||
res[i] = fi
|
||||
}
|
||||
|
||||
for i := 1; i < len(res); i++ {
|
||||
if res[i] != res[0] {
|
||||
close(abort)
|
||||
if res[i].name < res[0].name {
|
||||
return fmt.Errorf("%s missing %v (present in %s)", dirs[0], res[i], dirs[i])
|
||||
} else if res[i].name > res[0].name {
|
||||
return fmt.Errorf("%s missing %v (present in %s)", dirs[i], res[0], dirs[0])
|
||||
}
|
||||
return fmt.Errorf("mismatch; %v (%s) != %v (%s)", res[i], dirs[i], res[0], dirs[0])
|
||||
}
|
||||
}
|
||||
|
||||
if numDone == len(dirs) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileInfo struct {
|
||||
name string
|
||||
mode os.FileMode
|
||||
mod int64
|
||||
hash [sha256.Size]byte
|
||||
}
|
||||
|
||||
func (f fileInfo) String() string {
|
||||
return fmt.Sprintf("%s %04o %d %x", f.name, f.mode, f.mod, f.hash)
|
||||
}
|
||||
|
||||
func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan error {
|
||||
walker := func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rn, _ := filepath.Rel(dir, path)
|
||||
if rn == "." {
|
||||
return nil
|
||||
}
|
||||
if rn == ".stversions" || rn == ".stfolder" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
var f fileInfo
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: os.ModeSymlink,
|
||||
}
|
||||
|
||||
tgt, err := os.Readlink(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.hash = sha256.Sum256([]byte(tgt))
|
||||
} else if info.IsDir() {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: info.Mode(),
|
||||
// hash and modtime zero for directories
|
||||
}
|
||||
} else {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: info.Mode(),
|
||||
mod: info.ModTime().Unix(),
|
||||
}
|
||||
sum, err := sha256file(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.hash = sum
|
||||
}
|
||||
|
||||
select {
|
||||
case res <- f:
|
||||
return nil
|
||||
case <-abort:
|
||||
return errors.New("abort")
|
||||
}
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
go func() {
|
||||
err := filepath.Walk(dir, walker)
|
||||
close(res)
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
close(errc)
|
||||
}()
|
||||
return errc
|
||||
}
|
||||
|
||||
func sha256file(fname string) (hash [sha256.Size]byte, err error) {
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
h := sha256.New()
|
||||
io.Copy(h, f)
|
||||
	hb := h.Sum(nil)
	copy(hash[:], hb)

	return
}
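
A minimal usage sketch (not part of the original file; the directory paths are hypothetical): compareDirectories can be called directly from other tooling and returns the first difference found.

	// Hypothetical caller; "a" and "b" are placeholder directory paths.
	if err := compareDirectories("a", "b"); err != nil {
		log.Fatalln("directories differ:", err)
	}
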
@@ -1,187 +0,0 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"compress/gzip"
	"context"
	"io"
	"log"
	"os"
	"path/filepath"
	"sort"
	"time"
)

type diskStore struct {
	dir      string
	inbox    chan diskEntry
	maxBytes int64
	maxFiles int

	currentFiles []currentFile
	currentSize  int64
}

type diskEntry struct {
	path string
	data []byte
}

type currentFile struct {
	path  string
	size  int64
	mtime int64
}

func (d *diskStore) Serve(ctx context.Context) {
	if err := os.MkdirAll(d.dir, 0750); err != nil {
		log.Println("Creating directory:", err)
		return
	}

	if err := d.inventory(); err != nil {
		log.Println("Failed to inventory disk store:", err)
	}
	d.clean()

	cleanTimer := time.NewTicker(time.Minute)
	inventoryTimer := time.NewTicker(24 * time.Hour)

	buf := new(bytes.Buffer)
	gw := gzip.NewWriter(buf)
	for {
		select {
		case entry := <-d.inbox:
			path := d.fullPath(entry.path)

			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				log.Println("Creating directory:", err)
				continue
			}

			buf.Reset()
			gw.Reset(buf)
			if _, err := gw.Write(entry.data); err != nil {
				log.Println("Failed to compress crash report:", err)
				continue
			}
			if err := gw.Close(); err != nil {
				log.Println("Failed to compress crash report:", err)
				continue
			}
			if err := os.WriteFile(path, buf.Bytes(), 0644); err != nil {
				log.Printf("Failed to write %s: %v", entry.path, err)
				_ = os.Remove(path)
				continue
			}

			// Account the size actually written to disk (the compressed
			// length), so clean() later subtracts the same amount it added;
			// record mtime so the age reported by clean() is meaningful.
			d.currentSize += int64(buf.Len())
			d.currentFiles = append(d.currentFiles, currentFile{
				size:  int64(buf.Len()),
				path:  path,
				mtime: time.Now().Unix(),
			})

		case <-cleanTimer.C:
			d.clean()

		case <-inventoryTimer.C:
			if err := d.inventory(); err != nil {
				log.Println("Failed to inventory disk store:", err)
			}

		case <-ctx.Done():
			return
		}
	}
}

func (d *diskStore) Put(path string, data []byte) bool {
	select {
	case d.inbox <- diskEntry{
		path: path,
		data: data,
	}:
		return true
	default:
		return false
	}
}

func (d *diskStore) Get(path string) ([]byte, error) {
	path = d.fullPath(path)
	bs, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	gr, err := gzip.NewReader(bytes.NewReader(bs))
	if err != nil {
		return nil, err
	}
	defer gr.Close()
	return io.ReadAll(gr)
}

func (d *diskStore) Exists(path string) bool {
	path = d.fullPath(path)
	_, err := os.Lstat(path)
	return err == nil
}

func (d *diskStore) clean() {
	for len(d.currentFiles) > 0 && (len(d.currentFiles) > d.maxFiles || d.currentSize > d.maxBytes) {
		f := d.currentFiles[0]
		log.Println("Removing", f.path)
		if err := os.Remove(f.path); err != nil {
			log.Println("Failed to remove file:", err)
		}
		d.currentFiles = d.currentFiles[1:]
		d.currentSize -= f.size
	}
	var oldest time.Duration
	if len(d.currentFiles) > 0 {
		oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
	}
	log.Printf("Clean complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
}

func (d *diskStore) inventory() error {
	d.currentFiles = nil
	d.currentSize = 0
	err := filepath.Walk(d.dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		if filepath.Ext(path) != ".gz" {
			return nil
		}
		d.currentSize += info.Size()
		d.currentFiles = append(d.currentFiles, currentFile{
			path:  path,
			size:  info.Size(),
			mtime: info.ModTime().Unix(),
		})
		return nil
	})
	sort.Slice(d.currentFiles, func(i, j int) bool {
		return d.currentFiles[i].mtime < d.currentFiles[j].mtime
	})
	var oldest time.Duration
	if len(d.currentFiles) > 0 {
		oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
	}
	log.Printf("Inventory complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
	return err
}

func (d *diskStore) fullPath(path string) string {
	return filepath.Join(d.dir, path[0:2], path[2:]) + ".gz"
}
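
For illustration (not part of the original file), the two-level sharding performed by fullPath keeps individual directories small; assuming dir is "crash_reports":

	ds := &diskStore{dir: "crash_reports"}
	fmt.Println(ds.fullPath("abcdef"))
	// Output: crash_reports/ab/cdef.gz
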
@@ -1,154 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// Command stcrashreceiver is a trivial HTTP server that allows two things:
//
// - uploading files (crash reports) named like a SHA256 hash using a PUT request
// - checking whether such a file exists using a HEAD request
//
// Typically this should be deployed behind something that manages HTTPS.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"github.com/alecthomas/kong"
	"github.com/syncthing/syncthing/lib/sha256"
	"github.com/syncthing/syncthing/lib/ur"

	raven "github.com/getsentry/raven-go"
)

const maxRequestSize = 1 << 20 // 1 MiB

type cli struct {
	Dir           string        `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
	DSN           string        `help:"Sentry DSN" env:"SENTRY_DSN"`
	Listen        string        `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
	MaxDiskFiles  int           `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
	MaxDiskSizeMB int64         `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
	CleanInterval time.Duration `help:"Interval between cleaning up old reports" default:"12h" env:"CLEAN_INTERVAL"`
	SentryQueue   int           `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
	DiskQueue     int           `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
}

func main() {
	var params cli
	kong.Parse(&params)

	mux := http.NewServeMux()

	ds := &diskStore{
		dir:      filepath.Join(params.Dir, "crash_reports"),
		inbox:    make(chan diskEntry, params.DiskQueue),
		maxFiles: params.MaxDiskFiles,
		maxBytes: params.MaxDiskSizeMB << 20,
	}
	go ds.Serve(context.Background())

	ss := &sentryService{
		dsn:   params.DSN,
		inbox: make(chan sentryRequest, params.SentryQueue),
	}
	go ss.Serve(context.Background())

	cr := &crashReceiver{
		store:  ds,
		sentry: ss,
	}

	mux.Handle("/", cr)
	mux.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("OK"))
	})

	if params.DSN != "" {
		mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports")))
	}

	log.SetOutput(os.Stdout)
	if err := http.ListenAndServe(params.Listen, mux); err != nil {
		log.Fatalln("HTTP serve:", err)
	}
}

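A hedged sketch of the client side of the protocol described in the package comment (not from the original sources): probe with HEAD, upload with PUT under the report's hex SHA256 name. The base URL and the use of the standard library crypto/sha256 are assumptions for illustration.

	package main

	import (
		"bytes"
		"crypto/sha256" // assumption: stdlib hashing rather than lib/sha256
		"fmt"
		"net/http"
	)

	// uploadCrashReport PUTs a report under its hex SHA256 name, skipping the
	// upload when a HEAD probe says the receiver already has it.
	func uploadCrashReport(base string, report []byte) error {
		url := fmt.Sprintf("%s/%x", base, sha256.Sum256(report)) // e.g. base = "http://localhost:8080"
		if resp, err := http.Head(url); err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // already stored
			}
		}
		req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(report))
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		return resp.Body.Close()
	}
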
func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) {
	return func(w http.ResponseWriter, req *http.Request) {
		lr := io.LimitReader(req.Body, maxRequestSize)
		bs, err := io.ReadAll(lr)
		req.Body.Close()
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}

		var reports []ur.FailureReport
		err = json.Unmarshal(bs, &reports)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		if len(reports) == 0 {
			// Shouldn't happen
			log.Printf("Got zero failure reports")
			return
		}

		version, err := parseVersion(reports[0].Version)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		for _, r := range reports {
			pkt := packet(version, "failure")
			pkt.Message = r.Description
			pkt.Extra = raven.Extra{
				"count": r.Count,
			}
			for k, v := range r.Extra {
				pkt.Extra[k] = v
			}
			if r.Goroutines != "" {
				url, err := saveFailureWithGoroutines(r.FailureData, failureDir)
				if err != nil {
					log.Println("Saving failure report:", err)
					http.Error(w, "Internal server error", http.StatusInternalServerError)
					return
				}
				pkt.Extra["goroutinesURL"] = url
			}
			message := sanitizeMessageLDB(r.Description)
			pkt.Fingerprint = []string{message}

			if err := sendReport(dsn, pkt, userIDFor(req)); err != nil {
				log.Println("Failed to send failure report:", err)
			} else {
				log.Println("Sent failure report:", r.Description)
			}
		}
	}
}

func saveFailureWithGoroutines(data ur.FailureData, failureDir string) (string, error) {
	bs := make([]byte, len(data.Description)+len(data.Goroutines))
	copy(bs, data.Description)
	copy(bs[len(data.Description):], data.Goroutines)
	id := fmt.Sprintf("%x", sha256.Sum256(bs))
	path := fullPathCompressed(failureDir, id)
	err := compressAndWrite(bs, path)
	if err != nil {
		return "", err
	}
	return reportServer + path, nil
}
@@ -1,321 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"context"
	"errors"
	"io"
	"log"
	"regexp"
	"strings"
	"sync"

	raven "github.com/getsentry/raven-go"
	"github.com/maruel/panicparse/v2/stack"
)

const reportServer = "https://crash.syncthing.net/report/"

var loader = newGithubSourceCodeLoader()

func init() {
	raven.SetSourceCodeLoader(loader)
}

var (
	clients    = make(map[string]*raven.Client)
	clientsMut sync.Mutex
)

type sentryService struct {
	dsn   string
	inbox chan sentryRequest
}

type sentryRequest struct {
	reportID string
	userID   string
	data     []byte
}

func (s *sentryService) Serve(ctx context.Context) {
	for {
		select {
		case req := <-s.inbox:
			pkt, err := parseCrashReport(req.reportID, req.data)
			if err != nil {
				log.Println("Failed to parse crash report:", err)
				continue
			}
			if err := sendReport(s.dsn, pkt, req.userID); err != nil {
				log.Println("Failed to send crash report:", err)
			}

		case <-ctx.Done():
			return
		}
	}
}

func (s *sentryService) Send(reportID, userID string, data []byte) bool {
	select {
	case s.inbox <- sentryRequest{reportID, userID, data}:
		return true
	default:
		return false
	}
}

func sendReport(dsn string, pkt *raven.Packet, userID string) error {
	pkt.Interfaces = append(pkt.Interfaces, &raven.User{ID: userID})

	clientsMut.Lock()
	defer clientsMut.Unlock()

	cli, ok := clients[dsn]
	if !ok {
		var err error
		cli, err = raven.New(dsn)
		if err != nil {
			return err
		}
		clients[dsn] = cli
	}

	// The client sets release and such on the packet before sending, in the
	// misguided idea that it knows this better than the packet we give
	// it. So we copy the values from the packet to the client first...
	cli.SetRelease(pkt.Release)
	cli.SetEnvironment(pkt.Environment)

	defer cli.Wait()
	_, errC := cli.Capture(pkt, nil)
	return <-errC
}

func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
	parts := bytes.SplitN(report, []byte("\n"), 2)
	if len(parts) != 2 {
		return nil, errors.New("no first line")
	}

	version, err := parseVersion(string(parts[0]))
	if err != nil {
		return nil, err
	}
	report = parts[1]

	foundPanic := false
	var subjectLine []byte
	for {
		parts = bytes.SplitN(report, []byte("\n"), 2)
		if len(parts) != 2 {
			return nil, errors.New("no panic line found")
		}

		line := parts[0]
		report = parts[1]

		if foundPanic {
			// The previous line was our "Panic at ..." header. We are now
			// at the beginning of the real panic trace and this is our
			// subject line.
			subjectLine = line
			break
		} else if bytes.HasPrefix(line, []byte("Panic at")) {
			foundPanic = true
		}
	}

	r := bytes.NewReader(report)
	ctx, _, err := stack.ScanSnapshot(r, io.Discard, stack.DefaultOpts())
	if err != nil && err != io.EOF {
		return nil, err
	}
	if ctx == nil || len(ctx.Goroutines) == 0 {
		return nil, errors.New("no goroutines found")
	}

	// Lock the source code loader to the version we are processing here.
	if version.commit != "" {
		// We have a commit hash, so we know exactly which source to use
		loader.LockWithVersion(version.commit)
	} else if strings.HasPrefix(version.tag, "v") {
		// Let's hope the tag is close enough
		loader.LockWithVersion(version.tag)
	} else {
		// Last resort
		loader.LockWithVersion("main")
	}
	defer loader.Unlock()

	var trace raven.Stacktrace
	for _, gr := range ctx.Goroutines {
		if gr.First {
			trace.Frames = make([]*raven.StacktraceFrame, len(gr.Stack.Calls))
			for i, sc := range gr.Stack.Calls {
				trace.Frames[len(trace.Frames)-1-i] = raven.NewStacktraceFrame(0, sc.Func.Name, sc.RemoteSrcPath, sc.Line, 3, nil)
			}
			break
		}
	}

	pkt := packet(version, "crash")
	pkt.Message = string(subjectLine)
	pkt.Extra = raven.Extra{
		"url": reportServer + path,
	}
	pkt.Interfaces = []raven.Interface{&trace}
	pkt.Fingerprint = crashReportFingerprint(pkt.Message)

	return pkt, nil
}

var (
	indexRe          = regexp.MustCompile(`\[[-:0-9]+\]`)
	sizeRe           = regexp.MustCompile(`(length|capacity) [0-9]+`)
	ldbPosRe         = regexp.MustCompile(`(\(pos=)([0-9]+)\)`)
	ldbChecksumRe    = regexp.MustCompile(`(want=0x)([a-z0-9]+)( got=0x)([a-z0-9]+)`)
	ldbFileRe        = regexp.MustCompile(`(\[file=)([0-9]+)(\.ldb\])`)
	ldbInternalKeyRe = regexp.MustCompile(`(internal key ")[^"]+(", len=)[0-9]+`)
	ldbPathRe        = regexp.MustCompile(`(open|write|read) .+[\\/].+[\\/]index[^\\/]+[\\/][^\\/]+: `)
)

func sanitizeMessageLDB(message string) string {
	message = ldbPosRe.ReplaceAllString(message, "${1}x)")
	message = ldbFileRe.ReplaceAllString(message, "${1}x${3}")
	message = ldbChecksumRe.ReplaceAllString(message, "${1}X${3}X")
	message = ldbInternalKeyRe.ReplaceAllString(message, "${1}x${2}x")
	message = ldbPathRe.ReplaceAllString(message, "$1 x: ")
	return message
}

func crashReportFingerprint(message string) []string {
	// Do not fingerprint on the stack in case of db corruption or fatal
	// db io error - where it occurs doesn't matter.
	orig := message
	message = sanitizeMessageLDB(message)
	if message != orig {
		return []string{message}
	}

	message = indexRe.ReplaceAllString(message, "[x]")
	message = sizeRe.ReplaceAllString(message, "$1 x")

	// {{ default }} is what sentry uses as a fingerprint by default. While
	// never specified, the docs point at this being some hash derived from the
	// stack trace. Here we include the filtered panic message on top of that.
	// https://docs.sentry.io/platforms/go/data-management/event-grouping/sdk-fingerprinting/#basic-example
	return []string{"{{ default }}", message}
}
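
To make the grouping behaviour concrete, two examples (these mirror cases in the accompanying tests):

	crashReportFingerprint("panic: runtime error: index out of range [0] with length 0")
	// -> []string{"{{ default }}", "panic: runtime error: index out of range [x] with length x"}

	crashReportFingerprint("panic: leveldb/table: corruption on table-footer (pos=248): bad magic number [file=001370.ldb]")
	// -> []string{"panic: leveldb/table: corruption on table-footer (pos=x): bad magic number [file=x.ldb]"}
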

// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
// or, somewhere along the way the "+" in the version tag disappeared:
// syncthing v1.23.7-dev.26.gdf7b56ae.dirty-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]
var (
	longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
	gitExtraRE    = regexp.MustCompile(`\.\d+\.g[0-9a-f]+`) // ".1.g6aaae618"
	gitExtraSepRE = regexp.MustCompile(`[.-]`)              // dot or dash
)

type version struct {
	version  string   // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
	tag      string   // "v1.1.4-rc.1"
	commit   string   // "6aaae618", blank when absent
	codename string   // "Erbium Earthworm"
	runtime  string   // "go1.12.5"
	goos     string   // "darwin"
	goarch   string   // "amd64"
	builder  string   // "jb@kvin.kastelo.net"
	extra    []string // "foo", "bar"
}

func (v version) environment() string {
	if v.commit != "" {
		return "Development"
	}
	if strings.Contains(v.tag, "-rc.") {
		return "Candidate"
	}
	if strings.Contains(v.tag, "-") {
		return "Beta"
	}
	return "Stable"
}

func parseVersion(line string) (version, error) {
	m := longVersionRE.FindStringSubmatch(line)
	if len(m) == 0 {
		return version{}, errors.New("unintelligible version string")
	}

	v := version{
		version:  m[1],
		codename: m[2],
		runtime:  m[3],
		goos:     m[4],
		goarch:   m[5],
		builder:  m[6],
	}

	// Split the version tag into tag and commit. This is old style
	// v1.2.3-something.4+11-g12345678 or newer with just dots
	// v1.2.3-something.4.11.g12345678 or v1.2.3-dev.11.g12345678.
	parts := []string{v.version}
	if strings.Contains(v.version, "+") {
		parts = strings.Split(v.version, "+")
	} else {
		idxs := gitExtraRE.FindStringIndex(v.version)
		if len(idxs) > 0 {
			parts = []string{v.version[:idxs[0]], v.version[idxs[0]+1:]}
		}
	}
	v.tag = parts[0]
	if len(parts) > 1 {
		fields := gitExtraSepRE.Split(parts[1], -1)
		if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
			v.commit = fields[1][1:]
		}
	}

	if len(m) >= 8 && m[7] != "" {
		tags := strings.Split(m[7], ",")
		for i := range tags {
			tags[i] = strings.TrimSpace(tags[i])
		}
		v.extra = tags
	}

	return v, nil
}

func packet(version version, reportType string) *raven.Packet {
	pkt := &raven.Packet{
		Platform:    "go",
		Release:     version.tag,
		Environment: version.environment(),
		Tags: raven.Tags{
			raven.Tag{Key: "version", Value: version.version},
			raven.Tag{Key: "tag", Value: version.tag},
			raven.Tag{Key: "codename", Value: version.codename},
			raven.Tag{Key: "runtime", Value: version.runtime},
			raven.Tag{Key: "goos", Value: version.goos},
			raven.Tag{Key: "goarch", Value: version.goarch},
			raven.Tag{Key: "builder", Value: version.builder},
			raven.Tag{Key: "report_type", Value: reportType},
		},
	}
	if version.commit != "" {
		pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
	}
	for _, tag := range version.extra {
		pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
	}
	return pkt
}
@@ -1,155 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"fmt"
	"os"
	"testing"
)

func TestParseVersion(t *testing.T) {
	cases := []struct {
		longVersion string
		parsed      version
	}{
		{
			longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC`,
			parsed: version{
				version:  "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
				tag:      "v1.1.4-rc.1",
				commit:   "6aaae618",
				codename: "Erbium Earthworm",
				runtime:  "go1.12.5",
				goos:     "darwin",
				goarch:   "amd64",
				builder:  "jb@kvin.kastelo.net",
			},
		},
		{
			longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
			parsed: version{
				version:  "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
				tag:      "v1.1.4-rc.1",
				commit:   "6aaae618",
				codename: "Erbium Earthworm",
				runtime:  "go1.12.5",
				goos:     "darwin",
				goarch:   "amd64",
				builder:  "jb@kvin.kastelo.net",
				extra:    []string{"foo", "bar"},
			},
		},
		{
			longVersion: `syncthing v1.23.7-dev.26.gdf7b56ae-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]`,
			parsed: version{
				version:  "v1.23.7-dev.26.gdf7b56ae-stversionextra",
				tag:      "v1.23.7-dev",
				commit:   "df7b56ae",
				codename: "Fermium Flea",
				runtime:  "go1.20.5",
				goos:     "darwin",
				goarch:   "arm64",
				builder:  "jb@ok.kastelo.net",
				extra:    []string{"Some Wrapper", "purego", "stnoupgrade"},
			},
		},
	}

	for _, tc := range cases {
		v, err := parseVersion(tc.longVersion)
		if err != nil {
			t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
			continue
		}
		if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
			t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
		}
	}
}

func TestParseReport(t *testing.T) {
	bs, err := os.ReadFile("_testdata/panic.log")
	if err != nil {
		t.Fatal(err)
	}

	pkt, err := parseCrashReport("1/2/345", bs)
	if err != nil {
		t.Fatal(err)
	}

	bs, err = pkt.JSON()
	if err != nil {
		t.Fatal(err)
	}

	fmt.Printf("%s\n", bs)
}

func TestCrashReportFingerprint(t *testing.T) {
	cases := []struct {
		message, exp string
		ldb          bool
	}{
		{
			message: "panic: leveldb/table: corruption on data-block (pos=51308946): checksum mismatch, want=0xa89f9aa0 got=0xd27cc4c7 [file=004003.ldb]",
			exp:     "panic: leveldb/table: corruption on data-block (pos=x): checksum mismatch, want=0xX got=0xX [file=x.ldb]",
			ldb:     true,
		},
		{
			message: "panic: leveldb/table: corruption on table-footer (pos=248): bad magic number [file=001370.ldb]",
			exp:     "panic: leveldb/table: corruption on table-footer (pos=x): bad magic number [file=x.ldb]",
			ldb:     true,
		},
		{
			message: "panic: runtime error: slice bounds out of range [4294967283:4194304]",
			exp:     "panic: runtime error: slice bounds out of range [x]",
		},
		{
			message: "panic: runtime error: slice bounds out of range [-2:]",
			exp:     "panic: runtime error: slice bounds out of range [x]",
		},
		{
			message: "panic: runtime error: slice bounds out of range [:4294967283] with capacity 32768",
			exp:     "panic: runtime error: slice bounds out of range [x] with capacity x",
		},
		{
			message: "panic: runtime error: index out of range [0] with length 0",
			exp:     "panic: runtime error: index out of range [x] with length x",
		},
		{
			message: `panic: leveldb: internal key "\x01", len=1: invalid length`,
			exp:     `panic: leveldb: internal key "x", len=x: invalid length`,
			ldb:     true,
		},
		{
			message: `panic: write /var/syncthing/config/index-v0.14.0.db/2732813.log: cannot allocate memory`,
			exp:     `panic: write x: cannot allocate memory`,
			ldb:     true,
		},
		{
			message: `panic: filling Blocks: read C:\Users\Serv-Resp-Tizayuca\AppData\Local\Syncthing\index-v0.14.0.db\006561.ldb: Error de datos (comprobación de redundancia cíclica).`,
			exp:     `panic: filling Blocks: read x: Error de datos (comprobación de redundancia cíclica).`,
			ldb:     true,
		},
	}

	for i, tc := range cases {
		fingerprint := crashReportFingerprint(tc.message)

		expLen := 2
		if tc.ldb {
			expLen = 1
		}
		if l := len(fingerprint); l != expLen {
			t.Errorf("tc %v: Unexpected fingerprint length: %v != %v", i, l, expLen)
		} else if msg := fingerprint[expLen-1]; msg != tc.exp {
			t.Errorf("tc %v:\n\"%v\" !=\n\"%v\"", i, msg, tc.exp)
		}
	}
}
@@ -1,118 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"path/filepath"
	"strings"
	"sync"
	"time"
)

const (
	urlPrefix   = "https://raw.githubusercontent.com/syncthing/syncthing/"
	httpTimeout = 10 * time.Second
)

type githubSourceCodeLoader struct {
	mut     sync.Mutex
	version string
	cache   map[string]map[string][][]byte // version -> file -> lines
	client  *http.Client
}

func newGithubSourceCodeLoader() *githubSourceCodeLoader {
	return &githubSourceCodeLoader{
		cache:  make(map[string]map[string][][]byte),
		client: &http.Client{Timeout: httpTimeout},
	}
}

func (l *githubSourceCodeLoader) LockWithVersion(version string) {
	l.mut.Lock()
	l.version = version
	if _, ok := l.cache[version]; !ok {
		l.cache[version] = make(map[string][][]byte)
	}
}

func (l *githubSourceCodeLoader) Unlock() {
	l.mut.Unlock()
}

func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]byte, int) {
	filename = filepath.ToSlash(filename)
	lines, ok := l.cache[l.version][filename]
	if !ok {
		// Cache whatever we managed to find (or nil if nothing, so we don't try again)
		defer func() {
			l.cache[l.version][filename] = lines
		}()

		knownPrefixes := []string{"/lib/", "/cmd/"}
		var idx int
		for _, pref := range knownPrefixes {
			idx = strings.Index(filename, pref)
			if idx >= 0 {
				break
			}
		}
		if idx == -1 {
			return nil, 0
		}

		url := urlPrefix + l.version + filename[idx:]
		resp, err := l.client.Get(url)
		if err != nil {
			fmt.Println("Loading source:", err)
			return nil, 0
		}
		if resp.StatusCode != http.StatusOK {
			fmt.Println("Loading source:", resp.Status)
			return nil, 0
		}
		data, err := io.ReadAll(resp.Body)
		_ = resp.Body.Close()
		if err != nil {
			fmt.Println("Loading source:", err.Error())
			return nil, 0
		}
		lines = bytes.Split(data, []byte{'\n'})
	}

	return getLineFromLines(lines, line, context)
}

func getLineFromLines(lines [][]byte, line, context int) ([][]byte, int) {
	if lines == nil {
		// cached error from ReadFile: return no lines
		return nil, 0
	}

	line-- // stack trace lines are 1-indexed
	start := line - context
	var idx int
	if start < 0 {
		start = 0
		idx = line
	} else {
		idx = context
	}
	end := line + context + 1
	if line >= len(lines) {
		return nil, 0
	}
	if end > len(lines) {
		end = len(lines)
	}
	return lines[start:end], idx
}
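
A worked example of the windowing (not in the original file): requesting line 2 with one line of context returns a three-line slice plus the index of the requested line within it.

	lines := bytes.Split([]byte("a\nb\nc\nd\ne"), []byte{'\n'})
	ctx, idx := getLineFromLines(lines, 2, 1)
	// ctx == ["a" "b" "c"], idx == 1 (position of line 2 in the window)
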
@@ -1,93 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"io"
	"log"
	"net/http"
	"path"
	"strings"
)

type crashReceiver struct {
	store  *diskStore
	sentry *sentryService
}

func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// The final path component should be a SHA256 hash in hex, so 64 hex
	// characters. We don't care about case on the request but use lower
	// case internally.
	reportID := strings.ToLower(path.Base(req.URL.Path))
	if len(reportID) != 64 {
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}
	for _, c := range reportID {
		if c >= 'a' && c <= 'f' {
			continue
		}
		if c >= '0' && c <= '9' {
			continue
		}
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}

	switch req.Method {
	case http.MethodGet:
		r.serveGet(reportID, w, req)
	case http.MethodHead:
		r.serveHead(reportID, w, req)
	case http.MethodPut:
		r.servePut(reportID, w, req)
	default:
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
	}
}

// serveGet responds to GET requests by serving the uncompressed report.
func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http.Request) {
	bs, err := r.store.Get(reportID)
	if err != nil {
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}
	w.Write(bs)
}

// serveHead responds to HEAD requests by checking if the named report
// already exists in the system.
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) {
	if !r.store.Exists(reportID) {
		http.Error(w, "Not found", http.StatusNotFound)
	}
}

// servePut accepts and stores the given report.
func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *http.Request) {
	// Read at most maxRequestSize of report data.
	log.Println("Receiving report", reportID)
	lr := io.LimitReader(req.Body, maxRequestSize)
	bs, err := io.ReadAll(lr)
	if err != nil {
		log.Println("Reading report:", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
		return
	}

	// Store the report
	if !r.store.Put(reportID, bs) {
		log.Println("Failed to store report (queue full):", reportID)
	}

	// Send the report to Sentry
	if !r.sentry.Send(reportID, userIDFor(req), bs) {
		log.Println("Failed to send report to sentry (queue full):", reportID)
	}
}
@@ -1,57 +0,0 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"github.com/syncthing/syncthing/lib/sha256"
)

// userIDFor returns a string we can use as the user ID for the purpose of
// counting affected users. It's the truncated hash of a salt, the user
// remote IP, and the current month.
func userIDFor(req *http.Request) string {
	addr := req.RemoteAddr
	if fwd := req.Header.Get("x-forwarded-for"); fwd != "" {
		addr = fwd
	}
	if host, _, err := net.SplitHostPort(addr); err == nil {
		addr = host
	}
	now := time.Now().Format("200601")
	salt := "stcrashreporter"
	hash := sha256.Sum256([]byte(salt + addr + now))
	return fmt.Sprintf("%x", hash[:8])
}
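
A small behavioural sketch (the httptest usage is illustrative, not from the original file): the result is 16 hex characters and stays stable for a given source IP within one calendar month.

	req := httptest.NewRequest(http.MethodPut, "/report", nil)
	req.RemoteAddr = "192.0.2.1:54321"
	id := userIDFor(req) // e.g. "3a9f0c12d4e5b671" (hypothetical output, 16 hex chars)
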

// 01234567890abcdef... => 01/23
func dirFor(base string) string {
	return filepath.Join(base[0:2], base[2:4])
}

func fullPathCompressed(root, reportID string) string {
	return filepath.Join(root, dirFor(reportID), reportID) + ".gz"
}

func compressAndWrite(bs []byte, fullPath string) error {
	// Compress the report for storage
	buf := new(bytes.Buffer)
	gw := gzip.NewWriter(buf)
	_, _ = gw.Write(bs) // can't fail
	gw.Close()

	// Create an output file with the compressed report
	return os.WriteFile(fullPath, buf.Bytes(), 0644)
}
@@ -1,116 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"context"
	"crypto/rand"
	"encoding/binary"
	"flag"
	"log"
	"strings"
	"time"

	"github.com/syncthing/syncthing/lib/beacon"
	"github.com/syncthing/syncthing/lib/discover"
	"github.com/syncthing/syncthing/lib/protocol"
)

var (
	all  = false // print all packets, not just first from each device/source
	fake = false // send fake packets to lure out other devices faster
	mc   = "[ff12::8384]:21027"
	bc   = 21027
)

var (
	// Static prefix that we use when generating fake device IDs, so that we
	// can recognize them ourselves. Also makes the device ID start with
	// "STPROBE-" which is humanly recognizable.
	randomPrefix = []byte{148, 223, 23, 4, 148}

	// Our random, fake, device ID that we use when sending announcements.
	myID = randomDeviceID()
)

func main() {
	flag.BoolVar(&all, "all", all, "Print all received announcements (not only first)")
	flag.BoolVar(&fake, "fake", fake, "Send fake announcements")
	flag.StringVar(&mc, "mc", mc, "IPv6 multicast address")
	flag.IntVar(&bc, "bc", bc, "IPv4 broadcast port number")
	flag.Parse()

	if fake {
		log.Println("My ID:", myID)
	}

	ctx := context.Background()

	runbeacon(ctx, beacon.NewMulticast(mc), fake)
	runbeacon(ctx, beacon.NewBroadcast(bc), fake)

	select {}
}

func runbeacon(ctx context.Context, bc beacon.Interface, fake bool) {
	go bc.Serve(ctx)
	go recv(bc)
	if fake {
		go send(bc)
	}
}

// receives and prints discovery announcements
func recv(bc beacon.Interface) {
	seen := make(map[string]bool)
	for {
		data, src := bc.Recv()
		// Guard against short packets before reading the magic number.
		if len(data) < 4 {
			log.Printf("Short packet from %v", src)
			continue
		}
		if m := binary.BigEndian.Uint32(data); m != discover.Magic {
			log.Printf("Incorrect magic %x in announcement from %v", m, src)
			continue
		}

		var ann discover.Announce
		// Skip announcements that fail to unmarshal rather than printing
		// garbage.
		if err := ann.Unmarshal(data[4:]); err != nil {
			log.Printf("Failed to unmarshal announcement from %v: %v", src, err)
			continue
		}

		if ann.ID == myID {
			// This is one of our own fake packets, don't print it.
			continue
		}

		// Print announcement details for the first packet from a given
		// device ID and source address, or if -all was given.
		key := ann.ID.String() + src.String()
		if all || !seen[key] {
			log.Printf("Announcement from %v\n", src)
			log.Printf(" %v at %s\n", ann.ID, strings.Join(ann.Addresses, ", "))
			seen[key] = true
		}
	}
}

// sends fake discovery announcements once every second
func send(bc beacon.Interface) {
	ann := discover.Announce{
		ID:        myID,
		Addresses: []string{"tcp://fake.example.com:12345"},
	}
	bs, _ := ann.Marshal()

	for {
		bc.Send(bs)
		time.Sleep(time.Second)
	}
}

// returns a random but recognizable device ID
func randomDeviceID() protocol.DeviceID {
	var id protocol.DeviceID
	copy(id[:], randomPrefix)
	rand.Read(id[len(randomPrefix):])
	return id
}
@@ -1,10 +0,0 @@
stdiscosrv
==========

This is the global discovery server for the `syncthing` project.

Usage
-----

https://docs.syncthing.net/users/stdiscosrv.html
@@ -1,490 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"compress/gzip"
	"context"
	"crypto/tls"
	"encoding/base64"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/stringutil"
)

// announcement is the format received from and sent to clients
type announcement struct {
	Seen      time.Time `json:"seen"`
	Addresses []string  `json:"addresses"`
}
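
For illustration (assumed, not from the original file), the wire format produced by this struct looks like:

	bs, _ := json.Marshal(announcement{
		Seen:      time.Date(2023, 7, 12, 6, 55, 26, 0, time.UTC),
		Addresses: []string{"tcp://192.0.2.42:22000"},
	})
	// {"seen":"2023-07-12T06:55:26Z","addresses":["tcp://192.0.2.42:22000"]}
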

type apiSrv struct {
	addr     string
	cert     tls.Certificate
	db       database
	listener net.Listener
	repl     replicator // optional
	useHTTP  bool

	mapsMut sync.Mutex
	misses  map[string]int32
}

type requestID int64

func (i requestID) String() string {
	return fmt.Sprintf("%016x", int64(i))
}

type contextKey int

const idKey contextKey = iota

func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool) *apiSrv {
	return &apiSrv{
		addr:    addr,
		cert:    cert,
		db:      db,
		repl:    repl,
		useHTTP: useHTTP,
		misses:  make(map[string]int32),
	}
}

func (s *apiSrv) Serve(_ context.Context) error {
	if s.useHTTP {
		listener, err := net.Listen("tcp", s.addr)
		if err != nil {
			log.Println("Listen:", err)
			return err
		}
		s.listener = listener
	} else {
		tlsCfg := &tls.Config{
			Certificates: []tls.Certificate{s.cert},
			ClientAuth:   tls.RequestClientCert,
			MinVersion:   tls.VersionTLS12,
			NextProtos:   []string{"h2", "http/1.1"},
		}

		tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
		if err != nil {
			log.Println("Listen:", err)
			return err
		}
		s.listener = tlsListener
	}

	http.HandleFunc("/", s.handler)
	http.HandleFunc("/ping", handlePing)

	srv := &http.Server{
		ReadTimeout:    httpReadTimeout,
		WriteTimeout:   httpWriteTimeout,
		MaxHeaderBytes: httpMaxHeaderBytes,
		ErrorLog:       log.New(io.Discard, "", 0),
	}

	err := srv.Serve(s.listener)
	if err != nil {
		log.Println("Serve:", err)
	}
	return err
}

func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
	t0 := time.Now()

	lw := NewLoggingResponseWriter(w)

	defer func() {
		diff := time.Since(t0)
		apiRequestsSeconds.WithLabelValues(req.Method).Observe(diff.Seconds())
		apiRequestsTotal.WithLabelValues(req.Method, strconv.Itoa(lw.statusCode)).Inc()
	}()

	reqID := requestID(rand.Int63())
	req = req.WithContext(context.WithValue(req.Context(), idKey, reqID))

	if debug {
		log.Println(reqID, req.Method, req.URL, req.Proto)
	}

	remoteAddr := &net.TCPAddr{
		IP:   nil,
		Port: -1,
	}

	if s.useHTTP {
		remoteAddr.IP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
		if parsedPort, err := strconv.ParseInt(req.Header.Get("X-Client-Port"), 10, 0); err == nil {
			remoteAddr.Port = int(parsedPort)
		}
	} else {
		var err error
		remoteAddr, err = net.ResolveTCPAddr("tcp", req.RemoteAddr)
		if err != nil {
			log.Println("remoteAddr:", err)
			lw.Header().Set("Retry-After", errorRetryAfterString())
			http.Error(lw, "Internal Server Error", http.StatusInternalServerError)
			apiRequestsTotal.WithLabelValues("no_remote_addr").Inc()
			return
		}
	}

	switch req.Method {
	case http.MethodGet:
		s.handleGET(lw, req)
	case http.MethodPost:
		s.handlePOST(remoteAddr, lw, req)
	default:
		http.Error(lw, "Method Not Allowed", http.StatusMethodNotAllowed)
	}
}

func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
	reqID := req.Context().Value(idKey).(requestID)

	deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
	if err != nil {
		if debug {
			log.Println(reqID, "bad device param")
		}
		lookupRequestsTotal.WithLabelValues("bad_request").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	key := deviceID.String()
	rec, err := s.db.get(key)
	if err != nil {
		// some sort of internal error
		lookupRequestsTotal.WithLabelValues("internal_error").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}

	if len(rec.Addresses) == 0 {
		lookupRequestsTotal.WithLabelValues("not_found").Inc()

		s.mapsMut.Lock()
		misses := s.misses[key]
		if misses < rec.Misses {
			misses = rec.Misses + 1
		} else {
			misses++
		}
		s.misses[key] = misses
		s.mapsMut.Unlock()

		if misses%notFoundMissesWriteInterval == 0 {
			rec.Misses = misses
			rec.Missed = time.Now().UnixNano()
			rec.Addresses = nil
			// rec.Seen retained from get
			s.db.put(key, rec)
		}

		w.Header().Set("Retry-After", notFoundRetryAfterString(int(misses)))
		http.Error(w, "Not Found", http.StatusNotFound)
		return
	}

	lookupRequestsTotal.WithLabelValues("success").Inc()

	w.Header().Set("Content-Type", "application/json")
	var bw io.Writer = w

	// Use compression if the client asks for it
	if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
		w.Header().Set("Content-Encoding", "gzip")
		gw := gzip.NewWriter(bw)
		defer gw.Close()
		bw = gw
	}

	json.NewEncoder(bw).Encode(announcement{
		Seen:      time.Unix(0, rec.Seen).Truncate(time.Second),
		Addresses: addressStrs(rec.Addresses),
	})
}

func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req *http.Request) {
	reqID := req.Context().Value(idKey).(requestID)

	rawCert, err := certificateBytes(req)
	if err != nil {
		if debug {
			log.Println(reqID, "no certificates:", err)
		}
		announceRequestsTotal.WithLabelValues("no_certificate").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Forbidden", http.StatusForbidden)
		return
	}

	var ann announcement
	if err := json.NewDecoder(req.Body).Decode(&ann); err != nil {
		if debug {
			log.Println(reqID, "decode:", err)
		}
		announceRequestsTotal.WithLabelValues("bad_request").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	deviceID := protocol.NewDeviceID(rawCert)

	addresses := fixupAddresses(remoteAddr, ann.Addresses)
	if len(addresses) == 0 {
		announceRequestsTotal.WithLabelValues("bad_request").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	if err := s.handleAnnounce(deviceID, addresses); err != nil {
		announceRequestsTotal.WithLabelValues("internal_error").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}

	announceRequestsTotal.WithLabelValues("success").Inc()

	w.Header().Set("Reannounce-After", reannounceAfterString())
	w.WriteHeader(http.StatusNoContent)
}

func (s *apiSrv) Stop() {
	s.listener.Close()
}

func (s *apiSrv) handleAnnounce(deviceID protocol.DeviceID, addresses []string) error {
	key := deviceID.String()
	now := time.Now()
	expire := now.Add(addressExpiryTime).UnixNano()

	dbAddrs := make([]DatabaseAddress, len(addresses))
	for i := range addresses {
		dbAddrs[i].Address = addresses[i]
		dbAddrs[i].Expires = expire
	}

	// The address slice must always be sorted for database merges to work
	// properly.
	sort.Sort(databaseAddressOrder(dbAddrs))

	seen := now.UnixNano()
	if s.repl != nil {
		s.repl.send(key, dbAddrs, seen)
	}
	return s.db.merge(key, dbAddrs, seen)
}

func handlePing(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(204)
}

func certificateBytes(req *http.Request) ([]byte, error) {
	if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 {
		return req.TLS.PeerCertificates[0].Raw, nil
	}

	var bs []byte

	if hdr := req.Header.Get("X-SSL-Cert"); hdr != "" {
		if strings.Contains(hdr, "%") {
			// Nginx using $ssl_client_escaped_cert
			// The certificate is in PEM format with URL encoding.
			// We need to decode it for the PEM decoder.
			hdr, err := url.QueryUnescape(hdr)
			if err != nil {
				// Decoding failed
				return nil, err
			}

			bs = []byte(hdr)
		} else {
			// Nginx using $ssl_client_cert
			// The certificate is in PEM format but with spaces for newlines. We
			// need to reinstate the newlines for the PEM decoder. But we need to
			// leave the spaces in the BEGIN and END lines - the first and last
			// space - alone.
			bs = []byte(hdr)
			firstSpace := bytes.Index(bs, []byte(" "))
			lastSpace := bytes.LastIndex(bs, []byte(" "))
			for i := firstSpace + 1; i < lastSpace; i++ {
				if bs[i] == ' ' {
					bs[i] = '\n'
				}
			}
		}
	} else if hdr := req.Header.Get("X-Tls-Client-Cert-Der-Base64"); hdr != "" {
		// Caddy {tls_client_certificate_der_base64}
		hdr, err := base64.StdEncoding.DecodeString(hdr)
		if err != nil {
			// Decoding failed
			return nil, err
		}

		bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr})
	} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
		// Traefik 2 passtlsclientcert
		//
		// The certificate is in PEM format, maybe with URL encoding
		// (depends on Traefik version) but without newlines and start/end
		// statements. We need to decode, reinstate the newlines every 64
		// characters and add statements for the PEM decoder

		if strings.Contains(hdr, "%") {
			if unesc, err := url.QueryUnescape(hdr); err == nil {
				hdr = unesc
			}
		}

		for i := 64; i < len(hdr); i += 65 {
			hdr = hdr[:i] + "\n" + hdr[i:]
		}

		hdr = "-----BEGIN CERTIFICATE-----\n" + hdr
		hdr += "\n-----END CERTIFICATE-----\n"
		bs = []byte(hdr)
	}

	if bs == nil {
		return nil, errors.New("empty certificate header")
	}

	block, _ := pem.Decode(bs)
	if block == nil {
		// Decoding failed
		return nil, errors.New("certificate decode result is empty")
	}

	return block.Bytes, nil
}

// fixupAddresses checks the list of addresses, removing invalid ones and
// replacing unspecified IPs with the given remote IP.
func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
	fixed := make([]string, 0, len(addresses))
	for _, annAddr := range addresses {
		uri, err := url.Parse(annAddr)
		if err != nil {
			continue
		}

		host, port, err := net.SplitHostPort(uri.Host)
		if err != nil {
			continue
		}

		ip := net.ParseIP(host)

		// Some classes of IP are no-go.
		if ip.IsLoopback() || ip.IsMulticast() {
			continue
		}

		if remote != nil {
			if host == "" || ip.IsUnspecified() {
				// Replace the unspecified IP with the request source.

				// ... unless the request source is the loopback address or
				// multicast/unspecified (can't happen, really).
				if remote.IP.IsLoopback() || remote.IP.IsMulticast() || remote.IP.IsUnspecified() {
					continue
				}

				// Do not use IPv6 remote address if requested scheme is ...4
				// (i.e., tcp4, etc.)
				if strings.HasSuffix(uri.Scheme, "4") && remote.IP.To4() == nil {
					continue
				}

				// Do not use IPv4 remote address if requested scheme is ...6
				if strings.HasSuffix(uri.Scheme, "6") && remote.IP.To4() != nil {
					continue
				}

				host = remote.IP.String()
			}

			// If zero port was specified, use remote port.
			if port == "0" && remote.Port > 0 {
				port = strconv.Itoa(remote.Port)
			}
		}

		uri.Host = net.JoinHostPort(host, port)
		fixed = append(fixed, uri.String())
	}

	// Remove duplicate addresses
	fixed = stringutil.UniqueTrimmedStrings(fixed)

	return fixed
}

type loggingResponseWriter struct {
	http.ResponseWriter
	statusCode int
}

func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
	return &loggingResponseWriter{w, http.StatusOK}
}

func (lrw *loggingResponseWriter) WriteHeader(code int) {
	lrw.statusCode = code
	lrw.ResponseWriter.WriteHeader(code)
}

func addressStrs(dbAddrs []DatabaseAddress) []string {
	res := make([]string, len(dbAddrs))
	for i, a := range dbAddrs {
		res[i] = a.Address
	}
	return res
}

func errorRetryAfterString() string {
	return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds))
}

func notFoundRetryAfterString(misses int) string {
	retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses
	if retryAfterS > notFoundRetryMaxSeconds {
		retryAfterS = notFoundRetryMaxSeconds
	}
	retryAfterS += rand.Intn(notFoundRetryFuzzSeconds)
	return strconv.Itoa(retryAfterS)
}

func reannounceAfterString() string {
	return strconv.Itoa(reannounceAfterSeconds + rand.Intn(reannounzeFuzzSeconds))
}
@@ -1,88 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"fmt"
	"net"
	"testing"
)

func TestFixupAddresses(t *testing.T) {
	cases := []struct {
		remote *net.TCPAddr
		in     []string
		out    []string
	}{
		{ // verbatim passthrough
			in:  []string{"tcp://1.2.3.4:22000"},
			out: []string{"tcp://1.2.3.4:22000"},
		}, { // unspecified replaced by remote
			remote: addr("1.2.3.4", 22000),
			in:     []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
			out:    []string{"tcp://1.2.3.4:22000", "tcp://192.0.2.42:22000"},
		}, { // unspecified not used as replacement
			remote: addr("0.0.0.0", 22000),
			in:     []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
			out:    []string{"tcp://192.0.2.42:22000"},
		}, { // unspecified not used as replacement
			remote: addr("::", 22000),
			in:     []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
			out:    []string{"tcp://192.0.2.42:22000"},
		}, { // localhost not used as replacement
			remote: addr("127.0.0.1", 22000),
			in:     []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
			out:    []string{"tcp://192.0.2.42:22000"},
		}, { // localhost not used as replacement
			remote: addr("::1", 22000),
			in:     []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
			out:    []string{"tcp://192.0.2.42:22000"},
		}, { // multicast not used as replacement
			remote: addr("224.0.0.1", 22000),
			in:     []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
			out:    []string{"tcp://192.0.2.42:22000"},
		}, { // multicast not used as replacement
			remote: addr("ff80::42", 22000),
			in:     []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
			out:    []string{"tcp://192.0.2.42:22000"},
		}, { // explicitly announced weirdness is also filtered
			remote: addr("192.0.2.42", 22000),
			in:     []string{"tcp://:22000", "tcp://127.1.2.3:22000", "tcp://[::1]:22000", "tcp://[ff80::42]:22000"},
			out:    []string{"tcp://192.0.2.42:22000"},
		}, { // port remapping
			remote: addr("123.123.123.123", 9000),
			in:     []string{"tcp://0.0.0.0:0"},
			out:    []string{"tcp://123.123.123.123:9000"},
		}, { // unspecified port remapping
			remote: addr("123.123.123.123", 9000),
			in:     []string{"tcp://:0"},
			out:    []string{"tcp://123.123.123.123:9000"},
		}, { // empty remapping
			remote: addr("123.123.123.123", 9000),
			in:     []string{"tcp://"},
			out:    []string{},
		}, { // port only remapping
			remote: addr("123.123.123.123", 9000),
			in:     []string{"tcp://44.44.44.44:0"},
			out:    []string{"tcp://44.44.44.44:9000"},
		},
	}

	for _, tc := range cases {
		out := fixupAddresses(tc.remote, tc.in)
		if fmt.Sprint(out) != fmt.Sprint(tc.out) {
			t.Errorf("fixupAddresses(%v, %v) => %v, expected %v", tc.remote, tc.in, out, tc.out)
		}
	}
}

func addr(host string, port int) *net.TCPAddr {
	return &net.TCPAddr{
		IP:   net.ParseIP(host),
		Port: port,
	}
}
@@ -1,383 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:generate go run ../../proto/scripts/protofmt.go database.proto
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto

package main

import (
	"context"
	"log"
	"sort"
	"time"

	"github.com/syncthing/syncthing/lib/sliceutil"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

type clock interface {
	Now() time.Time
}

type defaultClock struct{}

func (defaultClock) Now() time.Time {
	return time.Now()
}

type database interface {
	put(key string, rec DatabaseRecord) error
	merge(key string, addrs []DatabaseAddress, seen int64) error
	get(key string) (DatabaseRecord, error)
}

type levelDBStore struct {
	db         *leveldb.DB
	inbox      chan func()
	clock      clock
	marshalBuf []byte
}

func newLevelDBStore(dir string) (*levelDBStore, error) {
	db, err := leveldb.OpenFile(dir, levelDBOptions)
	if err != nil {
		return nil, err
	}
	return &levelDBStore{
		db:    db,
		inbox: make(chan func(), 16),
		clock: defaultClock{},
	}, nil
}

func newMemoryLevelDBStore() (*levelDBStore, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	}
	return &levelDBStore{
		db:    db,
		inbox: make(chan func(), 16),
		clock: defaultClock{},
	}, nil
}

func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
	t0 := time.Now()
	defer func() {
		databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
	}()

	rc := make(chan error)

	s.inbox <- func() {
		size := rec.Size()
		if len(s.marshalBuf) < size {
			s.marshalBuf = make([]byte, size)
		}
		n, _ := rec.MarshalTo(s.marshalBuf)
		rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
	}

	err := <-rc
	if err != nil {
		databaseOperations.WithLabelValues(dbOpPut, dbResError).Inc()
	} else {
		databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
	}

	return err
}

func (s *levelDBStore) merge(key string, addrs []DatabaseAddress, seen int64) error {
	t0 := time.Now()
	defer func() {
		databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
	}()

	rc := make(chan error)
	newRec := DatabaseRecord{
		Addresses: addrs,
		Seen:      seen,
	}

	s.inbox <- func() {
		// grab the existing record
		oldRec, err := s.get(key)
		if err != nil {
			// "not found" is not an error from get, so this is serious
			// stuff only
			rc <- err
			return
		}
		newRec = merge(newRec, oldRec)

		// We replicate s.put() functionality here ourselves instead of
		// calling it because we want to serialize our get above together
		// with the put in the same function.
		size := newRec.Size()
		if len(s.marshalBuf) < size {
			s.marshalBuf = make([]byte, size)
		}
		n, _ := newRec.MarshalTo(s.marshalBuf)
		rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
	}

	err := <-rc
	if err != nil {
		databaseOperations.WithLabelValues(dbOpMerge, dbResError).Inc()
	} else {
		databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
	}

	return err
}

func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
	t0 := time.Now()
	defer func() {
		databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
	}()

	keyBs := []byte(key)
	val, err := s.db.Get(keyBs, nil)
	if err == leveldb.ErrNotFound {
		databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
		return DatabaseRecord{}, nil
	}
	if err != nil {
		databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
		return DatabaseRecord{}, err
	}

	var rec DatabaseRecord

	if err := rec.Unmarshal(val); err != nil {
		databaseOperations.WithLabelValues(dbOpGet, dbResUnmarshalError).Inc()
		return DatabaseRecord{}, nil
	}

	rec.Addresses = expire(rec.Addresses, s.clock.Now().UnixNano())
	databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
	return rec, nil
}

func (s *levelDBStore) Serve(ctx context.Context) error {
	t := time.NewTimer(0)
	defer t.Stop()
	defer s.db.Close()

	// Start the statistics serve routine. It will exit with us when
	// statisticsTrigger is closed.
	statisticsTrigger := make(chan struct{})
	statisticsDone := make(chan struct{})
	go s.statisticsServe(statisticsTrigger, statisticsDone)

loop:
	for {
		select {
		case fn := <-s.inbox:
			// Run function in serialized order.
			fn()

		case <-t.C:
			// Trigger the statistics routine to do its thing in the
			// background.
			statisticsTrigger <- struct{}{}

		case <-statisticsDone:
			// The statistics routine is done with one iteration, schedule
			// the next.
			t.Reset(databaseStatisticsInterval)

		case <-ctx.Done():
			// We're done.
			close(statisticsTrigger)
			break loop
		}
	}

	// Also wait for statisticsServe to return
	<-statisticsDone

	return nil
}

func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
	defer close(done)

	for range trigger {
		t0 := time.Now()
		nowNanos := t0.UnixNano()
		cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
		cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
		cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
		current, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0

		iter := s.db.NewIterator(&util.Range{}, nil)
		for iter.Next() {
			// Attempt to unmarshal the record and count the
			// failure if there's something wrong with it.
			var rec DatabaseRecord
			if err := rec.Unmarshal(iter.Value()); err != nil {
				errors++
				continue
			}

			// If there are addresses that have not expired it's a current
			// record, otherwise account it based on when it was last seen
			// (last 24 hours or last week) or finally as inactive.
			switch {
			case len(expire(rec.Addresses, nowNanos)) > 0:
				current++
			case rec.Seen > cutoff24h:
				last24h++
			case rec.Seen > cutoff1w:
				last1w++
			case rec.Seen > cutoff2Mon:
				inactive++
			case rec.Missed < cutoff2Mon:
				// It hasn't been seen lately and we haven't recorded
				// someone asking for this device in a long time either;
				// delete the record.
				if err := s.db.Delete(iter.Key(), nil); err != nil {
					databaseOperations.WithLabelValues(dbOpDelete, dbResError).Inc()
				} else {
					databaseOperations.WithLabelValues(dbOpDelete, dbResSuccess).Inc()
				}
			default:
				inactive++
			}
		}

		iter.Release()

		databaseKeys.WithLabelValues("current").Set(float64(current))
		databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
		databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
		databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
		databaseKeys.WithLabelValues("error").Set(float64(errors))
		databaseStatisticsSeconds.Set(time.Since(t0).Seconds())

		// Signal that we are done and can be scheduled again.
		done <- struct{}{}
	}
}

// merge returns the merged result of the two database records a and b. The
// result is the union of the two address sets, with the newer expiry time
// chosen for any duplicates.
func merge(a, b DatabaseRecord) DatabaseRecord {
	// Both lists must be sorted for this to work.
	if !sort.IsSorted(databaseAddressOrder(a.Addresses)) {
		log.Println("Warning: bug: addresses not correctly sorted in merge")
		a.Addresses = sortedAddressCopy(a.Addresses)
	}
	if !sort.IsSorted(databaseAddressOrder(b.Addresses)) {
		// No warning because this is the side we read from disk and it may
		// legitimately predate correct sorting.
		b.Addresses = sortedAddressCopy(b.Addresses)
	}

	res := DatabaseRecord{
		Addresses: make([]DatabaseAddress, 0, len(a.Addresses)+len(b.Addresses)),
		Seen:      a.Seen,
	}
	if b.Seen > a.Seen {
		res.Seen = b.Seen
	}

	aIdx := 0
	bIdx := 0
	aAddrs := a.Addresses
	bAddrs := b.Addresses
loop:
	for {
		switch {
		case aIdx == len(aAddrs) && bIdx == len(bAddrs):
			// both lists are exhausted, we are done
			break loop

		case aIdx == len(aAddrs):
			// a is exhausted, pick from b and continue
			res.Addresses = append(res.Addresses, bAddrs[bIdx])
			bIdx++
			continue

		case bIdx == len(bAddrs):
			// b is exhausted, pick from a and continue
			res.Addresses = append(res.Addresses, aAddrs[aIdx])
			aIdx++
			continue
		}

		// We have values left on both sides.
		aVal := aAddrs[aIdx]
		bVal := bAddrs[bIdx]

		switch {
		case aVal.Address == bVal.Address:
			// update for same address, pick newer
			if aVal.Expires > bVal.Expires {
				res.Addresses = append(res.Addresses, aVal)
			} else {
				res.Addresses = append(res.Addresses, bVal)
			}
			aIdx++
			bIdx++

		case aVal.Address < bVal.Address:
			// a is smallest, pick it and continue
			res.Addresses = append(res.Addresses, aVal)
			aIdx++

		default:
			// b is smallest, pick it and continue
			res.Addresses = append(res.Addresses, bVal)
			bIdx++
		}
	}
	return res
}

// expire returns the list of addresses after removing expired entries.
// Expiration happens in place, so the slice given as the parameter is
// destroyed. Internal order is not preserved.
func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
	i := 0
	for i < len(addrs) {
		if addrs[i].Expires < now {
			addrs = sliceutil.RemoveAndZero(addrs, i)
			continue
		}
		i++
	}
	return addrs
}

func sortedAddressCopy(addrs []DatabaseAddress) []DatabaseAddress {
	sorted := make([]DatabaseAddress, len(addrs))
	copy(sorted, addrs)
	sort.Sort(databaseAddressOrder(sorted))
	return sorted
}

type databaseAddressOrder []DatabaseAddress

func (s databaseAddressOrder) Less(a, b int) bool {
	return s[a].Address < s[b].Address
}

func (s databaseAddressOrder) Swap(a, b int) {
	s[a], s[b] = s[b], s[a]
}

func (s databaseAddressOrder) Len() int {
	return len(s)
}
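To make the merge and expire semantics above concrete, a short hypothetical snippet (assumed to compile alongside database.go in the same package; timestamps are plain integers for readability):

// "tcp://a" appears in both records, so the newer expiry (200) wins,
// and Seen takes the larger of the two values.
a := DatabaseRecord{Addresses: sortedAddressCopy([]DatabaseAddress{{Address: "tcp://a", Expires: 100}}), Seen: 50}
b := DatabaseRecord{Addresses: sortedAddressCopy([]DatabaseAddress{{Address: "tcp://a", Expires: 200}, {Address: "tcp://b", Expires: 90}}), Seen: 60}
m := merge(a, b)                // Addresses: a(200), b(90); Seen: 60
live := expire(m.Addresses, 95) // drops "tcp://b", which expired before now=95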
@@ -1,847 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: database.proto

package main

import (
	fmt "fmt"
	_ "github.com/gogo/protobuf/gogoproto"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	math_bits "math/bits"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

type DatabaseRecord struct {
	Addresses []DatabaseAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"`
	Misses    int32             `protobuf:"varint,2,opt,name=misses,proto3" json:"misses,omitempty"`
	Seen      int64             `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
	Missed    int64             `protobuf:"varint,4,opt,name=missed,proto3" json:"missed,omitempty"`
}

func (m *DatabaseRecord) Reset()         { *m = DatabaseRecord{} }
func (m *DatabaseRecord) String() string { return proto.CompactTextString(m) }
func (*DatabaseRecord) ProtoMessage()    {}
func (*DatabaseRecord) Descriptor() ([]byte, []int) {
	return fileDescriptor_b90fe3356ea5df07, []int{0}
}
func (m *DatabaseRecord) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *DatabaseRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_DatabaseRecord.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *DatabaseRecord) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DatabaseRecord.Merge(m, src)
}
func (m *DatabaseRecord) XXX_Size() int {
	return m.Size()
}
func (m *DatabaseRecord) XXX_DiscardUnknown() {
	xxx_messageInfo_DatabaseRecord.DiscardUnknown(m)
}

var xxx_messageInfo_DatabaseRecord proto.InternalMessageInfo

type ReplicationRecord struct {
	Key       string            `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Addresses []DatabaseAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses"`
	Seen      int64             `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
}

func (m *ReplicationRecord) Reset()         { *m = ReplicationRecord{} }
func (m *ReplicationRecord) String() string { return proto.CompactTextString(m) }
func (*ReplicationRecord) ProtoMessage()    {}
func (*ReplicationRecord) Descriptor() ([]byte, []int) {
	return fileDescriptor_b90fe3356ea5df07, []int{1}
}
func (m *ReplicationRecord) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ReplicationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ReplicationRecord.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ReplicationRecord) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReplicationRecord.Merge(m, src)
}
func (m *ReplicationRecord) XXX_Size() int {
	return m.Size()
}
func (m *ReplicationRecord) XXX_DiscardUnknown() {
	xxx_messageInfo_ReplicationRecord.DiscardUnknown(m)
}

var xxx_messageInfo_ReplicationRecord proto.InternalMessageInfo

type DatabaseAddress struct {
	Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
	Expires int64  `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
}

func (m *DatabaseAddress) Reset()         { *m = DatabaseAddress{} }
func (m *DatabaseAddress) String() string { return proto.CompactTextString(m) }
func (*DatabaseAddress) ProtoMessage()    {}
func (*DatabaseAddress) Descriptor() ([]byte, []int) {
	return fileDescriptor_b90fe3356ea5df07, []int{2}
}
func (m *DatabaseAddress) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *DatabaseAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_DatabaseAddress.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *DatabaseAddress) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DatabaseAddress.Merge(m, src)
}
func (m *DatabaseAddress) XXX_Size() int {
	return m.Size()
}
func (m *DatabaseAddress) XXX_DiscardUnknown() {
	xxx_messageInfo_DatabaseAddress.DiscardUnknown(m)
}

var xxx_messageInfo_DatabaseAddress proto.InternalMessageInfo

func init() {
	proto.RegisterType((*DatabaseRecord)(nil), "main.DatabaseRecord")
	proto.RegisterType((*ReplicationRecord)(nil), "main.ReplicationRecord")
	proto.RegisterType((*DatabaseAddress)(nil), "main.DatabaseAddress")
}

func init() { proto.RegisterFile("database.proto", fileDescriptor_b90fe3356ea5df07) }

var fileDescriptor_b90fe3356ea5df07 = []byte{
	// 270 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4a, 0xc4, 0x30,
	0x18, 0x85, 0x9b, 0x49, 0x1d, 0x99, 0x08, 0xa3, 0x06, 0x94, 0x20, 0x12, 0x4b, 0xdd, 0x74, 0xd5,
	0x01, 0x5d, 0xb9, 0x74, 0xd0, 0x0b, 0xe4, 0x06, 0xe9, 0xe4, 0x77, 0x08, 0x3a, 0x4d, 0x49, 0x2a,
	0xe8, 0x29, 0xf4, 0x58, 0x5d, 0xce, 0xd2, 0x95, 0x68, 0x7b, 0x11, 0x69, 0x26, 0x55, 0x14, 0x37,
	0xb3, 0x7b, 0xdf, 0xff, 0xbf, 0x97, 0xbc, 0x84, 0x4c, 0x95, 0xac, 0x65, 0x21, 0x1d, 0xe4, 0x95,
	0x35, 0xb5, 0xa1, 0xf1, 0x4a, 0xea, 0xf2, 0xe4, 0xdc, 0x42, 0x65, 0xdc, 0xcc, 0x8f, 0x8a, 0xc7,
	0xbb, 0xd9, 0xd2, 0x2c, 0x8d, 0x07, 0xaf, 0x36, 0xd6, 0xf4, 0x05, 0x91, 0xe9, 0x4d, 0x48, 0x0b,
	0x58, 0x18, 0xab, 0xe8, 0x15, 0x99, 0x48, 0xa5, 0x2c, 0x38, 0x07, 0x8e, 0xa1, 0x04, 0x67, 0x7b,
	0x17, 0x47, 0x79, 0x7f, 0x62, 0x3e, 0x18, 0xaf, 0x37, 0xeb, 0x79, 0xdc, 0xbc, 0x9f, 0x45, 0xe2,
	0xc7, 0x4d, 0x8f, 0xc9, 0x78, 0xa5, 0x7d, 0x6e, 0x94, 0xa0, 0x6c, 0x47, 0x04, 0xa2, 0x94, 0xc4,
	0x0e, 0xa0, 0x64, 0x38, 0x41, 0x19, 0x16, 0x5e, 0x7f, 0x7b, 0x15, 0x8b, 0xfd, 0x34, 0x50, 0x5a,
	0x93, 0x43, 0x01, 0xd5, 0x83, 0x5e, 0xc8, 0x5a, 0x9b, 0x32, 0x74, 0x3a, 0x20, 0xf8, 0x1e, 0x9e,
	0x19, 0x4a, 0x50, 0x36, 0x11, 0xbd, 0xfc, 0xdd, 0x72, 0xb4, 0x55, 0xcb, 0x7f, 0xda, 0xa4, 0xb7,
	0x64, 0xff, 0x4f, 0x8e, 0x32, 0xb2, 0x1b, 0x32, 0xe1, 0xde, 0x01, 0xfb, 0x0d, 0x3c, 0x55, 0xda,
	0x86, 0x77, 0x62, 0x31, 0xe0, 0xfc, 0xb4, 0xf9, 0xe4, 0x51, 0xd3, 0x72, 0xb4, 0x6e, 0x39, 0xfa,
	0x68, 0x39, 0x7a, 0xed, 0x78, 0xb4, 0xee, 0x78, 0xf4, 0xd6, 0xf1, 0xa8, 0x18, 0xfb, 0x3f, 0xbf,
	0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xa2, 0xf6, 0x1e, 0xb0, 0x01, 0x00, 0x00,
}

func (m *DatabaseRecord) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *DatabaseRecord) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *DatabaseRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Missed != 0 {
		i = encodeVarintDatabase(dAtA, i, uint64(m.Missed))
		i--
		dAtA[i] = 0x20
	}
	if m.Seen != 0 {
		i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
		i--
		dAtA[i] = 0x18
	}
	if m.Misses != 0 {
		i = encodeVarintDatabase(dAtA, i, uint64(m.Misses))
		i--
		dAtA[i] = 0x10
	}
	if len(m.Addresses) > 0 {
		for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintDatabase(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func (m *ReplicationRecord) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *ReplicationRecord) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *ReplicationRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Seen != 0 {
		i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
		i--
		dAtA[i] = 0x18
	}
	if len(m.Addresses) > 0 {
		for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintDatabase(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12
		}
	}
	if len(m.Key) > 0 {
		i -= len(m.Key)
		copy(dAtA[i:], m.Key)
		i = encodeVarintDatabase(dAtA, i, uint64(len(m.Key)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func (m *DatabaseAddress) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *DatabaseAddress) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *DatabaseAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Expires != 0 {
		i = encodeVarintDatabase(dAtA, i, uint64(m.Expires))
		i--
		dAtA[i] = 0x10
	}
	if len(m.Address) > 0 {
		i -= len(m.Address)
		copy(dAtA[i:], m.Address)
		i = encodeVarintDatabase(dAtA, i, uint64(len(m.Address)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func encodeVarintDatabase(dAtA []byte, offset int, v uint64) int {
	offset -= sovDatabase(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func (m *DatabaseRecord) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Addresses) > 0 {
		for _, e := range m.Addresses {
			l = e.Size()
			n += 1 + l + sovDatabase(uint64(l))
		}
	}
	if m.Misses != 0 {
		n += 1 + sovDatabase(uint64(m.Misses))
	}
	if m.Seen != 0 {
		n += 1 + sovDatabase(uint64(m.Seen))
	}
	if m.Missed != 0 {
		n += 1 + sovDatabase(uint64(m.Missed))
	}
	return n
}

func (m *ReplicationRecord) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Key)
	if l > 0 {
		n += 1 + l + sovDatabase(uint64(l))
	}
	if len(m.Addresses) > 0 {
		for _, e := range m.Addresses {
			l = e.Size()
			n += 1 + l + sovDatabase(uint64(l))
		}
	}
	if m.Seen != 0 {
		n += 1 + sovDatabase(uint64(m.Seen))
	}
	return n
}

func (m *DatabaseAddress) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Address)
	if l > 0 {
		n += 1 + l + sovDatabase(uint64(l))
	}
	if m.Expires != 0 {
		n += 1 + sovDatabase(uint64(m.Expires))
	}
	return n
}

func sovDatabase(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozDatabase(x uint64) (n int) {
	return sovDatabase(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowDatabase
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: DatabaseRecord: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: DatabaseRecord: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthDatabase
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthDatabase
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Addresses = append(m.Addresses, DatabaseAddress{})
			if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Misses", wireType)
			}
			m.Misses = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Misses |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
			}
			m.Seen = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Seen |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Missed", wireType)
			}
			m.Missed = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Missed |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipDatabase(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthDatabase
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowDatabase
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ReplicationRecord: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ReplicationRecord: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthDatabase
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthDatabase
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthDatabase
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthDatabase
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Addresses = append(m.Addresses, DatabaseAddress{})
			if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
			}
			m.Seen = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Seen |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipDatabase(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthDatabase
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowDatabase
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: DatabaseAddress: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: DatabaseAddress: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthDatabase
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthDatabase
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Address = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType)
			}
			m.Expires = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Expires |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipDatabase(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthDatabase
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipDatabase(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowDatabase
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowDatabase
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthDatabase
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupDatabase
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthDatabase
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthDatabase        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowDatabase          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupDatabase = fmt.Errorf("proto: unexpected end of group")
)
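As a quick sanity check of the generated encoding above, a hypothetical snippet (assumed to run in the same package) marshals a single DatabaseAddress and prints its wire bytes:

b, _ := (&DatabaseAddress{Address: "tcp://x", Expires: 2}).Marshal()
fmt.Printf("% x\n", b)
// 0a 07 74 63 70 3a 2f 2f 78 10 02
// field 1 (address): tag 0x0a, length 7, "tcp://x"; field 2 (expires): tag 0x10, varint 2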
@@ -1,36 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

syntax = "proto3";

package main;

import "repos/protobuf/gogoproto/gogo.proto";

option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_unkeyed_all) = false;
option (gogoproto.goproto_unrecognized_all) = false;
option (gogoproto.goproto_sizecache_all) = false;

message DatabaseRecord {
    repeated DatabaseAddress addresses = 1 [(gogoproto.nullable) = false];
    int32 misses = 2; // Number of lookups* without hits
    int64 seen = 3;   // Unix nanos, last device announce
    int64 missed = 4; // Unix nanos, last* failed lookup
}

// *) Not every lookup results in a write, so may not be completely accurate

message ReplicationRecord {
    string key = 1;
    repeated DatabaseAddress addresses = 2 [(gogoproto.nullable) = false];
    int64 seen = 3; // Unix nanos, last device announce
}

message DatabaseAddress {
    string address = 1;
    int64 expires = 2; // Unix nanos
}
@@ -1,211 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"context"
	"fmt"
	"testing"
	"time"
)

func TestDatabaseGetSet(t *testing.T) {
	db, err := newMemoryLevelDBStore()
	if err != nil {
		t.Fatal(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	go db.Serve(ctx)
	defer cancel()

	// Check missing record

	rec, err := db.get("abcd")
	if err != nil {
		t.Error("not found should not be an error")
	}
	if len(rec.Addresses) != 0 {
		t.Error("addresses should be empty")
	}
	if rec.Misses != 0 {
		t.Error("missing should be zero")
	}

	// Set up a clock

	now := time.Now()
	tc := &testClock{now}
	db.clock = tc

	// Put a record

	rec.Addresses = []DatabaseAddress{
		{Address: "tcp://1.2.3.4:5", Expires: tc.Now().Add(time.Minute).UnixNano()},
	}
	if err := db.put("abcd", rec); err != nil {
		t.Fatal(err)
	}

	// Verify it

	rec, err = db.get("abcd")
	if err != nil {
		t.Fatal(err)
	}
	if len(rec.Addresses) != 1 {
		t.Log(rec.Addresses)
		t.Fatal("should have one address")
	}
	if rec.Addresses[0].Address != "tcp://1.2.3.4:5" {
		t.Log(rec.Addresses)
		t.Error("incorrect address")
	}

	// Wind the clock one half expiry, and merge in a new address

	tc.wind(30 * time.Second)

	addrs := []DatabaseAddress{
		{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
	}
	if err := db.merge("abcd", addrs, tc.Now().UnixNano()); err != nil {
		t.Fatal(err)
	}

	// Verify it

	rec, err = db.get("abcd")
	if err != nil {
		t.Fatal(err)
	}
	if len(rec.Addresses) != 2 {
		t.Log(rec.Addresses)
		t.Fatal("should have two addresses")
	}
	if rec.Addresses[0].Address != "tcp://1.2.3.4:5" {
		t.Log(rec.Addresses)
		t.Error("incorrect address[0]")
	}
	if rec.Addresses[1].Address != "tcp://6.7.8.9:0" {
		t.Log(rec.Addresses)
		t.Error("incorrect address[1]")
	}

	// Pass the first expiry time

	tc.wind(45 * time.Second)

	// Verify it

	rec, err = db.get("abcd")
	if err != nil {
		t.Fatal(err)
	}
	if len(rec.Addresses) != 1 {
		t.Log(rec.Addresses)
		t.Fatal("should have one address")
	}
	if rec.Addresses[0].Address != "tcp://6.7.8.9:0" {
		t.Log(rec.Addresses)
		t.Error("incorrect address")
	}

	// Put a record with misses

	rec = DatabaseRecord{Misses: 42, Missed: tc.Now().UnixNano()}
	if err := db.put("efgh", rec); err != nil {
		t.Fatal(err)
	}

	// Verify it

	rec, err = db.get("efgh")
	if err != nil {
		t.Fatal(err)
	}
	if len(rec.Addresses) != 0 {
		t.Log(rec.Addresses)
		t.Fatal("should have no addresses")
	}
	if rec.Misses != 42 {
		t.Log(rec.Misses)
		t.Error("incorrect misses")
	}

	// Set an address

	addrs = []DatabaseAddress{
		{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
	}
	if err := db.merge("efgh", addrs, tc.Now().UnixNano()); err != nil {
		t.Fatal(err)
	}

	// Verify it

	rec, err = db.get("efgh")
	if err != nil {
		t.Fatal(err)
	}
	if len(rec.Addresses) != 1 {
		t.Log(rec.Addresses)
		t.Fatal("should have one address")
	}
	if rec.Misses != 0 {
		t.Log(rec.Misses)
		t.Error("should have no misses")
	}
}

func TestFilter(t *testing.T) {
	// all cases are expired with t=10
	cases := []struct {
		a []DatabaseAddress
		b []DatabaseAddress
	}{
		{
			a: nil,
			b: nil,
		},
		{
			a: []DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
			b: []DatabaseAddress{},
		},
		{
			a: []DatabaseAddress{{Address: "a", Expires: 10}},
			b: []DatabaseAddress{{Address: "a", Expires: 10}},
		},
		{
			a: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
			b: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
		},
		{
			a: []DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
			b: []DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
		},
	}

	for _, tc := range cases {
		res := expire(tc.a, 10)
		if fmt.Sprint(res) != fmt.Sprint(tc.b) {
			t.Errorf("Incorrect result %v, expected %v", res, tc.b)
		}
	}
}

type testClock struct {
	now time.Time
}

func (t *testClock) wind(d time.Duration) {
	t.now = t.now.Add(d)
}

func (t *testClock) Now() time.Time {
	t.now = t.now.Add(time.Nanosecond)
	return t.now
}
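One design note on testClock: Now() nudges the clock forward by a nanosecond on every call, so repeated reads yield strictly increasing timestamps and the expiry comparisons in the store never see two identical "now" values. A tiny hypothetical illustration, assuming the same package:

tc := &testClock{now: time.Unix(0, 0)}
a, b := tc.Now(), tc.Now()
fmt.Println(b.After(a)) // always true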
@@ -1,4 +0,0 @@
[stdiscosrv]
title=Syncthing discovery server
description=Lets syncthing clients discover each other
ports=8443/tcp
@@ -1,3 +0,0 @@
# Default settings for syncthing-discosrv (stdiscosrv).
## Add Options here:
DISCOSRV_OPTS=
@@ -1,25 +0,0 @@
[Unit]
Description=Syncthing Discovery Server
After=network.target
Documentation=man:stdiscosrv(1)

[Service]
WorkingDirectory=/var/lib/syncthing-discosrv
EnvironmentFile=/etc/default/syncthing-discosrv
ExecStart=/usr/bin/stdiscosrv $DISCOSRV_OPTS

# Hardening
User=syncthing-discosrv
Group=syncthing
ProtectSystem=strict
ReadWritePaths=/var/lib/syncthing-discosrv
NoNewPrivileges=true
PrivateTmp=true
PrivateDevices=true
ProtectHome=true
SystemCallArchitectures=native
MemoryDenyWriteExecute=true

[Install]
WantedBy=multi-user.target
Alias=syncthing-discosrv.service
@@ -1,217 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"context"
	"crypto/tls"
	"flag"
	"log"
	"net"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/syncthing/syncthing/lib/build"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/tlsutil"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/thejerf/suture/v4"
)

const (
	addressExpiryTime          = 2 * time.Hour
	databaseStatisticsInterval = 5 * time.Minute

	// Reannounce-After is set to reannounceAfterSeconds +
	// random(reannounzeFuzzSeconds), similar for Retry-After
	reannounceAfterSeconds = 3300
	reannounzeFuzzSeconds  = 300
	errorRetryAfterSeconds = 1500
	errorRetryFuzzSeconds  = 300

	// Retry for not found is minSeconds + failures * incSeconds +
	// random(fuzz), where failures is the number of consecutive lookups
	// with no answer, up to maxSeconds. The fuzz is applied after capping
	// to maxSeconds.
	notFoundRetryMinSeconds  = 60
	notFoundRetryMaxSeconds  = 3540
	notFoundRetryIncSeconds  = 10
	notFoundRetryFuzzSeconds = 60

	// How often (in requests) we serialize the missed counter to database.
	notFoundMissesWriteInterval = 10

	httpReadTimeout    = 5 * time.Second
	httpWriteTimeout   = 5 * time.Second
	httpMaxHeaderBytes = 1 << 10

	// Size of the replication outbox channel
	replicationOutboxSize = 10000
)

// These options make the database a little more optimized for writes, at
// the expense of some memory usage and risk of losing writes in a (system)
// crash.
var levelDBOptions = &opt.Options{
	NoSync:      true,
	WriteBuffer: 32 << 20, // default 4<<20
}

var debug = false

func main() {
	var listen string
	var dir string
	var metricsListen string
	var replicationListen string
	var replicationPeers string
	var certFile string
	var keyFile string
	var replCertFile string
	var replKeyFile string
	var useHTTP bool
	var largeDB bool

	log.SetOutput(os.Stdout)
	log.SetFlags(0)

	flag.StringVar(&certFile, "cert", "./cert.pem", "Certificate file")
	flag.StringVar(&keyFile, "key", "./key.pem", "Key file")
	flag.StringVar(&dir, "db-dir", "./discovery.db", "Database directory")
	flag.BoolVar(&debug, "debug", false, "Print debug output")
	flag.BoolVar(&useHTTP, "http", false, "Listen on HTTP (behind an HTTPS proxy)")
	flag.StringVar(&listen, "listen", ":8443", "Listen address")
	flag.StringVar(&metricsListen, "metrics-listen", "", "Metrics listen address")
	flag.StringVar(&replicationPeers, "replicate", "", "Replication peers, id@address, comma separated")
	flag.StringVar(&replicationListen, "replication-listen", ":19200", "Replication listen address")
	flag.StringVar(&replCertFile, "replication-cert", "", "Certificate file for replication")
	flag.StringVar(&replKeyFile, "replication-key", "", "Key file for replication")
	flag.BoolVar(&largeDB, "large-db", false, "Use larger database settings")
	showVersion := flag.Bool("version", false, "Show version")
	flag.Parse()

	log.Println(build.LongVersionFor("stdiscosrv"))
	if *showVersion {
		return
	}

	if largeDB {
		levelDBOptions.BlockCacheCapacity = 64 << 20
		levelDBOptions.BlockSize = 64 << 10
		levelDBOptions.CompactionTableSize = 16 << 20
		levelDBOptions.CompactionTableSizeMultiplier = 2.0
		levelDBOptions.WriteBuffer = 64 << 20
		levelDBOptions.CompactionL0Trigger = 8
	}

	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if os.IsNotExist(err) {
		log.Println("Failed to load keypair. Generating one, this might take a while...")
		cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 20*365)
		if err != nil {
			log.Fatalln("Failed to generate X509 key pair:", err)
		}
	} else if err != nil {
		log.Fatalln("Failed to load keypair:", err)
	}
	devID := protocol.NewDeviceID(cert.Certificate[0])
	log.Println("Server device ID is", devID)

	replCert := cert
	if replCertFile != "" && replKeyFile != "" {
		replCert, err = tls.LoadX509KeyPair(replCertFile, replKeyFile)
		if err != nil {
			log.Fatalln("Failed to load replication keypair:", err)
		}
	}
	replDevID := protocol.NewDeviceID(replCert.Certificate[0])
	log.Println("Replication device ID is", replDevID)

	// Parse the replication specs, if any.
	var allowedReplicationPeers []protocol.DeviceID
	var replicationDestinations []string
	parts := strings.Split(replicationPeers, ",")
	for _, part := range parts {
		if part == "" {
			continue
		}

		fields := strings.Split(part, "@")
		switch len(fields) {
		case 2:
			// This is an id@address specification. Grab the address for the
			// destination list. Try to resolve it once to catch obvious
			// syntax errors here rather than having the sender service fail
			// repeatedly later.
			_, err := net.ResolveTCPAddr("tcp", fields[1])
			if err != nil {
				log.Fatalln("Resolving address:", err)
			}
			replicationDestinations = append(replicationDestinations, fields[1])
			fallthrough // N.B.

		case 1:
			// The first part is always a device ID.
			id, err := protocol.DeviceIDFromString(fields[0])
			if err != nil {
				log.Fatalln("Parsing device ID:", err)
			}
			if id == protocol.EmptyDeviceID {
				log.Fatalf("Missing device ID for peer in %q", part)
			}
			allowedReplicationPeers = append(allowedReplicationPeers, id)

		default:
			log.Fatalln("Unrecognized replication spec:", part)
		}
	}

	// Root of the service tree.
	main := suture.New("main", suture.Spec{
		PassThroughPanics: true,
	})

	// Start the database.
	db, err := newLevelDBStore(dir)
	if err != nil {
		log.Fatalln("Open database:", err)
	}
	main.Add(db)

	// Start any replication senders.
	var repl replicationMultiplexer
	for _, dst := range replicationDestinations {
		rs := newReplicationSender(dst, replCert, allowedReplicationPeers)
		main.Add(rs)
		repl = append(repl, rs)
	}

	// If we have replication configured, start the replication listener.
	if len(allowedReplicationPeers) > 0 {
		rl := newReplicationListener(replicationListen, replCert, allowedReplicationPeers, db)
		main.Add(rl)
	}

	// Start the main API server.
	qs := newAPISrv(listen, cert, db, repl, useHTTP)
	main.Add(qs)

	// If we have a metrics port configured, start a metrics handler.
	if metricsListen != "" {
		go func() {
			mux := http.NewServeMux()
			mux.Handle("/metrics", promhttp.Handler())
			log.Fatal(http.ListenAndServe(metricsListen, mux))
		}()
	}

	// Engage!
	main.Serve(context.Background())
}
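For reference, the -replicate flag parsed above takes comma separated entries of the form "deviceID" (allowed inbound peer only) or "deviceID@host:port" (also a send destination); the fallthrough in the switch makes the @-form register both. A standalone sketch of that shape, with device IDs kept as plain strings and hypothetical values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	spec := "PEERONE@10.0.0.1:19200,PEERTWO" // hypothetical IDs and address
	var peers, destinations []string
	for _, part := range strings.Split(spec, ",") {
		if part == "" {
			continue
		}
		fields := strings.Split(part, "@")
		if len(fields) == 2 {
			destinations = append(destinations, fields[1])
		}
		peers = append(peers, fields[0]) // the first field is always the device ID
	}
	fmt.Println(peers, destinations) // [PEERONE PEERTWO] [10.0.0.1:19200]
}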
@@ -1,324 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"context"
	"crypto/tls"
	"encoding/binary"
	"fmt"
	io "io"
	"log"
	"net"
	"time"

	"github.com/syncthing/syncthing/lib/protocol"
)

const (
	replicationReadTimeout       = time.Minute
	replicationWriteTimeout      = 30 * time.Second
	replicationHeartbeatInterval = time.Second * 30
)

type replicator interface {
	send(key string, addrs []DatabaseAddress, seen int64)
}

// a replicationSender tries to connect to the remote address and provide
// them with a feed of replication updates.
type replicationSender struct {
	dst        string
	cert       tls.Certificate // our certificate
	allowedIDs []protocol.DeviceID
	outbox     chan ReplicationRecord
}

func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protocol.DeviceID) *replicationSender {
	return &replicationSender{
		dst:        dst,
		cert:       cert,
		allowedIDs: allowedIDs,
		outbox:     make(chan ReplicationRecord, replicationOutboxSize),
	}
}

func (s *replicationSender) Serve(ctx context.Context) error {
	// Sleep a little at startup. Peers often restart at the same time, and
	// this avoids the service failing and entering backoff state
	// unnecessarily, while also reducing the reconnect rate to something
	// reasonable by default.
	time.Sleep(2 * time.Second)

	tlsCfg := &tls.Config{
		Certificates:       []tls.Certificate{s.cert},
		MinVersion:         tls.VersionTLS12,
		InsecureSkipVerify: true,
	}

	// Dial the TLS connection.
	conn, err := tls.Dial("tcp", s.dst, tlsCfg)
	if err != nil {
		log.Println("Replication connect:", err)
		return err
	}
	defer func() {
		conn.SetWriteDeadline(time.Now().Add(time.Second))
		conn.Close()
	}()

	// The replication stream is not especially latency sensitive, but it is
	// quite a lot of data in small writes. Make it more efficient.
	if tcpc, ok := conn.NetConn().(*net.TCPConn); ok {
		_ = tcpc.SetNoDelay(false)
	}

	// Get the other side device ID.
	remoteID, err := deviceID(conn)
	if err != nil {
		log.Println("Replication connect:", err)
		return err
	}

	// Verify it's in the set of allowed device IDs.
	if !deviceIDIn(remoteID, s.allowedIDs) {
		log.Println("Replication connect: unexpected device ID:", remoteID)
		return err
	}

	heartBeatTicker := time.NewTicker(replicationHeartbeatInterval)
	defer heartBeatTicker.Stop()

	// Send records.
	buf := make([]byte, 1024)
	for {
		select {
		case <-heartBeatTicker.C:
			if len(s.outbox) > 0 {
				// No need to send heartbeats if there are events/previous
				// heartbeats to send, they will keep the connection alive.
				continue
			}
			// Empty replication message is the heartbeat:
			s.outbox <- ReplicationRecord{}

		case rec := <-s.outbox:
			// Buffer must hold record plus four bytes for size
			size := rec.Size()
			if len(buf) < size+4 {
				buf = make([]byte, size+4)
			}

			// Record comes after the four bytes size
			n, err := rec.MarshalTo(buf[4:])
			if err != nil {
				// odd to get an error here, but we haven't sent anything
				// yet so it's not fatal
				replicationSendsTotal.WithLabelValues("error").Inc()
				log.Println("Replication marshal:", err)
				continue
			}
			binary.BigEndian.PutUint32(buf, uint32(n))

			// Send
			conn.SetWriteDeadline(time.Now().Add(replicationWriteTimeout))
			if _, err := conn.Write(buf[:4+n]); err != nil {
				replicationSendsTotal.WithLabelValues("error").Inc()
				log.Println("Replication write:", err)
				// Yes, we are losing the replication event here.
				return err
			}
			replicationSendsTotal.WithLabelValues("success").Inc()

		case <-ctx.Done():
			return nil
		}
	}
}
|
||||
func (s *replicationSender) String() string {
|
||||
return fmt.Sprintf("replicationSender(%q)", s.dst)
|
||||
}
|
||||
|
||||
func (s *replicationSender) send(key string, ps []DatabaseAddress, _ int64) {
|
||||
item := ReplicationRecord{
|
||||
Key: key,
|
||||
Addresses: ps,
|
||||
}
|
||||
|
||||
// The send should never block. The inbox is suitably buffered for at
|
||||
// least a few seconds of stalls, which shouldn't happen in practice.
|
||||
select {
|
||||
case s.outbox <- item:
|
||||
default:
|
||||
replicationSendsTotal.WithLabelValues("drop").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// a replicationMultiplexer sends to multiple replicators
|
||||
type replicationMultiplexer []replicator
|
||||
|
||||
func (m replicationMultiplexer) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
for _, s := range m {
|
||||
// each send is nonblocking
|
||||
s.send(key, ps, seen)
|
||||
}
|
||||
}
|
||||
|
||||
// replicationListener accepts incoming connections and reads replication
|
||||
// items from them. Incoming items are applied to the KV store.
|
||||
type replicationListener struct {
|
||||
addr string
|
||||
cert tls.Certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
db database
|
||||
}
|
||||
|
||||
func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []protocol.DeviceID, db database) *replicationListener {
|
||||
return &replicationListener{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) Serve(ctx context.Context) error {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{l.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
lst, err := tls.Listen("tcp", l.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication listen:", err)
|
||||
return err
|
||||
}
|
||||
defer lst.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// Accept a connection
|
||||
conn, err := lst.Accept()
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Figure out the other side device ID
|
||||
remoteID, err := deviceID(conn.(*tls.Conn))
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify it is in the set of allowed device IDs
|
||||
if !deviceIDIn(remoteID, l.allowedIDs) {
|
||||
log.Println("Replication accept: unexpected device ID:", remoteID)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
go l.handle(ctx, conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) String() string {
|
||||
return fmt.Sprintf("replicationListener(%q)", l.addr)
|
||||
}
|
||||
|
||||
func (l *replicationListener) handle(ctx context.Context, conn net.Conn) {
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
conn.SetReadDeadline(time.Now().Add(replicationReadTimeout))
|
||||
|
||||
// First four bytes are the size
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
log.Println("Replication read size:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Read the rest of the record
|
||||
size := int(binary.BigEndian.Uint32(buf[:4]))
|
||||
if len(buf) < size {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
|
||||
if size == 0 {
|
||||
// Heartbeat, ignore
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:size]); err != nil {
|
||||
log.Println("Replication read record:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Unmarshal
|
||||
var rec ReplicationRecord
|
||||
if err := rec.Unmarshal(buf[:size]); err != nil {
|
||||
log.Println("Replication unmarshal:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
// Store
|
||||
l.db.merge(rec.Key, rec.Addresses, rec.Seen)
|
||||
replicationRecvsTotal.WithLabelValues("success").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func deviceID(conn *tls.Conn) (protocol.DeviceID, error) {
|
||||
// Handshake may not be complete on the server side yet, which we need
|
||||
// to get the client certificate.
|
||||
if !conn.ConnectionState().HandshakeComplete {
|
||||
if err := conn.Handshake(); err != nil {
|
||||
return protocol.DeviceID{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// We expect exactly one certificate.
|
||||
certs := conn.ConnectionState().PeerCertificates
|
||||
if len(certs) != 1 {
|
||||
return protocol.DeviceID{}, fmt.Errorf("unexpected number of certificates (%d != 1)", len(certs))
|
||||
}
|
||||
|
||||
return protocol.NewDeviceID(certs[0].Raw), nil
|
||||
}
|
||||
|
||||
func deviceIDIn(id protocol.DeviceID, ids []protocol.DeviceID) bool {
|
||||
for _, candidate := range ids {
|
||||
if id == candidate {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
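The wire format above is deliberately minimal: each record goes out as a four-byte big-endian length followed by the marshalled ReplicationRecord, and a zero-length frame serves as the heartbeat. A self-contained sketch of the same framing over any connection (writeFrame and readFrame are illustrative names, not part of the original file):

package framing

import (
    "encoding/binary"
    "io"
)

// writeFrame sends one length-prefixed frame; an empty payload is a heartbeat.
func writeFrame(w io.Writer, payload []byte) error {
    var hdr [4]byte
    binary.BigEndian.PutUint32(hdr[:], uint32(len(payload)))
    if _, err := w.Write(hdr[:]); err != nil {
        return err
    }
    _, err := w.Write(payload)
    return err
}

// readFrame reads one frame; it returns nil, nil for a heartbeat.
func readFrame(r io.Reader) ([]byte, error) {
    var hdr [4]byte
    if _, err := io.ReadFull(r, hdr[:]); err != nil {
        return nil, err
    }
    size := binary.BigEndian.Uint32(hdr[:])
    if size == 0 {
        return nil, nil // heartbeat
    }
    buf := make([]byte, size)
    if _, err := io.ReadFull(r, buf); err != nil {
        return nil, err
    }
    return buf, nil
}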
@@ -1,4 +0,0 @@
#!/bin/bash

addgroup --system syncthing
adduser --system --home /var/lib/syncthing-discosrv --ingroup syncthing syncthing-discosrv
@@ -1,124 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
    "os"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
)

var (
    apiRequestsTotal = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "api_requests_total",
            Help:      "Number of API requests.",
        }, []string{"type", "result"})
    apiRequestsSeconds = prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Namespace:  "syncthing",
            Subsystem:  "discovery",
            Name:       "api_requests_seconds",
            Help:       "Latency of API requests.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        }, []string{"type"})

    lookupRequestsTotal = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "lookup_requests_total",
            Help:      "Number of lookup requests.",
        }, []string{"result"})
    announceRequestsTotal = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "announcement_requests_total",
            Help:      "Number of announcement requests.",
        }, []string{"result"})

    replicationSendsTotal = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "replication_sends_total",
            Help:      "Number of replication sends.",
        }, []string{"result"})
    replicationRecvsTotal = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "replication_recvs_total",
            Help:      "Number of replication receives.",
        }, []string{"result"})

    databaseKeys = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "database_keys",
            Help:      "Number of database keys at last count.",
        }, []string{"category"})
    databaseStatisticsSeconds = prometheus.NewGauge(
        prometheus.GaugeOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "database_statistics_seconds",
            Help:      "Time spent running the statistics routine.",
        })

    databaseOperations = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Namespace: "syncthing",
            Subsystem: "discovery",
            Name:      "database_operations_total",
            Help:      "Number of database operations.",
        }, []string{"operation", "result"})
    databaseOperationSeconds = prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Namespace:  "syncthing",
            Subsystem:  "discovery",
            Name:       "database_operation_seconds",
            Help:       "Latency of database operations.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        }, []string{"operation"})
)

const (
    dbOpGet             = "get"
    dbOpPut             = "put"
    dbOpMerge           = "merge"
    dbOpDelete          = "delete"
    dbResSuccess        = "success"
    dbResNotFound       = "not_found"
    dbResError          = "error"
    dbResUnmarshalError = "unmarsh_err"
)

func init() {
    prometheus.MustRegister(apiRequestsTotal, apiRequestsSeconds,
        lookupRequestsTotal, announceRequestsTotal,
        replicationSendsTotal, replicationRecvsTotal,
        databaseKeys, databaseStatisticsSeconds,
        databaseOperations, databaseOperationSeconds)

    processCollectorOpts := collectors.ProcessCollectorOpts{
        Namespace: "syncthing_discovery",
        PidFn: func() (int, error) {
            return os.Getpid(), nil
        },
    }

    prometheus.MustRegister(
        collectors.NewProcessCollector(processCollectorOpts),
    )
}
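The database code that consumes these collectors is not part of this hunk, but a representative (hypothetical) call site would time an operation with the summary and count its outcome with the counter, reusing the label constants above. This sketch assumes the same package plus an imported "time":

// observeGet is a hypothetical wrapper showing the intended use of the
// metric vectors and label constants declared above.
func observeGet(get func(key string) (string, bool, error), key string) (string, bool, error) {
    t0 := time.Now()
    val, ok, err := get(key)
    databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
    switch {
    case err != nil:
        databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
    case !ok:
        databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
    default:
        databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
    }
    return val, ok, err
}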
@@ -1,68 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
    "encoding/json"
    "flag"
    "fmt"
    "log"
    "net/http"
    "os"
    "time"
)

type event struct {
    ID   int                    `json:"id"`
    Type string                 `json:"type"`
    Time time.Time              `json:"time"`
    Data map[string]interface{} `json:"data"`
}

func main() {
    log.SetOutput(os.Stdout)
    log.SetFlags(0)

    target := flag.String("target", "localhost:8384", "Target Syncthing instance")
    types := flag.String("types", "", "Filter for specific event types (comma-separated)")
    apikey := flag.String("apikey", "", "Syncthing API key")
    flag.Parse()

    if *apikey == "" {
        log.Fatal("Must give -apikey argument")
    }
    var eventsArg string
    if len(*types) > 0 {
        eventsArg = "&events=" + *types
    }

    since := 0
    for {
        req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/rest/events?since=%d%s", *target, since, eventsArg), nil)
        if err != nil {
            log.Fatal(err)
        }
        req.Header.Set("X-API-Key", *apikey)
        res, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }

        var events []event
        err = json.NewDecoder(res.Body).Decode(&events)
        if err != nil {
            log.Fatal(err)
        }
        res.Body.Close()

        for _, event := range events {
            bs, _ := json.MarshalIndent(event, "", "  ")
            log.Printf("%s", bs)
            since = event.ID
        }
    }
}
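The loop above long-polls the /rest/events endpoint and resumes from the highest event ID it has seen, so each iteration only returns events newer than the previous batch. A typical invocation (flag values illustrative) would be something like stevents -apikey s3cr3t -types ItemStarted,ItemFinished against a local instance.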
@@ -1,83 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
    "context"
    "flag"
    "log"
    "os"
    "path/filepath"

    "github.com/syncthing/syncthing/lib/protocol"
    "github.com/syncthing/syncthing/lib/scanner"
)

func main() {
    log.SetFlags(0)
    log.SetOutput(os.Stdout)

    standardBlocks := flag.Bool("s", false, "Use standard block size")
    flag.Parse()

    path := flag.Arg(0)
    if path == "" {
        log.Fatal("Need one argument: path to check")
    }

    log.Println("File:")
    log.Println("  ", filepath.Clean(path))
    log.Println()

    fi, err := os.Lstat(path)
    if err != nil {
        log.Fatal(err)
    }

    log.Println("Lstat:")
    log.Printf("  Size: %d bytes", fi.Size())
    log.Printf("  Mode: 0%o", fi.Mode())
    log.Printf("  Time: %v", fi.ModTime())
    log.Printf("  %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
    log.Println()

    if !fi.Mode().IsDir() && !fi.Mode().IsRegular() {
        fi, err = os.Stat(path)
        if err != nil {
            log.Fatal(err)
        }

        log.Println("Stat:")
        log.Printf("  Size: %d bytes", fi.Size())
        log.Printf("  Mode: 0%o", fi.Mode())
        log.Printf("  Time: %v", fi.ModTime())
        log.Printf("  %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
        log.Println()
    }

    if fi.Mode().IsRegular() {
        log.Println("Blocks:")

        fd, err := os.Open(path)
        if err != nil {
            log.Fatal(err)
        }

        blockSize := int(fi.Size())
        if *standardBlocks || blockSize < protocol.MinBlockSize {
            blockSize = protocol.BlockSize(fi.Size())
        }
        bs, err := scanner.Blocks(context.TODO(), fd, blockSize, fi.Size(), nil, true)
        if err != nil {
            log.Fatal(err)
        }

        for _, b := range bs {
            log.Println("  ", b)
        }
    }
}
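Note the block size selection above: without -s the whole file is hashed as a single block (unless it is smaller than protocol.MinBlockSize), while -s picks the standard block size via protocol.BlockSize, matching what Syncthing itself would choose for a file of that size.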
@@ -1,109 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
    "context"
    "crypto/tls"
    "errors"
    "flag"
    "fmt"
    "net/url"
    "os"
    "time"

    "github.com/syncthing/syncthing/lib/config"
    "github.com/syncthing/syncthing/lib/discover"
    "github.com/syncthing/syncthing/lib/events"
    "github.com/syncthing/syncthing/lib/protocol"
)

var timeout = 5 * time.Second

func main() {
    var server string

    flag.StringVar(&server, "server", "", "Announce server (blank for default set)")
    flag.DurationVar(&timeout, "timeout", timeout, "Query timeout")
    flag.Usage = usage
    flag.Parse()

    if flag.NArg() != 1 {
        flag.Usage()
        os.Exit(64)
    }

    id, err := protocol.DeviceIDFromString(flag.Args()[0])
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }

    if server != "" {
        checkServers(id, server)
    } else {
        checkServers(id, config.DefaultDiscoveryServers...)
    }
}

type checkResult struct {
    server    string
    addresses []string
    error
}

func checkServers(deviceID protocol.DeviceID, servers ...string) {
    t0 := time.Now()
    resc := make(chan checkResult)
    for _, srv := range servers {
        srv := srv
        go func() {
            res := checkServer(deviceID, srv)
            res.server = srv
            resc <- res
        }()
    }

    for range servers {
        res := <-resc

        u, _ := url.Parse(res.server)
        fmt.Printf("%s (%v):\n", u.Host, time.Since(t0))

        if res.error != nil {
            fmt.Println("  " + res.error.Error())
        }
        for _, addr := range res.addresses {
            fmt.Println("  address:", addr)
        }
    }
}

func checkServer(deviceID protocol.DeviceID, server string) checkResult {
    disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, events.NoopLogger, nil)
    if err != nil {
        return checkResult{error: err}
    }

    res := make(chan checkResult, 1)

    time.AfterFunc(timeout, func() {
        res <- checkResult{error: errors.New("timeout")}
    })

    go func() {
        addresses, err := disco.Lookup(context.Background(), deviceID)
        res <- checkResult{addresses: addresses, error: err}
    }()

    return <-res
}

func usage() {
    fmt.Printf("Usage:\n\t%s [options] <device ID>\n\nOptions:\n", os.Args[0])
    flag.PrintDefaults()
}
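The timeout in checkServer races a time.AfterFunc send against the lookup on a one-element channel, so the losing goroutine may stay blocked until the process exits; harmless in a short-lived CLI. A sketch of an alternative (not the original code) that bounds the lookup with a context instead, assuming the Lookup implementation honors cancellation:

// checkServerWithContext is a hypothetical variant of checkServer above,
// using the same package imports and types.
func checkServerWithContext(deviceID protocol.DeviceID, server string) checkResult {
    disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, events.NoopLogger, nil)
    if err != nil {
        return checkResult{error: err}
    }

    // Cancel the lookup itself after the timeout instead of abandoning it.
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()

    addresses, err := disco.Lookup(ctx, deviceID)
    return checkResult{addresses: addresses, error: err}
}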
@@ -1,44 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// Command stfindignored lists ignored files under a given folder root.
package main

import (
    "flag"
    "fmt"
    "os"

    "github.com/syncthing/syncthing/lib/fs"
    "github.com/syncthing/syncthing/lib/ignore"
)

func main() {
    flag.Parse()
    root := flag.Arg(0)
    if root == "" {
        root = "."
    }

    vfs := fs.NewWalkFilesystem(fs.NewFilesystem(fs.FilesystemTypeBasic, root))

    ign := ignore.New(vfs)
    if err := ign.Load(".stignore"); err != nil {
        fmt.Fprintf(os.Stderr, "Fatal: loading ignores: %v\n", err)
        os.Exit(1)
    }

    vfs.Walk(".", func(path string, info fs.FileInfo, err error) error {
        if err != nil {
            fmt.Fprintf(os.Stderr, "Warning: %s: %v\n", path, err)
            return fs.SkipDir
        }
        if ign.Match(path).IsIgnored() {
            fmt.Println(path)
        }
        return nil
    })
}
@@ -1,119 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
    "flag"
    "fmt"
    "io"
    "log"
    "math/rand"
    "os"
    "path/filepath"
    "time"
)

func main() {
    dir := flag.String("dir", "~/files", "Directory to generate into")
    files := flag.Int("files", 1000, "Number of files to create")
    maxExp := flag.Int("maxexp", 20, "Max size exponent")
    src := flag.String("src", "/dev/urandom", "Source of file data")
    flag.Parse()
    if err := generateFiles(*dir, *files, *maxExp, *src); err != nil {
        log.Println(err)
    }
}

func generateFiles(dir string, files, maxexp int, srcname string) error {
    fd, err := os.Open(srcname)
    if err != nil {
        return err
    }

    for i := 0; i < files; i++ {
        n := randomName()

        if rand.Float64() < 0.05 {
            // Some files and directories are dotfiles
            n = "." + n
        }

        p0 := filepath.Join(dir, string(n[0]), n[0:2])
        err = os.MkdirAll(p0, 0755)
        if err != nil {
            log.Fatal(err)
        }

        p1 := filepath.Join(p0, n)

        s := int64(1 << uint(rand.Intn(maxexp)))
        a := int64(128 * 1024)
        if a > s {
            a = s
        }
        s += rand.Int63n(a)

        if err := generateOneFile(fd, p1, s); err != nil {
            return err
        }
    }

    return nil
}

func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
    src := io.LimitReader(&infiniteReader{fd}, s)
    dst, err := os.Create(p1)
    if err != nil {
        return err
    }

    _, err = io.Copy(dst, src)
    if err != nil {
        return err
    }

    err = dst.Close()
    if err != nil {
        return err
    }

    os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))

    t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
    return os.Chtimes(p1, t, t)
}

func randomName() string {
    var b [16]byte
    readRand(b[:])
    return fmt.Sprintf("%x", b[:])
}

func readRand(bs []byte) (int, error) {
    var r uint32
    for i := range bs {
        if i%4 == 0 {
            r = uint32(rand.Int63())
        }
        bs[i] = byte(r >> uint((i%4)*8))
    }
    return len(bs), nil
}

type infiniteReader struct {
    rd io.ReadSeeker
}

func (i *infiniteReader) Read(bs []byte) (int, error) {
    n, err := i.rd.Read(bs)
    if err == io.EOF {
        err = nil
        i.rd.Seek(0, 0)
    }
    return n, err
}
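For reference, the size draw in generateFiles above is log-uniform: s starts as 2^k for a uniformly random k below -maxexp, then gains up to 128 KiB (capped at s) of uniform jitter, so the generated tree is dominated by small files with the occasional large one.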
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 The Syncthing Project

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,24 +0,0 @@
# relaypoolsrv

This is the relay pool server for the `syncthing` project, which allows
community-hosted [relaysrv](https://github.com/syncthing/relaysrv) instances
to join the public pool.

Servers that join the pool are then advertised to users of `syncthing` as
potential connection points for those who are unable to connect directly due
to NAT or firewall issues.

There is very little reason to run this yourself, as `relaypoolsrv` is only
used for announcement and lookup of public relay servers. If you are looking
to set up a private or a public relay, please check the documentation for
[relaysrv](https://github.com/syncthing/relaysrv), which also explains how
to join the default public pool.

See `relaypoolsrv -help` for configuration options.

##### Third-party attributions

[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).

[lib/pq](https://github.com/lib/pq), Copyright (C) 2011-2013 'pq' Contributors. Portions Copyright (C) 2011 Blake Mizerany.
1
cmd/strelaypoolsrv/auto/.gitignore
vendored
@@ -1 +0,0 @@
gui.files.go
@@ -1,10 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:generate go run ../../../script/genassets.go -o gui.files.go ../gui

// Package auto contains auto generated files for web assets.
package auto
@@ -1,16 +0,0 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:build noassets
// +build noassets

package auto

import "github.com/syncthing/syncthing/lib/assets"

func Assets() map[string]assets.Asset {
    return nil
}
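The noassets build tag above selects this stub in place of the generated gui.files.go, so the binary can be built without embedded GUI assets, e.g. with go build -tags noassets.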
@@ -1,402 +0,0 @@
<!DOCTYPE html>

<html lang="en" ng-app="syncthing" ng-controller="relayDataController">
<head>
    <meta charset="utf-8"/>
    <meta http-equiv="X-UA-Compatible" content="IE=edge"/>
    <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
    <meta name="description" content=""/>
    <meta name="author" content=""/>

    <title>Relay stats</title>
    <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.0.13/css/all.css"/>
    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet"/>
    <link rel="stylesheet" href="https://unpkg.com/leaflet@1.6.0/dist/leaflet.css"
          integrity="sha512-xwE/Az9zrjBIphAcBb3F6JVqxf46+CDLwfLMHloNu6KEQCAWi6HcDUbeOfBIptF7tcCzusKFjFw2yuvEpDL9wQ=="
          crossorigin=""/>
    <script src="https://unpkg.com/leaflet@1.6.0/dist/leaflet.js"
            integrity="sha512-gZwIG9x3wUXg2hdXF6+rVkLF/0Vi9U8D2Ntg4Ga5I5BZpVkVxlJWbSQtXPSiUTtC0TjtGOmxa1AJPuV0CPthew=="
            crossorigin=""></script>

    <style>
        #map {
            height: 600px;
        }
        .ng-cloak {
            display: none;
        }
        table {
            font-size: 11px !important;
            width: 100%;
            border: 1px;
        }
        td {
            padding: 0px !important;
        }
        tfoot td {
            font-weight: bold;
        }
    </style>
</head>

<body class="ng-cloak">
<div class="container">
    <h1>Relay Pool Data</h1>
    <div ng-if="relays === undefined" class="text-center">
        <img src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif" alt=""/>
        <p>Please wait while we gather data…</p>
    </div>
    <div>
        <div ng-show="relays !== undefined" class="ng-hide">
            <p>
                The relays listed on this page are not managed or vetted by the Syncthing project.
                Each relay is the responsibility of the relay operator.
                Currently {{ relays.length }} relays are online.
            </p>
        </div>
        <div id="map"></div> <!-- Can't hide the map, otherwise it freaks out -->
        <p>The circle size represents how many bytes the relay has transferred relative to other relays.</p>
    </div>
    <div>
        <table class="table table-striped table-condensed table">
            <thead>
            <tr>
                <th rowspan="2">Address</th>
                <th rowspan="2">
                    <a ng-click="sortType = 'stats.numActiveSessions'; sortReverse = !sortReverse">
                        Sessions
                        <span ng-show="sortType == 'stats.numActiveSessions' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.numActiveSessions' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th rowspan="2">
                    <a ng-click="sortType = 'stats.numConnections'; sortReverse = !sortReverse">
                        Connections
                        <span ng-show="sortType == 'stats.numConnections' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.numConnections' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th rowspan="2">
                    <a ng-click="sortType = 'stats.bytesProxied'; sortReverse = !sortReverse">
                        Data relayed
                        <span ng-show="sortType == 'stats.bytesProxied' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.bytesProxied' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th colspan="6" class="text-center">Transfer rate in the last period</th>
                <th rowspan="2">
                    <a ng-click="sortType = 'stats.uptimeSeconds'; sortReverse = !sortReverse">
                        Uptime hours
                        <span ng-show="sortType == 'stats.uptimeSeconds' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.uptimeSeconds' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th rowspan="2">
                    <a ng-click="sortType = 'stats.options[\'provided-by\'] || \'\''; sortReverse = !sortReverse">
                        Provided by
                        <span ng-show="sortType == 'stats.options[\'provided-by\'] || \'\'' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.options[\'provided-by\'] || \'\'' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
            </tr>
            <tr>
                <th>
                    <a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[0]'; sortReverse = !sortReverse">
                        10s
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[0]' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[0]' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th>
                    <a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[1]'; sortReverse = !sortReverse">
                        1m
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[1]' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[1]' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th>
                    <a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[2]'; sortReverse = !sortReverse">
                        5m
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[2]' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[2]' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th>
                    <a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[3]'; sortReverse = !sortReverse">
                        15m
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[3]' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[3]' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th>
                    <a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[4]'; sortReverse = !sortReverse">
                        30m
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[4]' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[4]' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
                <th>
                    <a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[5]'; sortReverse = !sortReverse">
                        60m
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[5]' && !sortReverse" class="fas fa-caret-down"></span>
                        <span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[5]' && sortReverse" class="fas fa-caret-up"></span>
                    </a>
                </th>
            </tr>
            </thead>
            <tbody>
            <tr ng-repeat="relay in relays | orderBy:sortType:sortReverse:sortCompare" ng-mouseover="relay.showMarker()" ng-mouseleave="relay.hideMarker()">
                <td>{{ relay.address }}</td>
                <td ng-if="!relay.stats" colspan="11"></td>
                <td ng-if-start="relay.stats">{{ relay.stats.numActiveSessions }}</td>
                <td>{{ relay.stats.numConnections }}</td>
                <td>{{ relay.stats.bytesProxied | bytes }}</td>
                <td>{{ relay.stats.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
                <td>{{ relay.stats.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
                <td>{{ relay.stats.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
                <td>{{ relay.stats.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
                <td>{{ relay.stats.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
                <td>{{ relay.stats.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
                <td ng-if="relay.stats.uptimeSeconds != undefined">{{ relay.stats.uptimeSeconds/60/60 | number:0 }}</td>
                <td ng-if="relay.stats.uptimeSeconds == undefined"></td>
                <td title="{{ relay.stats.options['provided-by'] || '' }}" ng-if-end>
                    {{ relay.stats.options['provided-by'] || '' | limitTo:50 }}
                    <span ng-if="(relay.stats.options['provided-by'] || '').length > 50">…</span>
                </td>
            </tr>
            </tbody>
            <tfoot>
            <tr>
                <td>Totals</td>
                <td>{{ totals.numActiveSessions }}</td>
                <td>{{ totals.numConnections }}</td>
                <td>{{ totals.bytesProxied | bytes }}</td>
                <td>{{ totals.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
                <td>{{ totals.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
                <td>{{ totals.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
                <td>{{ totals.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
                <td>{{ totals.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
                <td>{{ totals.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
                <td>{{ totals.uptimeSeconds/60/60 | number:0 }} hours</td>
                <td>{{ relays.length }} relays</td>
            </tr>
            </tfoot>
        </table>
    </div>
    <hr>
    <p>
        This product includes GeoLite2 data created by MaxMind, available from
        <a href="https://www.maxmind.com">https://www.maxmind.com</a>.
    </p>
</div>

<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.4.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.5.8/angular.min.js"></script>
<script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
</body>

<script>
    angular.module('syncthing', [
    ])
    .config(['$httpProvider', function($httpProvider) {
        $httpProvider.defaults.timeout = 5000;
    }])
    .filter('bytes', function() {
        return function(bytes, precision) {
            if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return '-';
            if (typeof precision === 'undefined') precision = 1;

            var units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'],
                number = Math.floor(Math.log(bytes) / Math.log(1024));

            var value = (bytes / Math.pow(1024, number));
            if (!isFinite(value)) {
                value = 0;
                precision = 0;
            }
            if (!isFinite(number)) {
                units = 'bytes';
            } else {
                units = units[number];
            }
            return value.toFixed(precision) + ' ' + units;
        }
    })
    .controller('relayDataController', ['$scope', '$rootScope', '$http', '$q', '$compile', '$timeout', function($scope, $rootScope, $http, $q, $compile, $timeout) {
        $scope.totals = {
            bytesProxied: 0,
            goMaxProcs: 0,
            kbps10s1m5m15m30m60m: [0, 0, 0, 0, 0, 0],
            numActiveSessions: 0,
            numConnections: 0,
            numPendingSessionKeys: 0,
            numProxies: 0,
            uptimeSeconds: 0,
        };
        $scope.map = L.map('map').setView([40.90296, 1.90925], 2);
        L.tileLayer('https://tile.openstreetmap.org/{z}/{x}/{y}.png',
            {
                attribution: 'Leaflet',
                maxZoom: 17
            }).addTo($scope.map);
        $scope.tooltipTemplate = $('#infoTemplate').html();
        $scope.usedLocations = {};
        $scope.sortType = 'stats.numActiveSessions';
        $scope.sortReverse = true;
        $scope.sortCompare = function(a, b) {
            if (a.value == b.value) {
                return 0;
            }
            if (a.type == "undefined" || a.type == "null") {
                return -1;
            }
            if (b.type == "undefined" || b.type == "null") {
                return 1;
            }
            return a.value > b.value ? 1 : -1;
        }

        $http.get("/endpoint").then(function(response) {
            $scope.relays = response.data.relays;

            angular.forEach($scope.relays, function(relay) {
                relay.uri = constructURI(relay.url);
                relay.address = relay.url.split('/')[2];

                addMarkerToMap(relay);

                if (relay.stats) {
                    angular.forEach($scope.totals, function(value, key) {
                        if (typeof $scope.totals[key] == 'number') {
                            $scope.totals[key] += relay.stats[key];
                        } else if (typeof $scope.totals[key] == 'object' && $scope.totals[key] instanceof Array) {
                            angular.forEach($scope.totals[key], function(value, index) {
                                $scope.totals[key][index] += relay.stats[key][index];
                            });
                        }
                    });
                }
            });

            // After the totals were calculated, add circles.
            angular.forEach($scope.relays, function(relay) {
                if (relay.stats) {
                    addCircleToMap(relay);
                }
            });

            if ($scope.relays.length == 1) {
                // Center on the only relay, with zoom
                $scope.map.panTo(new L.LatLng($scope.relays[0].location.latitude, $scope.relays[0].location.longitude));
                $scope.map.setZoom(13);
            }
        });

        function addMarkerToMap(relay) {
            var loc = relay.location.latitude + "," + relay.location.longitude;

            // Deal with overlapping markers
            while (loc in $scope.usedLocations) {
                var locParts = loc.split(',');
                locParts = [parseFloat(locParts[0]), parseFloat(locParts[1])];
                locParts[Math.round(Math.random())] += 0.5 * (Math.random() >= 0.5 ? 1 : -1);
                loc = locParts.join(',');
            }

            $scope.usedLocations[loc] = true;

            var locParts = loc.split(',');

            relay.marker = new L.Marker([relay.location.latitude, relay.location.longitude], {
                title: relay.url,
            });

            var scope = $rootScope.$new(true);
            scope.relay = relay;

            var icon = new L.Icon({
                iconSize: [18, 28], // size of the icon
                iconAnchor: [9, 28], // point of the icon which will correspond to the marker's location
                shadowAnchor: [0, 0], // the same for the shadow
                popupAnchor: [0, -27], // popup anchor
                shadowSize: [0, 0],
                iconUrl: 'https://cdn.rawgit.com/pointhi/leaflet-color-markers/master/img/marker-icon-red.png',
                shadowUrl: 'https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/images/marker-shadow.png',
            });

            relay.marker = new L.marker(new L.latLng(locParts[0], locParts[1]), {icon})
                .bindPopup($compile($scope.tooltipTemplate)(scope)[0], {})
                .on('mouseover', function (e) {
                    this.openPopup();
                }).on('mouseout', function (e) {
                    this.closePopup();
                }).addTo($scope.map);

            relay.showMarker = function() {
                relay.marker.openPopup();
            }

            relay.hideMarker = function() {
                relay.marker.closePopup();
            }
        }

        function addCircleToMap(relay) {
            L.circle([relay.location.latitude, relay.location.longitude],
                {
                    radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000,
                    color: "#FF0000",
                    fillColor: "#FF0000",
                    fillOpacity: 0.35,
                }).addTo($scope.map);
        }

        function constructURI(url) {
            var uri = document.createElement('a');

            // HAX, otherwise it doesn't work
            uri.href = url.replace('relay://', 'http://');

            // Convert the query string to an object
            uri.args = {};
            angular.forEach(uri.search.replace(/^\?/, '').split('&'), function(query) {
                var split = query.split('=');
                uri.args[split[0]] = split[1];
            });

            return uri;
        }
    }]);
</script>

<script type="text/template" id="infoTemplate">
    <div>
        <p><b>{{ relay.uri.hostname }}</b> <span ng-if="relay.stats.options['provided-by']">provided by <u>{{ relay.stats.options['provided-by'] }}</u></span></p>
        <div ng-if="relay.stats">
            <span ng-if="relay.stats.startTime">Start time: {{ relay.stats.startTime | date:"medium" }}<br/></span>
            <span ng-if="relay.stats.bytesProxied != undefined">Proxied: {{ relay.stats.bytesProxied | bytes }}<br/></span>
            <span ng-if="relay.stats.numActiveSessions != undefined">Sessions: {{ relay.stats.numActiveSessions }}<br/></span>
            <span ng-if="relay.stats.numConnections != undefined">Clients: {{ relay.stats.numConnections }}<br/></span>
            <span ng-if="relay.stats.options.pools">Pools: {{ relay.stats.options.pools.join(', ') }}<br/></span>
            <span ng-if="relay.stats.options['global-rate'] != undefined">
                <span ng-if="relay.stats.options['global-rate'] > 0">Global rate limit: {{ relay.stats.options['global-rate'] | bytes }}/s</span>
                <span ng-if="relay.stats.options['global-rate'] == 0">Global rate limit: unlimited</span>
                <br/>
            </span>
            <span ng-if="relay.stats.options['per-session-rate'] != undefined">
                <span ng-if="relay.stats.options['per-session-rate'] > 0">Session rate limit: {{ relay.stats.options['per-session-rate'] | bytes }}/s</span>
                <span ng-if="relay.stats.options['per-session-rate'] == 0">Session rate limit: unlimited</span>
                <br/>
            </span>
        </div>
        <div ng-if="!relay.stats">
            Data unavailable.
        </div>
    </div>
</script>
</html>
@@ -1,662 +0,0 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).

package main

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "encoding/json"
    "flag"
    "fmt"
    "log"
    "net"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "sync/atomic"
    "time"

    lru "github.com/hashicorp/golang-lru/v2"
    "github.com/syncthing/syncthing/lib/httpcache"
    "github.com/syncthing/syncthing/lib/protocol"

    "github.com/oschwald/geoip2-golang"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
    "github.com/syncthing/syncthing/lib/assets"
    "github.com/syncthing/syncthing/lib/rand"
    "github.com/syncthing/syncthing/lib/relay/client"
    "github.com/syncthing/syncthing/lib/sync"
    "github.com/syncthing/syncthing/lib/tlsutil"
)

type location struct {
    Latitude  float64 `json:"latitude"`
    Longitude float64 `json:"longitude"`
    City      string  `json:"city"`
    Country   string  `json:"country"`
    Continent string  `json:"continent"`
}

type relay struct {
    URL            string    `json:"url"`
    Location       location  `json:"location"`
    uri            *url.URL
    Stats          *stats    `json:"stats"`
    StatsRetrieved time.Time `json:"statsRetrieved"`
}

type stats struct {
    StartTime          time.Time `json:"startTime"`
    UptimeSeconds      int       `json:"uptimeSeconds"`
    PendingSessionKeys int       `json:"numPendingSessionKeys"`
    ActiveSessions     int       `json:"numActiveSessions"`
    Connections        int       `json:"numConnections"`
    Proxies            int       `json:"numProxies"`
    BytesProxied       int       `json:"bytesProxied"`
    GoVersion          string    `json:"goVersion"`
    GoOS               string    `json:"goOS"`
    GoArch             string    `json:"goArch"`
    GoMaxProcs         int       `json:"goMaxProcs"`
    GoRoutines         int       `json:"goNumRoutine"`
    Rates              []int64   `json:"kbps10s1m5m15m30m60m"`
    Options            struct {
        NetworkTimeout int      `json:"network-timeout"`
        PingInterval   int      `json:"ping-interval"`
        MessageTimeout int      `json:"message-timeout"`
        SessionRate    int      `json:"per-session-rate"`
        GlobalRate     int      `json:"global-rate"`
        Pools          []string `json:"pools"`
        ProvidedBy     string   `json:"provided-by"`
    } `json:"options"`
}

func (r relay) String() string {
    return r.URL
}

type request struct {
    relay      *relay
    result     chan result
    queueTimer *prometheus.Timer
}

type result struct {
    err      error
    eviction time.Duration
}

var (
    testCert          tls.Certificate
    knownRelaysFile   = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
    listen            = ":80"
    dir               string
    evictionTime      = time.Hour
    debug             bool
    permRelaysFile    string
    ipHeader          string
    geoipPath         string
    proto             string
    statsRefresh      = time.Minute
    requestQueueLen   = 64
    requestProcessors = 8

    requests chan request

    mut             = sync.NewRWMutex()
    knownRelays     = make([]*relay, 0)
    permanentRelays = make([]*relay, 0)
    evictionTimers  = make(map[string]*time.Timer)
    globalBlocklist = newErrorTracker(1000)
)

const (
    httpStatusEnhanceYourCalm = 429
)

func main() {
    log.SetOutput(os.Stdout)
    log.SetFlags(log.Lshortfile)

    flag.StringVar(&listen, "listen", listen, "Listen address")
    flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem are stored for TLS listening")
    flag.BoolVar(&debug, "debug", debug, "Enable debug output")
    flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
    flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
    flag.StringVar(&knownRelaysFile, "known-relays", knownRelaysFile, "Path to list of current relays")
    flag.StringVar(&ipHeader, "ip-header", "", "Name of the header which holds the client's IP:port. Only meaningful when running behind a reverse proxy.")
    flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
    flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
    flag.DurationVar(&statsRefresh, "stats-refresh", statsRefresh, "Interval at which to refresh relay stats")
    flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
    flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")

    flag.Parse()

    requests = make(chan request, requestQueueLen)

    var listener net.Listener
    var err error

    if permRelaysFile != "" {
        permanentRelays = loadRelays(permRelaysFile)
    }

    testCert = createTestCertificate()

    for i := 0; i < requestProcessors; i++ {
        go requestProcessor()
    }

    // Load relays from the cache in the background.
    // Load them in a serial fashion to make sure any genuine requests
    // are not dropped.
    go func() {
        for _, relay := range loadRelays(knownRelaysFile) {
            resultChan := make(chan result)
            requests <- request{relay, resultChan, nil}
            result := <-resultChan
            if result.err != nil {
                relayTestsTotal.WithLabelValues("failed").Inc()
            } else {
                relayTestsTotal.WithLabelValues("success").Inc()
            }
        }
        // Run the stats refresher once the relays are loaded.
        statsRefresher(statsRefresh)
    }()

    if dir != "" {
        if debug {
            log.Println("Starting TLS listener on", listen)
        }
        certFile, keyFile := filepath.Join(dir, "http-cert.pem"), filepath.Join(dir, "http-key.pem")
        var cert tls.Certificate
        cert, err = tls.LoadX509KeyPair(certFile, keyFile)
        if err != nil {
            log.Fatalln("Failed to load HTTP X509 key pair:", err)
        }

        tlsCfg := &tls.Config{
            Certificates: []tls.Certificate{cert},
            MinVersion:   tls.VersionTLS10, // No SSLv3
            ClientAuth:   tls.RequestClientCert,
            CipherSuites: []uint16{
                // No RC4
                tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
                tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
                tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
                tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
                tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
                tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
                tls.TLS_RSA_WITH_AES_128_CBC_SHA,
                tls.TLS_RSA_WITH_AES_256_CBC_SHA,
                tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
                tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
            },
        }

        listener, err = tls.Listen(proto, listen, tlsCfg)
    } else {
        if debug {
            log.Println("Starting plain listener on", listen)
        }
        listener, err = net.Listen(proto, listen)
    }

    if err != nil {
        log.Fatalln("listen:", err)
    }

    handler := http.NewServeMux()
    handler.HandleFunc("/", handleAssets)
    handler.Handle("/endpoint", httpcache.SinglePath(http.HandlerFunc(handleRequest), 15*time.Second))
    handler.HandleFunc("/metrics", handleMetrics)

    srv := http.Server{
        Handler:     handler,
        ReadTimeout: 10 * time.Second,
    }

    err = srv.Serve(listener)
    if err != nil {
        log.Fatalln("serve:", err)
    }
}

func handleMetrics(w http.ResponseWriter, r *http.Request) {
    timer := prometheus.NewTimer(metricsRequestsSeconds)
    // Acquire the mutex just to make sure we're not caught midway through stats collection
    mut.RLock()
    promhttp.Handler().ServeHTTP(w, r)
    mut.RUnlock()
    timer.ObserveDuration()
}

func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Cache-Control", "no-cache, must-revalidate")
|
||||
|
||||
path := r.URL.Path[1:]
|
||||
if path == "" {
|
||||
path = "index.html"
|
||||
}
|
||||
|
||||
as, ok := auto.Assets()[path]
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
assets.Serve(w, r, as)
|
||||
}
|
||||
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
||||
|
||||
w = NewLoggingResponseWriter(w)
|
||||
defer func() {
|
||||
timer.ObserveDuration()
|
||||
lw := w.(*loggingResponseWriter)
|
||||
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
|
||||
if ipHeader != "" {
|
||||
hdr := r.Header.Get(ipHeader)
|
||||
fields := strings.Split(hdr, ",")
|
||||
if len(fields) > 0 {
|
||||
r.RemoteAddr = strings.TrimSpace(fields[len(fields)-1])
|
||||
}
|
||||
}
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
handlePostRequest(w, r)
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Unhandled HTTP method", r.Method)
|
||||
}
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
mut.RLock()
|
||||
relays := make([]*relay, len(permanentRelays)+len(knownRelays))
|
||||
n := copy(relays, permanentRelays)
|
||||
copy(relays[n:], knownRelays)
|
||||
mut.RUnlock()
|
||||
|
||||
// Shuffle
|
||||
rand.Shuffle(relays)
|
||||
|
||||
_ = json.NewEncoder(rw).Encode(map[string][]*relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
	// Get the IP address of the client
	rhost := r.RemoteAddr
	if host, _, err := net.SplitHostPort(rhost); err == nil {
		rhost = host
	}

	// Check the blocklist. A client is blocked if their last 10 attempts
	// to join have all failed. The "Unauthorized" status return causes
	// strelaysrv to cease attempting to join.
	if globalBlocklist.IsBlocked(rhost) {
		log.Println("Rejected blocked client", rhost)
		http.Error(w, "Too many errors", http.StatusUnauthorized)
		globalBlocklist.ClearErrors(rhost)
		return
	}

	var relayCert *x509.Certificate
	if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
		relayCert = r.TLS.PeerCertificates[0]
		log.Printf("Got TLS cert from relay server")
	}

	var newRelay relay
	err := json.NewDecoder(r.Body).Decode(&newRelay)
	r.Body.Close()

	if err != nil {
		if debug {
			log.Println("Failed to parse payload")
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	uri, err := url.Parse(newRelay.URL)
	if err != nil {
		if debug {
			log.Println("Failed to parse URI", newRelay.URL)
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Canonicalize the URL. In particular, parse and re-encode the query
	// string so that it's guaranteed to be valid.
	uri.RawQuery = uri.Query().Encode()
	newRelay.URL = uri.String()

	if relayCert != nil {
		advertisedId := uri.Query().Get("id")
		idFromCert := protocol.NewDeviceID(relayCert.Raw).String()
		if advertisedId != idFromCert {
			log.Println("Warning: Relay server requested to join with an ID different from the join request, rejecting")
			http.Error(w, "mismatched advertised id and join request cert", http.StatusBadRequest)
			return
		}
	}

	host, port, err := net.SplitHostPort(uri.Host)
	if err != nil {
		if debug {
			log.Println("Failed to split URI", newRelay.URL)
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	ip := net.ParseIP(host)
	// The client did not provide an IP address; use the IP address of the client.
	if ip == nil || ip.IsUnspecified() {
		uri.Host = net.JoinHostPort(rhost, port)
		newRelay.URL = uri.String()
	} else if host != rhost && relayCert == nil {
		if debug {
			log.Println("IP address advertised does not match client IP address", r.RemoteAddr, uri)
		}
		http.Error(w, fmt.Sprintf("IP advertised %s does not match client IP %s", host, rhost), http.StatusUnauthorized)
		return
	}

	newRelay.uri = uri

	for _, current := range permanentRelays {
		if current.uri.Host == newRelay.uri.Host {
			if debug {
				log.Println("Asked to add a relay", newRelay, "which exists in permanent list")
			}
			http.Error(w, "Invalid request", http.StatusBadRequest)
			return
		}
	}

	reschan := make(chan result)

	select {
	case requests <- request{&newRelay, reschan, prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("queue"))}:
		result := <-reschan
		if result.err != nil {
			log.Println("Join from", r.RemoteAddr, "failed:", result.err)
			globalBlocklist.AddError(rhost)
			relayTestsTotal.WithLabelValues("failed").Inc()
			http.Error(w, result.err.Error(), http.StatusBadRequest)
			return
		}
		log.Println("Join from", r.RemoteAddr, "succeeded")
		globalBlocklist.ClearErrors(rhost)
		relayTestsTotal.WithLabelValues("success").Inc()
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		_ = json.NewEncoder(w).Encode(map[string]time.Duration{
			"evictionIn": result.eviction,
		})

	default:
		relayTestsTotal.WithLabelValues("dropped").Inc()
		if debug {
			log.Println("Dropping request")
		}
		w.WriteHeader(httpStatusEnhanceYourCalm)
	}
}
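Seen from the announcing side, the join boils down to a small JSON POST. A hedged sketch (the endpoint, the relay address and the "url" key are assumptions, not taken from strelaysrv itself):

// announce.go: a hedged sketch of the join request this handler accepts.
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func announce(pool, relayURL string) error {
	body := bytes.NewBufferString(fmt.Sprintf(`{"url": %q}`, relayURL))
	resp, err := http.Post(pool, "application/json", body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// 401 when blocked or the advertised IP mismatches; an
		// "enhance your calm" status when the test queue is full.
		return fmt.Errorf("join rejected: %s", resp.Status)
	}
	// On success the body is {"evictionIn": <duration>}, telling the
	// relay how long it has before it is dropped from the pool.
	return nil
}

func main() {
	if err := announce("https://relays.example.com/endpoint", "relay://192.0.2.1:22067/?id=ABC123"); err != nil {
		log.Fatal(err)
	}
}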
func requestProcessor() {
	for request := range requests {
		if request.queueTimer != nil {
			request.queueTimer.ObserveDuration()
		}

		timer := prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("test"))
		handleRelayTest(request)
		timer.ObserveDuration()
	}
}
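handlePostRequest hands each validated join off through the buffered requests channel, and the select's default branch sheds load when no worker is free. The workers themselves are presumably started once at launch, along these lines (the worker count is an assumption, not taken from this file):

	// Hypothetical wiring in main(): a small pool of workers drains the
	// shared requests channel, so join tests run concurrently but bounded.
	for i := 0; i < 4; i++ {
		go requestProcessor()
	}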
func handleRelayTest(request request) {
	if debug {
		log.Println("Request for", request.relay)
	}
	if err := client.TestRelay(context.TODO(), request.relay.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3); err != nil {
		if debug {
			log.Println("Test for relay", request.relay, "failed:", err)
		}
		request.result <- result{err, 0}
		return
	}

	stats := fetchStats(request.relay)
	location := getLocation(request.relay.uri.Host)

	mut.Lock()
	if stats != nil {
		updateMetrics(request.relay.uri.Host, *stats, location)
	}
	request.relay.Stats = stats
	request.relay.StatsRetrieved = time.Now().Truncate(time.Second)
	request.relay.Location = location

	timer, ok := evictionTimers[request.relay.uri.Host]
	if ok {
		if debug {
			log.Println("Stopping existing timer for", request.relay)
		}
		timer.Stop()
	}

	for i, current := range knownRelays {
		if current.uri.Host == request.relay.uri.Host {
			if debug {
				log.Println("Relay", request.relay, "already exists")
			}

			// Evict the old entry anyway, as configuration might have changed.
			last := len(knownRelays) - 1
			knownRelays[i] = knownRelays[last]
			knownRelays = knownRelays[:last]

			goto found
		}
	}

	if debug {
		log.Println("Adding new relay", request.relay)
	}

found:

	knownRelays = append(knownRelays, request.relay)
	evictionTimers[request.relay.uri.Host] = time.AfterFunc(evictionTime, evict(request.relay))

	mut.Unlock()

	if err := saveRelays(knownRelaysFile, knownRelays); err != nil {
		log.Println("Failed to write known relays: " + err.Error())
	}

	request.result <- result{nil, evictionTime}
}
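A successful test (re)arms the eviction timer for the relay's host, so a relay stays listed only while it keeps rejoining within evictionTime. From the relay's side that implies a periodic re-announce, roughly like this sketch (the interval is hypothetical; announce is the helper sketched after handlePostRequest above):

	// Hypothetical re-announce loop: rejoin more often than the pool's
	// eviction timeout so the listing never lapses.
	for {
		if err := announce(pool, relayURL); err != nil {
			log.Println("join failed:", err)
		}
		time.Sleep(30 * time.Minute) // assumed shorter than evictionTime
	}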
func evict(relay *relay) func() {
	return func() {
		mut.Lock()
		defer mut.Unlock()
		if debug {
			log.Println("Evicting", relay)
		}
		for i, current := range knownRelays {
			if current.uri.Host == relay.uri.Host {
				if debug {
					log.Println("Evicted", relay)
				}
				last := len(knownRelays) - 1
				knownRelays[i] = knownRelays[last]
				knownRelays = knownRelays[:last]
				deleteMetrics(current.uri.Host)
			}
		}
		delete(evictionTimers, relay.uri.Host)
	}
}
func loadRelays(file string) []*relay {
	content, err := os.ReadFile(file)
	if err != nil {
		log.Println("Failed to load relays: " + err.Error())
		return nil
	}

	var relays []*relay
	for _, line := range strings.Split(string(content), "\n") {
		if line == "" {
			continue
		}

		uri, err := url.Parse(line)
		if err != nil {
			if debug {
				log.Println("Skipping relay", line, "due to parse error", err)
			}
			continue
		}

		relays = append(relays, &relay{
			URL:      line,
			Location: getLocation(uri.Host),
			uri:      uri,
		})
		if debug {
			log.Println("Adding relay", line)
		}
	}
	return relays
}
func saveRelays(file string, relays []*relay) error {
	var content string
	for _, relay := range relays {
		content += relay.uri.String() + "\n"
	}
	return os.WriteFile(file, []byte(content), 0o777)
}
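saveRelays writes one canonicalized relay URI per line, and loadRelays parses the same format back. A hypothetical knownRelaysFile therefore contains lines such as:

	relay://192.0.2.1:22067/?id=ABC123
	relay://198.51.100.7:22067/?id=DEF456&statusAddr=:8080

(addresses and IDs illustrative).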
func createTestCertificate() tls.Certificate {
	tmpDir, err := os.MkdirTemp("", "relaypoolsrv")
	if err != nil {
		log.Fatal(err)
	}

	certFile, keyFile := filepath.Join(tmpDir, "cert.pem"), filepath.Join(tmpDir, "key.pem")
	cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 20*365)
	if err != nil {
		log.Fatalln("Failed to create test X509 key pair:", err)
	}

	return cert
}
func getLocation(host string) location {
	timer := prometheus.NewTimer(locationLookupSeconds)
	defer timer.ObserveDuration()
	db, err := geoip2.Open(geoipPath)
	if err != nil {
		return location{}
	}
	defer db.Close()

	addr, err := net.ResolveTCPAddr("tcp", host)
	if err != nil {
		return location{}
	}

	city, err := db.City(addr.IP)
	if err != nil {
		return location{}
	}

	return location{
		Longitude: city.Location.Longitude,
		Latitude:  city.Location.Latitude,
		City:      city.City.Names["en"],
		Country:   city.Country.IsoCode,
		Continent: city.Continent.Code,
	}
}
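getLocation fails soft: any error (missing GeoIP database, unresolvable host, lookup failure) yields the zero-valued location, which the rest of the code treats as "unknown". A small usage sketch (the host is illustrative; this assumes location contains only comparable fields, as its literal above suggests):

	loc := getLocation("192.0.2.1:22067") // illustrative host:port
	if loc == (location{}) {
		log.Println("location unknown")
	}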
type loggingResponseWriter struct {
	http.ResponseWriter
	statusCode int
}

func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
	return &loggingResponseWriter{w, http.StatusOK}
}

func (lrw *loggingResponseWriter) WriteHeader(code int) {
	lrw.statusCode = code
	lrw.ResponseWriter.WriteHeader(code)
}
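The wrapper exists so the status code a handler writes can be recovered afterwards, e.g. for access logging. A minimal sketch of wrapping a handler with it (handler and log format illustrative):

	// Hypothetical middleware using the wrapper above.
	func logged(next http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			lrw := NewLoggingResponseWriter(w)
			next(lrw, r)
			log.Println(r.Method, r.URL.Path, lrw.statusCode)
		}
	}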
type errorTracker struct {
	errors *lru.TwoQueueCache[string, *errorCounter]
}

type errorCounter struct {
	count atomic.Int32
}

func newErrorTracker(size int) *errorTracker {
	cache, err := lru.New2Q[string, *errorCounter](size)
	if err != nil {
		panic(err)
	}
	return &errorTracker{
		errors: cache,
	}
}

func (b *errorTracker) AddError(host string) {
	entry, ok := b.errors.Get(host)
	if !ok {
		entry = &errorCounter{}
		b.errors.Add(host, entry)
	}
	c := entry.count.Add(1)
	log.Printf("Error count for %s is now %d", host, c)
}

func (b *errorTracker) ClearErrors(host string) {
	b.errors.Remove(host)
}

func (b *errorTracker) IsBlocked(host string) bool {
	if be, ok := b.errors.Get(host); ok {
		return be.count.Load() > 10
	}
	return false
}
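IsBlocked trips only once more than ten errors have accumulated for a host, and ClearErrors resets the count on any success. A hypothetical demonstration against the types above (needs "fmt"; the host is illustrative):

	func demoBlocklist() {
		bl := newErrorTracker(1000)
		for i := 0; i < 11; i++ {
			bl.AddError("203.0.113.5")
		}
		fmt.Println(bl.IsBlocked("203.0.113.5")) // true: count is now > 10
		bl.ClearErrors("203.0.113.5")
		fmt.Println(bl.IsBlocked("203.0.113.5")) // false: the entry is gone
	}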
@@ -1,94 +0,0 @@
// Copyright © 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http/httptest"
	"net/url"
	"strings"
	"sync"
	"testing"
)

func init() {
	for i := 0; i < 10; i++ {
		u := fmt.Sprintf("permanent%d", i)
		permanentRelays = append(permanentRelays, &relay{URL: u})
	}

	knownRelays = []*relay{
		{URL: "known1"},
		{URL: "known2"},
		{URL: "known3"},
	}

	mut = new(sync.RWMutex)
}

// Regression test: handleGetRequest should not modify permanentRelays.
func TestHandleGetRequest(t *testing.T) {
	needcap := len(permanentRelays) + len(knownRelays)
	if needcap > cap(permanentRelays) {
		t.Fatalf("test setup failed: need cap(permanentRelays) >= %d, have %d",
			needcap, cap(permanentRelays))
	}

	w := httptest.NewRecorder()
	w.Body = new(bytes.Buffer)
	handleGetRequest(w, httptest.NewRequest("GET", "/", nil))

	result := make(map[string][]*relay)
	err := json.NewDecoder(w.Body).Decode(&result)
	if err != nil {
		t.Fatalf("invalid JSON: %v", err)
	}

	relays := result["relays"]
	expect, actual := len(knownRelays)+len(permanentRelays), len(relays)
	if actual != expect {
		t.Errorf("expected %d relays, got %d", expect, actual)
	}

	// Check for changes in permanentRelays.
	for i, r := range permanentRelays {
		switch {
		case !strings.HasPrefix(r.URL, "permanent"):
			t.Errorf("relay %q among permanent relays", r.URL)
		case r.URL != fmt.Sprintf("permanent%d", i):
			t.Error("order of permanent relays changed")
		}
	}
}
func TestCanonicalizeQueryValues(t *testing.T) {
	// This just demonstrates and validates the uri.Parse/String behaviour
	// with regard to query strings.

	in := "http://example.com/?some weird= query^value"
	exp := "http://example.com/?some+weird=+query%5Evalue"

	uri, err := url.Parse(in)
	if err != nil {
		t.Fatal(err)
	}

	str := uri.String()
	if str != in {
		// Just re-encoding the URL doesn't sanitize the query string.
		t.Errorf("expected %q, got %q", in, str)
	}

	uri.RawQuery = uri.Query().Encode()
	str = uri.String()
	if str != exp {
		// The query string is now in canonical form.
		t.Errorf("expected %q, got %q", exp, str)
	}
}
@@ -1,260 +0,0 @@
// Copyright (C) 2018 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).

package main

import (
	"encoding/json"
	"net"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/syncthing/syncthing/lib/sync"
)

func init() {
	processCollectorOpts := collectors.ProcessCollectorOpts{
		Namespace: "syncthing_relaypoolsrv",
		PidFn: func() (int, error) {
			return os.Getpid(), nil
		},
	}

	prometheus.MustRegister(
		collectors.NewProcessCollector(processCollectorOpts),
	)
}

var (
	statusClient = http.Client{
		Timeout: 5 * time.Second,
	}

	apiRequestsTotal   = makeCounter("api_requests_total", "Number of API requests.", "type", "result")
	apiRequestsSeconds = makeSummary("api_requests_seconds", "Latency of API requests.", "type")

	relayTestsTotal         = makeCounter("tests_total", "Number of relay tests.", "result")
	relayTestActionsSeconds = makeSummary("test_actions_seconds", "Latency of relay test actions.", "type")

	locationLookupSeconds = makeSummary("location_lookup_seconds", "Latency of location lookups.").WithLabelValues()

	metricsRequestsSeconds = makeSummary("metrics_requests_seconds", "Latency of metric requests.").WithLabelValues()
	scrapeSeconds          = makeSummary("relay_scrape_seconds", "Latency of metric scrapes from remote relays.", "result")

	relayUptime             = makeGauge("relay_uptime", "Uptime of relay", "relay")
	relayPendingSessionKeys = makeGauge("relay_pending_session_keys", "Number of pending session keys (two keys per session, one per each side of the connection)", "relay")
	relayActiveSessions     = makeGauge("relay_active_sessions", "Number of active sessions; each session involves two parties", "relay")
	relayConnections        = makeGauge("relay_connections", "Number of devices connected to the relay", "relay")
	relayProxies            = makeGauge("relay_proxies", "Number of active proxy routines sending data between peers (two proxies per session, one for each direction)", "relay")
	relayBytesProxied       = makeGauge("relay_bytes_proxied", "Number of bytes proxied by the relay", "relay")
	relayGoRoutines         = makeGauge("relay_go_routines", "Number of Go routines in the process", "relay")
	relaySessionRate        = makeGauge("relay_session_rate", "Rate applied per session", "relay")
	relayGlobalRate         = makeGauge("relay_global_rate", "Global rate applied on the whole relay", "relay")
	relayBuildInfo          = makeGauge("relay_build_info", "Build information about a relay", "relay", "go_version", "go_os", "go_arch")
	relayLocationInfo       = makeGauge("relay_location_info", "Location information about a relay", "relay", "city", "country", "continent")

	lastStats = make(map[string]stats)
)
func makeGauge(name string, help string, labels ...string) *prometheus.GaugeVec {
	gauge := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "syncthing",
			Subsystem: "relaypoolsrv",
			Name:      name,
			Help:      help,
		},
		labels,
	)
	prometheus.MustRegister(gauge)
	return gauge
}

func makeSummary(name string, help string, labels ...string) *prometheus.SummaryVec {
	summary := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace:  "syncthing",
			Subsystem:  "relaypoolsrv",
			Name:       name,
			Help:       help,
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		labels,
	)
	prometheus.MustRegister(summary)
	return summary
}

func makeCounter(name string, help string, labels ...string) *prometheus.CounterVec {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "syncthing",
			Subsystem: "relaypoolsrv",
			Name:      name,
			Help:      help,
		},
		labels,
	)
	prometheus.MustRegister(counter)
	return counter
}
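All three helpers hard-code the "syncthing" namespace and "relaypoolsrv" subsystem, so for example the counter created by makeCounter("tests_total", ..., "result") surfaces in the Prometheus exposition format as:

	syncthing_relaypoolsrv_tests_total{result="success"} 42

(the value is illustrative).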
func statsRefresher(interval time.Duration) {
	ticker := time.NewTicker(interval)
	for range ticker.C {
		refreshStats()
	}
}

type statsFetchResult struct {
	relay *relay
	stats *stats
}

func refreshStats() {
	mut.RLock()
	// Copy into a fresh slice rather than appending to permanentRelays,
	// so we can never scribble into its backing array (the same
	// precaution handleGetRequest takes).
	relays := make([]*relay, 0, len(permanentRelays)+len(knownRelays))
	relays = append(relays, permanentRelays...)
	relays = append(relays, knownRelays...)
	mut.RUnlock()

	now := time.Now()
	wg := sync.NewWaitGroup()

	results := make(chan statsFetchResult, len(relays))
	for _, rel := range relays {
		wg.Add(1)
		go func(rel *relay) {
			t0 := time.Now()
			stats := fetchStats(rel)
			duration := time.Since(t0).Seconds()
			result := "success"
			if stats == nil {
				result = "failed"
			}
			scrapeSeconds.WithLabelValues(result).Observe(duration)

			results <- statsFetchResult{
				relay: rel,
				// Reuse the stats fetched above; fetching again here
				// would scrape every relay twice per refresh.
				stats: stats,
			}
			wg.Done()
		}(rel)
	}

	wg.Wait()
	close(results)

	mut.Lock()
	relayBuildInfo.Reset()
	relayLocationInfo.Reset()
	for result := range results {
		result.relay.StatsRetrieved = now
		result.relay.Stats = result.stats
		if result.stats == nil {
			deleteMetrics(result.relay.uri.Host)
		} else {
			updateMetrics(result.relay.uri.Host, *result.stats, result.relay.Location)
		}
	}
	mut.Unlock()
}
func fetchStats(relay *relay) *stats {
	statusAddr := relay.uri.Query().Get("statusAddr")
	if statusAddr == "" {
		statusAddr = ":22070"
	}

	statusHost, statusPort, err := net.SplitHostPort(statusAddr)
	if err != nil {
		return nil
	}

	if statusHost == "" {
		if host, _, err := net.SplitHostPort(relay.uri.Host); err != nil {
			return nil
		} else {
			statusHost = host
		}
	}

	url := "http://" + net.JoinHostPort(statusHost, statusPort) + "/status"

	response, err := statusClient.Get(url)
	if err != nil {
		return nil
	}
	defer response.Body.Close()

	var stats stats

	if err := json.NewDecoder(response.Body).Decode(&stats); err != nil {
		return nil
	}
	return &stats
}
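For example, a relay announced as relay://203.0.113.9:22067/?statusAddr=:8080 is scraped at http://203.0.113.9:8080/status, while one without a statusAddr parameter is scraped on the default status port, http://203.0.113.9:22070/status (addresses illustrative).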
func updateMetrics(host string, stats stats, location location) {
	if stats.GoVersion != "" || stats.GoOS != "" || stats.GoArch != "" {
		relayBuildInfo.WithLabelValues(host, stats.GoVersion, stats.GoOS, stats.GoArch).Add(1)
	}
	if location.City != "" || location.Country != "" || location.Continent != "" {
		relayLocationInfo.WithLabelValues(host, location.City, location.Country, location.Continent).Add(1)
	}

	if lastStat, ok := lastStats[host]; ok {
		stats = mergeStats(stats, lastStat)
	}

	relayUptime.WithLabelValues(host).Set(float64(stats.UptimeSeconds))
	relayPendingSessionKeys.WithLabelValues(host).Set(float64(stats.PendingSessionKeys))
	relayActiveSessions.WithLabelValues(host).Set(float64(stats.ActiveSessions))
	relayConnections.WithLabelValues(host).Set(float64(stats.Connections))
	relayProxies.WithLabelValues(host).Set(float64(stats.Proxies))
	relayBytesProxied.WithLabelValues(host).Set(float64(stats.BytesProxied))
	relayGoRoutines.WithLabelValues(host).Set(float64(stats.GoRoutines))
	relaySessionRate.WithLabelValues(host).Set(float64(stats.Options.SessionRate))
	relayGlobalRate.WithLabelValues(host).Set(float64(stats.Options.GlobalRate))
	lastStats[host] = stats
}

func deleteMetrics(host string) {
	relayUptime.DeleteLabelValues(host)
	relayPendingSessionKeys.DeleteLabelValues(host)
	relayActiveSessions.DeleteLabelValues(host)
	relayConnections.DeleteLabelValues(host)
	relayProxies.DeleteLabelValues(host)
	relayBytesProxied.DeleteLabelValues(host)
	relayGoRoutines.DeleteLabelValues(host)
	relaySessionRate.DeleteLabelValues(host)
	relayGlobalRate.DeleteLabelValues(host)
	delete(lastStats, host)
}
// Due to some inexplicable behaviour, some of the numbers sometimes travel
// slightly backwards (by less than 1%). This happens between scrapes, which
// are 30s apart, so it can't be a race. Prometheus then assumes a counter
// reset, which causes phenomenal rate spikes. One of the numbers that moves
// backwards is BytesProxied, which atomically increments a counter by the
// value returned by net.Conn.Read(). I don't think that can return a
// negative value, so I have no idea what's going on.
func mergeStats(new stats, old stats) stats {
	new.UptimeSeconds = mergeValue(new.UptimeSeconds, old.UptimeSeconds)
	new.PendingSessionKeys = mergeValue(new.PendingSessionKeys, old.PendingSessionKeys)
	new.ActiveSessions = mergeValue(new.ActiveSessions, old.ActiveSessions)
	new.Connections = mergeValue(new.Connections, old.Connections)
	new.Proxies = mergeValue(new.Proxies, old.Proxies)
	new.BytesProxied = mergeValue(new.BytesProxied, old.BytesProxied)
	new.GoRoutines = mergeValue(new.GoRoutines, old.GoRoutines)
	new.Options.SessionRate = mergeValue(new.Options.SessionRate, old.Options.SessionRate)
	new.Options.GlobalRate = mergeValue(new.Options.GlobalRate, old.Options.GlobalRate)
	return new
}

func mergeValue(new, old int) int {
	if new >= old {
		return new // normal increase
	}
	if float64(new) > 0.99*float64(old) {
		return old // slight backward movement
	}
	return new // reset (relay restart)
}
@@ -1,21 +0,0 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).

package main

import (
	"testing"
)

func TestMerge(t *testing.T) {
	if mergeValue(1001, 1000) != 1001 {
		t.Error("the computer says no")
	}

	if mergeValue(999, 1000) != 1000 {
		t.Error("the computer says no")
	}

	if mergeValue(1, 1000) != 1 {
		t.Error("the computer says no")
	}
}