Compare commits

..

2 Commits

Author SHA1 Message Date
Jakob Borg 725f748b17 Find syncthing binary in $PATH when restarting (fixes #68) 2014-02-17 08:50:55 +01:00
Jakob Borg f3a793ce91 Add peer node sync status in GUI (fixes #46) 2014-02-16 08:30:32 +01:00
1039 changed files with 12269 additions and 251331 deletions


@@ -1,20 +0,0 @@
comment: false
coverage:
range: "40...100"
precision: 1
status:
patch:
default:
informational: true
project:
default:
informational: true
github_checks:
annotations: false
ignore:
- "**.pb.go"
- "**_mocked.go"
- "**/mocks/*"


@@ -1,12 +0,0 @@
version = 1
exclude_patterns = ["**/*.pb.go"]
test_patterns = ["**/*_test.go"]
[[analyzers]]
name = "go"
enabled = true
[analyzers.meta]
import_paths = ["github.com/syncthing/syncthing"]
build_tags = ["noassets"]

8
.gitattributes vendored

@@ -1,8 +0,0 @@
# Text files use LF line endings in this repository
* text=auto
# Except the dependencies, which we leave alone
vendor/** -text=auto
# Diffs on these files are meaningless
*.svg -diff

11
.github/FUNDING.yml vendored

@@ -1,11 +0,0 @@
github: syncthing
custom: "https://syncthing.net/donations/"
# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username
# ko_fi: # Replace with a single Ko-fi username
# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
# liberapay: # Replace with a single Liberapay username
# issuehunt: # Replace with a single IssueHunt username
# otechie: # Replace with a single Otechie username


@@ -1,29 +0,0 @@
name: Feature request
description: File a new feature request
labels: ["enhancement", "needs-triage"]
type: Feature
body:
- type: textarea
id: feature
attributes:
label: Feature description
description: Please describe the behavior you'd like to see.
validations:
required: true
- type: textarea
id: problem-usecase
attributes:
label: Problem or use case
description: Please explain which problem this would solve, or what the use case is for the feature. Keep in mind that it's more likely to be implemented if it's generally useful for a larger number of users.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Alternatives or workarounds
description: Please describe any alternatives or workarounds you have considered and, possibly, rejected.
validations:
required: true


@@ -1,52 +0,0 @@
name: Bug report
description: If you're actually looking for support instead, see "I need help / I have a question".
labels: ["bug", "needs-triage"]
type: Bug
body:
- type: markdown
attributes:
value: |
:no_entry_sign: If you want to report a security issue, please see [our Security Policy](https://syncthing.net/security/) and do not report the issue here.
:interrobang: If you are not sure if there is a bug, but something isn't working right and you need help, please [use the forum](https://forum.syncthing.net/).
- type: textarea
id: what-happened
attributes:
label: What happened?
description: Also tell us, what did you expect to happen, and any steps we might use to reproduce the problem.
placeholder: Tell us what you see!
validations:
required: true
- type: input
id: version
attributes:
label: Syncthing version
description: What version of Syncthing are you running?
placeholder: v1.27.4
validations:
required: true
- type: input
id: platform
attributes:
label: Platform & operating system
description: On what platform(s) are you seeing the problem?
placeholder: Linux arm64
validations:
required: true
- type: input
id: browser
attributes:
label: Browser version
description: If the problem is related to the GUI, describe your browser and version.
placeholder: Safari 17.3.1
- type: textarea
id: logs
attributes:
label: Relevant log output
description: Please copy and paste any relevant log output or crash backtrace. This will be automatically formatted into code, so no need for backticks.
render: shell


@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: I need help / I have a question
url: https://forum.syncthing.net/
about: Ask questions, get support, and discuss with other community members.
- name: Android issues
url: https://github.com/syncthing/syncthing-android/issues/
about: The Android app has its own issue tracker.


@@ -1,27 +0,0 @@
### Purpose
Describe the purpose of this change. If there is an existing issue that is
resolved by this pull request, ensure that the commit subject is of the form
`Some short description (fixes #1234)` where 1234 is the issue number.
### Testing
Describe what testing has been done, and how the reviewer can test the change
if new tests are not included.
### Screenshots
If this is a GUI change, include screenshots of the change. If not, please
feel free to just delete this section.
### Documentation
If this is a user visible change (including API and protocol changes), add a link here
to the corresponding pull request on https://github.com/syncthing/docs or describe
the documentation changes necessary.
## Authorship
Your name and email will be added automatically to the AUTHORS file
based on the commit metadata.

10
.github/SECURITY.md vendored

@@ -1,10 +0,0 @@
## Reporting a Vulnerability
If you believe that you've found a Syncthing-related security vulnerability,
please report it by sending email to the address security@syncthing.net. The
[PGP key for security@syncthing.net
(B683AD7B76CAB013)](https://syncthing.net/security-key.txt) can be used to
send encrypted mail or to verify responses received from that address.
You can read more about Syncthing security at
https://syncthing.net/security/.
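As a hedged illustration of the reporting process described above (the report file name is a placeholder), a vulnerability report could typically be encrypted to that address with GnuPG using the published key:
$ curl -LO https://syncthing.net/security-key.txt
$ gpg --import security-key.txt
$ gpg --encrypt --armor --recipient security@syncthing.net report.txt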


@@ -1,13 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: monthly
open-pull-requests-limit: 10
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: monthly
open-pull-requests-limit: 10

23
.github/labeler.yml vendored

@@ -1,23 +0,0 @@
version: 1
labels:
- label: enhancement
title: ^feat\b
- label: bug
title: ^fix\b
- label: documentation
title: ^docs\b
- label: chore
title: ^chore\b
- label: chore
title: ^refactor\b
- label: build
title: ^build\b
- label: dependencies
title: ^build\(deps\)\b

52
.github/regsync.yml vendored

@@ -1,52 +0,0 @@
version: 1
creds:
- registry: docker.io
user: "{{env \"DOCKERHUB_USERNAME\"}}"
pass: "{{env \"DOCKERHUB_TOKEN\"}}"
defaults:
ratelimit:
min: 100
retry: 1m
parallel: 4
sync:
- source: ghcr.io/syncthing/syncthing
target: docker.io/syncthing/syncthing
type: repository
tags:
allow:
- latest
- rc
- edge
- \d+
- \d+\.\d+
- \d+\.\d+\.\d+
- \d+\.\d+\.\d+-rc\.\d+
- source: ghcr.io/syncthing/relaysrv
target: docker.io/syncthing/relaysrv
type: repository
tags:
allow:
- latest
- rc
- edge
- \d+
- \d+\.\d+
- \d+\.\d+\.\d+
- \d+\.\d+\.\d+-rc\.\d+
- source: ghcr.io/syncthing/discosrv
target: docker.io/syncthing/discosrv
type: repository
tags:
allow:
- latest
- rc
- edge
- \d+
- \d+\.\d+
- \d+\.\d+\.\d+
- \d+\.\d+\.\d+-rc\.\d+

17
.github/release.yml vendored

@@ -1,17 +0,0 @@
changelog:
exclude:
labels:
- dependencies
categories:
- title: Fixes
labels:
- bug
- title: Features
labels:
- enhancement
- title: Other
labels:
- '*'


@@ -1,85 +0,0 @@
name: Build Infrastructure Images
on:
push:
branches:
- infrastructure
- infra-*
env:
GO_VERSION: "~1.25.0"
CGO_ENABLED: "0"
BUILD_USER: docker
BUILD_HOST: github.syncthing.net
permissions:
contents: read
packages: write
jobs:
docker-syncthing:
name: Build and push Docker images
if: github.repository == 'syncthing/syncthing'
runs-on: ubuntu-latest
environment: docker
strategy:
matrix:
pkg:
- stcrashreceiver
- strelaypoolsrv
- stupgrades
- ursrv
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- uses: actions/setup-go@v6
with:
go-version: ${{ env.GO_VERSION }}
check-latest: true
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build binaries
run: |
for arch in arm64 amd64; do
go run build.go -goos linux -goarch "$arch" build ${{ matrix.pkg }}
mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-"$arch"
done
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set Docker tags (all branches)
run: |
tags=docker.io/syncthing/${{ matrix.pkg }}:${{ github.sha }},ghcr.io/syncthing/infra/${{ matrix.pkg }}:${{ github.sha }}
echo "TAGS=$tags" >> $GITHUB_ENV
- name: Set Docker tags (latest)
if: github.ref == 'refs/heads/infrastructure'
run: |
tags=docker.io/syncthing/${{ matrix.pkg }}:latest,ghcr.io/syncthing/infra/${{ matrix.pkg }}:latest,${{ env.TAGS }}
echo "TAGS=$tags" >> $GITHUB_ENV
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile.${{ matrix.pkg }}
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ env.TAGS }}
labels: |
org.opencontainers.image.revision=${{ github.sha }}


@@ -1,18 +0,0 @@
name: Build Syncthing (Nightly)
on:
schedule:
# Run nightly build at 05:00 UTC
- cron: '00 05 * * *'
workflow_dispatch:
permissions:
contents: write
packages: write
jobs:
build-syncthing:
uses: ./.github/workflows/build-syncthing.yaml
# if we only want nightlies to run for specific users:
# if: contains(fromJSON('["syncthing", "calmh"]'), github.repository_owner)
secrets: inherit

File diff suppressed because it is too large

@@ -1,18 +0,0 @@
name: Mirrors
on: [push, delete]
jobs:
codeberg:
name: Mirror to Codeberg
if: github.repository_owner == 'syncthing'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: yesolutions/mirror-action@master
with:
REMOTE: ssh://git@codeberg.org/${{ github.repository }}.git
GIT_SSH_PRIVATE_KEY: ${{ secrets.CODEBERG_PUSH_KEY }}
GIT_SSH_NO_VERIFY_HOST: "true"


@@ -1,20 +0,0 @@
name: Org membership recommendations
on:
workflow_dispatch:
schedule:
- cron: '0 0 1 * *'
jobs:
run-recommendation:
runs-on: ubuntu-latest
name: Check for a recommendation
steps:
- uses: docker://ghcr.io/calmh/github-org-members:latest
env:
GITHUB_ORGANISATION: syncthing
GITHUB_TOKEN: ${{ secrets.GOM_GITHUB_TOKEN }}
GOM_IGNORE_USERS: ${{ secrets.GOM_IGNORE_USERS }}
GOM_ALSO_REPOS: ${{ secrets.GOM_ALSO_REPOS }}


@@ -1,27 +0,0 @@
name: PR metadata
on:
pull_request_target:
types:
- opened
- reopened
- edited
- synchronize
permissions:
contents: read
pull-requests: write
jobs:
#
# Set labels on PRs, which are then used to categorise release notes
#
labels:
name: Set labels
runs-on: ubuntu-latest
steps:
- uses: srvaroa/labeler@v1
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"


@@ -1,60 +0,0 @@
name: Release Syncthing
on:
push:
branches:
- release
- release-rc*
permissions:
contents: write
jobs:
create-release-tag:
name: Create release tag
runs-on: ubuntu-latest
environment: release
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- uses: actions/setup-go@v6
with:
go-version: stable
- name: Determine version to release
run: |
if [[ "$GITHUB_REF_NAME" == "release" ]] ; then
next=$(go run ./script/next-version.go)
else
next=$(go run ./script/next-version.go --pre)
fi
echo "NEXT=$next" >> $GITHUB_ENV
echo "Next version is $next"
prev=$(git describe --exclude "*-*" --abbrev=0)
echo "PREV=$prev" >> $GITHUB_ENV
echo "Previous version is $prev"
- name: Determine release notes
run: |
go run ./script/relnotes.go --new-ver "$NEXT" --branch "$GITHUB_REF_NAME" --prev-ver "$PREV" > notes.md
env:
GITHUB_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- name: Create and push tag
run: |
git config --global user.name 'Syncthing Release Automation'
git config --global user.email 'release@syncthing.net'
git tag -a -F notes.md --cleanup=whitespace "$NEXT"
git push origin "$NEXT"
- name: Trigger the build
uses: benc-uk/workflow-dispatch@v1
with:
workflow: build-syncthing.yaml
ref: refs/tags/${{ env.NEXT }}
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}


@@ -1,22 +0,0 @@
name: Trigger nightly build & release
on:
workflow_dispatch:
schedule:
# Run nightly build at 01:00 UTC
- cron: '00 01 * * *'
jobs:
trigger-nightly:
if: github.repository_owner == 'syncthing'
runs-on: ubuntu-latest
name: Push to release-nightly to trigger build
steps:
- uses: actions/checkout@v5
with:
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
fetch-depth: 0
- run: |
git push origin main:release-nightly


@@ -1,28 +0,0 @@
name: Update translations and documentation
on:
workflow_dispatch:
schedule:
- cron: '42 3 * * 1'
jobs:
update_transifex_docs:
runs-on: ubuntu-latest
name: Update translations and documentation
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- uses: actions/setup-go@v6
with:
go-version: stable
- run: |
set -euo pipefail
git config --global user.name 'Syncthing Release Automation'
git config --global user.email 'release@syncthing.net'
bash build.sh translate
bash build.sh prerelease
git push
env:
WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }}

21
.gitignore vendored

@@ -1,20 +1,3 @@
/syncthing
/stdiscosrv
syncthing
*.tar.gz
*.zip
*.asc
*.deb
*.exe
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
RELEASE
deb
*.bz2
/repos
/proto/scripts/protoc-gen-gosyncthing
/compat.json
dist


@@ -1,96 +0,0 @@
version: "2"
linters:
default: all
disable:
- cyclop
- depguard
- err113
- exhaustive
- exhaustruct
- forbidigo
- funcorder
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- goconst
- gocyclo
- godot
- godox
- gomoddirectives
- inamedparam
- interfacebloat
- ireturn
- lll
- maintidx
- mnd
- musttag
- nestif
- nlreturn
- noinlineerr
- nonamedreturns
- paralleltest
- prealloc
- predeclared
- protogetter
- recvcheck
- revive
- tagalign
- tagliatelle
- testpackage
- usetesting # go 1.24
- varnamelen
- whitespace
- wrapcheck
- wsl
- wsl_v5
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- internal/gen
- internal/db/olddb
- cmd/dev
- repos
- third_party$
- builtin$
- examples$
- _test\.go$
rules:
# relax the slog rules for debug lines, for now
- linters: [sloglint]
source: Debug
# contexts are irrelevant for SQLite
- linters: [noctx]
text: database/sql
# Rollback errors can be ignored
- linters: [errcheck]
source: Rollback
# Embedded fields named in selectors may add clarity
- linters: [staticcheck]
text: QF1008
# Don't necessarily rewrite !(foo || bar) to !foo && !bar
- linters: [staticcheck]
text: QF1001
settings:
sloglint:
context: "scope"
static-msg: true
msg-style: capitalized
key-naming-case: camel
formatters:
enable:
- gofumpt
exclusions:
generated: lax
paths:
- internal/gen
- cmd/dev
- repos
- third_party$
- builtin$
- examples$


@@ -1,111 +0,0 @@
# This is the policy-bot configuration for this repository. It controls
# which approvals are required for any given pull request. The format is
# described at https://github.com/palantir/policy-bot. The syntax of the
# policy can be verified by the bot:
# curl https://pb.syncthing.net/api/validate -X PUT -T .policy.yml
# The policy below is what is required for any pull request.
policy:
approval:
- subject is conventional commit
- or:
- project metadata requires maintainer approval
- a maintainer claims responsibility
- or:
- is approved by a syncthing contributor
- is a translation or dependency update by a contributor
- is a trivial change by a contributor
- a maintainer claims responsibility
# Additionally, maintainers can disapprove of a PR
disapproval:
requires:
teams:
- syncthing/maintainers
# The rules for the policy are described below.
approval_rules:
# All commits (PRs before squashing) should have a valid conventional
# commit type subject.
- name: subject is conventional commit
requires:
conditions:
title:
matches:
- '^(feat|fix|docs|chore|refactor|build): [a-z].+'
- '^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+'
# Changes to important project metadata and documentation, including this
# policy, require signoff by a maintainer
- name: project metadata requires maintainer approval
if:
changed_files:
paths:
- ^[^/]+\.md
- ^\.policy\.yml
- ^LICENSE
requires:
count: 1
teams:
- syncthing/maintainers
options:
ignore_update_merges: true
allow_non_author_contributor: true
# Regular pull requests require approval by an active contributor
- name: is approved by a syncthing contributor
requires:
count: 1
teams:
- syncthing/contributors
options:
ignore_update_merges: true
allow_non_author_contributor: true
# Changes to some files (translations, dependencies, compatibility) do not
# require approval if they were proposed by a contributor and have a
# matching commit subject
- name: is a translation or dependency update by a contributor
if:
only_changed_files:
paths:
- ^gui/default/assets/lang/
- ^go\.mod$
- ^go\.sum$
- ^compat\.yaml$
title:
matches:
- '^chore\(gui\):'
- '^build\(deps\):'
- '^build\(compat\):'
has_author_in:
teams:
- syncthing/contributors
# If the change is small and the label "trivial" is added, we accept that
# on trust. These PRs can be audited after the fact as appropriate.
# Features are not trivial.
- name: is a trivial change by a contributor
if:
modified_lines:
total: "< 25"
title:
not_matches:
- '^feat'
has_labels:
- trivial
has_author_in:
teams:
- syncthing/contributors
# A member of the maintainers group can take responsibility by adding the
# appropriate label.
- name: a maintainer claims responsibility
if:
has_labels:
- maintainer-responsibility
has_author_in:
teams:
- syncthing/maintainers


@@ -1,4 +0,0 @@
line_ending: lf
formatter:
type: basic
retain_line_breaks: true

322
AUTHORS

@@ -1,322 +0,0 @@
# This is the official list of Syncthing authors for copyright purposes.
#
# THIS FILE IS MOSTLY AUTO GENERATED. IF YOU'VE MADE A COMMIT TO THE
# REPOSITORY YOU WILL BE ADDED HERE AUTOMATICALLY WITHOUT THE NEED FOR
# ANY MANUAL ACTION.
#
# That said, you are welcome to correct your name or add a nickname / GitHub
# user name as appropriate. The format is:
#
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
#
# The in-GUI authors list is periodically automatically updated from the
# contents of this file.
#
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net> <jborg@coreweave.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
Simon Frei (imsodin) <freisim93@gmail.com>
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
Antony Male (canton7) <antony.male@gmail.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
bt90 <btom1990@googlemail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Emil Lundberg <emil@emlun.se>
Eric P <eric@kastelo.net>
Evgeny Kuznetsov <evgeny@kuznetsov.md>
greatroar <61184462+greatroar@users.noreply.github.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Lode Hoste (Zillode) <zillode@zillode.be>
Marcus B Spencer <marcus@marcusspencer.xyz> <marcus@marcusspencer.us>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Ross Smith II (rasa) <ross@smithii.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Tommy van der Vorst <tommy-github@pixelspark.nl> <tommy@pixelspark.nl>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com> <adam@proactiveservices.co.uk>
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
Aleksey Vasenev <margtu-fivt@ya.ru>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alex Ionescu <github@ionescu.sh>
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
Alex Xu <alex.hello71@gmail.com>
Alexander Seiler <seileralex@gmail.com>
Alexandre Alves <alexandrealvesdb.contact@gmail.com>
Aman Gupta <aman@tmm1.net>
Andreas Sommer <andreas.sommer87@googlemail.com>
andresvia <andres.via@gmail.com>
Andrew Rabert (nvllsvm) <ar@nullsum.net> <6550543+nvllsvm@users.noreply.github.com>
Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github.com>
andyleap <andyleap@gmail.com>
Anjan Momi <anjan@momi.ca>
Anthony Goeckner <agoeckner@users.noreply.github.com>
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
Anur <anurnomeru@163.com>
Aranjedeath <Aranjedeath@users.noreply.github.com>
ardevd <ardevd@users.noreply.github.com>
Arkadiusz Tymiński <gevleeog@gmail.com>
Aroun <login@b-vo.fr>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Artur Zubilewicz <AkaZecik@users.noreply.github.com>
Ashish Bhate <bhate.ashish@gmail.com>
Aurélien Rainone <476650+arl@users.noreply.github.com>
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com>
Beat Reichenbach <44111292+beatreichenbach@users.noreply.github.com>
Ben Shepherd (benshep) <bjashepherd@gmail.com>
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
Benedikt Heine (bebehei) <bebe@bebehei.de>
Benno Fünfstück <benno.fuenfstueck@gmail.com>
Benny Ng (tpng) <benny.tpng@gmail.com>
boomsquared <54829195+boomsquared@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn>
Boris Rybalkin <ribalkin@gmail.com>
Brendan Long (brendanlong) <self@brendanlong.com>
Catfriend1 <16361913+Catfriend1@users.noreply.github.com>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
Cedric Staniewski (xduugu) <cedric@gmx.ca>
Chih-Hsuan Yen <yan12125@gmail.com> <1937689+yan12125@users.noreply.github.com>
Choongkyu <choongkyu.kim+gh@gmail.com> <vapidlyrapid+gh@gmail.com>
Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy>
Christian Kujau <ckujau@users.noreply.github.com>
Christian Prescott <me@christianprescott.com>
chucic <chucic@seznam.cz>
cjc7373 <niuchangcun@gmail.com>
Colin Kennedy (moshen) <moshen.colin@gmail.com>
Cromefire_ <tim.l@nghorst.net> <26320625+cromefire@users.noreply.github.com>
Cyprien Devillez <cypx@users.noreply.github.com>
d-volution <49024624+d-volution@users.noreply.github.com>
Dan <benda.daniel@gmail.com>
Daniel Barczyk <46358936+DanielBarczyk@users.noreply.github.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc>
Daniel Padrta <64928366+danpadcz@users.noreply.github.com>
Daniil Gentili <daniil@daniil.it>
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
dashangcun <907225865@qq.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
DeflateAwning <11021263+DeflateAwning@users.noreply.github.com>
Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io>
derekriemer <derek.riemer@colorado.edu>
DerRockWolf <50499906+DerRockWolf@users.noreply.github.com>
desbma <desbma@users.noreply.github.com>
Devon G. Redekopp <devon@redekopp.com>
digital <didev@dinid.net>
Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
domain <32405309+szu17dmy@users.noreply.github.com>
Domenic Horner <domenic@tgxn.net>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Elliot Huffman <thelich2@gmail.com>
Emil Hessman (ceh) <emil@hessman.se>
Eng Zer Jun <engzerjun@gmail.com>
entity0xfe <109791748+entity0xfe@users.noreply.github.com> <entity0xfe@my.domain>
Eric Lesiuta <elesiuta@gmail.com>
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Evan Spensley <94762716+0evan@users.noreply.github.com>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix <53702818+f-eliks@users.noreply.github.com>
Felix Ableitner (Nutomic) <me@nutomic.com>
Felix Lampe <mail@flampe.de>
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
Frank Isemann (fti7) <frank@isemann.name>
Gahl Saraf <saraf.gahl@gmail.com> <gahl@raftt.io>
georgespatton <georgespatton@users.noreply.github.com>
ghjklw <malo@jaffre.info>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Gleb Sinyavskiy <zhulik.gleb@gmail.com>
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
Greg <gco@jazzhaiku.com>
guangwu <guoguangwu@magic-shield.com>
gudvinr <gudvinr@gmail.com>
Gusted <postmaster@gusted.xyz> <williamzijl7@hotmail.com>
Han Boetes <han@boetes.org>
HansK-p <42314815+HansK-p@users.noreply.github.com>
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
Hazem Krimi <me@hazemkrimi.tech>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Hireworks <129852174+hireworksltd@users.noreply.github.com>
Hugo Locurcio <hugo.locurcio@hugo.pro>
Iain Barnett <iainspeed@gmail.com>
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
ignacy123 <ignacy.buczek@onet.pl>
Iskander Sharipov (Alex) <quasilyte@gmail.com>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jack Croft <jccroft1@users.noreply.github.com>
Jacob <jyundt@gmail.com>
Jake Peterson (acogdev) <jake@acogdev.com>
James O'Beirne <wild-github@au92.org>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Lichtblau <svetlemodry@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
Jaspitta <ste.scarpitta@gmail.com>
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
Jaya Kumar <jaya.kumar@ict.nl>
Jeffery To <jeffery.to@gmail.com>
jelle van der Waa <jelle@vdwaa.nl>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss (seehuhn) <voss@seehuhn.de>
Johan Vromans (sciurius) <jvromans@squirrel.nl>
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
Jonas Thelemann <e-mail@jonas-thelemann.de>
Jonathan <artback@protonmail.com> <jonagn@gmail.com>
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
jtagcat <git-514635f7@jtag.cat> <git-12dbd862@jtag.cat>
Julian Lehrhuber <jul13579@users.noreply.github.com>
Jörg Thalheim <Mic92@users.noreply.github.com>
Jędrzej Kula <kula.jedrek@gmail.com>
Kapil Sareen <kapilsareen584@gmail.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kebin Liu <lkebin@gmail.com>
Keith Harrison <keithh@protonmail.com>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>
Kevin Bushiri (keevBush) <keevbush@gmail.com> <36192217+keevBush@users.noreply.github.com>
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
klemens <ka7@github.com>
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
kylosus <33132401+kylosus@users.noreply.github.com>
Lars Lehtonen <lars.lehtonen@gmail.com>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Leo Arias (elopio) <yo@elopio.net>
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
LSmithx2 <42276854+lsmithx2@users.noreply.github.com>
Lukas Lihotzki <lukas@lihotzki.de>
Luke Hamburg <1992842+luckman212@users.noreply.github.com>
luzpaz <luzpaz@users.noreply.github.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marcel Meyer <mm.marcelmeyer@gmail.com>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
Marcus Legendre <marcus.legendre@gmail.com>
Mario Majila <mariustshipichik@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au>
Martchus <martchus@gmx.net>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Mateusz Ż <thedead4fun@live.com>
mathias4833 <67101597+mathias4833@users.noreply.github.com>
Matic Potočnik <hairyfotr@gmail.com>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
Matteo Ruina <matteo.ruina@gmail.com>
Maurizio Tomasi <ziotom78@gmail.com>
Max <github@germancoding.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
MaximAL <almaximal@ya.ru>
Maximilian <maxi.rostock@outlook.de> <public@complexvector.space>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Rienstra <mrienstra@gmail.com>
MichaIng <micha@dietpi.com>
Migelo <miha@filetki.si>
Mike Boone <mike@boonedocks.net>
MikeLund <MikeLund@users.noreply.github.com>
MikolajTwarog <43782609+MikolajTwarog@users.noreply.github.com>
Mingxuan Lin <gdlmx@users.noreply.github.com>
mv1005 <49659413+mv1005@users.noreply.github.com>
Nate Morrison (nrm21) <natemorrison@gmail.com>
nf <nf@wh3rd.net>
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
Nick Busey <NickBusey@users.noreply.github.com>
Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com>
Nicolas Braud-Santoni <nicolas@braud-santoni.eu>
Nicolas Perraut <n.perraut@gmail.com>
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
Nils Jakobi (thunderstorm99) <jakobi.nils@gmail.com>
NinoM4ster <ninom4ster@gmail.com>
Nitroretro <43112364+Nitroretro@users.noreply.github.com>
NoLooseEnds <jon.koslung@gmail.com>
Oliver Freyermuth <o.freyermuth@googlemail.com>
orangekame3 <miya.org.0309@gmail.com>
otbutz <tbutz@optitool.de>
overkill <22098433+0verk1ll@users.noreply.github.com>
Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
Pablo <pbaeyens31+github@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Paul Brit <paulbrit44@gmail.com>
Paul Donald <newtwen+github@gmail.com>
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
perewa <cavalcante.ten@gmail.com>
Peter Badida <KeyWeeUsr@users.noreply.github.com>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Hoeg (peterhoeg) <peter@speartail.com>
Peter Marquardt (wwwutz) <wwwutz@gmail.com> <wwwutz@googlemail.com>
Phani Rithvij <phanirithvij2000@gmail.com>
Phil Davis <phil.davis@inf.org>
Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
polyfloyd <polyfloyd@users.noreply.github.com>
pullmerge <166967364+pullmerge@users.noreply.github.com>
Quentin Hibon <qh.public@yahoo.com>
Rahmi Pruitt <rjpruitt16@gmail.com>
red_led <red-led@users.noreply.github.com>
Robert Carosi (nov1n) <robert@carosi.nl>
Roberto Santalla <roobre@users.noreply.github.com>
Robin Schoonover <robin@cornhooves.org>
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
rubenbe <github-com-00ff86@vandamme.email>
Ruslan Yevdokymov <38809160+ruslanye@users.noreply.github.com>
Ryan Qian <i@bitbili.net>
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
Scott Klupfel (kluppy) <kluppy@going2blue.com>
sec65 <106604020+sec65@users.noreply.github.com>
Sergey Mishin (ralder) <ralder@yandex.ru>
Sertonix <83883937+Sertonix@users.noreply.github.com>
Severin von Wnuck-Lipinski <ss7@live.de>
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
Simon Mwepu <simonmwepu@gmail.com>
Simon Pickup <simon@pickupinfinity.com>
Sly_tom_cat <slytomcat@mail.ru>
Sonu Kumar Saw <31889738+dev-saw99@users.noreply.github.com>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Sven Bachmann <dev@mcbachmann.de>
Sébastien WENSKE <sebastien@wenske.fr>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Terrance <git@terrance.allofti.me>
TheCreeper <TheCreeper@users.noreply.github.com>
Thomas <9749173+uhthomas@users.noreply.github.com>
Thomas Hipp <thomashipp@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tobias Frölich <40638719+tobifroe@users.noreply.github.com>
Tobias Klauser <tobias.klauser@gmail.com>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tom Jakubowski <tom@crystae.net>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Tyler Kropp <kropptyler@gmail.com>
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
vapatel2 <149737089+vapatel2@users.noreply.github.com>
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
Victor Buinsky (buinsky) <vix_booja@tut.by>
Vik <63919734+ViktorOn@users.noreply.github.com>
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
villekalliomaki <53118179+villekalliomaki@users.noreply.github.com>
Vladimir Rusinov <vrusinov@google.com> <vladimir.rusinov@gmail.com>
wangguoliang <liangcszzu@163.com>
WangXi <xib1102@icloud.com>
Will Rouesnel <wrouesnel@wrouesnel.com>
William A. Kennington III (wkennington) <william@wkennington.com>
wouter bolsterlee <wouter@bolsterl.ee>
xarx00 <xarx00@users.noreply.github.com>
Xavier O. (damajor) <damajor@gmail.com>
xjtdy888 (xjtdy888) <xjtdy888@163.com> <xjtdy888@gmail.com>
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
yparitcher <y@paritcher.com>
佛跳墙 <daoquan@qq.com>
落心 <luoxin.ttt@gmail.com>


@@ -1,73 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at security@syncthing.net. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org


@@ -1,208 +1,22 @@
## Reporting Bugs
Please file bugs in the [GitHub Issue
Tracker](https://github.com/syncthing/syncthing/issues). Include at
least the following:
- What happened
- What did you expect to happen instead of what *did* happen, if it's
not crazy obvious
- What operating system, operating system version and version of
Syncthing you are running
- The same for other connected devices, where relevant
- Screenshot if the issue concerns something visible in the GUI
- Console log entries, where possible and relevant
If you're not sure whether something is relevant, erring on the side of
too much information will never get you yelled at. :)
## Contributing Translations
All translations are done via
[Weblate](https://hosted.weblate.org/projects/syncthing/). If you wish
to contribute to a translation, just head over there and sign up.
Before every release, the language resources are updated from the
latest info on Weblate.
Note that the previously used service at
[Transifex](https://www.transifex.com/projects/p/syncthing/) is being
retired and we kindly ask you to sign up on Weblate for continued
involvement.
## Contributing Documentation
Updates to the [documentation site](https://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).
## Contributing Code
Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! Here's a short rundown of
what you need to keep in mind:
- Don't worry. You are not expected to get everything right on the first
attempt, we'll guide you through it.
- Make sure there is an
[issue](https://github.com/syncthing/syncthing/issues) that describes the
change you want to do. If the thing you want to do does not have an issue
yet, please file one before starting work on it.
- Fork the repository and make your changes in a new branch. Once it's ready
for review, create a pull request.
### Authorship
All code authors are listed in the AUTHORS file. When your first pull
request is accepted your details are added to the AUTHORS file and the list
of authors in the GUI. Commits must be made with the same name and email as
listed in the AUTHORS file. To accomplish this, ensure that your git
configuration is set correctly prior to making your first commit:
$ git config --global user.name "Jane Doe"
$ git config --global user.email janedoe@example.com
You must be reachable on the given email address. If you do not wish to use
your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.
### The Developer Certificate of Origin (DCO)
The Syncthing project requires the Developer Certificate of Origin (DCO)
sign-off on pull requests (PRs). This means that all commit messages must
contain a signature line to indicate that the developer accepts the DCO.
The DCO is a lightweight way for contributors to certify that they wrote (or
otherwise have the right to submit) the code and changes they are
contributing to the project. Here is the full [text of the
DCO](https://developercertificate.org):
---
By making a contribution to this project, I certify that:
1. The contribution was created in whole or in part by me and I have the
right to submit it under the open source license indicated in the file;
or
2. The contribution is based upon previous work that, to the best of my
knowledge, is covered under an appropriate open source license and I have
the right under that license to submit that work with modifications,
whether created in whole or in part by me, under the same open source
license (unless I am permitted to submit under a different license), as
indicated in the file; or
3. The contribution was provided directly to me by some other person who
certified (1), (2) or (3) and I have not modified it.
4. I understand and agree that this project and the contribution are public
and that a record of the contribution (including all personal information
I submit with it, including my sign-off) is maintained indefinitely and
may be redistributed consistent with this project or the open source
license(s) involved.
---
Contributors indicate that they adhere to these requirements by adding
a `Signed-off-by` line to their commit messages. For example:
This is my commit message
Signed-off-by: Random J Developer <random@developer.example.org>
The name and email address in this line must match those of the committing
author, and be the same as what you want in the AUTHORS file as per above.
### Coding Style
#### General
- All text files use Unix line endings. The git settings already present in
the repository attempt to enforce this.
- When making changes, follow the brace and parenthesis style of the
surrounding code.
#### Go Specific
- Follow the conventions laid out in [Effective
Go](https://go.dev/doc/effective_go) as much as makes sense. The review
guidelines in [Go Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments) should
generally be followed.
- Each commit should be `go fmt` clean.
- Imports are grouped per `goimports` standard; that is, standard
library first, then third party libraries after a blank line.
### Commits
- Commit messages (and pull request titles) should follow the [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/) specification and
be in lower case.
- We use a scope description in the commit message subject. This is the
component of Syncthing that the commit affects. For example, `gui`,
`protocol`, `scanner`, `upnp`, etc -- typically, the part after
`internal/`, `lib/` or `cmd/` in the package path. If the commit doesn't
affect a specific component, such as for changes to the build system or
documentation, the scope should be omitted. The same goes for changes that
affect many components which would be cumbersome to list.
- Commits that resolve an existing issue must include the issue number
as `(fixes #123)` at the end of the commit message subject. A correctly
formatted commit message subject looks like this:
feat(dialer): add env var to disable proxy fallback (fixes #3006)
- If the commit message subject doesn't say it all, one or more paragraphs of
describing text should be added to the commit message. This should explain
why the change is made and what it accomplishes.
- When drafting a pull request, please feel free to add commits with
corrections and merge from `main` when necessary. This provides a clear time
line with changes and simplifies review. Do not, in general, rebase your
commits, as this makes review harder.
- Pull requests are merged to `main` using squash merge. The "stream of
consciousness" set of commits described in the previous point will be reduced
to a single commit at merge time. The pull request title and description will
be used as the commit message.
### Tests
Yes please, do add tests when adding features or fixing bugs. Also, when a
pull request is filed a number of automatic tests are run on the code. This
includes:
- That the code actually builds and the test suite passes.
- That the code is correctly formatted (`go fmt`).
- That the commits are based on a reasonably recent `main`.
- That the output from `go lint` and `go vet` is clean. (This checks for a
number of potential problems the compiler doesn't catch.)
## Licensing
All contributions are made available under the same license as the already
existing material being contributed to. For most of the project and unless
otherwise stated this means MPLv2, but there are exceptions:
- Certain commands (under cmd/...) may have a separate license, indicated by
the presence of a LICENSE file in the corresponding directory.
- The documentation (man/...) is licensed under the Creative Commons
Attribution 4.0 International License.
Regardless of the license in effect, you retain the copyright to your
contribution.

Please do contribute!
## Building
[See the wiki](https://github.com/calmh/syncthing/wiki/Building)
## Tests
Yes please!
## Style
`go fmt`
## Documentation
[Hack it here](https://github.com/calmh/syncthing/wiki)
## License
MIT
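As a hedged illustration of the authorship, sign-off, and commit-subject conventions described in the longer CONTRIBUTING text above (the scope and issue number here are invented), a commit would typically be created with git's `-s` flag, which appends the `Signed-off-by:` line from the configured `user.name` and `user.email`:
$ git commit -s -m "fix(scanner): handle empty directories (fixes #1234)"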


@@ -1,57 +0,0 @@
ARG GOVERSION=latest
#
# Maybe build Syncthing. This is a bit ugly as we can't make an entire
# section of the Dockerfile conditional, so we end up always pulling the
# golang image as builder. Then we check if the executable we need already
# exists (pre-built) otherwise we build it.
#
FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH
WORKDIR /src
COPY . .
ENV CGO_ENABLED=0
RUN if [ ! -f syncthing-linux-$TARGETARCH ] ; then \
go run build.go -no-upgrade build syncthing ; \
mv syncthing syncthing-linux-$TARGETARCH ; \
fi
#
# The rest of the Dockerfile uses the binary from the builder, prebuilt or
# not.
#
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing"
EXPOSE 8384 22000/tcp 22000/udp 21027/udp
VOLUME ["/var/syncthing"]
RUN apk add --no-cache ca-certificates curl libcap su-exec tzdata
COPY --from=builder /src/syncthing-linux-$TARGETARCH /bin/syncthing
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/syncthing
HEALTHCHECK --interval=1m --timeout=10s \
CMD curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
ENV STGUIADDRESS=0.0.0.0:8384
ENV STHOMEDIR=/var/syncthing/config
RUN chmod 755 /bin/entrypoint.sh
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing"]
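As a sketch of how the conditional build described in this Dockerfile's comments is typically exercised (the image tag and Go version are invented examples): when no prebuilt syncthing-linux-<arch> binary exists in the build context, the builder stage compiles one with `go run build.go`; workflows that copy prebuilt binaries into the context skip that step.
$ docker build --build-arg GOVERSION=1.23 -t syncthing:local .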


@@ -1,17 +0,0 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Builder"
# FPM to build Debian packages
RUN apt-get update && apt-get install -y --no-install-recommends \
locales rubygems ruby-dev build-essential git \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* \
&& gem install fpm


@@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Crash Receiver"
EXPOSE 8080
COPY stcrashreceiver-linux-${TARGETARCH} /bin/stcrashreceiver
ENTRYPOINT [ "/bin/stcrashreceiver" ]


@@ -1,42 +0,0 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH
WORKDIR /src
COPY . .
ENV CGO_ENABLED=0
RUN if [ ! -f stdiscosrv-linux-$TARGETARCH ] ; then \
go run build.go -no-upgrade build stdiscosrv ; \
mv stdiscosrv stdiscosrv-linux-$TARGETARCH ; \
fi
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Discovery Server"
EXPOSE 19200 8443
VOLUME ["/var/stdiscosrv"]
RUN apk add --no-cache ca-certificates su-exec
COPY --from=builder /src/stdiscosrv-linux-$TARGETARCH /bin/stdiscosrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/stdiscosrv
HEALTHCHECK --interval=1m --timeout=10s \
CMD nc -z localhost 8443 || exit 1
WORKDIR /var/stdiscosrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/stdiscosrv"]


@@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Relay Pool Server"
EXPOSE 8080
COPY strelaypoolsrv-linux-${TARGETARCH} /bin/strelaypoolsrv
ENTRYPOINT ["/bin/strelaypoolsrv", "-listen", ":8080"]


@@ -1,42 +0,0 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder
ARG BUILD_USER
ARG BUILD_HOST
ARG TARGETARCH
WORKDIR /src
COPY . .
ENV CGO_ENABLED=0
RUN if [ ! -f strelaysrv-linux-$TARGETARCH ] ; then \
go run build.go -no-upgrade build strelaysrv ; \
mv strelaysrv strelaysrv-linux-$TARGETARCH ; \
fi
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Relay Server"
EXPOSE 22067 22070
VOLUME ["/var/strelaysrv"]
RUN apk add --no-cache ca-certificates su-exec
COPY --from=builder /src/strelaysrv-linux-$TARGETARCH /bin/strelaysrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/strelaysrv
HEALTHCHECK --interval=1m --timeout=10s \
CMD nc -z localhost 22067 || exit 1
WORKDIR /var/strelaysrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/strelaysrv"]


@@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Upgrades"
EXPOSE 8080
COPY stupgrades-linux-${TARGETARCH} /bin/stupgrades
ENTRYPOINT [ "/bin/stupgrades" ]


@@ -1,16 +0,0 @@
FROM alpine
ARG TARGETARCH
LABEL org.opencontainers.image.authors="The Syncthing Project" \
org.opencontainers.image.url="https://syncthing.net" \
org.opencontainers.image.documentation="https://docs.syncthing.net" \
org.opencontainers.image.source="https://github.com/syncthing/syncthing" \
org.opencontainers.image.vendor="The Syncthing Project" \
org.opencontainers.image.licenses="MPL-2.0" \
org.opencontainers.image.title="Syncthing Usage Reporting Server"
EXPOSE 8080
COPY ursrv-linux-${TARGETARCH} /bin/ursrv
ENTRYPOINT [ "/bin/ursrv" ]


@@ -1,83 +0,0 @@
# The Syncthing Goals
Syncthing is a **continuous file synchronization program**. It synchronizes
files between two or more computers. We strive to fulfill the goals below.
The goals are listed in order of importance, the most important one being
the first.
> "Syncing files" here is precise. It means we specifically exclude things
> that are not files - calendar items, instant messages, and so on. If those
> are in fact stored as files on disk, they can of course be synced as
> files.
Syncthing should be:
### 1. Safe From Data Loss
Protecting the user's data is paramount. We take every reasonable precaution
to avoid corrupting the user's files.
> This is the overriding goal, without which synchronizing files becomes
> pointless. This means that we do not make unsafe trade offs for the sake
> of performance or, in some cases, even usability.
### 2. Secure Against Attackers
Again, protecting the user's data is paramount. Regardless of our other
goals, we must never allow the user's data to be susceptible to eavesdropping
or modification by unauthorized parties.
> This should be understood in context. It is not necessarily reasonable to
> expect Syncthing to be resistant against well equipped state level
> attackers. We will, however, do our best. Note also that this is different
> from anonymity which is not, currently, a goal.
### 3. Easy to Use
Syncthing should be approachable, understandable, and inclusive.
> Complex concepts and maths form the base of Syncthing's functionality.
> This should nonetheless be abstracted or hidden to a degree where
> Syncthing is usable by the general public.
### 4. Automatic
User interaction should be required only when absolutely necessary.
> Specifically this means that changes to files are picked up without
> prompting, conflicts are resolved without prompting and connections are
> maintained without prompting. We only prompt the user when it is required
> to fulfill one of the (overriding) Secure, Safe or Easy goals.
### 5. Universally Available
Syncthing should run on every common computer. We are mindful that the
latest technology is not always available to every individual.
> Computers include desktops, laptops, servers, virtual machines, small
> general purpose computers such as Raspberry Pis and, *where possible*,
> tablets and phones. NAS appliances, toasters, cars, firearms, thermostats,
> and so on may include computing capabilities but it is not our goal for
> Syncthing to run smoothly on these devices.
### 6. For Individuals
Syncthing is primarily about empowering the individual user with safe,
secure, and easy to use file synchronization.
> We acknowledge that it's also useful in an enterprise setting and include
> functionality to support that. If this is in conflict with the
> requirements of the individual, those will however take priority.
### 7. Everything Else
There are many things we care about that don't make it on to the list. It is
fine to optimize for these values as well, as long as they are not in
conflict with the stated goals above.
> For example, performance is a thing we care about. We just don't care more
> about it than safety, security, etc. Maintainability of the code base and
> providing entertainment value for the maintainers are also things that
> matter. It is understood that there are aspects of Syncthing that are
> suboptimal or even in opposition with the goals above. However, we
> continuously strive to align Syncthing more and more with these goals.

392
LICENSE

@@ -1,373 +1,19 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
Copyright (C) 2013 Jakob Borg
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,78 +0,0 @@
# Docker Container for Syncthing
Use the Dockerfile in this repo, or pull the `syncthing/syncthing` image
from Docker Hub.
Use the `/var/syncthing` volume to have the synchronized files available on the
host. You can add more folders and map them as you prefer.
Note that Syncthing runs as UID 1000 and GID 1000 by default. These may be
altered with the `PUID` and `PGID` environment variables. In addition,
the name of the Syncthing instance can optionally be set with the
`--hostname=syncthing` parameter.
To grant Syncthing additional capabilities without running as root, use the
`PCAP` environment variable with the same syntax as that for `setcap(8)`.
For example, `PCAP=cap_chown,cap_fowner+ep`.
To set a different umask value, use the `UMASK` environment variable, for
example `UMASK=002`.
## Example Usage
**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run --network=host -e STGUIADDRESS= \
-v /wherever/st-sync:/var/syncthing \
syncthing/syncthing:latest
```
**Docker compose**
```yml
---
version: "3"
services:
syncthing:
image: syncthing/syncthing
container_name: syncthing
hostname: my-syncthing
environment:
- PUID=1000
- PGID=1000
- STGUIADDRESS=
volumes:
- /wherever/st-sync:/var/syncthing
network_mode: host
restart: unless-stopped
healthcheck:
test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
interval: 1m
timeout: 10s
retries: 3
```
## Discovery
Please note that Docker's default network mode prevents local IP addresses
from being discovered, as Syncthing can only see the internal IP address of
the container on the `172.17.0.0/16` subnet. This would likely break the ability
for nodes to establish LAN connections properly, resulting in poor transfer
rates unless local device addresses are configured manually.
It is therefore strongly recommended to stick to the [host network mode](https://docs.docker.com/network/host/),
as shown above.
Be aware that Syncthing alone is then in control of which interfaces and ports it
listens on. You can edit the Syncthing configuration to change the defaults if
there are conflicts.
## GUI Security
By default Syncthing inside the Docker image listens on `0.0.0.0:8384`. This
allows GUI connections when running without host network mode. The example
above unsets the `STGUIADDRESS` environment variable to have Syncthing fall
back to listening on what has been configured in the configuration file or the
GUI settings dialog. By default this is the localhost IP address `127.0.0.1`.
If you configure your GUI to be externally reachable, make sure you set up
authentication and enable TLS.

128
README.md

@@ -1,110 +1,38 @@
[![Syncthing][14]][15]
syncthing
=========
---
This is `syncthing`, an open BitTorrent Sync alternative. It is
currently far from ready for mass consumption, but it is a usable proof
of concept and tech demo. The following are the project goals:
[![MPLv2 License](https://img.shields.io/badge/license-MPLv2-blue.svg?style=flat-square)](https://www.mozilla.org/MPL/2.0/)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/88/badge)](https://bestpractices.coreinfrastructure.org/projects/88)
[![Go Report Card](https://goreportcard.com/badge/github.com/syncthing/syncthing)](https://goreportcard.com/report/github.com/syncthing/syncthing)
1. Define an open, secure, language neutral protocol usable for
efficient synchronization of a file repository between an arbitrary
number of nodes. This is the [Block Exchange
Protocol](https://github.com/calmh/syncthing/blob/master/protocol/PROTOCOL.md)
(BEP).
## Goals
2. Provide the reference implementation to demonstrate the usability of
said protocol. This is the `syncthing` utility.
Syncthing is a **continuous file synchronization program**. It synchronizes
files between two or more computers. We strive to fulfill the goals below.
The goals are listed in order of importance, the most important ones first.
This is the summary version of the goal list - for more
commentary, see the full [Goals document][13].
The two are evolving together; the protocol is not to be considered
stable until syncthing 1.0 is released, at which point it is locked down
for incompatible changes.
Syncthing should be:
Syncthing does not use the BitTorrent protocol. The reasons for this are
1) we don't know if BitTorrent Sync does either, so there's nothing to
be compatible with, 2) BitTorrent includes a lot of functionality for
making sure large swarms of selfish agents behave and somehow work
towards a common goal. Here we have a much smaller swarm of cooperative
agents and a simpler approach will suffice.
1. **Safe From Data Loss**
Documentation
=============
Protecting the user's data is paramount. We take every reasonable
precaution to avoid corrupting the user's files.
The syncthing documentation is kept on the
[GitHub Wiki](https://github.com/calmh/syncthing/wiki).
2. **Secure Against Attackers**
License
=======
Again, protecting the user's data is paramount. Regardless of our other
goals, we must never allow the user's data to be susceptible to
eavesdropping or modification by unauthorized parties.
MIT
3. **Easy to Use**
Syncthing should be approachable, understandable, and inclusive.
4. **Automatic**
User interaction should be required only when absolutely necessary.
5. **Universally Available**
Syncthing should run on every common computer. We are mindful that the
latest technology is not always available to every individual.
6. **For Individuals**
Syncthing is primarily about empowering the individual user with safe,
secure, and easy to use file synchronization.
7. **Everything Else**
There are many things we care about that don't make it on to the list. It
is fine to optimize for these values, as long as they are not in conflict
with the stated goals above.
## Getting Started
Take a look at the [getting started guide][2].
There are a few examples for keeping Syncthing running in the background
on your system in [the etc directory][3]. There are also several [GUI
implementations][11] for Windows, Mac, and Linux.
## Docker
To run Syncthing in Docker, see [the Docker README][16].
## Getting in Touch
The first and best point of contact is the [Forum][8].
If you've found something that is clearly a
bug, feel free to report it in the [GitHub issue tracker][10].
If you believe that you've found a Syncthing-related security vulnerability,
please report it by emailing security@syncthing.net. Do not report it in the
Forum or issue tracker.
## Building
Building Syncthing from source is easy. After extracting the source bundle from
a release or checking out git, you just need to run `go run build.go` and the
binaries are created in `./bin`. There's [a guide][5] with more details on the
build process.
## Signed Releases
Release binaries are GPG signed with the key available from
https://syncthing.net/security/. There is also a built-in automatic
upgrade mechanism (disabled in some distribution channels) which uses a
compiled in ECDSA signature. macOS and Windows binaries are also
code-signed.
## Documentation
Please see the Syncthing [documentation site][6] [[source]][17].
All code is licensed under the [MPLv2 License][7].
[1]: https://docs.syncthing.net/specs/bep-v1.html
[2]: https://docs.syncthing.net/intro/getting-started.html
[3]: https://github.com/syncthing/syncthing/blob/main/etc
[5]: https://docs.syncthing.net/dev/building.html
[6]: https://docs.syncthing.net/
[7]: https://github.com/syncthing/syncthing/blob/main/LICENSE
[8]: https://forum.syncthing.net/
[10]: https://github.com/syncthing/syncthing/issues
[11]: https://docs.syncthing.net/users/contrib.html#gui-wrappers
[13]: https://github.com/syncthing/syncthing/blob/main/GOALS.md
[14]: assets/logo-text-128.png
[15]: https://syncthing.net/
[16]: https://github.com/syncthing/syncthing/blob/main/README-Docker.md
[17]: https://github.com/syncthing/docs

(Binary asset diffs not shown: a series of logo and icon images, roughly 1.4 KiB
to 160 KiB each, are removed in this comparison; one asset's text diff is
suppressed because its lines are too long; and `assets/st-logo.pxm` is added as
a new binary file.)
6
auto/gui.files.go Normal file

File diff suppressed because one or more lines are too long

74
blocks.go Normal file

@@ -0,0 +1,74 @@
package main
import (
"bytes"
"crypto/sha256"
"io"
)
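// Block describes one piece of a file: its offset, its length in bytes and the SHA-256 hash of its contents.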
type Block struct {
Offset int64
Size uint32
Hash []byte
}
// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int) ([]Block, error) {
var blocks []Block
var offset int64
for {
lr := &io.LimitedReader{r, int64(blocksize)}
hf := sha256.New()
n, err := io.Copy(hf, lr)
if err != nil {
return nil, err
}
if n == 0 {
break
}
b := Block{
Offset: offset,
Size: uint32(n),
Hash: hf.Sum(nil),
}
blocks = append(blocks, b)
offset += int64(n)
}
if len(blocks) == 0 {
// Empty file: emit a single zero-length block whose hash is the SHA-256 of no data
blocks = append(blocks, Block{
Offset: 0,
Size: 0,
Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
})
}
return blocks, nil
}
// BlockDiff returns lists of common and missing (to transform src into tgt)
// blocks. Both block lists must have been created with the same block size.
func BlockDiff(src, tgt []Block) (have, need []Block) {
if len(tgt) == 0 && len(src) != 0 {
return nil, nil
}
if len(tgt) != 0 && len(src) == 0 {
// Copy the entire file
return nil, tgt
}
for i := range tgt {
if i >= len(src) || bytes.Compare(tgt[i].Hash, src[i].Hash) != 0 {
// Copy differing block
need = append(need, tgt[i])
} else {
have = append(have, tgt[i])
}
}
return have, need
}
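The two functions above pair naturally: hash both versions of a file, then diff the hash
lists to see which blocks must be fetched. A minimal sketch, not part of this commit,
assuming it sits in the same package as `blocks.go` (shown with its own `main` purely for
illustration):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	const blockSize = 3 // tiny block size, just to make the example visible

	// Hash the version of the file we already have locally...
	local, err := Blocks(bytes.NewBufferString("contents"), blockSize)
	if err != nil {
		panic(err)
	}
	// ...and the version we want to end up with.
	target, err := Blocks(bytes.NewBufferString("cantents"), blockSize)
	if err != nil {
		panic(err)
	}

	// have: blocks we can keep as-is; need: blocks to request from the peer.
	have, need := BlockDiff(local, target)
	fmt.Println("have", len(have), "blocks, need", len(need))
	for _, b := range need {
		fmt.Printf("  fetch offset=%d size=%d hash=%x\n", b.Offset, b.Size, b.Hash)
	}
}
```

With these inputs only the first three-byte block differs, so `need` holds a single block
at offset 0, matching the `diffTestData` cases in the test file below.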

116
blocks_test.go Normal file

@@ -0,0 +1,116 @@
package main
import (
"bytes"
"fmt"
"testing"
)
var blocksTestData = []struct {
data []byte
blocksize int
hash []string
}{
{[]byte(""), 1024, []string{
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
{[]byte("contents"), 1024, []string{
"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
{[]byte("contents"), 9, []string{
"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
{[]byte("contents"), 8, []string{
"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
{[]byte("contents"), 7, []string{
"ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
"043a718774c572bd8a25adbeb1bfcd5c0256ae11cecf9f9c3f925d0e52beaf89"},
},
{[]byte("contents"), 3, []string{
"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
},
{[]byte("conconts"), 3, []string{
"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
"44ad63f60af0f6db6fdde6d5186ef78176367df261fa06be3079b6c80c8adba4"},
},
{[]byte("contenten"), 3, []string{
"1143da2bc54c495c4be31d3868785d39ffdfd56df5668f0645d8f14d47647952",
"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3",
"e4432baa90819aaef51d2a7f8e148bf7e679610f3173752fabb4dcb2d0f418d3"},
},
}
func TestBlocks(t *testing.T) {
for _, test := range blocksTestData {
buf := bytes.NewBuffer(test.data)
blocks, err := Blocks(buf, test.blocksize)
if err != nil {
t.Fatal(err)
}
if l := len(blocks); l != len(test.hash) {
t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
} else {
i := 0
for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
if blocks[i].Offset != off {
t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
}
bs := test.blocksize
if rem := len(test.data) - int(off); bs > rem {
bs = rem
}
if int(blocks[i].Size) != bs {
t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
}
if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
}
i++
}
}
}
}
var diffTestData = []struct {
a string
b string
s int
d []Block
}{
{"contents", "contents", 1024, []Block{}},
{"", "", 1024, []Block{}},
{"contents", "contents", 3, []Block{}},
{"contents", "cantents", 3, []Block{{0, 3, nil}}},
{"contents", "contants", 3, []Block{{3, 3, nil}}},
{"contents", "cantants", 3, []Block{{0, 3, nil}, {3, 3, nil}}},
{"contents", "", 3, []Block{{0, 0, nil}}},
{"", "contents", 3, []Block{{0, 3, nil}, {3, 3, nil}, {6, 2, nil}}},
{"con", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
{"contents", "con", 3, nil},
{"contents", "cont", 3, []Block{{3, 1, nil}}},
{"cont", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
}
func TestDiff(t *testing.T) {
for i, test := range diffTestData {
a, _ := Blocks(bytes.NewBufferString(test.a), test.s)
b, _ := Blocks(bytes.NewBufferString(test.b), test.s)
_, d := BlockDiff(a, b)
if len(d) != len(test.d) {
t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
} else {
for j := range test.d {
if d[j].Offset != test.d[j].Offset {
t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
}
if d[j].Size != test.d[j].Size {
t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
}
}
}
}
}

View File

@@ -1,12 +0,0 @@
version: v2
managed:
enabled: true
override:
- file_option: go_package_prefix
value: github.com/syncthing/syncthing/internal/gen
plugins:
- remote: buf.build/protocolbuffers/go:v1.35.1
out: .
opt: module=github.com/syncthing/syncthing
inputs:
- directory: proto

View File

@@ -1,10 +0,0 @@
version: v2
modules:
- path: proto
name: github.com/syncthing/syncthing
lint:
use:
- STANDARD
breaking:
use:
- WIRE_JSON

45
buffers/buffers.go Normal file

@@ -0,0 +1,45 @@
package buffers
const (
largeMin = 1024
)
var (
smallBuffers = make(chan []byte, 32)
largeBuffers = make(chan []byte, 32)
)
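// Get returns a slice of exactly size bytes, reusing a pooled buffer when one
// with enough capacity is available and allocating a fresh one otherwise.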
func Get(size int) []byte {
var ch = largeBuffers
if size < largeMin {
ch = smallBuffers
}
var buf []byte
select {
case buf = <-ch:
default:
}
if len(buf) < size {
return make([]byte, size)
}
return buf[:size]
}
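// Put returns a buffer to the matching pool so a later Get can reuse it; if
// the pool is already full (or the buffer has zero capacity) it is dropped.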
func Put(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) == 0 {
return
}
var ch = largeBuffers
if len(buf) < largeMin {
ch = smallBuffers
}
select {
case ch <- buf:
default:
}
}
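A minimal usage sketch for the pool above, not part of this commit; the import path is an
assumption based on the repository layout of this era and may differ:

```go
package main

import (
	"fmt"

	// Assumed import path; adjust to wherever the buffers package lives.
	"github.com/calmh/syncthing/buffers"
)

func main() {
	// Take a 4 KiB buffer: reused from the pool if one is available,
	// freshly allocated otherwise.
	buf := buffers.Get(4096)

	copy(buf, []byte("some payload"))
	fmt.Println("got", len(buf), "bytes")

	// Hand the buffer back so a later Get can reuse it instead of allocating.
	buffers.Put(buf)
}
```

The channels double as bounded free lists: a full channel simply drops the buffer, so the
pool never grows past 32 entries per size class.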

1433
build.go

File diff suppressed because it is too large

View File

@@ -1,20 +0,0 @@
function build {
go run build.go @args
}
$cmd, $rest = $args
switch ($cmd) {
"test" {
$env:LOGGER_DISCARD=1
build test
}
"bench" {
$env:LOGGER_DISCARD=1
build bench
}
default {
build @rest
}
}

View File

@@ -1,36 +1,51 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
#!/bin/bash
script() {
name="$1"
shift
go run "script/$name.go" "$@"
}
export COPYFILE_DISABLE=true
build() {
go run build.go "$@"
}
version=$(git describe --always)
buildDir=dist
case "${1:-default}" in
test)
LOGGER_DISCARD=1 build test
;;
if [[ $fast != yes ]] ; then
go get -d
go test ./...
fi
bench)
LOGGER_DISCARD=1 build bench
;;
if [[ -z $1 ]] ; then
go build -ldflags "-X main.Version $version"
elif [[ $1 == "embed" ]] ; then
embedder auto gui > auto/gui.files.go \
&& go build -ldflags "-X main.Version $version"
elif [[ $1 == "tar" ]] ; then
go build -ldflags "-X main.Version $version" \
&& mkdir syncthing-dist \
&& cp syncthing README.md LICENSE syncthing-dist \
&& tar zcvf syncthing-dist.tar.gz syncthing-dist \
&& rm -rf syncthing-dist
elif [[ $1 == "all" ]] ; then
rm -rf "$buildDir"
mkdir -p "$buildDir" || exit 1
prerelease)
script authors
script copyrights
build weblate
pushd man ; ./refresh.sh ; popd
git add -A gui man AUTHORS
git commit -m 'chore(gui, man, authors): update docs, translations, and contributors'
;;
*)
build "$@"
;;
esac
export GOARM=7
for os in darwin-amd64 linux-386 linux-amd64 linux-arm freebsd-386 freebsd-amd64 windows-386 windows-amd64 ; do
echo "$os"
export name="syncthing-$os"
export GOOS=${os%-*}
export GOARCH=${os#*-}
go build -ldflags "-X main.Version $version"
mkdir -p "$name"
cp README.md LICENSE "$name"
case $GOOS in
windows)
cp syncthing.exe "$buildDir/$name.exe"
mv syncthing.exe "$name"
zip -qr "$buildDir/$name.zip" "$name"
;;
*)
cp syncthing "$buildDir/$name"
mv syncthing "$name"
tar zcf "$buildDir/$name.tar.gz" "$name"
;;
esac
rm -r "$name"
done
fi

1
cmd/.gitignore vendored

@@ -1 +0,0 @@
!syncthing

View File

@@ -1,164 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"crypto/sha256"
"errors"
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
)
func main() {
flag.Parse()
log.Println(compareDirectories(flag.Args()...))
}
// Compare a number of directories. Returns nil if the contents are identical,
// otherwise an error describing the first found difference.
func compareDirectories(dirs ...string) error {
chans := make([]chan fileInfo, len(dirs))
for i := range chans {
chans[i] = make(chan fileInfo)
}
errcs := make([]chan error, len(dirs))
abort := make(chan struct{})
for i := range dirs {
errcs[i] = startWalker(dirs[i], chans[i], abort)
}
res := make([]fileInfo, len(dirs))
for {
numDone := 0
for i := range chans {
fi, ok := <-chans[i]
if !ok {
err, hasError := <-errcs[i]
if hasError {
close(abort)
return err
}
numDone++
}
res[i] = fi
}
for i := 1; i < len(res); i++ {
if res[i] != res[0] {
close(abort)
if res[i].name < res[0].name {
return fmt.Errorf("%s missing %v (present in %s)", dirs[0], res[i], dirs[i])
} else if res[i].name > res[0].name {
return fmt.Errorf("%s missing %v (present in %s)", dirs[i], res[0], dirs[0])
}
return fmt.Errorf("mismatch; %v (%s) != %v (%s)", res[i], dirs[i], res[0], dirs[0])
}
}
if numDone == len(dirs) {
return nil
}
}
}
type fileInfo struct {
name string
mode os.FileMode
mod int64
hash [sha256.Size]byte
}
func (f fileInfo) String() string {
return fmt.Sprintf("%s %04o %d %x", f.name, f.mode, f.mod, f.hash)
}
func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan error {
walker := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rn, _ := filepath.Rel(dir, path)
if rn == "." {
return nil
}
if rn == ".stversions" || rn == ".stfolder" {
return filepath.SkipDir
}
var f fileInfo
if info.Mode()&os.ModeSymlink != 0 {
f = fileInfo{
name: rn,
mode: os.ModeSymlink,
}
tgt, err := os.Readlink(path)
if err != nil {
return err
}
f.hash = sha256.Sum256([]byte(tgt))
} else if info.IsDir() {
f = fileInfo{
name: rn,
mode: info.Mode(),
// hash and modtime zero for directories
}
} else {
f = fileInfo{
name: rn,
mode: info.Mode(),
mod: info.ModTime().Unix(),
}
sum, err := sha256file(path)
if err != nil {
return err
}
f.hash = sum
}
select {
case res <- f:
return nil
case <-abort:
return errors.New("abort")
}
}
errc := make(chan error)
go func() {
err := filepath.Walk(dir, walker)
close(res)
if err != nil {
errc <- err
}
close(errc)
}()
return errc
}
func sha256file(fname string) (hash [sha256.Size]byte, err error) {
f, err := os.Open(fname)
if err != nil {
return
}
defer f.Close()
h := sha256.New()
io.Copy(h, f)
hb := h.Sum(nil)
copy(hash[:], hb)
return
}

View File

@@ -1,121 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"context"
"crypto/rand"
"encoding/binary"
"flag"
"log"
"strings"
"time"
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/gen/discoproto"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/beacon"
"github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/protocol"
)
var (
all = false // print all packets, not just first from each device/source
fake = false // send fake packets to lure out other devices faster
mc = "[ff12::8384]:21027"
bc = 21027
)
var (
// Static prefix that we use when generating fake device IDs, so that we
// can recognize them ourselves. Also makes the device ID start with
// "STPROBE-" which is humanly recognizable.
randomPrefix = []byte{148, 223, 23, 4, 148}
// Our random, fake, device ID that we use when sending announcements.
myID = randomDeviceID()
)
func main() {
flag.BoolVar(&all, "all", all, "Print all received announcements (not only first)")
flag.BoolVar(&fake, "fake", fake, "Send fake announcements")
flag.StringVar(&mc, "mc", mc, "IPv6 multicast address")
flag.IntVar(&bc, "bc", bc, "IPv4 broadcast port number")
flag.Parse()
if fake {
log.Println("My ID:", myID)
}
ctx := context.Background()
runbeacon(ctx, beacon.NewMulticast(mc), fake)
runbeacon(ctx, beacon.NewBroadcast(bc), fake)
select {}
}
func runbeacon(ctx context.Context, bc beacon.Interface, fake bool) {
go bc.Serve(ctx)
go recv(bc)
if fake {
go send(bc)
}
}
// receives and prints discovery announcements
func recv(bc beacon.Interface) {
seen := make(map[string]bool)
for {
data, src := bc.Recv()
if m := binary.BigEndian.Uint32(data); m != discover.Magic {
log.Printf("Incorrect magic %x in announcement from %v", m, src)
continue
}
var ann discoproto.Announce
proto.Unmarshal(data[4:], &ann)
id, _ := protocol.DeviceIDFromBytes(ann.Id)
if id == myID {
// This is one of our own fake packets, don't print it.
continue
}
// Print announcement details for the first packet from a given
// device ID and source address, or if -all was given.
key := id.String() + src.String()
if all || !seen[key] {
log.Printf("Announcement from %v\n", src)
log.Printf(" %v at %s\n", id, strings.Join(ann.Addresses, ", "))
seen[key] = true
}
}
}
// sends fake discovery announcements once every second
func send(bc beacon.Interface) {
ann := &discoproto.Announce{
Id: myID[:],
Addresses: []string{"tcp://fake.example.com:12345"},
}
bs, _ := proto.Marshal(ann)
for {
bc.Send(bs)
time.Sleep(time.Second)
}
}
// returns a random but recognizable device ID
func randomDeviceID() protocol.DeviceID {
var id protocol.DeviceID
copy(id[:], randomPrefix)
rand.Read(id[len(randomPrefix):])
return id
}

View File

@@ -1,70 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"os"
"time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
)
type event struct {
ID int `json:"id"`
Type string `json:"type"`
Time time.Time `json:"time"`
Data map[string]interface{} `json:"data"`
}
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
target := flag.String("target", "localhost:8384", "Target Syncthing instance")
types := flag.String("types", "", "Filter for specific event types (comma-separated)")
apikey := flag.String("apikey", "", "Syncthing API key")
flag.Parse()
if *apikey == "" {
log.Fatal("Must give -apikey argument")
}
var eventsArg string
if len(*types) > 0 {
eventsArg = "&events=" + *types
}
since := 0
for {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/rest/events?since=%d%s", *target, since, eventsArg), nil)
if err != nil {
log.Fatal(err)
}
req.Header.Set("X-API-Key", *apikey)
res, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatal(err)
}
var events []event
err = json.NewDecoder(res.Body).Decode(&events)
if err != nil {
log.Fatal(err)
}
res.Body.Close()
for _, event := range events {
bs, _ := json.MarshalIndent(event, "", " ")
log.Printf("%s", bs)
since = event.ID
}
}
}

View File

@@ -1,84 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"context"
"flag"
"log"
"os"
"path/filepath"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
)
func main() {
log.SetFlags(0)
log.SetOutput(os.Stdout)
standardBlocks := flag.Bool("s", false, "Use standard block size")
flag.Parse()
path := flag.Arg(0)
if path == "" {
log.Fatal("Need one argument: path to check")
}
log.Println("File:")
log.Println(" ", filepath.Clean(path))
log.Println()
fi, err := os.Lstat(path)
if err != nil {
log.Fatal(err)
}
log.Println("Lstat:")
log.Printf(" Size: %d bytes", fi.Size())
log.Printf(" Mode: 0%o", fi.Mode())
log.Printf(" Time: %v", fi.ModTime())
log.Printf(" %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
log.Println()
if !fi.Mode().IsDir() && !fi.Mode().IsRegular() {
fi, err = os.Stat(path)
if err != nil {
log.Fatal(err)
}
log.Println("Stat:")
log.Printf(" Size: %d bytes", fi.Size())
log.Printf(" Mode: 0%o", fi.Mode())
log.Printf(" Time: %v", fi.ModTime())
log.Printf(" %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
log.Println()
}
if fi.Mode().IsRegular() {
log.Println("Blocks:")
fd, err := os.Open(path)
if err != nil {
log.Fatal(err)
}
blockSize := int(fi.Size())
if *standardBlocks || blockSize < protocol.MinBlockSize {
blockSize = protocol.BlockSize(fi.Size())
}
bs, err := scanner.Blocks(context.TODO(), fd, blockSize, fi.Size(), nil)
if err != nil {
log.Fatal(err)
}
for _, b := range bs {
log.Println(" ", b)
}
}
}

View File

@@ -1,110 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"context"
"crypto/tls"
"errors"
"flag"
"fmt"
"net/url"
"os"
"time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
)
var timeout = 5 * time.Second
func main() {
var server string
flag.StringVar(&server, "server", "", "Announce server (blank for default set)")
flag.DurationVar(&timeout, "timeout", timeout, "Query timeout")
flag.Usage = usage
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
os.Exit(64)
}
id, err := protocol.DeviceIDFromString(flag.Args()[0])
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if server != "" {
checkServers(id, server)
} else {
checkServers(id, config.DefaultDiscoveryServers...)
}
}
type checkResult struct {
server string
addresses []string
error
}
func checkServers(deviceID protocol.DeviceID, servers ...string) {
t0 := time.Now()
resc := make(chan checkResult)
for _, srv := range servers {
srv := srv
go func() {
res := checkServer(deviceID, srv)
res.server = srv
resc <- res
}()
}
for range servers {
res := <-resc
u, _ := url.Parse(res.server)
fmt.Printf("%s (%v):\n", u.Host, time.Since(t0))
if res.error != nil {
fmt.Println(" " + res.error.Error())
}
for _, addr := range res.addresses {
fmt.Println(" address:", addr)
}
}
}
func checkServer(deviceID protocol.DeviceID, server string) checkResult {
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, events.NoopLogger, nil)
if err != nil {
return checkResult{error: err}
}
res := make(chan checkResult, 1)
time.AfterFunc(timeout, func() {
res <- checkResult{error: errors.New("timeout")}
})
go func() {
addresses, err := disco.Lookup(context.Background(), deviceID)
res <- checkResult{addresses: addresses, error: err}
}()
return <-res
}
func usage() {
fmt.Printf("Usage:\n\t%s [options] <device ID>\n\nOptions:\n", os.Args[0])
flag.PrintDefaults()
}

View File

@@ -1,45 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// Command stfindignored lists ignored files under a given folder root.
package main
import (
"flag"
"fmt"
"os"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
)
func main() {
flag.Parse()
root := flag.Arg(0)
if root == "" {
root = "."
}
vfs := fs.NewWalkFilesystem(fs.NewFilesystem(fs.FilesystemTypeBasic, root))
ign := ignore.New(vfs)
if err := ign.Load(".stignore"); err != nil {
fmt.Fprintf(os.Stderr, "Fatal: loading ignores: %v\n", err)
os.Exit(1)
}
vfs.Walk(".", func(path string, info fs.FileInfo, err error) error {
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: %s: %v\n", path, err)
return fs.SkipDir
}
if ign.Match(path).IsIgnored() {
fmt.Println(path)
}
return nil
})
}

View File

@@ -1,121 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"path/filepath"
"time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
)
func main() {
dir := flag.String("dir", "~/files", "Directory to generate into")
files := flag.Int("files", 1000, "Number of files to create")
maxExp := flag.Int("maxexp", 20, "Max size exponent")
src := flag.String("src", "/dev/urandom", "Source of file data")
flag.Parse()
if err := generateFiles(*dir, *files, *maxExp, *src); err != nil {
log.Println(err)
}
}
func generateFiles(dir string, files, maxexp int, srcname string) error {
fd, err := os.Open(srcname)
if err != nil {
return err
}
for i := 0; i < files; i++ {
n := randomName()
if rand.Float64() < 0.05 {
// Some files and directories are dotfiles
n = "." + n
}
p0 := filepath.Join(dir, string(n[0]), n[0:2])
err = os.MkdirAll(p0, 0o755)
if err != nil {
log.Fatal(err)
}
p1 := filepath.Join(p0, n)
s := int64(1 << uint(rand.Intn(maxexp)))
a := int64(128 * 1024)
if a > s {
a = s
}
s += rand.Int63n(a)
if err := generateOneFile(fd, p1, s); err != nil {
return err
}
}
return nil
}
func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
src := io.LimitReader(&infiniteReader{fd}, s)
dst, err := os.Create(p1)
if err != nil {
return err
}
_, err = io.Copy(dst, src)
if err != nil {
return err
}
err = dst.Close()
if err != nil {
return err
}
os.Chmod(p1, os.FileMode(rand.Intn(0o777)|0o400))
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
return os.Chtimes(p1, t, t)
}
func randomName() string {
var b [16]byte
readRand(b[:])
return fmt.Sprintf("%x", b[:])
}
func readRand(bs []byte) (int, error) {
var r uint32
for i := range bs {
if i%4 == 0 {
r = uint32(rand.Int63())
}
bs[i] = byte(r >> uint((i%4)*8))
}
return len(bs), nil
}
type infiniteReader struct {
rd io.ReadSeeker
}
func (i *infiniteReader) Read(bs []byte) (int, error) {
n, err := i.rd.Read(bs)
if err == io.EOF {
err = nil
i.rd.Seek(0, 0)
}
return n, err
}

View File

@@ -1,123 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"flag"
"io"
"log"
"os"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/signature"
"github.com/syncthing/syncthing/lib/upgrade"
)
func main() {
log.SetFlags(0)
log.SetOutput(os.Stdout)
flag.Parse()
if flag.NArg() < 1 {
log.Print(`Usage:
stsigtool <command>
Where command is one of:
gen
- generate a new key pair
sign <privkeyfile> [datafile]
- sign a file
verify <signaturefile> <datafile>
- verify a signature, using the built in public key
verify <signaturefile> <datafile> <pubkeyfile>
- verify a signature, using the specified public key file
`)
}
switch flag.Arg(0) {
case "gen":
gen()
case "sign":
sign(flag.Arg(1), flag.Arg(2))
case "verify":
if flag.NArg() == 4 {
verifyWithFile(flag.Arg(1), flag.Arg(2), flag.Arg(3))
} else {
verifyWithKey(flag.Arg(1), flag.Arg(2), upgrade.SigningKey)
}
}
}
func gen() {
priv, pub, err := signature.GenerateKeys()
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(priv)
os.Stdout.Write(pub)
}
func sign(keyname, dataname string) {
privkey, err := os.ReadFile(keyname)
if err != nil {
log.Fatal(err)
}
var input io.Reader
if dataname == "-" || dataname == "" {
input = os.Stdin
} else {
fd, err := os.Open(dataname)
if err != nil {
log.Fatal(err)
}
defer fd.Close()
input = fd
}
sig, err := signature.Sign(privkey, input)
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(sig)
}
func verifyWithFile(signame, dataname, keyname string) {
pubkey, err := os.ReadFile(keyname)
if err != nil {
log.Fatal(err)
}
verifyWithKey(signame, dataname, pubkey)
}
func verifyWithKey(signame, dataname string, pubkey []byte) {
sig, err := os.ReadFile(signame)
if err != nil {
log.Fatal(err)
}
fd, err := os.Open(dataname)
if err != nil {
log.Fatal(err)
}
defer fd.Close()
err = signature.Verify(pubkey, sig, fd)
if err != nil {
log.Fatal(err)
}
log.Println("correct signature")
}

View File

@@ -1,214 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"flag"
"fmt"
"math/big"
mr "math/rand"
"os"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/protocol"
)
type result struct {
id protocol.DeviceID
priv *ecdsa.PrivateKey
derBytes []byte
}
func main() {
flag.Parse()
prefix := strings.ToUpper(strings.ReplaceAll(flag.Arg(0), "-", ""))
if len(prefix) > 7 {
prefix = prefix[:7] + "-" + prefix[7:]
}
found := make(chan result)
stop := make(chan struct{})
var count atomic.Int64
// Print periodic progress reports.
go printProgress(prefix, &count)
// Run one certificate generator per CPU core.
var wg sync.WaitGroup
for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
wg.Add(1)
go func() {
generatePrefixed(prefix, &count, found, stop)
wg.Done()
}()
}
// Save the result, when one has been found.
res := <-found
close(stop)
wg.Wait()
fmt.Println("Found", res.id)
saveCert(res.priv, res.derBytes)
fmt.Println("Saved to cert.pem, key.pem")
}
// Try certificates until one is found that has the prefix at the start of
// the resulting device ID. Increments count atomically, sends the result to
// found, returns when stop is closed.
func generatePrefixed(prefix string, count *atomic.Int64, found chan<- result, stop <-chan struct{}) {
notBefore := time.Now()
notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
template := x509.Certificate{
SerialNumber: new(big.Int).SetInt64(mr.Int63()),
Subject: pkix.Name{
CommonName: "syncthing",
},
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
BasicConstraintsValid: true,
}
priv, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for {
select {
case <-stop:
return
default:
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
id := protocol.NewDeviceID(derBytes)
count.Add(1)
if strings.HasPrefix(id.String(), prefix) {
select {
case found <- result{id, priv, derBytes}:
case <-stop:
}
return
}
}
}
func printProgress(prefix string, count *atomic.Int64) {
started := time.Now()
wantBits := 5 * len(prefix)
if wantBits > 63 {
fmt.Printf("Want %d bits for prefix %q, refusing to boil the ocean.\n", wantBits, prefix)
os.Exit(1)
}
expectedIterations := float64(int(1) << uint(wantBits))
fmt.Printf("Want %d bits for prefix %q, about %.2g certs to test (statistically speaking)\n", wantBits, prefix, expectedIterations)
for range time.NewTicker(15 * time.Second).C {
tried := count.Load()
elapsed := time.Since(started)
rate := float64(tried) / elapsed.Seconds()
expected := timeStr(expectedIterations / rate)
fmt.Printf("Trying %.0f certs/s, tested %d so far in %v, expect ~%s total time to complete\n", rate, tried, elapsed/time.Second*time.Second, expected)
}
}
func saveCert(priv interface{}, derBytes []byte) {
certOut, err := os.Create("cert.pem")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
err = certOut.Close()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
block, err := pemBlockForKey(priv)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
err = pem.Encode(keyOut, block)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
err = keyOut.Close()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func pemBlockForKey(priv interface{}) (*pem.Block, error) {
switch k := priv.(type) {
case *rsa.PrivateKey:
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}, nil
case *ecdsa.PrivateKey:
b, err := x509.MarshalECPrivateKey(k)
if err != nil {
return nil, err
}
return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}, nil
default:
return nil, errors.New("unknown key type")
}
}
func timeStr(seconds float64) string {
if seconds < 60 {
return fmt.Sprintf("%.0fs", seconds)
}
if seconds < 3600 {
return fmt.Sprintf("%.0fm", seconds/60)
}
if seconds < 86400 {
return fmt.Sprintf("%.0fh", seconds/3600)
}
if seconds < 86400*365 {
return fmt.Sprintf("%.0f days", seconds/3600)
}
return fmt.Sprintf("%.0f years", seconds/86400/365)
}

View File

@@ -1,98 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"crypto/sha256"
"flag"
"fmt"
"io"
"os"
"time"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
)
func main() {
period := flag.Duration("period", 200*time.Millisecond, "Sleep period between checks")
flag.Parse()
file := flag.Arg(0)
if file == "" {
fmt.Println("Expects a path as an argument")
return
}
exists := true
size := int64(0)
mtime := time.Time{}
var hash [sha256.Size]byte
for {
time.Sleep(*period)
newExists := true
fi, err := os.Stat(file)
if err != nil && os.IsNotExist(err) {
newExists = false
} else if err != nil {
fmt.Println("stat:", err)
return
}
if newExists != exists {
exists = newExists
if !newExists {
fmt.Println(file, "does not exist")
} else {
fmt.Println(file, "appeared")
}
}
if !exists {
size = 0
mtime = time.Time{}
hash = [sha256.Size]byte{}
continue
}
if fi.IsDir() {
fmt.Println(file, "is directory")
return
}
newSize := fi.Size()
newMtime := fi.ModTime()
newHash, err := sha256file(file)
if err != nil {
fmt.Println("sha256file:", err)
}
if newSize != size || newMtime != mtime || newHash != hash {
fmt.Println(file, "Size:", newSize, "Mtime:", newMtime, "Hash:", fmt.Sprintf("%x", newHash))
hash = newHash
size = newSize
mtime = newMtime
}
}
}
func sha256file(fname string) (hash [sha256.Size]byte, err error) {
f, err := os.Open(fname)
if err != nil {
return
}
defer f.Close()
h := sha256.New()
io.Copy(h, f)
hb := h.Sum(nil)
copy(hash[:], hb)
return
}

View File

File diff suppressed because it is too large

View File

@@ -1,199 +0,0 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"cmp"
"compress/gzip"
"context"
"io"
"log"
"math"
"os"
"path/filepath"
"slices"
"time"
)
type diskStore struct {
dir string
inbox chan diskEntry
maxBytes int64
maxFiles int
currentFiles []currentFile
currentSize int64
}
type diskEntry struct {
path string
data []byte
}
type currentFile struct {
path string
size int64
mtime int64
}
func (d *diskStore) Serve(ctx context.Context) {
if err := os.MkdirAll(d.dir, 0o700); err != nil {
log.Println("Creating directory:", err)
return
}
if err := d.inventory(); err != nil {
log.Println("Failed to inventory disk store:", err)
}
d.clean()
cleanTimer := time.NewTicker(time.Minute)
inventoryTimer := time.NewTicker(24 * time.Hour)
buf := new(bytes.Buffer)
gw := gzip.NewWriter(buf)
for {
select {
case entry := <-d.inbox:
path := d.fullPath(entry.path)
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
log.Println("Creating directory:", err)
continue
}
buf.Reset()
gw.Reset(buf)
if _, err := gw.Write(entry.data); err != nil {
log.Println("Failed to compress crash report:", err)
continue
}
if err := gw.Close(); err != nil {
log.Println("Failed to compress crash report:", err)
continue
}
if err := os.WriteFile(path, buf.Bytes(), 0o600); err != nil {
log.Printf("Failed to write %s: %v", entry.path, err)
_ = os.Remove(path)
continue
}
d.currentSize += int64(buf.Len())
d.currentFiles = append(d.currentFiles, currentFile{
size: int64(buf.Len()), // track the compressed size, so clean() stays consistent with currentSize
path: path,
mtime: time.Now().Unix(),
})
case <-cleanTimer.C:
d.clean()
case <-inventoryTimer.C:
if err := d.inventory(); err != nil {
log.Println("Failed to inventory disk store:", err)
}
case <-ctx.Done():
return
}
}
}
func (d *diskStore) Put(path string, data []byte) bool {
select {
case d.inbox <- diskEntry{
path: path,
data: data,
}:
return true
default:
return false
}
}
func (d *diskStore) Get(path string) ([]byte, error) {
path = d.fullPath(path)
bs, err := os.ReadFile(path)
if err != nil {
return nil, err
}
gr, err := gzip.NewReader(bytes.NewReader(bs))
if err != nil {
return nil, err
}
defer gr.Close()
return io.ReadAll(gr)
}
func (d *diskStore) Exists(path string) bool {
path = d.fullPath(path)
_, err := os.Lstat(path)
return err == nil
}
func (d *diskStore) clean() {
for len(d.currentFiles) > 0 && (len(d.currentFiles) > d.maxFiles || d.currentSize > d.maxBytes) {
f := d.currentFiles[0]
log.Println("Removing", f.path)
if err := os.Remove(f.path); err != nil {
log.Println("Failed to remove file:", err)
}
d.currentFiles = d.currentFiles[1:]
d.currentSize -= f.size
}
var oldest time.Duration
if len(d.currentFiles) > 0 {
oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
}
metricDiskstoreFilesTotal.Set(float64(len(d.currentFiles)))
metricDiskstoreBytesTotal.Set(float64(d.currentSize))
metricDiskstoreOldestAgeSeconds.Set(math.Round(oldest.Seconds()))
log.Printf("Clean complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
}
func (d *diskStore) inventory() error {
d.currentFiles = nil
d.currentSize = 0
err := filepath.Walk(d.dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
if filepath.Ext(path) != ".gz" {
return nil
}
d.currentSize += info.Size()
d.currentFiles = append(d.currentFiles, currentFile{
path: path,
size: info.Size(),
mtime: info.ModTime().Unix(),
})
return nil
})
slices.SortFunc(d.currentFiles, func(a, b currentFile) int {
return cmp.Compare(a.mtime, b.mtime)
})
var oldest time.Duration
if len(d.currentFiles) > 0 {
oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
}
metricDiskstoreFilesTotal.Set(float64(len(d.currentFiles)))
metricDiskstoreBytesTotal.Set(float64(d.currentSize))
metricDiskstoreOldestAgeSeconds.Set(math.Round(oldest.Seconds()))
log.Printf("Inventory complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
return err
}
func (d *diskStore) fullPath(path string) string {
return filepath.Join(d.dir, path[0:2], path[2:]) + ".gz"
}

View File

@@ -1,228 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// Command stcrashreceiver is a trivial HTTP server that allows two things:
//
// - uploading files (crash reports) named like a SHA256 hash using a PUT request
// - checking whether such a file exists using a HEAD request
//
// Typically this should be deployed behind something that manages HTTPS.
package main
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/alecthomas/kong"
raven "github.com/getsentry/raven-go"
"github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/ur"
)
const maxRequestSize = 1 << 20 // 1 MiB
type cli struct {
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
MetricsListen string `help:"HTTP listen address for metrics" default:":8081" env:"METRICS_LISTEN_ADDRESS"`
IgnorePatterns string `help:"File containing ignore patterns (regexp)" env:"IGNORE_PATTERNS" type:"existingfile"`
}
func main() {
var params cli
kong.Parse(&params)
mux := http.NewServeMux()
ds := &diskStore{
dir: filepath.Join(params.Dir, "crash_reports"),
inbox: make(chan diskEntry, params.DiskQueue),
maxFiles: params.MaxDiskFiles,
maxBytes: params.MaxDiskSizeMB << 20,
}
go ds.Serve(context.Background())
ss := &sentryService{
dsn: params.DSN,
inbox: make(chan sentryRequest, params.SentryQueue),
}
go ss.Serve(context.Background())
var ip *ignorePatterns
if params.IgnorePatterns != "" {
var err error
ip, err = loadIgnorePatterns(params.IgnorePatterns)
if err != nil {
log.Fatalf("Failed to load ignore patterns: %v", err)
}
}
cr := &crashReceiver{
store: ds,
sentry: ss,
ignore: ip,
}
mux.Handle("/", cr)
mux.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte("OK"))
})
if params.MetricsListen != "" {
mmux := http.NewServeMux()
mmux.Handle("/metrics", promhttp.Handler())
go func() {
if err := http.ListenAndServe(params.MetricsListen, mmux); err != nil {
log.Fatalln("HTTP serve metrics:", err)
}
}()
}
if params.DSN != "" {
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports"), ip))
}
log.SetOutput(os.Stdout)
if err := http.ListenAndServe(params.Listen, mux); err != nil {
log.Fatalln("HTTP serve:", err)
}
}
func handleFailureFn(dsn, failureDir string, ignore *ignorePatterns) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) {
result := "failure"
defer func() {
metricFailureReportsTotal.WithLabelValues(result).Inc()
}()
lr := io.LimitReader(req.Body, maxRequestSize)
bs, err := io.ReadAll(lr)
req.Body.Close()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if ignore.match(bs) {
result = "ignored"
return
}
var reports []ur.FailureReport
err = json.Unmarshal(bs, &reports)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if len(reports) == 0 {
// Shouldn't happen
log.Printf("Got zero failure reports")
return
}
version, err := build.ParseVersion(reports[0].Version)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
for _, r := range reports {
pkt := packet(version, "failure")
pkt.Message = r.Description
pkt.Extra = raven.Extra{
"count": r.Count,
}
for k, v := range r.Extra {
pkt.Extra[k] = v
}
if r.Goroutines != "" {
url, err := saveFailureWithGoroutines(r.FailureData, failureDir)
if err != nil {
log.Println("Saving failure report:", err)
http.Error(w, "Internal server error", http.StatusInternalServerError)
return
}
pkt.Extra["goroutinesURL"] = url
}
message := sanitizeMessageLDB(r.Description)
pkt.Fingerprint = []string{message}
if err := sendReport(dsn, pkt, userIDFor(req)); err != nil {
log.Println("Failed to send failure report:", err)
} else {
log.Println("Sent failure report:", r.Description)
result = "success"
}
}
}
}
func saveFailureWithGoroutines(data ur.FailureData, failureDir string) (string, error) {
bs := make([]byte, len(data.Description)+len(data.Goroutines))
copy(bs, data.Description)
copy(bs[len(data.Description):], data.Goroutines)
id := fmt.Sprintf("%x", sha256.Sum256(bs))
path := fullPathCompressed(failureDir, id)
err := compressAndWrite(bs, path)
if err != nil {
return "", err
}
return reportServer + path, nil
}
type ignorePatterns struct {
patterns []*regexp.Regexp
}
func loadIgnorePatterns(path string) (*ignorePatterns, error) {
bs, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var patterns []*regexp.Regexp
for _, line := range strings.Split(string(bs), "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
re, err := regexp.Compile(line)
if err != nil {
return nil, err
}
patterns = append(patterns, re)
}
log.Printf("Loaded %d ignore patterns", len(patterns))
return &ignorePatterns{patterns: patterns}, nil
}
func (i *ignorePatterns) match(report []byte) bool {
if i == nil {
return false
}
for _, re := range i.patterns {
if re.Match(report) {
return true
}
}
return false
}

View File

@@ -1,40 +0,0 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
metricCrashReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "syncthing",
Subsystem: "crashreceiver",
Name: "crash_reports_total",
}, []string{"result"})
metricFailureReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "syncthing",
Subsystem: "crashreceiver",
Name: "failure_reports_total",
}, []string{"result"})
metricDiskstoreFilesTotal = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "syncthing",
Subsystem: "crashreceiver",
Name: "diskstore_files_total",
})
metricDiskstoreBytesTotal = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "syncthing",
Subsystem: "crashreceiver",
Name: "diskstore_bytes_total",
})
metricDiskstoreOldestAgeSeconds = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "syncthing",
Subsystem: "crashreceiver",
Name: "diskstore_oldest_age_seconds",
})
)

View File

@@ -1,242 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"context"
"errors"
"io"
"log"
"regexp"
"strings"
"sync"
raven "github.com/getsentry/raven-go"
"github.com/maruel/panicparse/v2/stack"
"github.com/syncthing/syncthing/lib/build"
)
const reportServer = "https://crash.syncthing.net/report/"
var loader = newGithubSourceCodeLoader()
func init() {
raven.SetSourceCodeLoader(loader)
}
var (
clients = make(map[string]*raven.Client)
clientsMut sync.Mutex
)
type sentryService struct {
dsn string
inbox chan sentryRequest
}
type sentryRequest struct {
reportID string
userID string
data []byte
}
func (s *sentryService) Serve(ctx context.Context) {
for {
select {
case req := <-s.inbox:
pkt, err := parseCrashReport(req.reportID, req.data)
if err != nil {
log.Println("Failed to parse crash report:", err)
continue
}
if err := sendReport(s.dsn, pkt, req.userID); err != nil {
log.Println("Failed to send crash report:", err)
}
case <-ctx.Done():
return
}
}
}
func (s *sentryService) Send(reportID, userID string, data []byte) bool {
select {
case s.inbox <- sentryRequest{reportID, userID, data}:
return true
default:
return false
}
}
func sendReport(dsn string, pkt *raven.Packet, userID string) error {
pkt.Interfaces = append(pkt.Interfaces, &raven.User{ID: userID})
clientsMut.Lock()
defer clientsMut.Unlock()
cli, ok := clients[dsn]
if !ok {
var err error
cli, err = raven.New(dsn)
if err != nil {
return err
}
clients[dsn] = cli
}
// The client sets release and such on the packet before sending, in the
// misguided idea that it knows this better than the packet we give
// it. So we copy the values from the packet to the client first...
cli.SetRelease(pkt.Release)
cli.SetEnvironment(pkt.Environment)
defer cli.Wait()
_, errC := cli.Capture(pkt, nil)
return <-errC
}
func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
parts := bytes.SplitN(report, []byte("\n"), 2)
if len(parts) != 2 {
return nil, errors.New("no first line")
}
version, err := build.ParseVersion(string(parts[0]))
if err != nil {
return nil, err
}
report = parts[1]
foundPanic := false
var subjectLine []byte
for {
parts = bytes.SplitN(report, []byte("\n"), 2)
if len(parts) != 2 {
return nil, errors.New("no panic line found")
}
line := parts[0]
report = parts[1]
if foundPanic {
// The previous line was our "Panic at ..." header. We are now
// at the beginning of the real panic trace and this is our
// subject line.
subjectLine = line
break
} else if bytes.HasPrefix(line, []byte("Panic at")) {
foundPanic = true
}
}
r := bytes.NewReader(report)
ctx, _, err := stack.ScanSnapshot(r, io.Discard, stack.DefaultOpts())
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
}
if ctx == nil || len(ctx.Goroutines) == 0 {
return nil, errors.New("no goroutines found")
}
// Lock the source code loader to the version we are processing here.
if version.Commit != "" {
// We have a commit hash, so we know exactly which source to use
loader.LockWithVersion(version.Commit)
} else if strings.HasPrefix(version.Tag, "v") {
// Let's hope the tag is close enough
loader.LockWithVersion(version.Tag)
} else {
// Last resort
loader.LockWithVersion("main")
}
defer loader.Unlock()
var trace raven.Stacktrace
for _, gr := range ctx.Goroutines {
if gr.First {
trace.Frames = make([]*raven.StacktraceFrame, len(gr.Stack.Calls))
for i, sc := range gr.Stack.Calls {
trace.Frames[len(trace.Frames)-1-i] = raven.NewStacktraceFrame(0, sc.Func.Name, sc.RemoteSrcPath, sc.Line, 3, nil)
}
break
}
}
pkt := packet(version, "crash")
pkt.Message = string(subjectLine)
pkt.Extra = raven.Extra{
"url": reportServer + path,
}
pkt.Interfaces = []raven.Interface{&trace}
pkt.Fingerprint = crashReportFingerprint(pkt.Message)
return pkt, nil
}
var (
indexRe = regexp.MustCompile(`\[[-:0-9]+\]`)
sizeRe = regexp.MustCompile(`(length|capacity) [0-9]+`)
ldbPosRe = regexp.MustCompile(`(\(pos=)([0-9]+)\)`)
ldbChecksumRe = regexp.MustCompile(`(want=0x)([a-z0-9]+)( got=0x)([a-z0-9]+)`)
ldbFileRe = regexp.MustCompile(`(\[file=)([0-9]+)(\.ldb\])`)
ldbInternalKeyRe = regexp.MustCompile(`(internal key ")[^"]+(", len=)[0-9]+`)
ldbPathRe = regexp.MustCompile(`(open|write|read) .+[\\/].+[\\/]index[^\\/]+[\\/][^\\/]+: `)
)
func sanitizeMessageLDB(message string) string {
message = ldbPosRe.ReplaceAllString(message, "${1}x)")
message = ldbFileRe.ReplaceAllString(message, "${1}x${3}")
message = ldbChecksumRe.ReplaceAllString(message, "${1}X${3}X")
message = ldbInternalKeyRe.ReplaceAllString(message, "${1}x${2}x")
message = ldbPathRe.ReplaceAllString(message, "$1 x: ")
return message
}
func crashReportFingerprint(message string) []string {
// Do not fingerprint on the stack in case of db corruption or fatal
// db io error - where it occurs doesn't matter.
orig := message
message = sanitizeMessageLDB(message)
if message != orig {
return []string{message}
}
message = indexRe.ReplaceAllString(message, "[x]")
message = sizeRe.ReplaceAllString(message, "$1 x")
// {{ default }} is what sentry uses as a fingerprint by default. While
// never specified, the docs point at this being some hash derived from the
// stack trace. Here we include the filtered panic message on top of that.
// https://docs.sentry.io/platforms/go/data-management/event-grouping/sdk-fingerprinting/#basic-example
return []string{"{{ default }}", message}
}
func packet(version build.VersionParts, reportType string) *raven.Packet {
pkt := &raven.Packet{
Platform: "go",
Release: version.Tag,
Environment: version.Environment(),
Tags: raven.Tags{
raven.Tag{Key: "version", Value: version.Version},
raven.Tag{Key: "tag", Value: version.Tag},
raven.Tag{Key: "codename", Value: version.Codename},
raven.Tag{Key: "runtime", Value: version.Runtime},
raven.Tag{Key: "goos", Value: version.GOOS},
raven.Tag{Key: "goarch", Value: version.GOARCH},
raven.Tag{Key: "builder", Value: version.Builder},
raven.Tag{Key: "report_type", Value: reportType},
},
}
if version.Commit != "" {
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.Commit})
}
for _, tag := range version.Extra {
pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
}
return pkt
}

View File

@@ -1,95 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"fmt"
"os"
"testing"
)
func TestParseReport(t *testing.T) {
bs, err := os.ReadFile("_testdata/panic.log")
if err != nil {
t.Fatal(err)
}
pkt, err := parseCrashReport("1/2/345", bs)
if err != nil {
t.Fatal(err)
}
bs, err = pkt.JSON()
if err != nil {
t.Fatal(err)
}
fmt.Printf("%s\n", bs)
}
func TestCrashReportFingerprint(t *testing.T) {
cases := []struct {
message, exp string
ldb bool
}{
{
message: "panic: leveldb/table: corruption on data-block (pos=51308946): checksum mismatch, want=0xa89f9aa0 got=0xd27cc4c7 [file=004003.ldb]",
exp: "panic: leveldb/table: corruption on data-block (pos=x): checksum mismatch, want=0xX got=0xX [file=x.ldb]",
ldb: true,
},
{
message: "panic: leveldb/table: corruption on table-footer (pos=248): bad magic number [file=001370.ldb]",
exp: "panic: leveldb/table: corruption on table-footer (pos=x): bad magic number [file=x.ldb]",
ldb: true,
},
{
message: "panic: runtime error: slice bounds out of range [4294967283:4194304]",
exp: "panic: runtime error: slice bounds out of range [x]",
},
{
message: "panic: runtime error: slice bounds out of range [-2:]",
exp: "panic: runtime error: slice bounds out of range [x]",
},
{
message: "panic: runtime error: slice bounds out of range [:4294967283] with capacity 32768",
exp: "panic: runtime error: slice bounds out of range [x] with capacity x",
},
{
message: "panic: runtime error: index out of range [0] with length 0",
exp: "panic: runtime error: index out of range [x] with length x",
},
{
message: `panic: leveldb: internal key "\x01", len=1: invalid length`,
exp: `panic: leveldb: internal key "x", len=x: invalid length`,
ldb: true,
},
{
message: `panic: write /var/syncthing/config/index-v0.14.0.db/2732813.log: cannot allocate memory`,
exp: `panic: write x: cannot allocate memory`,
ldb: true,
},
{
message: `panic: filling Blocks: read C:\Users\Serv-Resp-Tizayuca\AppData\Local\Syncthing\index-v0.14.0.db\006561.ldb: Error de datos (comprobación de redundancia cíclica).`,
exp: `panic: filling Blocks: read x: Error de datos (comprobación de redundancia cíclica).`,
ldb: true,
},
}
for i, tc := range cases {
fingerprint := crashReportFingerprint(tc.message)
expLen := 2
if tc.ldb {
expLen = 1
}
if l := len(fingerprint); l != expLen {
t.Errorf("tc %v: Unexpected fingerprint length: %v != %v", i, l, expLen)
} else if msg := fingerprint[expLen-1]; msg != tc.exp {
t.Errorf("tc %v:\n\"%v\" !=\n\"%v\"", i, msg, tc.exp)
}
}
}

View File

@@ -1,117 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"fmt"
"io"
"net/http"
"path/filepath"
"strings"
"sync"
"time"
)
const (
urlPrefix = "https://raw.githubusercontent.com/syncthing/syncthing/"
httpTimeout = 10 * time.Second
)
type githubSourceCodeLoader struct {
mut sync.Mutex
version string
cache map[string]map[string][][]byte // version -> file -> lines
client *http.Client
}
func newGithubSourceCodeLoader() *githubSourceCodeLoader {
return &githubSourceCodeLoader{
cache: make(map[string]map[string][][]byte),
client: &http.Client{Timeout: httpTimeout},
}
}
func (l *githubSourceCodeLoader) LockWithVersion(version string) {
l.mut.Lock()
l.version = version
if _, ok := l.cache[version]; !ok {
l.cache[version] = make(map[string][][]byte)
}
}
func (l *githubSourceCodeLoader) Unlock() {
l.mut.Unlock()
}
func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]byte, int) {
filename = filepath.ToSlash(filename)
lines, ok := l.cache[l.version][filename]
if !ok {
// Cache whatever we managed to find (or nil if nothing, so we don't try again)
defer func() {
l.cache[l.version][filename] = lines
}()
knownPrefixes := []string{"/lib/", "/cmd/"}
var idx int
for _, pref := range knownPrefixes {
idx = strings.Index(filename, pref)
if idx >= 0 {
break
}
}
if idx == -1 {
return nil, 0
}
url := urlPrefix + l.version + filename[idx:]
resp, err := l.client.Get(url)
if err != nil {
fmt.Println("Loading source:", err)
return nil, 0
}
if resp.StatusCode != http.StatusOK {
fmt.Println("Loading source:", resp.Status)
return nil, 0
}
data, err := io.ReadAll(resp.Body)
_ = resp.Body.Close()
if err != nil {
fmt.Println("Loading source:", err.Error())
return nil, 0
}
lines = bytes.Split(data, []byte{'\n'})
}
return getLineFromLines(lines, line, context)
}
func getLineFromLines(lines [][]byte, line, context int) ([][]byte, int) {
if lines == nil {
// cached failure from a previous load attempt: return no lines
return nil, 0
}
line-- // stack trace lines are 1-indexed
start := line - context
var idx int
if start < 0 {
start = 0
idx = line
} else {
idx = context
}
end := line + context + 1
if line >= len(lines) {
return nil, 0
}
if end > len(lines) {
end = len(lines)
}
return lines[start:end], idx
}
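
The net effect of Load is that a stack frame's file path is trimmed at the first recognised repository prefix and fetched from GitHub's raw view at the locked version. A tiny standalone illustration of that mapping (the version tag and file path below are made-up examples, not taken from a real report):

package main

import (
	"fmt"
	"strings"
)

func main() {
	const urlPrefix = "https://raw.githubusercontent.com/syncthing/syncthing/"
	version := "v1.27.4"                                            // example value passed to LockWithVersion
	filename := "github.com/syncthing/syncthing/lib/model/model.go" // example frame path

	// Same prefix search as Load above: keep everything from /lib/ or /cmd/ onwards.
	idx := -1
	for _, pref := range []string{"/lib/", "/cmd/"} {
		if i := strings.Index(filename, pref); i >= 0 {
			idx = i
			break
		}
	}
	if idx < 0 {
		fmt.Println("not a recognised repository path")
		return
	}
	fmt.Println(urlPrefix + version + filename[idx:])
	// Prints: https://raw.githubusercontent.com/syncthing/syncthing/v1.27.4/lib/model/model.go
}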

View File

@@ -1,133 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"io"
"log"
"net/http"
"path"
"strings"
"sync"
)
type crashReceiver struct {
store *diskStore
sentry *sentryService
ignore *ignorePatterns
ignoredMut sync.RWMutex
ignored map[string]struct{}
}
func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// The final path component should be a SHA256 hash in hex, so 64 hex
// characters. We don't care about case on the request but use lower
// case internally.
reportID := strings.ToLower(path.Base(req.URL.Path))
if len(reportID) != 64 {
http.Error(w, "Bad request", http.StatusBadRequest)
return
}
for _, c := range reportID {
if c >= 'a' && c <= 'f' {
continue
}
if c >= '0' && c <= '9' {
continue
}
http.Error(w, "Bad request", http.StatusBadRequest)
return
}
switch req.Method {
case http.MethodGet:
r.serveGet(reportID, w, req)
case http.MethodHead:
r.serveHead(reportID, w, req)
case http.MethodPut:
r.servePut(reportID, w, req)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
// serveGet responds to GET requests by serving the uncompressed report.
func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http.Request) {
bs, err := r.store.Get(reportID)
if err != nil {
http.Error(w, "Not found", http.StatusNotFound)
return
}
w.Write(bs)
}
// serveHead responds to HEAD requests by checking if the named report
// already exists in the system.
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) {
r.ignoredMut.RLock()
_, ignored := r.ignored[reportID]
r.ignoredMut.RUnlock()
if ignored {
return // found
}
if !r.store.Exists(reportID) {
http.Error(w, "Not found", http.StatusNotFound)
}
}
// servePut accepts and stores the given report.
func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *http.Request) {
result := "receive_failure"
defer func() {
metricCrashReportsTotal.WithLabelValues(result).Inc()
}()
r.ignoredMut.RLock()
_, ignored := r.ignored[reportID]
r.ignoredMut.RUnlock()
if ignored {
result = "ignored_cached"
io.Copy(io.Discard, req.Body)
return // found
}
// Read at most maxRequestSize of report data.
log.Println("Receiving report", reportID)
lr := io.LimitReader(req.Body, maxRequestSize)
bs, err := io.ReadAll(lr)
if err != nil {
log.Println("Reading report:", err)
http.Error(w, "Internal server error", http.StatusInternalServerError)
return
}
if r.ignore.match(bs) {
r.ignoredMut.Lock()
if r.ignored == nil {
r.ignored = make(map[string]struct{})
}
r.ignored[reportID] = struct{}{}
r.ignoredMut.Unlock()
result = "ignored"
return
}
result = "success"
// Store the report
if !r.store.Put(reportID, bs) {
log.Println("Failed to store report (queue full):", reportID)
result = "queue_failure"
}
// Send the report to Sentry
if !r.sentry.Send(reportID, userIDFor(req), bs) {
log.Println("Failed to send report to sentry (queue full):", reportID)
result = "sentry_failure"
}
}
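
Taken together, these handlers give the receiver simple content-addressed semantics: HEAD asks whether a report ID is already known, PUT uploads the report body, GET serves it back. A minimal client-side sketch of that flow, assuming a hypothetical receiver URL and an in-memory report (neither is part of this changeset):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
)

func main() {
	const base = "https://crash.example.com/" // hypothetical receiver address
	report := []byte("syncthing v1.x.y ...\nPanic at ...\npanic: example\n")

	// The report ID is the lower-case hex SHA256 of the report contents,
	// matching the 64-character check in ServeHTTP above.
	id := fmt.Sprintf("%x", sha256.Sum256(report))

	// HEAD: is this report already stored (or ignored)?
	if resp, err := http.Head(base + id); err == nil {
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			fmt.Println("report already present, skipping upload")
			return
		}
	}

	// PUT: upload the report body.
	req, err := http.NewRequest(http.MethodPut, base+id, bytes.NewReader(report))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("upload status:", resp.Status)
}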

View File

@@ -1,56 +0,0 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"encoding/hex"
"net"
"net/http"
"os"
"path/filepath"
"time"
)
// userIDFor returns a string we can use as the user ID for the purpose of
// counting affected users. It's the truncated hash of a salt, the user
// remote IP, and the current month.
func userIDFor(req *http.Request) string {
addr := req.RemoteAddr
if fwd := req.Header.Get("X-Forwarded-For"); fwd != "" {
addr = fwd
}
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
now := time.Now().Format("200601")
salt := "stcrashreporter"
hash := sha256.Sum256([]byte(salt + addr + now))
return hex.EncodeToString(hash[:8])
}
// 01234567890abcdef... => 01/23
func dirFor(base string) string {
return filepath.Join(base[0:2], base[2:4])
}
func fullPathCompressed(root, reportID string) string {
return filepath.Join(root, dirFor(reportID), reportID) + ".gz"
}
func compressAndWrite(bs []byte, fullPath string) error {
// Compress the report for storage
buf := new(bytes.Buffer)
gw := gzip.NewWriter(buf)
_, _ = gw.Write(bs) // can't fail
gw.Close()
// Create an output file with the compressed report
return os.WriteFile(fullPath, buf.Bytes(), 0o644)
}

View File

@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 The Syncthing Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,23 +0,0 @@
# relaypoolsrv
This is the relay pool server for the `syncthing` project, which allows
community-hosted [relaysrv](https://github.com/syncthing/relaysrv) instances
to join the public pool.
Servers that join the pool are then advertised to users of `syncthing` as
potential connection points for those who are unable to connect directly due
to NAT or firewall issues.
There is very little reason why you'd want to run this yourself, as
`relaypoolsrv` is just used for announcement and lookup of public relay
servers. If you are looking to set up a private or a public relay, please
check the documentation for
[relaysrv](https://github.com/syncthing/relaysrv), which also explains how
to join the default public pool.
See `relaypoolsrv -help` for configuration options.
##### Third-party attributions
[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).

View File

@@ -1 +0,0 @@
gui.files.go

View File

@@ -1,10 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
//go:generate go run ../../../../script/genassets.go -o gui.files.go ../gui
// Package auto contains auto generated files for web assets.
package auto

View File

@@ -1,16 +0,0 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
//go:build noassets
// +build noassets
package auto
import "github.com/syncthing/syncthing/lib/assets"
func Assets() map[string]assets.Asset {
return nil
}

View File

@@ -1,402 +0,0 @@
<!DOCTYPE html>
<html lang="en" ng-app="syncthing" ng-controller="relayDataController">
<head>
<meta charset="utf-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=edge"/>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<meta name="description" content=""/>
<meta name="author" content=""/>
<title>Relay stats</title>
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.0.13/css/all.css"/>
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet"/>
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.0.13/css/all.css"/>
<link rel="stylesheet" href="https://unpkg.com/leaflet@1.6.0/dist/leaflet.css"
integrity="sha512-xwE/Az9zrjBIphAcBb3F6JVqxf46+CDLwfLMHloNu6KEQCAWi6HcDUbeOfBIptF7tcCzusKFjFw2yuvEpDL9wQ=="
crossorigin=""/>
<script src="https://unpkg.com/leaflet@1.6.0/dist/leaflet.js"
integrity="sha512-gZwIG9x3wUXg2hdXF6+rVkLF/0Vi9U8D2Ntg4Ga5I5BZpVkVxlJWbSQtXPSiUTtC0TjtGOmxa1AJPuV0CPthew=="
crossorigin=""></script>
<style>
#map {
height: 600px;
}
.ng-cloak {
display: none;
}
table {
font-size: 11px !important;
width: 100%;
border: 1px;
}
td {
padding: 0px !important;
}
tfoot td {
font-weight: bold;
}
</style>
</head>
<body class="ng-cloak">
<div class="container">
<h1>Relay Pool Data</h1>
<div ng-if="relays === undefined" class="text-center">
<img src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif" alt=""/>
<p>Please wait while we gather data…</p>
</div>
<div>
<div ng-show="relays !== undefined" class="ng-hide">
<p>
The relays listed on this page are not managed or vetted by the Syncthing project.
Each relay is the responsibility of the relay operator.
Currently {{ relays.length }} relays are online.
</p>
</div>
<div id="map"></div> <!-- Can't hide the map, otherwise it freaks out -->
<p>The circle size represents how much bytes the relay has transferred relatively to other relays.</p>
</div>
<div>
<table class="table table-striped table-condensed table">
<thead>
<tr>
<th rowspan="2">Address</td>
<th rowspan="2">
<a ng-click="sortType = 'stats.numActiveSessions'; sortReverse = !sortReverse">
Sessions
<span ng-show="sortType == 'stats.numActiveSessions' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.numActiveSessions' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th rowspan="2">
<a ng-click="sortType = 'stats.numConnections'; sortReverse = !sortReverse">
Connections
<span ng-show="sortType == 'stats.numConnections' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.numConnections' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th rowspan="2">
<a ng-click="sortType = 'stats.bytesProxied'; sortReverse = !sortReverse">
Data relayed
<span ng-show="sortType == 'stats.bytesProxied' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.bytesProxied' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th colspan="6" class="text-center">Transfer rate in the last period</th>
<th rowspan="2">
<a ng-click="sortType = 'stats.uptimeSeconds'; sortReverse = !sortReverse">
Uptime hours
<span ng-show="sortType == 'stats.uptimeSeconds' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'status.uptimeSeconds' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th rowspan="2">
<a ng-click="sortType = 'stats.options[\'provided-by\'] || \'\''; sortReverse = !sortReverse">
Provided by
<span ng-show="sortType == 'stats.options[\'provided-by\'] || \'\'' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.options[\'provided-by\'] || \'\'' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
</tr>
<tr>
<th>
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[0]'; sortReverse = !sortReverse">
10s
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[0]' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[0]' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th>
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[1]'; sortReverse = !sortReverse">
1m
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[1]' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[1]' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th>
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[2]'; sortReverse = !sortReverse">
5m
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[2]' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[2]' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th>
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[3]'; sortReverse = !sortReverse">
15m
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[3]' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[3]' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th>
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[4]'; sortReverse = !sortReverse">
30m
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[4]' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[4]' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
<th>
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[5]'; sortReverse = !sortReverse">
60m
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[5]' && !sortReverse" class="fas fa-caret-down"></span>
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[5]' && sortReverse" class="fas fa-caret-up"></span>
</a>
</th>
</tr>
</thead>
<tbody>
<tr ng-repeat="relay in relays | orderBy:sortType:sortReverse:sortCompare" ng-mouseover="relay.showMarker()" ng-mouseleave="relay.hideMarker()">
<td>{{ relay.address }}</td>
<td ng-if="!relay.stats" colspan="11"></td>
<td ng-if-start="relay.stats">{{ relay.stats.numActiveSessions }}</td>
<td>{{ relay.stats.numConnections }}</td>
<td>{{ relay.stats.bytesProxied | bytes }}</td>
<td>{{ relay.stats.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
<td>{{ relay.stats.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
<td>{{ relay.stats.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
<td>{{ relay.stats.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
<td>{{ relay.stats.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
<td>{{ relay.stats.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
<td ng-if="relay.stats.uptimeSeconds != undefined">{{ relay.stats.uptimeSeconds/60/60 | number:0 }}</td>
<td ng-if="relay.stats.uptimeSeconds == undefined"></td>
<td title="{{ relay.stats.options['provided-by'] || '' }}" ng-if-end>
{{ relay.stats.options['provided-by'] || '' | limitTo:50 }}
<span ng-if="(relay.stats.options['provided-by'] || '').length > 50">&hellip;
</td>
</tr>
</tbody>
<tfoot>
<tr>
<td>Totals</td>
<td>{{ totals.numActiveSessions }}</td>
<td>{{ totals.numConnections }}</td>
<td>{{ totals.bytesProxied | bytes }}</td>
<td>{{ totals.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
<td>{{ totals.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
<td>{{ totals.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
<td>{{ totals.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
<td>{{ totals.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
<td>{{ totals.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
<td>{{ totals.uptimeSeconds/60/60 | number:0 }} hours</td>
<td>{{ relays.length }} relays</td>
</tr>
</tfoor>
</table>
</div>
<hr>
<p>
This product includes GeoLite2 data created by MaxMind, available from
<a href="https://www.maxmind.com">https://www.maxmind.com</a>.
</p>
</div>
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.4.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.5.8/angular.min.js"></script>
<script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
</body>
<script>
angular.module('syncthing', [
])
.config(['$httpProvider', function($httpProvider) {
$httpProvider.defaults.timeout = 5000;
}])
.filter('bytes', function() {
return function(bytes, precision) {
if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return '-';
if (typeof precision === 'undefined') precision = 1;
var units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'],
number = Math.floor(Math.log(bytes) / Math.log(1024));
var value = (bytes / Math.pow(1024, number));
if (!isFinite(value)) {
value = 0;
precision = 0;
}
if (!isFinite(number)) {
units = 'bytes';
} else {
units = units[number];
}
return value.toFixed(precision) + ' ' + units;
}
})
.controller('relayDataController', ['$scope', '$rootScope', '$http', '$q', '$compile', '$timeout', function($scope, $rootScope, $http, $q, $compile, $timeout) {
$scope.totals = {
bytesProxied: 0,
goMaxProcs: 0,
kbps10s1m5m15m30m60m: [0, 0, 0, 0, 0, 0],
numActiveSessions: 0,
numConnections: 0,
numPendingSessionKeys: 0,
numProxies: 0,
uptimeSeconds: 0,
};
$scope.map = L.map('map').setView([40.90296, 1.90925], 2);
L.tileLayer('https://tile.openstreetmap.org/{z}/{x}/{y}.png',
{
attribution: 'Leaflet',
maxZoom: 17
}).addTo($scope.map);
$scope.tooltipTemplate = $('#infoTemplate').html();
$scope.usedLocations = {};
$scope.sortType = 'stats.numActiveSessions';
$scope.sortReverse = true;
$scope.sortCompare = function(a, b) {
if (a.value == b.value) {
return 0;
}
if (a.type == "undefined" || a.type == "null") {
return -1;
}
if (b.type == "undefined" || b.type == "null") {
return 1;
}
return a.value > b.value ? 1 : -1;
}
$http.get("/endpoint/full").then(function(response) {
$scope.relays = response.data.relays;
angular.forEach($scope.relays, function(relay) {
relay.uri = constructURI(relay.url);
relay.address = relay.url.split('/')[2];
addMarkerToMap(relay);
if (relay.stats) {
angular.forEach($scope.totals, function(value, key) {
if (typeof $scope.totals[key] == 'number') {
$scope.totals[key] += relay.stats[key];
} else if (typeof $scope.totals[key] == 'object' && $scope.totals[key] instanceof Array) {
angular.forEach($scope.totals[key], function(value, index) {
$scope.totals[key][index] += relay.stats[key][index];
});
}
});
}
});
// After the totals were calculated, add circles.
angular.forEach($scope.relays, function(relay) {
if (relay.stats) {
addCircleToMap(relay);
}
});
if ($scope.relays.length == 1) {
//Center to only relay with zoom
$scope.map.panTo(new L.LatLng($scope.relays[0].location.latitude, $scope.relays[0].location.longitude));
$scope.map.setZoom(13);
}
});
function addMarkerToMap(relay) {
var loc = relay.location.latitude + "," + relay.location.longitude;
// Deal with overlapping markers
while (loc in $scope.usedLocations) {
var locParts = loc.split(',');
locParts = [parseFloat(locParts[0]), parseFloat(locParts[1])];
locParts[Math.round(Math.random())] += 0.5 * (Math.random() >= 0.5 ? 1 : -1);
loc = locParts.join(',');
}
$scope.usedLocations[loc] = true;
var locParts = loc.split(',');
relay.marker = new L.Marker([relay.location.latitude, relay.location.longitude],{
title: relay.url,
});
var scope = $rootScope.$new(true);
scope.relay = relay;
var icon = new L.Icon({
iconSize: [18, 28], // size of the icon
iconAnchor: [9, 28], // point of the icon which will correspond to marker's location
shadowAnchor: [0, 0], // the same for the shadow
popupAnchor: [0, -27], // popup anchor
shadowSize: [0,0],
iconUrl: 'https://cdn.rawgit.com/pointhi/leaflet-color-markers/master/img/marker-icon-red.png',
shadowUrl: 'https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/images/marker-shadow.png',
});
relay.marker = new L.marker(new L.latLng(locParts[0], locParts[1]),{icon})
.bindPopup($compile($scope.tooltipTemplate)(scope)[0],{})
.on('mouseover', function (e) {
this.openPopup();
}).on('mouseout', function (e) {
this.closePopup();
}).addTo($scope.map);
relay.showMarker = function() {
relay.marker.openPopup();
}
relay.hideMarker = function() {
relay.marker.closePopup();
}
}
function addCircleToMap(relay) {
L.circle([relay.location.latitude, relay.location.longitude],
{
radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000,
color: "#FF0000",
fillColor: "#FF0000",
fillOpacity: 0.35,
}).addTo($scope.map);
}
function constructURI(url) {
var uri = document.createElement('a');
// HAX, otherwise doesn't work
uri.href = url.replace('relay://', 'http://');
// Convert query string to object
uri.args = {};
angular.forEach(uri.search.replace(/^\?/, '').split('&'), function(query) {
var split = query.split('=');
uri.args[split[0]] = split[1];
});
return uri;
}
}]);
</script>
<script type="text/template" id="infoTemplate">
<div>
<p><b>{{ relay.uri.hostname }}</b> <span ng-if="relay.stats.options['provided-by']">provided by <u>{{ relay.stats.options['provided-by'] }}</u></span></p>
<div ng-if="relay.stats">
<span ng-if="relay.stats.startTime">Start time: {{ relay.stats.startTime | date:"medium" }}</br></span>
<span ng-if="relay.stats.bytesProxied != undefined">Proxied: {{ relay.stats.bytesProxied | bytes }}</br></span>
<span ng-if="relay.stats.numActiveSessions != undefined">Sessions: {{ relay.stats.numActiveSessions }}</br></span>
<span ng-if="relay.stats.numConnections != undefined">Clients: {{ relay.stats.numConnections }}</br></span>
<span ng-if="relay.stats.options.pools">Pools: {{ relay.stats.options.pools.join(', ') }}</br></span>
<span ng-if="relay.stats.options['global-rate'] != undefined">
<span ng-if="relay.stats.options['global-rate'] > 0">Global rate limit: {{ relay.stats.options['global-rate'] | bytes }}/s</span>
<span ng-if="relay.stats.options['global-rate'] == 0">Global rate limit: unlimited</span>
<br/>
</span>
<span ng-if="relay.stats.options['per-session-rate'] != undefined">
<span ng-if="relay.stats.options['per-session-rate'] > 0">Session rate limit: {{ relay.stats.options['per-session-rate'] | bytes }}/s</span>
<span ng-if="relay.stats.options['per-session-rate'] == 0">Session rate limit: unlimited</span>
<br/>
</span>
</div>
<div ng-if="!relay.stats">
Data unavailable.
<div>
</div>
</script>
</html>

View File

@@ -1,719 +0,0 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"log"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto"
"github.com/syncthing/syncthing/lib/assets"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/geoip"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/relay/client"
"github.com/syncthing/syncthing/lib/tlsutil"
)
type location struct {
Latitude float64 `json:"latitude"`
Longitude float64 `json:"longitude"`
City string `json:"city"`
Country string `json:"country"`
Continent string `json:"continent"`
}
type relay struct {
URL string `json:"url"`
Location location `json:"location"`
uri *url.URL
Stats *stats `json:"stats"`
StatsRetrieved time.Time `json:"statsRetrieved"`
}
type relayShort struct {
URL string `json:"url"`
}
type stats struct {
StartTime time.Time `json:"startTime"`
UptimeSeconds int `json:"uptimeSeconds"`
PendingSessionKeys int `json:"numPendingSessionKeys"`
ActiveSessions int `json:"numActiveSessions"`
Connections int `json:"numConnections"`
Proxies int `json:"numProxies"`
BytesProxied int `json:"bytesProxied"`
GoVersion string `json:"goVersion"`
GoOS string `json:"goOS"`
GoArch string `json:"goArch"`
GoMaxProcs int `json:"goMaxProcs"`
GoRoutines int `json:"goNumRoutine"`
Rates []int64 `json:"kbps10s1m5m15m30m60m"`
Options struct {
NetworkTimeout int `json:"network-timeout"`
PingInterval int `json:"ping-interval"`
MessageTimeout int `json:"message-timeout"`
SessionRate int `json:"per-session-rate"`
GlobalRate int `json:"global-rate"`
Pools []string `json:"pools"`
ProvidedBy string `json:"provided-by"`
} `json:"options"`
}
func (r relay) String() string {
return r.URL
}
type request struct {
relay *relay
result chan result
queueTimer *prometheus.Timer
}
type result struct {
err error
eviction time.Duration
}
var (
testCert tls.Certificate
knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
listen = ":80"
metricsListen = ":8081"
dir string
evictionTime = time.Hour
debug bool
permRelaysFile string
ipHeader string
proto string
statsRefresh = time.Minute
requestQueueLen = 64
requestProcessors = 8
geoipLicenseKey = os.Getenv("GEOIP_LICENSE_KEY")
geoipAccountID, _ = strconv.Atoi(os.Getenv("GEOIP_ACCOUNT_ID"))
maxRelaysReturned = 100
requests chan request
mut sync.RWMutex
knownRelays = make([]*relay, 0)
permanentRelays = make([]*relay, 0)
evictionTimers = make(map[string]*time.Timer)
globalBlocklist = newErrorTracker(1000)
)
const (
httpStatusEnhanceYourCalm = 429
)
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(log.Lshortfile)
flag.StringVar(&listen, "listen", listen, "Listen address")
flag.StringVar(&metricsListen, "metrics-listen", metricsListen, "Metrics listen address")
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
flag.StringVar(&knownRelaysFile, "known-relays", knownRelaysFile, "Path to list of current relays")
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
flag.DurationVar(&statsRefresh, "stats-refresh", statsRefresh, "Interval at which to refresh relay stats")
flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")
flag.StringVar(&geoipLicenseKey, "geoip-license-key", geoipLicenseKey, "License key for GeoIP database")
flag.IntVar(&maxRelaysReturned, "max-relays-returned", maxRelaysReturned, "Maximum number of relays returned for a normal endpoint query")
flag.Parse()
requests = make(chan request, requestQueueLen)
geoip, err := geoip.NewGeoLite2CityProvider(context.Background(), geoipAccountID, geoipLicenseKey, os.TempDir())
if err != nil {
log.Fatalln("Failed to create GeoIP provider:", err)
}
go geoip.Serve(context.TODO())
var listener net.Listener
if permRelaysFile != "" {
permanentRelays = loadRelays(permRelaysFile, geoip)
}
testCert = createTestCertificate()
for range requestProcessors {
go requestProcessor(geoip)
}
// Load relays from cache in the background.
// Load them in a serial fashion to make sure any genuine requests
// are not dropped.
go func() {
for _, relay := range loadRelays(knownRelaysFile, geoip) {
resultChan := make(chan result)
requests <- request{relay, resultChan, nil}
result := <-resultChan
if result.err != nil {
relayTestsTotal.WithLabelValues("failed").Inc()
} else {
relayTestsTotal.WithLabelValues("success").Inc()
}
}
// Run the stats refresher once the relays are loaded.
statsRefresher(statsRefresh)
}()
if dir != "" {
if debug {
log.Println("Starting TLS listener on", listen)
}
certFile, keyFile := filepath.Join(dir, "http-cert.pem"), filepath.Join(dir, "http-key.pem")
var cert tls.Certificate
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
log.Fatalln("Failed to load HTTP X509 key pair:", err)
}
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{cert},
MinVersion: tls.VersionTLS10, // No SSLv3
ClientAuth: tls.RequestClientCert,
CipherSuites: []uint16{
// No RC4
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
},
}
listener, err = tls.Listen(proto, listen, tlsCfg)
} else {
if debug {
log.Println("Starting plain listener on", listen)
}
listener, err = net.Listen(proto, listen)
}
if err != nil {
log.Fatalln("listen:", err)
}
if metricsListen != "" {
mmux := http.NewServeMux()
mmux.HandleFunc("/metrics", handleMetrics)
go func() {
if err := http.ListenAndServe(metricsListen, mmux); err != nil {
log.Fatalln("HTTP serve metrics:", err)
}
}()
}
getMux := http.NewServeMux()
getMux.HandleFunc("/", handleAssets)
getMux.HandleFunc("/endpoint", withAPIMetrics(handleEndpointShort))
getMux.HandleFunc("/endpoint/full", withAPIMetrics(handleEndpointFull))
postMux := http.NewServeMux()
postMux.HandleFunc("/endpoint", withAPIMetrics(handleRegister))
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet, http.MethodHead, http.MethodOptions:
getMux.ServeHTTP(w, r)
case http.MethodPost:
postMux.ServeHTTP(w, r)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
})
srv := http.Server{
Handler: handler,
ReadTimeout: 10 * time.Second,
}
srv.SetKeepAlivesEnabled(false)
err = srv.Serve(listener)
if err != nil {
log.Fatalln("serve:", err)
}
}
func handleMetrics(w http.ResponseWriter, r *http.Request) {
timer := prometheus.NewTimer(metricsRequestsSeconds)
// Acquire the mutex just to make sure we're not caught mid-way through stats collection
mut.RLock()
promhttp.Handler().ServeHTTP(w, r)
mut.RUnlock()
timer.ObserveDuration()
}
func handleAssets(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache, must-revalidate")
path := r.URL.Path[1:]
if path == "" {
path = "index.html"
}
as, ok := auto.Assets()[path]
if !ok {
w.WriteHeader(http.StatusNotFound)
return
}
assets.Serve(w, r, as)
}
func withAPIMetrics(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
w = NewLoggingResponseWriter(w)
defer func() {
timer.ObserveDuration()
lw := w.(*loggingResponseWriter)
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
}()
next(w, r)
}
}
// handleEndpointFull returns the relay list with full metadata and
// statistics. Large, and expensive.
func handleEndpointFull(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
rw.Header().Set("Access-Control-Allow-Origin", "*")
mut.RLock()
relays := make([]*relay, len(permanentRelays)+len(knownRelays))
n := copy(relays, permanentRelays)
copy(relays[n:], knownRelays)
mut.RUnlock()
_ = json.NewEncoder(rw).Encode(map[string][]*relay{
"relays": relays,
})
}
// handleEndpointShort returns the relay list with only the URL.
func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
rw.Header().Set("Access-Control-Allow-Origin", "*")
mut.RLock()
relays := make([]relayShort, 0, len(permanentRelays)+len(knownRelays))
for _, r := range append(permanentRelays, knownRelays...) {
relays = append(relays, relayShort{URL: slimURL(r.URL)})
}
mut.RUnlock()
if len(relays) > maxRelaysReturned {
rand.Shuffle(relays)
relays = relays[:maxRelaysReturned]
}
_ = json.NewEncoder(rw).Encode(map[string][]relayShort{
"relays": relays,
})
}
func handleRegister(w http.ResponseWriter, r *http.Request) {
// Get the IP address of the client
rhost := r.RemoteAddr
if ipHeader != "" {
hdr := r.Header.Get(ipHeader)
fields := strings.Split(hdr, ",")
if len(fields) > 0 {
rhost = strings.TrimSpace(fields[len(fields)-1])
}
}
if host, _, err := net.SplitHostPort(rhost); err == nil {
rhost = host
}
// Check the black list. A client is blacklisted if their last 10
// attempts to join have all failed. The "Unauthorized" status return
// causes strelaysrv to cease attempting to join.
if globalBlocklist.IsBlocked(rhost) {
log.Println("Rejected blocked client", rhost)
http.Error(w, "Too many errors", http.StatusUnauthorized)
globalBlocklist.ClearErrors(rhost)
return
}
var relayCert *x509.Certificate
if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
relayCert = r.TLS.PeerCertificates[0]
log.Printf("Got TLS cert from relay server")
}
var newRelay relay
err := json.NewDecoder(r.Body).Decode(&newRelay)
r.Body.Close()
if err != nil {
if debug {
log.Println("Failed to parse payload")
}
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
uri, err := url.Parse(newRelay.URL)
if err != nil {
if debug {
log.Println("Failed to parse URI", newRelay.URL)
}
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Canonicalize the URL. In particular, parse and re-encode the query
// string so that it's guaranteed to be valid.
uri.RawQuery = uri.Query().Encode()
newRelay.URL = uri.String()
if relayCert != nil {
advertisedId := uri.Query().Get("id")
idFromCert := protocol.NewDeviceID(relayCert.Raw).String()
if advertisedId != idFromCert {
log.Println("Warning: Relay server requested to join with an ID different from the join request, rejecting")
http.Error(w, "mismatched advertised id and join request cert", http.StatusBadRequest)
return
}
}
host, port, err := net.SplitHostPort(uri.Host)
if err != nil {
if debug {
log.Println("Failed to split URI", newRelay.URL)
}
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
ip := net.ParseIP(host)
// The relay URL did not specify a usable IP address; use the IP address of the requesting client instead.
if ip == nil || ip.IsUnspecified() {
uri.Host = net.JoinHostPort(rhost, port)
newRelay.URL = uri.String()
} else if host != rhost && relayCert == nil {
if debug {
log.Println("IP address advertised does not match client IP address", r.RemoteAddr, uri)
}
http.Error(w, fmt.Sprintf("IP advertised %s does not match client IP %s", host, rhost), http.StatusUnauthorized)
return
}
newRelay.uri = uri
for _, current := range permanentRelays {
if current.uri.Host == newRelay.uri.Host {
if debug {
log.Println("Asked to add a relay", newRelay, "which exists in permanent list")
}
http.Error(w, "Invalid request", http.StatusBadRequest)
return
}
}
reschan := make(chan result)
select {
case requests <- request{&newRelay, reschan, prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("queue"))}:
result := <-reschan
if result.err != nil {
log.Println("Join from", r.RemoteAddr, "failed:", result.err)
globalBlocklist.AddError(rhost)
relayTestsTotal.WithLabelValues("failed").Inc()
http.Error(w, result.err.Error(), http.StatusBadRequest)
return
}
log.Println("Join from", r.RemoteAddr, "succeeded")
globalBlocklist.ClearErrors(rhost)
relayTestsTotal.WithLabelValues("success").Inc()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
_ = json.NewEncoder(w).Encode(map[string]time.Duration{
"evictionIn": result.eviction,
})
default:
relayTestsTotal.WithLabelValues("dropped").Inc()
if debug {
log.Println("Dropping request")
}
w.WriteHeader(httpStatusEnhanceYourCalm)
}
}
func requestProcessor(geoip *geoip.Provider) {
for request := range requests {
if request.queueTimer != nil {
request.queueTimer.ObserveDuration()
}
timer := prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("test"))
handleRelayTest(request, geoip)
timer.ObserveDuration()
}
}
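// handleRelayTest verifies that the joining relay is reachable and speaks the
// relay protocol, fetches its stats and location, records it in knownRelays
// and (re)arms its eviction timer.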
func handleRelayTest(request request, geoip *geoip.Provider) {
if debug {
log.Println("Request for", request.relay)
}
if err := client.TestRelay(context.TODO(), request.relay.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3); err != nil {
if debug {
log.Println("Test for relay", request.relay, "failed:", err)
}
request.result <- result{err, 0}
return
}
stats := fetchStats(request.relay)
location := getLocation(request.relay.uri.Host, geoip)
mut.Lock()
if stats != nil {
updateMetrics(request.relay.uri.Host, *stats, location)
}
request.relay.Stats = stats
request.relay.StatsRetrieved = time.Now().Truncate(time.Second)
request.relay.Location = location
timer, ok := evictionTimers[request.relay.uri.Host]
if ok {
if debug {
log.Println("Stopping existing timer for", request.relay)
}
timer.Stop()
}
for i, current := range knownRelays {
if current.uri.Host == request.relay.uri.Host {
if debug {
log.Println("Relay", request.relay, "already exists")
}
// Evict the old entry anyway, as configuration might have changed.
last := len(knownRelays) - 1
knownRelays[i] = knownRelays[last]
knownRelays = knownRelays[:last]
goto found
}
}
if debug {
log.Println("Adding new relay", request.relay)
}
found:
knownRelays = append(knownRelays, request.relay)
evictionTimers[request.relay.uri.Host] = time.AfterFunc(evictionTime, evict(request.relay))
mut.Unlock()
if err := saveRelays(knownRelaysFile, knownRelays); err != nil {
log.Println("Failed to write known relays: " + err.Error())
}
request.result <- result{nil, evictionTime}
}
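// evict returns a closure that removes the given relay from knownRelays and
// deletes its metrics. It is scheduled via time.AfterFunc when the relay is
// added and re-armed on every successful re-registration.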
func evict(relay *relay) func() {
return func() {
mut.Lock()
defer mut.Unlock()
if debug {
log.Println("Evicting", relay)
}
for i, current := range knownRelays {
if current.uri.Host == relay.uri.Host {
if debug {
log.Println("Evicted", relay)
}
last := len(knownRelays) - 1
knownRelays[i] = knownRelays[last]
knownRelays = knownRelays[:last]
deleteMetrics(current.uri.Host)
}
}
delete(evictionTimers, relay.uri.Host)
}
}
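// loadRelays reads the known-relays file, one relay URL per line, resolving
// each relay's location as it goes. Lines that fail to parse are skipped.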
func loadRelays(file string, geoip *geoip.Provider) []*relay {
content, err := os.ReadFile(file)
if err != nil {
log.Println("Failed to load relays: " + err.Error())
return nil
}
var relays []*relay
for _, line := range strings.Split(string(content), "\n") {
if line == "" {
continue
}
uri, err := url.Parse(line)
if err != nil {
if debug {
log.Println("Skipping relay", line, "due to parse error", err)
}
continue
}
relays = append(relays, &relay{
URL: line,
Location: getLocation(uri.Host, geoip),
uri: uri,
})
if debug {
log.Println("Adding relay", line)
}
}
return relays
}
func saveRelays(file string, relays []*relay) error {
var content string
for _, relay := range relays {
content += relay.uri.String() + "\n"
}
return os.WriteFile(file, []byte(content), 0o777)
}
func createTestCertificate() tls.Certificate {
tmpDir, err := os.MkdirTemp("", "relaypoolsrv")
if err != nil {
log.Fatal(err)
}
certFile, keyFile := filepath.Join(tmpDir, "cert.pem"), filepath.Join(tmpDir, "key.pem")
cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 20*365, false)
if err != nil {
log.Fatalln("Failed to create test X509 key pair:", err)
}
return cert
}
func getLocation(host string, geoip *geoip.Provider) location {
timer := prometheus.NewTimer(locationLookupSeconds)
defer timer.ObserveDuration()
addr, err := net.ResolveTCPAddr("tcp", host)
if err != nil {
return location{}
}
city, err := geoip.City(addr.IP)
if err != nil {
return location{}
}
return location{
Longitude: city.Location.Longitude,
Latitude: city.Location.Latitude,
City: city.City.Names["en"],
Country: city.Country.IsoCode,
Continent: city.Continent.Code,
}
}
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
return &loggingResponseWriter{w, http.StatusOK}
}
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}
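// errorTracker keeps a bounded LRU cache of per-host join failure counters.
// A host is considered blocked once more than ten failures have accumulated
// since its last successful join.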
type errorTracker struct {
errors *lru.TwoQueueCache[string, *errorCounter]
}
type errorCounter struct {
count atomic.Int32
}
func newErrorTracker(size int) *errorTracker {
cache, err := lru.New2Q[string, *errorCounter](size)
if err != nil {
panic(err)
}
return &errorTracker{
errors: cache,
}
}
func (b *errorTracker) AddError(host string) {
entry, ok := b.errors.Get(host)
if !ok {
entry = &errorCounter{}
b.errors.Add(host, entry)
}
c := entry.count.Add(1)
log.Printf("Error count for %s is now %d", host, c)
}
func (b *errorTracker) ClearErrors(host string) {
b.errors.Remove(host)
}
func (b *errorTracker) IsBlocked(host string) bool {
if be, ok := b.errors.Get(host); ok {
return be.count.Load() > 10
}
return false
}
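// slimURL strips all query parameters from a relay URL except the device ID,
// keeping the short relay listing compact and independent of per-relay
// configuration options.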
func slimURL(u string) string {
p, err := url.Parse(u)
if err != nil {
return u
}
newQuery := url.Values{}
if id := p.Query().Get("id"); id != "" {
newQuery.Set("id", id)
}
p.RawQuery = newQuery.Encode()
return p.String()
}

View File

@@ -1,106 +0,0 @@
// Copyright © 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"encoding/json"
"fmt"
"net/http/httptest"
"net/url"
"strings"
"testing"
)
func init() {
for i := 0; i < 10; i++ {
u := fmt.Sprintf("permanent%d", i)
permanentRelays = append(permanentRelays, &relay{URL: u})
}
knownRelays = []*relay{
{URL: "known1"},
{URL: "known2"},
{URL: "known3"},
}
}
// Regression test: handleEndpointFull (formerly handleGetRequest) must not modify permanentRelays.
func TestHandleGetRequest(t *testing.T) {
needcap := len(permanentRelays) + len(knownRelays)
if needcap > cap(permanentRelays) {
t.Fatalf("test setup failed: need cap(permanentRelays) >= %d, have %d",
needcap, cap(permanentRelays))
}
w := httptest.NewRecorder()
w.Body = new(bytes.Buffer)
handleEndpointFull(w, httptest.NewRequest("GET", "/", nil))
result := make(map[string][]*relay)
err := json.NewDecoder(w.Body).Decode(&result)
if err != nil {
t.Fatalf("invalid JSON: %v", err)
}
relays := result["relays"]
expect, actual := len(knownRelays)+len(permanentRelays), len(relays)
if actual != expect {
t.Errorf("expected %d relays, got %d", expect, actual)
}
// Check for changes in permanentRelays.
for i, r := range permanentRelays {
switch {
case !strings.HasPrefix(r.URL, "permanent"):
t.Errorf("relay %q among permanent relays", r.URL)
case r.URL != fmt.Sprintf("permanent%d", i):
t.Error("order of permanent relays changed")
}
}
}
func TestCanonicalizeQueryValues(t *testing.T) {
// This just demonstrates and validates the uri.Parse/String behaviour with
// regard to query strings.
in := "http://example.com/?some weird= query^value"
exp := "http://example.com/?some+weird=+query%5Evalue"
uri, err := url.Parse(in)
if err != nil {
t.Fatal(err)
}
str := uri.String()
if str != in {
// Just re-encoding the URL doesn't sanitize the query string.
t.Errorf("expected %q, got %q", in, str)
}
uri.RawQuery = uri.Query().Encode()
str = uri.String()
if str != exp {
// The query string is now in correct format.
t.Errorf("expected %q, got %q", exp, str)
}
}
func TestSlimURL(t *testing.T) {
cases := []struct {
in, out string
}{
{"http://example.com/", "http://example.com/"},
{"relay://192.0.2.42:22067/?globalLimitBps=0&id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M&networkTimeout=2m0s&pingInterval=1m0s&providedBy=Test&sessionLimitBps=0&statusAddr=%3A22070", "relay://192.0.2.42:22067/?id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M"},
}
for _, c := range cases {
if got := slimURL(c.in); got != c.out {
t.Errorf("expected %q, got %q", c.out, got)
}
}
}

View File

@@ -1,245 +0,0 @@
// Copyright (C) 2018 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
package main
import (
"encoding/json"
"net"
"net/http"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
var (
statusClient = http.Client{
Timeout: 5 * time.Second,
}
apiRequestsTotal = makeCounter("api_requests_total", "Number of API requests.", "type", "result")
apiRequestsSeconds = makeSummary("api_requests_seconds", "Latency of API requests.", "type")
relayTestsTotal = makeCounter("tests_total", "Number of relay tests.", "result")
relayTestActionsSeconds = makeSummary("test_actions_seconds", "Latency of relay test actions.", "type")
locationLookupSeconds = makeSummary("location_lookup_seconds", "Latency of location lookups.").WithLabelValues()
metricsRequestsSeconds = makeSummary("metrics_requests_seconds", "Latency of metric requests.").WithLabelValues()
scrapeSeconds = makeSummary("relay_scrape_seconds", "Latency of metric scrapes from remote relays.", "result")
relayUptime = makeGauge("relay_uptime", "Uptime of relay", "relay")
relayPendingSessionKeys = makeGauge("relay_pending_session_keys", "Number of pending session keys (two keys per session, one per each side of the connection)", "relay")
relayActiveSessions = makeGauge("relay_active_sessions", "Number of sessions that are happening, a session contains two parties", "relay")
relayConnections = makeGauge("relay_connections", "Number of devices connected to the relay", "relay")
relayProxies = makeGauge("relay_proxies", "Number of active proxy routines sending data between peers (two proxies per session, one for each way)", "relay")
relayBytesProxied = makeGauge("relay_bytes_proxied", "Number of bytes proxied by the relay", "relay")
relayGoRoutines = makeGauge("relay_go_routines", "Number of Go routines in the process", "relay")
relaySessionRate = makeGauge("relay_session_rate", "Rate applied per session", "relay")
relayGlobalRate = makeGauge("relay_global_rate", "Global rate applied on the whole relay", "relay")
relayBuildInfo = makeGauge("relay_build_info", "Build information about a relay", "relay", "go_version", "go_os", "go_arch")
relayLocationInfo = makeGauge("relay_location_info", "Location information about a relay", "relay", "city", "country", "continent")
lastStats = make(map[string]stats)
)
func makeGauge(name string, help string, labels ...string) *prometheus.GaugeVec {
gauge := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "syncthing",
Subsystem: "relaypoolsrv",
Name: name,
Help: help,
},
labels,
)
prometheus.MustRegister(gauge)
return gauge
}
func makeSummary(name string, help string, labels ...string) *prometheus.SummaryVec {
summary := prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Namespace: "syncthing",
Subsystem: "relaypoolsrv",
Name: name,
Help: help,
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
labels,
)
prometheus.MustRegister(summary)
return summary
}
func makeCounter(name string, help string, labels ...string) *prometheus.CounterVec {
counter := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "syncthing",
Subsystem: "relaypoolsrv",
Name: name,
Help: help,
},
labels,
)
prometheus.MustRegister(counter)
return counter
}
func statsRefresher(interval time.Duration) {
ticker := time.NewTicker(interval)
for range ticker.C {
refreshStats()
}
}
type statsFetchResult struct {
relay *relay
stats *stats
}
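// refreshStats scrapes all permanent and known relays concurrently, then
// updates the per-relay Prometheus metrics under the write lock, deleting the
// metrics of relays that failed to respond.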
func refreshStats() {
mut.RLock()
relays := append(permanentRelays, knownRelays...)
mut.RUnlock()
now := time.Now()
var wg sync.WaitGroup
results := make(chan statsFetchResult, len(relays))
for _, rel := range relays {
wg.Add(1)
go func(rel *relay) {
t0 := time.Now()
stats := fetchStats(rel)
duration := time.Since(t0).Seconds()
result := "success"
if stats == nil {
result = "failed"
}
scrapeSeconds.WithLabelValues(result).Observe(duration)
results <- statsFetchResult{
relay: rel,
stats: stats,
}
wg.Done()
}(rel)
}
wg.Wait()
close(results)
mut.Lock()
relayBuildInfo.Reset()
relayLocationInfo.Reset()
for result := range results {
result.relay.StatsRetrieved = now
result.relay.Stats = result.stats
if result.stats == nil {
deleteMetrics(result.relay.uri.Host)
} else {
updateMetrics(result.relay.uri.Host, *result.stats, result.relay.Location)
}
}
mut.Unlock()
}
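// fetchStats scrapes a relay's /status endpoint and returns nil on any error.
// The status address comes from the statusAddr query parameter of the relay
// URL (defaulting to ":22070"); an address without a host part is combined
// with the relay's own host. For example, a relay advertised as
// relay://192.0.2.42:22067/?statusAddr=%3A22070 is scraped at
// http://192.0.2.42:22070/status.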
func fetchStats(relay *relay) *stats {
statusAddr := relay.uri.Query().Get("statusAddr")
if statusAddr == "" {
statusAddr = ":22070"
}
statusHost, statusPort, err := net.SplitHostPort(statusAddr)
if err != nil {
return nil
}
if statusHost == "" {
if host, _, err := net.SplitHostPort(relay.uri.Host); err != nil {
return nil
} else {
statusHost = host
}
}
url := "http://" + net.JoinHostPort(statusHost, statusPort) + "/status"
response, err := statusClient.Get(url)
if err != nil {
return nil
}
defer response.Body.Close()
var stats stats
if err := json.NewDecoder(response.Body).Decode(&stats); err != nil {
return nil
}
return &stats
}
func updateMetrics(host string, stats stats, location location) {
if stats.GoVersion != "" || stats.GoOS != "" || stats.GoArch != "" {
relayBuildInfo.WithLabelValues(host, stats.GoVersion, stats.GoOS, stats.GoArch).Add(1)
}
if location.City != "" || location.Country != "" || location.Continent != "" {
relayLocationInfo.WithLabelValues(host, location.City, location.Country, location.Continent).Add(1)
}
if lastStat, ok := lastStats[host]; ok {
stats = mergeStats(stats, lastStat)
}
relayUptime.WithLabelValues(host).Set(float64(stats.UptimeSeconds))
relayPendingSessionKeys.WithLabelValues(host).Set(float64(stats.PendingSessionKeys))
relayActiveSessions.WithLabelValues(host).Set(float64(stats.ActiveSessions))
relayConnections.WithLabelValues(host).Set(float64(stats.Connections))
relayProxies.WithLabelValues(host).Set(float64(stats.Proxies))
relayBytesProxied.WithLabelValues(host).Set(float64(stats.BytesProxied))
relayGoRoutines.WithLabelValues(host).Set(float64(stats.GoRoutines))
relaySessionRate.WithLabelValues(host).Set(float64(stats.Options.SessionRate))
relayGlobalRate.WithLabelValues(host).Set(float64(stats.Options.GlobalRate))
lastStats[host] = stats
}
func deleteMetrics(host string) {
relayUptime.DeleteLabelValues(host)
relayPendingSessionKeys.DeleteLabelValues(host)
relayActiveSessions.DeleteLabelValues(host)
relayConnections.DeleteLabelValues(host)
relayProxies.DeleteLabelValues(host)
relayBytesProxied.DeleteLabelValues(host)
relayGoRoutines.DeleteLabelValues(host)
relaySessionRate.DeleteLabelValues(host)
relayGlobalRate.DeleteLabelValues(host)
delete(lastStats, host)
}
// Due to some unexplained behaviour, some of the numbers sometimes travel slightly backwards (by less than 1%).
// This happens between scrapes, which are 30s apart, so it can't be a race.
// It causes Prometheus to assume a "rate reset", which produces phenomenal spikes.
// One of the numbers that moves backwards is BytesProxied, which atomically increments a counter by the value
// returned by net.Conn.Read(). I don't think that can return a negative value, so I have no idea what's going on.
func mergeStats(new stats, old stats) stats {
new.UptimeSeconds = mergeValue(new.UptimeSeconds, old.UptimeSeconds)
new.PendingSessionKeys = mergeValue(new.PendingSessionKeys, old.PendingSessionKeys)
new.ActiveSessions = mergeValue(new.ActiveSessions, old.ActiveSessions)
new.Connections = mergeValue(new.Connections, old.Connections)
new.Proxies = mergeValue(new.Proxies, old.Proxies)
new.BytesProxied = mergeValue(new.BytesProxied, old.BytesProxied)
new.GoRoutines = mergeValue(new.GoRoutines, old.GoRoutines)
new.Options.SessionRate = mergeValue(new.Options.SessionRate, old.Options.SessionRate)
new.Options.GlobalRate = mergeValue(new.Options.GlobalRate, old.Options.GlobalRate)
return new
}
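// mergeValue papers over small backward movements in a counter. For example
// (illustrative values): with old = 1000, a new reading of 995 (within 1%) is
// treated as noise and reported as 1000, while a new reading of 100 is
// treated as a genuine reset (relay restart) and reported as 100.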
func mergeValue(new, old int) int {
if new >= old {
return new // normal increase
}
if float64(new) > 0.99*float64(old) {
return old // slight backward movement
}
return new // reset (relay restart)
}

Some files were not shown because too many files have changed in this diff.