Mirror of https://github.com/syncthing/syncthing.git, synced 2025-12-30 17:39:20 -05:00

Compare commits: v2.0.0-bet ... v1 (72 commits)
SHA1:
0945304a79
9703dd9f57
259e9ef08e
6a0c6128d8
b05ece0681
e9133ef82b
67ba20d777
21da0d7890
ebbe57d0ab
f4abc71dcc
8aa02da93a
0e560486db
57d413099d
1fdf07933c
c50678618f
8094b459e4
6765867a2e
4fb8ee6a6f
674834ccf4
3bd2bff23b
40660c5fb7
d940d094a1
9d67727989
6f51700a7f
598915193a
905e5ec07f
4075b886d0
cade790198
98555a9a80
48b757cac1
58c85fc9db
ddd98a818a
64b5a1b738
1a131a56f2
beda37f28b
2532ac35cf
bcd30ceaec
9a3493c2f4
fa404d5a0d
73ad18fbfb
1dd264894a
8c3d2f3bc5
702ed8ecc1
b038650810
a16bf555c0
cd6ea60fa1
0bf21d9db2
f61843ef2e
23e8366f8d
93e72cc83f
190dff142c
c667ada63a
93ae30d889
486eebc4ac
ff33d976d1
69890b4282
533c9a6ab0
9521bb3931
e46a0f99c3
ed97e365b2
b4776ea4e0
b5ffd0a796
c74299b59a
8b6d837483
3e74b3dee2
2902da996c
f6f144bf17
ab5c42f4a0
7db3f7eaac
f0b666269b
190a59842c
40888c1a66
.github/ISSUE_TEMPLATE/01-feature.yml (vendored), 1 change

@@ -1,6 +1,7 @@
 name: Feature request
 description: File a new feature request
 labels: ["enhancement", "needs-triage"]
+type: Feature
 body:
   - type: textarea

.github/ISSUE_TEMPLATE/02-bug.yml (vendored), 1 change

@@ -1,6 +1,7 @@
 name: Bug report
 description: If you're actually looking for support instead, see "I need help / I have a question".
 labels: ["bug", "needs-triage"]
+type: Bug
 body:
   - type: markdown
     attributes:

.github/labeler.yml (vendored, new file), 23 lines

@@ -0,0 +1,23 @@
+version: 1
+labels:
+  - label: enhancement
+    title: ^feat\b
+  - label: bug
+    title: ^fix\b
+  - label: documentation
+    title: ^docs\b
+  - label: chore
+    title: ^chore\b
+  - label: chore
+    title: ^refactor\b
+  - label: build
+    title: ^build\b
+  - label: dependencies
+    title: ^build\(deps\)\b
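The labeler rules map conventional-commit style PR title prefixes to repository labels, and the release.yml configuration that follows groups those labels into release-note categories. As a rough, standalone illustration (not part of the repository), a subset of the same title patterns can be exercised with Go's regexp package; the PR titles below are hypothetical:

```go
package main

import (
	"fmt"
	"regexp"
)

// rules mirrors a subset of the title patterns from .github/labeler.yml above.
var rules = []struct {
	pattern *regexp.Regexp
	label   string
}{
	{regexp.MustCompile(`^feat\b`), "enhancement"},
	{regexp.MustCompile(`^fix\b`), "bug"},
	{regexp.MustCompile(`^docs\b`), "documentation"},
	{regexp.MustCompile(`^chore\b`), "chore"},
	{regexp.MustCompile(`^refactor\b`), "chore"},
	{regexp.MustCompile(`^build\b`), "build"},
}

func main() {
	// Hypothetical PR titles, used only to show which rule would fire.
	titles := []string{
		"feat: add blob storage backend",
		"fix(relaysrv): create the session limiter lazily",
		"docs: update build instructions",
	}
	for _, title := range titles {
		for _, r := range rules {
			if r.pattern.MatchString(title) {
				fmt.Printf("%-50s -> %s\n", title, r.label)
			}
		}
	}
}
```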
.github/release.yml (vendored, new file), 17 lines

@@ -0,0 +1,17 @@
+changelog:
+  exclude:
+    labels:
+      - dependencies
+  categories:
+    - title: Fixes
+      labels:
+        - bug
+    - title: Features
+      labels:
+        - enhancement
+    - title: Other
+      labels:
+        - '*'
.github/workflows/build-infra-dockers.yaml (vendored), 2 changes

@@ -21,7 +21,7 @@ jobs:
     name: Build and push Docker images
     if: github.repository == 'syncthing/syncthing'
     runs-on: ubuntu-latest
-    environment: release
+    environment: docker
     strategy:
       matrix:
         pkg:
.github/workflows/build-syncthing.yaml (vendored), 136 changes

@@ -3,6 +3,9 @@ name: Build Syncthing
 on:
   pull_request:
   push:
+    branches-ignore:
+      - release
+      - release-rc*
   workflow_call:
   workflow_dispatch:

@@ -86,26 +89,6 @@ jobs:
       LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }}
       LOKI_LABELS: "go=${{ matrix.go }},runner=${{ matrix.runner }},repo=${{ github.repository }},ref=${{ github.ref }}"
-
-  #
-  # Meta checks for formatting, copyright, etc
-  #
-
-  correctness:
-    name: Check correctness
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.GO_VERSION }}
-          cache: false
-          check-latest: true
-
-      - name: Check correctness
-        run: |
-          go test -v ./meta

   #
   # The basic checks job is a virtual one that depends on the matrix tests,
   # the correctness checks, and various builds that we always do. This makes

@@ -120,7 +103,6 @@ jobs:
     runs-on: ubuntu-latest
     needs:
       - build-test
-      - correctness
       - package-linux
       - package-cross
       - package-source

@@ -194,7 +176,7 @@ jobs:
   codesign-windows:
     name: Codesign for Windows
-    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
+    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
     environment: release
     runs-on: windows-latest
     needs:

@@ -301,7 +283,7 @@ jobs:
   package-macos:
     name: Package for macOS
-    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
+    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
     environment: release
     runs-on: macos-latest
     steps:

@@ -401,7 +383,7 @@ jobs:
   notarize-macos:
     name: Notarize for macOS
-    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
+    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
     environment: release
     needs:
       - package-macos

@@ -545,7 +527,7 @@ jobs:
   sign-for-upgrade:
     name: Sign for upgrade
-    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
+    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
     environment: release
     needs:
       - codesign-windows

@@ -726,12 +708,15 @@ jobs:
       - name: Push artifacts
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
-          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
-          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
-          RCLONE_AZUREBLOB_ACCESS_TIER: hot
+          RCLONE_CONFIG_OBJSTORE_TYPE: s3
+          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
+          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
+          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
+          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
+          RCLONE_CONFIG_OBJSTORE_ACL: public-read
         with:
-          args: sync -v packages objstore:nightly
+          args: sync -v --no-update-modtime packages objstore:nightly

   #
   # Push release artifacts to Spaces

@@ -741,6 +726,8 @@ jobs:
     name: Publish release files
     if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/tags/v'))
     environment: release
+    permissions:
+      contents: write
     needs:
       - sign-for-upgrade
       - package-debian

@@ -777,22 +764,64 @@ jobs:
       - name: Push to object store (${{ env.VERSION }})
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
-          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
-          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
-          RCLONE_AZUREBLOB_ACCESS_TIER: cool
+          RCLONE_CONFIG_OBJSTORE_TYPE: s3
+          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
+          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
+          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
+          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
+          RCLONE_CONFIG_OBJSTORE_ACL: public-read
         with:
-          args: sync -v packages objstore:release/${{ env.VERSION }}
+          args: sync -v --no-update-modtime packages objstore:release/${{ env.VERSION }}

       - name: Push to object store (latest)
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
-          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
-          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
-          RCLONE_AZUREBLOB_ACCESS_TIER: hot
+          RCLONE_CONFIG_OBJSTORE_TYPE: s3
+          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
+          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
+          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
+          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
+          RCLONE_CONFIG_OBJSTORE_ACL: public-read
         with:
-          args: sync -v objstore:release/${{ env.VERSION }} objstore:release/latest
+          args: sync -v --no-update-modtime objstore:release/${{ env.VERSION }} objstore:release/latest

+      - name: Create GitHub releases and push binaries
+        run: |
+          maybePrerelease=""
+          if [[ $VERSION == *-* ]]; then
+            maybePrerelease="--prerelease"
+          fi
+          export GH_PROMPT_DISABLED=1
+          if ! gh release view --json name "$VERSION" >/dev/null 2>&1 ; then
+            gh release create "$VERSION" \
+              $maybePrerelease \
+              --title "$VERSION" \
+              --notes-from-tag
+          fi
+          gh release upload --clobber "$VERSION" \
+            packages/*.asc packages/*.json \
+            packages/syncthing-*.tar.gz \
+            packages/syncthing-*.zip \
+            packages/syncthing_*.deb
+
+          PKGS=$(pwd)/packages
+          cd /tmp # gh will not release for repo x while inside repo y
+          for repo in relaysrv discosrv ; do
+            export GH_REPO="syncthing/$repo"
+            if ! gh release view --json name "$VERSION" >/dev/null 2>&1 ; then
+              gh release create "$VERSION" \
+                $maybePrerelease \
+                --title "$VERSION" \
+                --notes "https://github.com/syncthing/syncthing/releases/tag/$VERSION"
+            fi
+            gh release upload --clobber "$VERSION" \
+              $PKGS/*.asc \
+              $PKGS/*${repo}*
+          done
+        env:
+          GH_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

   #
   # Push Debian/APT archive

@@ -800,7 +829,7 @@ jobs:
   publish-apt:
     name: Publish APT
-    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
+    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release-nightly' || startsWith(github.ref, 'refs/tags/v'))
     environment: release
     needs:
       - package-debian

@@ -839,9 +868,13 @@ jobs:
       - name: Pull archive
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
-          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
-          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
+          RCLONE_CONFIG_OBJSTORE_TYPE: s3
+          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
+          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
+          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
+          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
+          RCLONE_CONFIG_OBJSTORE_ACL: public-read
         with:
           args: sync objstore:apt/dists dists

@@ -858,12 +891,15 @@ jobs:
       - name: Push archive
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
-          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
-          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
-          RCLONE_AZUREBLOB_ACCESS_TIER: hot
+          RCLONE_CONFIG_OBJSTORE_TYPE: s3
+          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
+          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
+          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
+          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
+          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
+          RCLONE_CONFIG_OBJSTORE_ACL: public-read
         with:
-          args: sync -v dists objstore:apt/dists
+          args: sync -v --no-update-modtime dists objstore:apt/dists

   #
   # Build and push to Docker Hub

@@ -872,7 +908,7 @@ jobs:
   docker-syncthing:
     name: Build and push Docker images
     runs-on: ubuntu-latest
-    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
+    if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release-nightly' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/tags/v'))
     environment: docker
     permissions:
       contents: read
.github/workflows/pr-linters.yaml (vendored, new file), 49 lines

@@ -0,0 +1,49 @@
+name: Run PR linters
+
+on:
+  pull_request:
+  workflow_dispatch:
+
+permissions:
+  contents: read
+  pull-requests: read
+
+jobs:
+
+  #
+  # golangci-lint runs a suite of static analysis checks on the code
+  #
+
+  golangci:
+    runs-on: ubuntu-latest
+    name: Golangci-lint
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: 'stable'
+
+      - name: ensure asset generation
+        run: go run build.go assets
+
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v8
+        with:
+          only-new-issues: true
+
+  #
+  # Meta checks for formatting, copyright, etc
+  #
+
+  meta:
+    name: Meta checks
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: 'stable'
+
+      - run: |
+          go run build.go assets
+          go test -v ./meta
.github/workflows/pr-metadata.yaml (vendored, new file), 27 lines

@@ -0,0 +1,27 @@
+name: PR metadata
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - reopened
+      - edited
+      - synchronize
+
+permissions:
+  contents: read
+  pull-requests: write
+
+jobs:
+
+  #
+  # Set labels on PRs, which are then used to categorise release notes
+  #
+
+  labels:
+    name: Set labels
+    runs-on: ubuntu-latest
+    steps:
+      - uses: srvaroa/labeler@v1
+        env:
+          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/release-syncthing.yaml (vendored, new file), 60 lines

@@ -0,0 +1,60 @@
+name: Release Syncthing
+
+on:
+  push:
+    branches:
+      - release
+      - release-rc*
+
+permissions:
+  contents: write
+
+jobs:
+  create-release-tag:
+    name: Create release tag
+    runs-on: ubuntu-latest
+    environment: release
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
+          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version: stable
+
+      - name: Determine version to release
+        run: |
+          if [[ "$GITHUB_REF_NAME" == "release" ]] ; then
+            next=$(go run ./script/next-version.go)
+          else
+            next=$(go run ./script/next-version.go --pre)
+          fi
+          echo "NEXT=$next" >> $GITHUB_ENV
+          echo "Next version is $next"
+
+          prev=$(git describe --exclude "*-*" --abbrev=0)
+          echo "PREV=$prev" >> $GITHUB_ENV
+          echo "Previous version is $prev"
+
+      - name: Determine release notes
+        run: |
+          go run ./script/relnotes.go --new-ver "$NEXT" --branch "$GITHUB_REF_NAME" --prev-ver "$PREV" > notes.md
+        env:
+          GITHUB_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
+
+      - name: Create and push tag
+        run: |
+          git config --global user.name 'Syncthing Release Automation'
+          git config --global user.email 'release@syncthing.net'
+          git tag -a -F notes.md --cleanup=whitespace "$NEXT"
+          git push origin "$NEXT"
+
+      - name: Trigger the build
+        uses: benc-uk/workflow-dispatch@v1
+        with:
+          workflow: build-syncthing.yaml
+          ref: refs/tags/${{ env.NEXT }}
+          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
@@ -1,39 +1,67 @@
 version: "2"
 linters:
-  enable-all: true
+  default: all
   disable:
     - cyclop
     - depguard
     - exhaustive
     - exhaustruct
     - forbidigo
     - funlen
     - gci
     - gochecknoglobals
     - gochecknoinits
     - gocognit
     - goconst
     - gocyclo
     - godox
     - gofmt
     - goimports
     - gomoddirectives
     - inamedparam
     - interfacebloat
     - ireturn
     - lll
     - maintidx
     - mnd
     - musttag
     - nestif
     - nlreturn
     - nonamedreturns
     - paralleltest
     - prealloc
     - predeclared
     - protogetter
-    - scopelint
+    - recvcheck
     - revive
     - tagalign
     - tagliatelle
     - testpackage
     - usetesting # go 1.24
     - varnamelen
     - whitespace
     - wrapcheck
     - wsl

-issues:
-  exclude-dirs:
-    - internal/gen
-    - cmd/dev
-    - repos
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - internal/gen
+      - cmd/dev
+      - repos
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofumpt
+  exclusions:
+    generated: lax
+    paths:
+      - internal/gen
+      - cmd/dev
+      - repos
+      - third_party$
+      - builtin$
+      - examples$
AUTHORS, 9 changes

@@ -48,6 +48,7 @@ Arkadiusz Tymiński <gevleeog@gmail.com>
 Aroun <login@b-vo.fr>
 Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
 Artur Zubilewicz <AkaZecik@users.noreply.github.com>
 Ashish Bhate <bhate.ashish@gmail.com>
 Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
 Aurélien Rainone <476650+arl@users.noreply.github.com>
 BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>

@@ -113,6 +114,7 @@ diemade <spamkill@posteo.ch>
 digital <didev@dinid.net>
 Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
 Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
 domain <32405309+szu17dmy@users.noreply.github.com>
 Domenic Horner <domenic@tgxn.net>
 Dominik Heidler (asdil12) <dominik@heidler.eu>
 Elias Jarlebring (jarlebring) <jarlebring@gmail.com>

@@ -147,6 +149,7 @@ Gusted <postmaster@gusted.xyz> <williamzijl7@hotmail.com>
 Han Boetes <han@boetes.org>
 HansK-p <42314815+HansK-p@users.noreply.github.com>
 Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
 Hazem Krimi <me@hazemkrimi.tech>
 Heiko Zuerker (Smiley73) <heiko@zuerker.org>
 Hireworks <129852174+hireworksltd@users.noreply.github.com>
 Hugo Locurcio <hugo.locurcio@hugo.pro>

@@ -221,9 +224,10 @@ luzpaz <luzpaz@users.noreply.github.com>
 Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
 Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
 Marc Pujol (kilburn) <kilburn@la3.org>
 Marcel Meyer <mm.marcelmeyer@gmail.com>
 Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
 marco-m <marco.molteni@laposte.net>
-Marcus B Spencer <marcus@marcusspencer.xyz>
+Marcus B Spencer <marcus@marcusspencer.xyz> <marcus@marcusspencer.us>
 Marcus Legendre <marcus.legendre@gmail.com>
 Mario Majila <mariustshipichik@gmail.com>
 Mark Pulford (mpx) <mark@kyne.com.au>

@@ -277,6 +281,7 @@ Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
 Pablo <pbaeyens31+github@gmail.com>
 Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
 Paul Brit <paulbrit44@gmail.com>
 Paul Donald <newtwen+github@gmail.com>
 Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
 Paweł Rozlach <vespian@users.noreply.github.com>
 perewa <cavalcante.ten@gmail.com>

@@ -292,6 +297,7 @@ Pier Paolo Ramon <ramonpierre@gmail.com>
 Piotr Bejda (piobpl) <piotrb10@gmail.com>
 polyfloyd <polyfloyd@users.noreply.github.com>
 Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
 pullmerge <166967364+pullmerge@users.noreply.github.com>
 Quentin Hibon <qh.public@yahoo.com>
 Rahmi Pruitt <rjpruitt16@gmail.com>
 red_led <red-led@users.noreply.github.com>

@@ -327,6 +333,7 @@ Syncthing Release Automation <release@syncthing.net>
 Sébastien WENSKE <sebastien@wenske.fr>
 Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
 Terrance <git@terrance.allofti.me>
 TheCreeper <TheCreeper@users.noreply.github.com>
 Thomas <9749173+uhthomas@users.noreply.github.com>
 Thomas Hipp <thomashipp@gmail.com>
 Tim Abell (timabell) <tim@timwise.co.uk>
build.go, 6 changes

@@ -330,7 +330,7 @@ func runCommand(cmd string, target target) {
     writeCompatJSON()

   case "deb":
-    buildDeb(target)
+    buildDeb(target, tags)

   case "vet":
     metalintShort()

@@ -609,7 +609,7 @@ func buildZip(target target, tags []string) {
   fmt.Println(filename)
 }

-func buildDeb(target target) {
+func buildDeb(target target, tags []string) {
   os.RemoveAll("deb")

   // "goarch" here is set to whatever the Debian packages expect. We correct

@@ -623,7 +623,7 @@ func buildDeb(target target) {
     goarch = "arm"
   }

-  build(target, []string{"noupgrade"})
+  build(target, append(tags, "noupgrade"))

   for i := range target.installationFiles {
     target.installationFiles[i].src = strings.Replace(target.installationFiles[i].src, "{{binary}}", target.BinaryName(), 1)
build.sh, 1 change

@@ -23,6 +23,7 @@ case "${1:-default}" in
 prerelease)
   script authors
   script copyrights
   build weblate
   pushd man ; ./refresh.sh ; popd
   git add -A gui man AUTHORS
@@ -8,6 +8,7 @@ package main
 import (
   "bytes"
+  "cmp"
   "compress/gzip"
   "context"
   "io"

@@ -15,7 +16,7 @@ import (
   "math"
   "os"
   "path/filepath"
-  "sort"
+  "slices"
   "time"
 )

@@ -177,8 +178,8 @@ func (d *diskStore) inventory() error {
     })
     return nil
   })
-  sort.Slice(d.currentFiles, func(i, j int) bool {
-    return d.currentFiles[i].mtime < d.currentFiles[j].mtime
+  slices.SortFunc(d.currentFiles, func(a, b currentFile) int {
+    return cmp.Compare(a.mtime, b.mtime)
   })
   var oldest time.Duration
   if len(d.currentFiles) > 0 {
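Several changes in this comparison follow the same pattern as the hunk above: sort.Slice with a less-than closure is replaced by slices.SortFunc with a three-way comparison built from cmp.Compare (Go 1.21+). A minimal standalone sketch of that migration, using a hypothetical record type rather than the repository's own:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

// record stands in for types like currentFile in the diff above.
type record struct {
	name  string
	mtime int64
}

func main() {
	files := []record{{"b", 30}, {"a", 10}, {"c", 20}}

	// Old style for comparison:
	//   sort.Slice(files, func(i, j int) bool { return files[i].mtime < files[j].mtime })

	// New style, as used in the diff: a comparison function returning <0, 0 or >0.
	slices.SortFunc(files, func(a, b record) int {
		return cmp.Compare(a.mtime, b.mtime)
	})

	fmt.Println(files) // sorted by mtime ascending
}
```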
@@ -29,6 +29,7 @@ import (
   _ "github.com/syncthing/syncthing/lib/automaxprocs"
   "github.com/syncthing/syncthing/lib/geoip"
   "github.com/syncthing/syncthing/lib/protocol"
+  "github.com/syncthing/syncthing/lib/rand"
   "github.com/syncthing/syncthing/lib/relay/client"
   "github.com/syncthing/syncthing/lib/sync"
   "github.com/syncthing/syncthing/lib/tlsutil"

@@ -110,6 +111,7 @@ var (
   requestProcessors = 8
   geoipLicenseKey   = os.Getenv("GEOIP_LICENSE_KEY")
   geoipAccountID, _ = strconv.Atoi(os.Getenv("GEOIP_ACCOUNT_ID"))
+  maxRelaysReturned = 100

   requests chan request

@@ -141,6 +143,7 @@ func main() {
   flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
   flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")
   flag.StringVar(&geoipLicenseKey, "geoip-license-key", geoipLicenseKey, "License key for GeoIP database")
+  flag.IntVar(&maxRelaysReturned, "max-relays-returned", maxRelaysReturned, "Maximum number of relays returned for a normal endpoint query")

   flag.Parse()

@@ -331,6 +334,10 @@ func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
     relays = append(relays, relayShort{URL: slimURL(r.URL)})
   }
   mut.RUnlock()
+  if len(relays) > maxRelaysReturned {
+    rand.Shuffle(relays)
+    relays = relays[:maxRelaysReturned]
+  }

   _ = json.NewEncoder(rw).Encode(map[string][]relayShort{
     "relays": relays,
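The relay pool server now caps the number of relays returned per endpoint query: when the list exceeds maxRelaysReturned it is shuffled and then truncated, so each client receives a random subset rather than always the same prefix. A standalone sketch of the same idea using the standard library's math/rand/v2 (the project code uses its own lib/rand helper instead):

```go
package main

import (
	"fmt"
	"math/rand/v2"
)

// capRelays returns at most maxReturned relays, chosen at random.
func capRelays(relays []string, maxReturned int) []string {
	if len(relays) > maxReturned {
		// Shuffle first so the truncated slice is a random sample.
		rand.Shuffle(len(relays), func(i, j int) {
			relays[i], relays[j] = relays[j], relays[i]
		})
		relays = relays[:maxReturned]
	}
	return relays
}

func main() {
	relays := []string{"relay://a", "relay://b", "relay://c", "relay://d"}
	fmt.Println(capRelays(relays, 2)) // two randomly chosen entries
}
```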
@@ -201,17 +201,21 @@ func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 // looking for a prerelease at all.
 func filterForLatest(rels []upgrade.Release) []upgrade.Release {
   var filtered []upgrade.Release
-  var havePre bool
+  havePre := make(map[string]bool)
+  haveStable := make(map[string]bool)
   for _, rel := range rels {
-    if !rel.Prerelease {
-      // We found a stable version, we're good now.
+    major, _, _ := strings.Cut(rel.Tag, ".")
+    if !rel.Prerelease && !haveStable[major] {
+      // Remember the first non-pre for each major
       filtered = append(filtered, rel)
-      break
+      haveStable[major] = true
+      continue
     }
-    if rel.Prerelease && !havePre {
-      // We remember the first prerelease we find.
+    if rel.Prerelease && !havePre[major] && !haveStable[major] {
+      // We remember the first prerelease we find, unless we've
+      // already found a non-pre of the same major.
       filtered = append(filtered, rel)
-      havePre = true
+      havePre[major] = true
     }
   }
   return filtered
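The reworked filterForLatest keeps one entry per major version: the first stable release seen for each major, plus the first prerelease of a major for which no stable release has been seen. A self-contained sketch of that selection logic, with Release as a minimal stand-in for the upgrade.Release type used in the diff:

```go
package main

import (
	"fmt"
	"strings"
)

// Release is a minimal stand-in for upgrade.Release.
type Release struct {
	Tag        string
	Prerelease bool
}

// filterForLatest keeps the first stable release per major version, and the
// first prerelease of a major only while no stable release of that major
// has been seen. The input is assumed to be ordered newest first.
func filterForLatest(rels []Release) []Release {
	var filtered []Release
	havePre := make(map[string]bool)
	haveStable := make(map[string]bool)
	for _, rel := range rels {
		major, _, _ := strings.Cut(rel.Tag, ".")
		if !rel.Prerelease && !haveStable[major] {
			filtered = append(filtered, rel)
			haveStable[major] = true
			continue
		}
		if rel.Prerelease && !havePre[major] && !haveStable[major] {
			filtered = append(filtered, rel)
			havePre[major] = true
		}
	}
	return filtered
}

func main() {
	rels := []Release{
		{Tag: "v2.0.0-beta.1", Prerelease: true},
		{Tag: "v1.29.3", Prerelease: false},
		{Tag: "v1.29.2", Prerelease: false},
	}
	// The v2 beta is kept (no stable v2 yet) alongside the first stable v1.
	fmt.Println(filterForLatest(rels))
}
```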
@@ -258,9 +262,10 @@ func filterForCompabitility(rels []upgrade.Release, ua, osv string) []upgrade.Re
 }

 type cachedReleases struct {
-  url     string
-  mut     sync.RWMutex
-  current []upgrade.Release
+  url                  string
+  mut                  sync.RWMutex
+  current              []upgrade.Release
+  latestRel, latestPre string
 }

 func (c *cachedReleases) Releases() []upgrade.Release {

@@ -274,8 +279,26 @@ func (c *cachedReleases) Update(ctx context.Context) error {
   if err != nil {
     return err
   }
+  latestRel, latestPre := "", ""
+  for _, rel := range rels {
+    if !rel.Prerelease && latestRel == "" {
+      latestRel = rel.Tag
+    }
+    if rel.Prerelease && latestPre == "" {
+      latestPre = rel.Tag
+    }
+    if latestRel != "" && latestPre != "" {
+      break
+    }
+  }
   c.mut.Lock()
   c.current = rels
+  if latestRel != c.latestRel || latestPre != c.latestPre {
+    metricLatestReleaseInfo.DeleteLabelValues(c.latestRel, c.latestPre)
+    metricLatestReleaseInfo.WithLabelValues(latestRel, latestPre).Set(1)
+    c.latestRel = latestRel
+    c.latestPre = latestPre
+  }
   c.mut.Unlock()
   return nil
 }

@@ -27,4 +27,10 @@ var (
     Subsystem: "upgrade",
     Name:      "http_requests",
   }, []string{"target", "result"})
+  metricLatestReleaseInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{
+    Namespace: "syncthing",
+    Subsystem: "upgrade",
+    Name:      "latest_release_info",
+    Help:      "Release information",
+  }, []string{"latest_release", "latest_pre"})
 )
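The new latest_release_info gauge follows the common "info metric" pattern: a gauge fixed at 1 whose labels carry the interesting strings, with the previous label set deleted before the new one is written so only one series stays active per scrape. A standalone sketch of that pattern; the metric and label names follow the diff, while the setLatest helper and its arguments are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var metricLatestReleaseInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "syncthing",
	Subsystem: "upgrade",
	Name:      "latest_release_info",
	Help:      "Release information",
}, []string{"latest_release", "latest_pre"})

// setLatest swaps the exported label pair for a new one, so scrapes only
// ever see a single active series with value 1.
func setLatest(oldRel, oldPre, newRel, newPre string) {
	if oldRel != newRel || oldPre != newPre {
		metricLatestReleaseInfo.DeleteLabelValues(oldRel, oldPre)
		metricLatestReleaseInfo.WithLabelValues(newRel, newPre).Set(1)
	}
}

func main() {
	setLatest("", "", "v1.29.3", "v2.0.0-beta.1")
	fmt.Println("latest_release_info gauge updated")
}
```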
@@ -26,9 +26,11 @@ import (
   "github.com/prometheus/client_golang/prometheus"
   "github.com/prometheus/client_golang/prometheus/promhttp"
   "github.com/puzpuzpuz/xsync/v3"
+  "github.com/syncthing/syncthing/internal/blob"
+  "github.com/syncthing/syncthing/internal/blob/azureblob"
+  "github.com/syncthing/syncthing/internal/blob/s3"
   "github.com/syncthing/syncthing/lib/build"
   "github.com/syncthing/syncthing/lib/geoip"
-  "github.com/syncthing/syncthing/lib/s3"
   "github.com/syncthing/syncthing/lib/ur/contract"
 )

@@ -40,11 +42,15 @@ type CLI struct {
   DumpFile     string        `env:"UR_DUMP_FILE" default:"reports.jsons.gz"`
   DumpInterval time.Duration `env:"UR_DUMP_INTERVAL" default:"5m"`

-  S3Endpoint    string `name:"s3-endpoint" hidden:"true" env:"UR_S3_ENDPOINT"`
-  S3Region      string `name:"s3-region" hidden:"true" env:"UR_S3_REGION"`
-  S3Bucket      string `name:"s3-bucket" hidden:"true" env:"UR_S3_BUCKET"`
-  S3AccessKeyID string `name:"s3-access-key-id" hidden:"true" env:"UR_S3_ACCESS_KEY_ID"`
-  S3SecretKey   string `name:"s3-secret-key" hidden:"true" env:"UR_S3_SECRET_KEY"`
+  S3Endpoint    string `name:"s3-endpoint" env:"UR_S3_ENDPOINT"`
+  S3Region      string `name:"s3-region" env:"UR_S3_REGION"`
+  S3Bucket      string `name:"s3-bucket" env:"UR_S3_BUCKET"`
+  S3AccessKeyID string `name:"s3-access-key-id" env:"UR_S3_ACCESS_KEY_ID"`
+  S3SecretKey   string `name:"s3-secret-key" env:"UR_S3_SECRET_KEY"`
+
+  AzureBlobAccount   string `name:"azure-blob-account" env:"UR_AZUREBLOB_ACCOUNT"`
+  AzureBlobKey       string `name:"azure-blob-key" env:"UR_AZUREBLOB_KEY"`
+  AzureBlobContainer string `name:"azure-blob-container" env:"UR_AZUREBLOB_CONTAINER"`
 }

 var (

@@ -77,6 +83,7 @@ var (
   {regexp.MustCompile(`\ssyncthing@archlinux`), "Arch (3rd party)"},
   {regexp.MustCompile(`@debian`), "Debian (3rd party)"},
   {regexp.MustCompile(`@fedora`), "Fedora (3rd party)"},
+  {regexp.MustCompile(`@openSUSE`), "openSUSE (3rd party)"},
   {regexp.MustCompile(`\sbrew@`), "Homebrew (3rd party)"},
   {regexp.MustCompile(`\sroot@buildkitsandbox`), "LinuxServer.io (3rd party)"},
   {regexp.MustCompile(`\sports@freebsd`), "FreeBSD (3rd party)"},

@@ -119,19 +126,25 @@ func (cli *CLI) Run() error {
     go geo.Serve(context.TODO())
   }

-  // s3
+  // Blob storage

-  var s3sess *s3.Session
+  var blobs blob.Store
   if cli.S3Endpoint != "" {
-    s3sess, err = s3.NewSession(cli.S3Endpoint, cli.S3Region, cli.S3Bucket, cli.S3AccessKeyID, cli.S3SecretKey)
+    blobs, err = s3.NewSession(cli.S3Endpoint, cli.S3Region, cli.S3Bucket, cli.S3AccessKeyID, cli.S3SecretKey)
     if err != nil {
       slog.Error("Failed to create S3 session", "error", err)
       return err
     }
+  } else if cli.AzureBlobAccount != "" {
+    blobs, err = azureblob.NewBlobStore(cli.AzureBlobAccount, cli.AzureBlobKey, cli.AzureBlobContainer)
+    if err != nil {
+      slog.Error("Failed to create Azure blob store", "error", err)
+      return err
+    }
   }

-  if _, err := os.Stat(cli.DumpFile); err != nil && s3sess != nil {
-    if err := cli.downloadDumpFile(s3sess); err != nil {
+  if _, err := os.Stat(cli.DumpFile); err != nil && blobs != nil {
+    if err := cli.downloadDumpFile(blobs); err != nil {
       slog.Error("Failed to download dump file", "error", err)
     }
   }

@@ -153,7 +166,7 @@ func (cli *CLI) Run() error {
   go func() {
     for range time.Tick(cli.DumpInterval) {
-      if err := cli.saveDumpFile(srv, s3sess); err != nil {
+      if err := cli.saveDumpFile(srv, blobs); err != nil {
         slog.Error("Failed to write dump file", "error", err)
       }
     }

@@ -192,8 +205,8 @@ func (cli *CLI) Run() error {
   return metricsSrv.Serve(urListener)
 }

-func (cli *CLI) downloadDumpFile(s3sess *s3.Session) error {
-  latestKey, err := s3sess.LatestKey()
+func (cli *CLI) downloadDumpFile(blobs blob.Store) error {
+  latestKey, err := blobs.LatestKey(context.Background())
   if err != nil {
     return fmt.Errorf("list latest S3 key: %w", err)
   }

@@ -201,7 +214,7 @@ func (cli *CLI) downloadDumpFile(s3sess *s3.Session) error {
   if err != nil {
     return fmt.Errorf("create dump file: %w", err)
   }
-  if err := s3sess.Download(fd, latestKey); err != nil {
+  if err := blobs.Download(context.Background(), latestKey, fd); err != nil {
     _ = fd.Close()
     return fmt.Errorf("download dump file: %w", err)
   }

@@ -212,7 +225,7 @@ func (cli *CLI) downloadDumpFile(s3sess *s3.Session) error {
   return nil
 }

-func (cli *CLI) saveDumpFile(srv *server, s3sess *s3.Session) error {
+func (cli *CLI) saveDumpFile(srv *server, blobs blob.Store) error {
   fd, err := os.Create(cli.DumpFile + ".tmp")
   if err != nil {
     return fmt.Errorf("creating dump file: %w", err)

@@ -233,13 +246,13 @@ func (cli *CLI) saveDumpFile(srv *server, s3sess *s3.Session) error {
   }
   slog.Info("Dump file saved")

-  if s3sess != nil {
+  if blobs != nil {
     key := fmt.Sprintf("reports-%s.jsons.gz", time.Now().UTC().Format("2006-01-02"))
     fd, err := os.Open(cli.DumpFile)
     if err != nil {
       return fmt.Errorf("opening dump file: %w", err)
     }
-    if err := s3sess.Upload(fd, key); err != nil {
+    if err := blobs.Upload(context.Background(), key, fd); err != nil {
       return fmt.Errorf("uploading dump file: %w", err)
     }
     _ = fd.Close()

@@ -351,6 +364,9 @@ func (s *server) addReport(rep *contract.Report) bool {
       break
     }
   }
+  rep.DistDist = rep.Distribution
+  rep.DistOS = rep.OS
+  rep.DistArch = rep.Arch

   _, loaded := s.reports.LoadAndStore(rep.UniqueID, rep)
   return loaded
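Throughout the infrastructure commands in this comparison, the concrete *s3.Session is replaced by a blob.Store abstraction with S3 and Azure Blob backends. The interface itself is not shown in this view; judging purely from the call sites (LatestKey, Download, Upload), it looks roughly like the sketch below. The real internal/blob package may differ in names and signatures:

```go
// Package blob, as inferred from the call sites in this diff; a sketch only.
package blob

import (
	"context"
	"io"
)

// Store abstracts an object store used for database and dump-file backups.
type Store interface {
	// LatestKey returns the key of the most recently written object.
	LatestKey(ctx context.Context) (string, error)
	// Download writes the object identified by key to w.
	Download(ctx context.Context, key string, w io.Writer) error
	// Upload stores the contents of r under key.
	Upload(ctx context.Context, key string, r io.Reader) error
}
```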
@@ -66,7 +66,7 @@ type contextKey int
 const idKey contextKey = iota

-func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP, compression bool) *apiSrv {
+func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP, compression bool, desiredNotFoundRate float64) *apiSrv {
   return &apiSrv{
     addr: addr,
     cert: cert,

@@ -77,13 +77,13 @@ func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator,
     seenTracker: &retryAfterTracker{
       name:         "seenTracker",
       bucketStarts: time.Now(),
-      desiredRate:  250,
+      desiredRate:  desiredNotFoundRate / 2,
       currentDelay: notFoundRetryUnknownMinSeconds,
     },
     notSeenTracker: &retryAfterTracker{
       name:         "notSeenTracker",
       bucketStarts: time.Now(),
-      desiredRate:  250,
+      desiredRate:  desiredNotFoundRate / 2,
       currentDelay: notFoundRetryUnknownMaxSeconds / 2,
     },
   }

@@ -111,7 +111,7 @@ func BenchmarkAPIRequests(b *testing.B) {
   ctx, cancel := context.WithCancel(context.Background())
   defer cancel()
   go db.Serve(ctx)
-  api := newAPISrv("127.0.0.1:0", tls.Certificate{}, db, nil, true, true)
+  api := newAPISrv("127.0.0.1:0", tls.Certificate{}, db, nil, true, true, 1000)
   srv := httptest.NewServer(http.HandlerFunc(api.handler))

   kf := b.TempDir() + "/cert"
@@ -24,11 +24,11 @@ import (
   "github.com/puzpuzpuz/xsync/v3"
   "google.golang.org/protobuf/proto"

+  "github.com/syncthing/syncthing/internal/blob"
   "github.com/syncthing/syncthing/internal/gen/discosrv"
   "github.com/syncthing/syncthing/internal/protoutil"
   "github.com/syncthing/syncthing/lib/protocol"
   "github.com/syncthing/syncthing/lib/rand"
-  "github.com/syncthing/syncthing/lib/s3"
 )

 type clock interface {

@@ -51,12 +51,12 @@ type inMemoryStore struct {
   m             *xsync.MapOf[protocol.DeviceID, *discosrv.DatabaseRecord]
   dir           string
   flushInterval time.Duration
-  s3            *s3.Session
+  blobs         blob.Store
   objKey        string
   clock         clock
 }

-func newInMemoryStore(dir string, flushInterval time.Duration, s3sess *s3.Session) *inMemoryStore {
+func newInMemoryStore(dir string, flushInterval time.Duration, blobs blob.Store) *inMemoryStore {
   hn, err := os.Hostname()
   if err != nil {
     hn = rand.String(8)

@@ -65,25 +65,25 @@ func newInMemoryStore(dir string, flushInterval time.Duration, s3sess *s3.Sessio
     m:             xsync.NewMapOf[protocol.DeviceID, *discosrv.DatabaseRecord](),
     dir:           dir,
     flushInterval: flushInterval,
-    s3:            s3sess,
+    blobs:         blobs,
     objKey:        hn + ".db",
     clock:         defaultClock{},
   }
   nr, err := s.read()
-  if os.IsNotExist(err) && s3sess != nil {
-    // Try to read from AWS
-    latestKey, cerr := s3sess.LatestKey()
+  if os.IsNotExist(err) && blobs != nil {
+    // Try to read from blob storage
+    latestKey, cerr := blobs.LatestKey(context.Background())
     if cerr != nil {
-      log.Println("Error reading database from S3:", err)
+      log.Println("Error finding database from blob storage:", cerr)
       return s
     }
     fd, cerr := os.Create(path.Join(s.dir, "records.db"))
     if cerr != nil {
-      log.Println("Error creating database file:", err)
+      log.Println("Error creating database file:", cerr)
      return s
     }
-    if cerr := s3sess.Download(fd, latestKey); cerr != nil {
-      log.Printf("Error reading database from S3: %v", err)
+    if cerr := blobs.Download(context.Background(), latestKey, fd); cerr != nil {
+      log.Printf("Error downloading database from blob storage: %v", cerr)
     }
     _ = fd.Close()
     nr, err = s.read()

@@ -310,16 +310,16 @@ func (s *inMemoryStore) write() (err error) {
     return err
   }

-  // Upload to S3
-  if s.s3 != nil {
+  // Upload to blob storage
+  if s.blobs != nil {
     fd, err = os.Open(dbf)
     if err != nil {
-      log.Printf("Error uploading database to S3: %v", err)
+      log.Printf("Error uploading database to blob storage: %v", err)
       return nil
     }
     defer fd.Close()
-    if err := s.s3.Upload(fd, s.objKey); err != nil {
-      log.Printf("Error uploading database to S3: %v", err)
+    if err := s.blobs.Upload(context.Background(), s.objKey, fd); err != nil {
+      log.Printf("Error uploading database to blob storage: %v", err)
     }
     log.Println("Finished uploading database")
   }
@@ -21,11 +21,13 @@ import (
   "github.com/prometheus/client_golang/prometheus/promhttp"
   "github.com/thejerf/suture/v4"

+  "github.com/syncthing/syncthing/internal/blob"
+  "github.com/syncthing/syncthing/internal/blob/azureblob"
+  "github.com/syncthing/syncthing/internal/blob/s3"
   _ "github.com/syncthing/syncthing/lib/automaxprocs"
   "github.com/syncthing/syncthing/lib/build"
   "github.com/syncthing/syncthing/lib/protocol"
   "github.com/syncthing/syncthing/lib/rand"
-  "github.com/syncthing/syncthing/lib/s3"
   "github.com/syncthing/syncthing/lib/tlsutil"
 )

@@ -58,12 +60,13 @@ const (
 var debug = false

 type CLI struct {
-  Cert          string `group:"Listen" help:"Certificate file" default:"./cert.pem" env:"DISCOVERY_CERT_FILE"`
-  Key           string `group:"Listen" help:"Key file" default:"./key.pem" env:"DISCOVERY_KEY_FILE"`
-  HTTP          bool   `group:"Listen" help:"Listen on HTTP (behind an HTTPS proxy)" env:"DISCOVERY_HTTP"`
-  Compression   bool   `group:"Listen" help:"Enable GZIP compression of responses" env:"DISCOVERY_COMPRESSION"`
-  Listen        string `group:"Listen" help:"Listen address" default:":8443" env:"DISCOVERY_LISTEN"`
-  MetricsListen string `group:"Listen" help:"Metrics listen address" env:"DISCOVERY_METRICS_LISTEN"`
+  Cert                string  `group:"Listen" help:"Certificate file" default:"./cert.pem" env:"DISCOVERY_CERT_FILE"`
+  Key                 string  `group:"Listen" help:"Key file" default:"./key.pem" env:"DISCOVERY_KEY_FILE"`
+  HTTP                bool    `group:"Listen" help:"Listen on HTTP (behind an HTTPS proxy)" env:"DISCOVERY_HTTP"`
+  Compression         bool    `group:"Listen" help:"Enable GZIP compression of responses" env:"DISCOVERY_COMPRESSION"`
+  Listen              string  `group:"Listen" help:"Listen address" default:":8443" env:"DISCOVERY_LISTEN"`
+  MetricsListen       string  `group:"Listen" help:"Metrics listen address" env:"DISCOVERY_METRICS_LISTEN"`
+  DesiredNotFoundRate float64 `group:"Listen" help:"Desired maximum rate of not-found replies (/s)" default:"1000"`

   DBDir           string        `group:"Database" help:"Database directory" default:"." env:"DISCOVERY_DB_DIR"`
   DBFlushInterval time.Duration `group:"Database" help:"Interval between database flushes" default:"5m" env:"DISCOVERY_DB_FLUSH_INTERVAL"`

@@ -74,6 +77,10 @@ type CLI struct {
   DBS3AccessKeyID string `name:"db-s3-access-key-id" group:"Database (S3 backup)" hidden:"true" help:"S3 access key ID for database" env:"DISCOVERY_DB_S3_ACCESS_KEY_ID"`
   DBS3SecretKey   string `name:"db-s3-secret-key" group:"Database (S3 backup)" hidden:"true" help:"S3 secret key for database" env:"DISCOVERY_DB_S3_SECRET_KEY"`

+  DBAzureBlobAccount   string `name:"db-azure-blob-account" env:"DISCOVERY_DB_AZUREBLOB_ACCOUNT"`
+  DBAzureBlobKey       string `name:"db-azure-blob-key" env:"DISCOVERY_DB_AZUREBLOB_KEY"`
+  DBAzureBlobContainer string `name:"db-azure-blob-container" env:"DISCOVERY_DB_AZUREBLOB_CONTAINER"`
+
   AMQPAddress string `group:"AMQP replication" hidden:"true" help:"Address to AMQP broker" env:"DISCOVERY_AMQP_ADDRESS"`

   Debug bool `short:"d" help:"Print debug output" env:"DISCOVERY_DEBUG"`

@@ -117,18 +124,20 @@ func main() {
     Timeout: 2 * time.Minute,
   })

-  // If configured, use S3 for database backups.
-  var s3c *s3.Session
+  // If configured, use blob storage for database backups.
+  var blobs blob.Store
+  var err error
   if cli.DBS3Endpoint != "" {
-    var err error
-    s3c, err = s3.NewSession(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
-    if err != nil {
-      log.Fatalf("Failed to create S3 session: %v", err)
-    }
+    blobs, err = s3.NewSession(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
+  } else if cli.DBAzureBlobAccount != "" {
+    blobs, err = azureblob.NewBlobStore(cli.DBAzureBlobAccount, cli.DBAzureBlobKey, cli.DBAzureBlobContainer)
+  }
+  if err != nil {
+    log.Fatalf("Failed to create blob store: %v", err)
   }

   // Start the database.
-  db := newInMemoryStore(cli.DBDir, cli.DBFlushInterval, s3c)
+  db := newInMemoryStore(cli.DBDir, cli.DBFlushInterval, blobs)
   main.Add(db)

   // If we have an AMQP broker for replication, start that

@@ -141,7 +150,7 @@ func main() {
   }

   // Start the main API server.
-  qs := newAPISrv(cli.Listen, cert, db, repl, cli.HTTP, cli.Compression)
+  qs := newAPISrv(cli.Listen, cert, db, repl, cli.HTTP, cli.Compression, cli.DesiredNotFoundRate)
   main.Add(qs)

   // If we have a metrics port configured, start a metrics handler.

@@ -184,7 +184,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config, token strin
       continue
     }
     // requestedPeer is the server, id is the client
-    ses := newSession(requestedPeer, id, sessionLimiter, globalLimiter)
+    ses := newSession(requestedPeer, id, sessionLimitBps, globalLimiter)

     go ses.Serve()

@@ -51,7 +51,6 @@ var (
   globalLimitBps    int
   overLimit         atomic.Bool
   descriptorLimit   int64
-  sessionLimiter    *rate.Limiter
   globalLimiter     *rate.Limiter
   networkBufferSize int

@@ -228,9 +227,6 @@ func main() {
     }
   }

-  if sessionLimitBps > 0 {
-    sessionLimiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
-  }
   if globalLimitBps > 0 {
     globalLimiter = rate.NewLimiter(rate.Limit(globalLimitBps), 2*globalLimitBps)
   }

@@ -27,7 +27,7 @@ var (
   bytesProxied  atomic.Int64
 )

-func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
+func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionLimitBps int, globalRateLimit *rate.Limiter) *session {
   serverkey := make([]byte, 32)
   _, err := rand.Read(serverkey)
   if err != nil {

@@ -40,12 +40,17 @@ func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit,
     return nil
   }

+  var sessionRateLimit *rate.Limiter
+  if sessionLimitBps > 0 {
+    sessionRateLimit = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
+  }
   ses := &session{
     serverkey: serverkey,
     serverid:  serverid,
     clientkey: clientkey,
     clientid:  clientid,
     rateLimit: makeRateLimitFunc(sessionRateLimit, globalRateLimit),
+    limiter:   sessionRateLimit,
     connsChan: make(chan net.Conn),
     conns:     make([]net.Conn, 0, 2),
   }

@@ -109,6 +114,7 @@ type session struct {
   clientid  syncthingprotocol.DeviceID

   rateLimit func(bytes int)
+  limiter   *rate.Limiter

   connsChan chan net.Conn
   conns     []net.Conn
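Instead of sharing one pre-built session limiter across all connections, the relay server now passes the configured bytes-per-second figure into newSession and builds a *rate.Limiter per session, and only when a positive limit is actually set. A standalone sketch of that construction using golang.org/x/time/rate, with the session struct reduced to the field relevant here:

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

type session struct {
	limiter *rate.Limiter // nil means no per-session limit
}

// newSession mirrors the lazy construction in the diff above: a limiter is
// created only when a positive bytes-per-second limit is configured.
func newSession(sessionLimitBps int) *session {
	var limiter *rate.Limiter
	if sessionLimitBps > 0 {
		// Refill at sessionLimitBps tokens per second, with a burst of twice that.
		limiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
	}
	return &session{limiter: limiter}
}

func main() {
	limited := newSession(1 << 20) // roughly 1 MiB/s
	unlimited := newSession(0)
	fmt.Println(limited.limiter != nil, unlimited.limiter == nil) // true true
}
```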
@@ -7,9 +7,10 @@
 package cli

 import (
+  "cmp"
   "encoding/binary"
   "fmt"
-  "sort"
+  "slices"

   "github.com/syncthing/syncthing/lib/db"
 )

@@ -77,8 +78,8 @@ func indexDumpSize() error {
     elems = append(elems, ele)
   }

-  sort.Slice(elems, func(i, j int) bool {
-    return elems[i].size > elems[j].size
+  slices.SortFunc(elems, func(a, b sizedElement) int {
+    return cmp.Compare(b.size, a.size)
   })
   for _, ele := range elems {
     fmt.Println(ele.key, ele.size)

@@ -8,10 +8,11 @@ package cli
 import (
   "bytes"
+  "cmp"
   "encoding/binary"
   "errors"
   "fmt"
-  "sort"
+  "slices"

   "google.golang.org/protobuf/proto"

@@ -207,11 +208,11 @@ func indexCheck() (err error) {
   // Aggregate the ranges of missing sequence entries, print them

-  sort.Slice(missingSeq, func(a, b int) bool {
-    if missingSeq[a].folder != missingSeq[b].folder {
-      return missingSeq[a].folder < missingSeq[b].folder
+  slices.SortFunc(missingSeq, func(a, b sequenceKey) int {
+    if a.folder != b.folder {
+      return cmp.Compare(a.folder, b.folder)
     }
-    return missingSeq[a].sequence < missingSeq[b].sequence
+    return cmp.Compare(a.sequence, b.sequence)
   })

   var folder uint32

@@ -14,7 +14,7 @@ import (
   "net/http"
   "os"
   "path/filepath"
-  "sort"
+  "slices"
   "strings"
   "time"
 )

@@ -37,7 +37,9 @@ func uploadPanicLogs(ctx context.Context, urlBase, dir string) {
     return
   }

-  sort.Sort(sort.Reverse(sort.StringSlice(files)))
+  slices.SortFunc(files, func(a, b string) int {
+    return strings.Compare(b, a)
+  })
   for _, file := range files {
     if strings.Contains(file, ".reported.") {
       // We've already sent this file. It'll be cleaned out at some

@@ -23,7 +23,7 @@ import (
   "path/filepath"
   "regexp"
   "runtime/pprof"
-  "sort"
+  "slices"
   "strconv"
   "strings"
   "syscall"

@@ -349,10 +349,12 @@ func (options serveOptions) Run() error {
     return nil
   }

-  // Ensure that our home directory exists.
-  if err := syncthing.EnsureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0o700); err != nil {
-    l.Warnln("Failure on home directory:", err)
-    os.Exit(svcutil.ExitError.AsInt())
+  // Ensure that our config and data directories exist.
+  for _, loc := range []locations.BaseDirEnum{locations.ConfigBaseDir, locations.DataBaseDir} {
+    if err := syncthing.EnsureDir(locations.GetBaseDir(loc), 0o700); err != nil {
+      l.Warnln("Failed to ensure directory exists:", err)
+      os.Exit(svcutil.ExitError.AsInt())
+    }
   }

   if options.UpgradeTo != "" {

@@ -376,7 +378,7 @@ func (options serveOptions) Run() error {
   if options.Upgrade {
     release, err := checkUpgrade()
     if err == nil {
-      lf := flock.New(locations.Get(locations.CertFile))
+      lf := flock.New(locations.Get(locations.LockFile))
       locked, err := lf.TryLock()
       if err != nil {
         l.Warnln("Upgrade:", err)

@@ -386,6 +388,8 @@ func (options serveOptions) Run() error {
       } else {
        err = upgrade.To(release)
       }
+      _ = lf.Unlock()
+      _ = os.Remove(locations.Get(locations.LockFile))
     }
     if err != nil {
       l.Warnln("Upgrade:", err)

@@ -439,7 +443,7 @@ func debugFacilities() string {
       maxLen = len(name)
     }
   }
-  sort.Strings(names)
+  slices.Sort(names)

   // Format the choices
   b := new(bytes.Buffer)

@@ -546,7 +550,7 @@ func syncthingMain(options serveOptions) {
   }

   // Ensure we are the only running instance
-  lf := flock.New(locations.Get(locations.CertFile))
+  lf := flock.New(locations.Get(locations.LockFile))
   locked, err := lf.TryLock()
   if err != nil {
     l.Warnln("Failed to acquire lock:", err)
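Single-instance detection now locks a dedicated lock file (locations.LockFile) rather than piggy-backing on the certificate file, and the lock file is removed on shutdown and around upgrades. A minimal standalone sketch of the same pattern with github.com/gofrs/flock, using a hypothetical path instead of the locations package:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/gofrs/flock"
)

func main() {
	// Hypothetical lock path; the real code resolves locations.LockFile.
	lockPath := filepath.Join(os.TempDir(), "syncthing-example.lock")

	lf := flock.New(lockPath)
	locked, err := lf.TryLock()
	if err != nil {
		fmt.Println("failed to acquire lock:", err)
		return
	}
	if !locked {
		fmt.Println("another instance appears to be running")
		return
	}
	defer func() {
		// Best-effort cleanup, as in the diff: unlock and remove the file.
		_ = lf.Unlock()
		_ = os.Remove(lockPath)
	}()

	fmt.Println("lock acquired; running as the only instance")
}
```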
@@ -574,6 +578,7 @@ func syncthingMain(options serveOptions) {
     os.Exit(svcutil.ExitError.AsInt())
   }
   earlyService.Add(cfgWrapper)
+  config.RegisterInfoMetrics(cfgWrapper)

   // Candidate builds should auto upgrade. Make sure the option is set,
   // unless we are in a build where it's disabled or the STNOUPGRADE

@@ -636,9 +641,21 @@ func syncthingMain(options serveOptions) {
     DBRecheckInterval:    options.DebugDBRecheckInterval,
     DBIndirectGCInterval: options.DebugDBIndirectGCInterval,
   }
-  if options.Audit {
-    appOpts.AuditWriter = auditWriter(options.AuditFile)
+
+  if options.Audit || cfgWrapper.Options().AuditEnabled {
+    l.Infoln("Auditing is enabled.")
+
+    auditFile := cfgWrapper.Options().AuditFile
+
+    // Ignore config option if command-line option is set
+    if options.AuditFile != "" {
+      l.Debugln("Using the audit file from the command-line parameter.")
+      auditFile = options.AuditFile
+    }
+
+    appOpts.AuditWriter = auditWriter(auditFile)
   }

   if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil {
     appOpts.DBRecheckInterval = dur
   }

@@ -692,6 +709,10 @@ func syncthingMain(options serveOptions) {
     pprof.StopCPUProfile()
   }

+  // Best effort remove lockfile, doesn't matter if it succeeds
+  _ = lf.Unlock()
+  _ = os.Remove(locations.Get(locations.LockFile))
+
   os.Exit(int(status))
 }

@@ -238,19 +238,18 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
     return
   }

-  if panicFd == nil {
-    dst.Write([]byte(line))
+  dst.Write([]byte(line))

-    if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
-      panicFd, err = os.Create(locations.GetTimestamped(locations.PanicLog))
-      if err != nil {
-        l.Warnln("Create panic log:", err)
-        continue
-      }
+  if panicFd == nil && (strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:")) {
+    panicFd, err = os.Create(locations.GetTimestamped(locations.PanicLog))
+    if err != nil {
+      l.Warnln("Create panic log:", err)
+      continue
+    }

-      l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
-      if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
-        l.Warnln(`
+    l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
+    if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
+      l.Warnln(`
 *********************************************************************************
 * Crash due to corrupt database.                                                *
 *                                                                               *

@@ -263,22 +262,21 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
 * https://docs.syncthing.net/users/faq.html#my-syncthing-database-is-corrupt   *
 *********************************************************************************
 `)
-      } else {
-        l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
-        l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
-      }
-
-      stdoutMut.Lock()
-      for _, line := range stdoutFirstLines {
-        panicFd.WriteString(line)
-      }
-      panicFd.WriteString("...\n")
-      for _, line := range stdoutLastLines {
-        panicFd.WriteString(line)
-      }
-      stdoutMut.Unlock()
+    } else {
+      l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
+      l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
+    }
+
+    stdoutMut.Lock()
+    for _, line := range stdoutFirstLines {
+      panicFd.WriteString(line)
+    }
+    panicFd.WriteString("...\n")
+    for _, line := range stdoutLastLines {
+      panicFd.WriteString(line)
+    }
+    stdoutMut.Unlock()

     panicFd.WriteString("Panic at " + time.Now().Format(time.RFC3339) + "\n")
   }
42
go.mod
42
go.mod
@@ -4,15 +4,16 @@ go 1.23.0

require (
	github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f
	github.com/alecthomas/kong v1.10.0
	github.com/aws/aws-sdk-go v1.55.6
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1
	github.com/alecthomas/kong v1.11.0
	github.com/aws/aws-sdk-go v1.55.7
	github.com/calmh/incontainer v1.0.0
	github.com/calmh/xdr v1.2.0
	github.com/ccding/go-stun v0.1.5
	github.com/chmduquesne/rollinghash v4.0.0+incompatible
	github.com/d4l3k/messagediff v1.2.1
	github.com/getsentry/raven-go v0.2.0
	github.com/go-ldap/ldap/v3 v3.4.10
	github.com/go-ldap/ldap/v3 v3.4.11
	github.com/gobwas/glob v0.2.3
	github.com/gofrs/flock v0.12.1
	github.com/greatroar/blobloom v0.8.0
@@ -27,53 +28,55 @@ require (
	github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75
	github.com/oschwald/geoip2-golang v1.11.0
	github.com/pierrec/lz4/v4 v4.1.22
	github.com/prometheus/client_golang v1.21.1
	github.com/prometheus/client_golang v1.22.0
	github.com/puzpuzpuz/xsync/v3 v3.5.1
	github.com/quic-go/quic-go v0.50.1
	github.com/quic-go/quic-go v0.52.0
	github.com/rabbitmq/amqp091-go v1.10.0
	github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
	github.com/shirou/gopsutil/v4 v4.25.3
	github.com/syncthing/notify v0.0.0-20250207082249-f0fa8f99c2bc
	github.com/shirou/gopsutil/v4 v4.25.4
	github.com/syncthing/notify v0.0.0-20250528144937-c7027d4f7465
	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
	github.com/thejerf/suture/v4 v4.0.6
	github.com/urfave/cli v1.22.16
	github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0
	github.com/willabides/kongplete v0.4.0
	go.uber.org/automaxprocs v1.6.0
	golang.org/x/crypto v0.36.0
	golang.org/x/net v0.38.0
	golang.org/x/sys v0.31.0
	golang.org/x/text v0.23.0
	golang.org/x/crypto v0.38.0
	golang.org/x/net v0.40.0
	golang.org/x/sys v0.33.0
	golang.org/x/text v0.25.0
	golang.org/x/time v0.11.0
	golang.org/x/tools v0.31.0
	golang.org/x/tools v0.33.0
	google.golang.org/protobuf v1.36.6
	sigs.k8s.io/yaml v1.4.0
)

require (
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/coreos/go-semver v0.3.1 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/ebitengine/purego v0.8.2 // indirect
	github.com/ebitengine/purego v0.8.3 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect
	github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/nxadm/tail v1.4.11 // indirect
	github.com/onsi/ginkgo/v2 v2.20.2 // indirect
	github.com/onsi/ginkgo/v2 v2.23.4 // indirect
	github.com/oschwald/maxminddb-golang v1.13.1 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -89,10 +92,9 @@ require (
	github.com/tklauser/go-sysconf v0.3.14 // indirect
	github.com/tklauser/numcpus v0.9.0 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	go.uber.org/mock v0.5.0 // indirect
	golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
	go.uber.org/mock v0.5.2 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/sync v0.12.0 // indirect
	golang.org/x/sync v0.14.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

160 go.sum
@@ -1,18 +1,30 @@
|
||||
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f h1:GmH5lT+moM7PbAJFBq57nH9WJ+wRnBXr/tyaYWbSAx8=
|
||||
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f/go.mod h1:Nhfib1j/VFnLrXL9cHgA+/n2O6P5THuWelOnbfPNd78=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0/go.mod h1:kUjrAo8bgEwLeZ/CmHqNl3Z/kPm7y6FKfxxK0izYUg4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
|
||||
github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
||||
github.com/alecthomas/kong v1.10.0 h1:8K4rGDpT7Iu+jEXCIJUeKqvpwZHbsFRoebLbnzlmrpw=
|
||||
github.com/alecthomas/kong v1.10.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
|
||||
github.com/alecthomas/kong v1.11.0 h1:y++1gI7jf8O7G7l4LZo5ASFhrhJvzc+WgF/arranEmM=
|
||||
github.com/alecthomas/kong v1.11.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
|
||||
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
|
||||
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
|
||||
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
|
||||
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/calmh/glob v0.0.0-20220615080505-1d823af5017b h1:Fjm4GuJ+TGMgqfGHN42IQArJb77CfD/mAwLbDUoJe6g=
|
||||
@@ -34,6 +46,8 @@ github.com/chmduquesne/rollinghash v4.0.0+incompatible/go.mod h1:Uc2I36RRfTAf7Dg
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U=
|
||||
@@ -41,8 +55,8 @@ github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkE
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
|
||||
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/ebitengine/purego v0.8.3 h1:K+0AjQp63JEZTEMZiwsI9g0+hAMNohwUOtY0RPGexmc=
|
||||
github.com/ebitengine/purego v0.8.3/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
@@ -51,10 +65,10 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
|
||||
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
|
||||
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU=
|
||||
github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
@@ -65,6 +79,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
@@ -85,12 +101,10 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20241009165004-a3522334989c h1:NDovD0SMpBYXlE1zJmS1q55vWB/fUQBcPAqAboZSccA=
|
||||
github.com/google/pprof v0.0.0-20241009165004-a3522334989c/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4 h1:gD0vax+4I+mAj+jEChEf25Ia07Jq7kYOFO5PPhAxFl4=
|
||||
github.com/google/pprof v0.0.0-20250423184734-337e5dd93bb4/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/greatroar/blobloom v0.8.0 h1:I9RlEkfqK9/6f1v9mFmDYegDQ/x0mISCpiNpAm23Pt4=
|
||||
github.com/greatroar/blobloom v0.8.0/go.mod h1:mjMJ1hh1wjGVfr93QIHJ6FfDNVrA0IELv8OvMHJxHKs=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@@ -99,7 +113,6 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
@@ -132,8 +145,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
@@ -162,20 +175,22 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
|
||||
github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
|
||||
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
|
||||
github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII6r8krA3w=
|
||||
github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
|
||||
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
|
||||
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
|
||||
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
|
||||
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -187,8 +202,8 @@ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
@@ -197,22 +212,22 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
|
||||
github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
|
||||
github.com/quic-go/quic-go v0.52.0 h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA=
|
||||
github.com/quic-go/quic-go v0.52.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab h1:ZjX6I48eZSFetPb41dHudEyVr5v953N15TsNZXlkcWY=
|
||||
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab/go.mod h1:/PfPXh0EntGc3QAAyUaviy4S9tzy4Zp0e2ilq4voC6E=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
|
||||
github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
|
||||
github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
|
||||
github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
|
||||
github.com/shirou/gopsutil/v4 v4.25.4 h1:cdtFO363VEOOFrUCjZRh4XVJkb548lyF0q0uTeMqYPw=
|
||||
github.com/shirou/gopsutil/v4 v4.25.4/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
@@ -223,13 +238,12 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syncthing/notify v0.0.0-20250207082249-f0fa8f99c2bc h1:xc3UfSFlH/X5hRw3h21RF6WXnRUYKmGRx06FEaVxfkM=
|
||||
github.com/syncthing/notify v0.0.0-20250207082249-f0fa8f99c2bc/go.mod h1:J0q59IWjLtpRIJulohwqEZvjzwOfTEPp8SVhDJl+y0Y=
|
||||
github.com/syncthing/notify v0.0.0-20250528144937-c7027d4f7465 h1:yhxdTGmFkAM2TFA65c3NgGwpnIkUM8oVqPX2e9S7IVg=
|
||||
github.com/syncthing/notify v0.0.0-20250528144937-c7027d4f7465/go.mod h1:J0q59IWjLtpRIJulohwqEZvjzwOfTEPp8SVhDJl+y0Y=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
|
||||
github.com/thejerf/suture/v4 v4.0.6 h1:QsuCEsCqb03xF9tPAsWAj8QOAJBgQI1c0VqJNaingg8=
|
||||
@@ -246,67 +260,37 @@ github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0/go.mod h1:TTbGUfE+cXXc
|
||||
github.com/willabides/kongplete v0.4.0 h1:eivXxkp5ud5+4+NVN9e4goxC5mSh3n1RHov+gsblM2g=
|
||||
github.com/willabides/kongplete v0.4.0/go.mod h1:0P0jtWD9aTsqPSUAl4de35DLghrr57XcayPyvqSi2X8=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
|
||||
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
|
||||
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
|
||||
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
|
||||
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -326,49 +310,25 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
|
||||
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
|
||||
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
|
||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -2,7 +2,7 @@
|
||||
"A device with that ID is already added.": "أضيف هذا الجهاز بالفعل.",
|
||||
"A negative number of days doesn't make sense.": "لا يمكن استخدام قيمة سالبة لعدد الأيام.",
|
||||
"A new major version may not be compatible with previous versions.": "الإصدار الجديد قد لا يتوافق مع الإصدارات السابقة.",
|
||||
"API Key": "مفتاح API",
|
||||
"API Key": "مفتاح واجهة برمجة التطبيقات \"API\"",
|
||||
"About": "حول",
|
||||
"Action": "إجراء",
|
||||
"Actions": "الإجراءات",
|
||||
@@ -27,6 +27,7 @@
|
||||
"Allowed Networks": "الشبكات المسموح بها",
|
||||
"Alphabetic": "أبجدية",
|
||||
"Altered by ignoring deletes.": "تغير بتجاهل عمليات الحذف.",
|
||||
"Always turned on when the folder type is \"{%foldertype%}\".": "مفعل دائمًا عندما يكون نوع المجلد هو \"{{foldertype}}\".",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "الإصدار يعالج بواسطة أمر خارجي. يجب إزالة الملف من المجلدات المشتركة. إذا كان المسار للتطبيق يحتوي على مسافات، يجب وضعها بين علامتي تنصيص دلالة على الاقتباس.",
|
||||
"Anonymous Usage Reporting": "تقارير الإستخدام المجهولة",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "هل تريد الانتقال الى التصميم الجديد لتقرير الاستخدام المجهول ؟",
|
||||
@@ -52,6 +53,7 @@
|
||||
"Body:": "جسم:",
|
||||
"Bugs": "أخطاء برمجية",
|
||||
"Cancel": "إلغاء",
|
||||
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "لا يمكن تفعيله عندما يكون نوع المجلد هو \"{{foldertype}}\".",
|
||||
"Changelog": "سجل التغيير",
|
||||
"Clean out after": "نظف بعد",
|
||||
"Cleaning Versions": "إصدارات نظيفة",
|
||||
|
||||
@@ -27,6 +27,7 @@
"Allowed Networks": "Allowed Networks",
"Alphabetic": "Alphabetic",
"Altered by ignoring deletes.": "Altered by ignoring deletes.",
"Always turned on when the folder type is \"{%foldertype%}\".": "Always turned on when the folder type is \"{{foldertype}}\".",
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.",
"Anonymous Usage Reporting": "Anonymous Usage Reporting",
"Anonymous usage report format has changed. Would you like to move to the new format?": "Anonymous usage report format has changed. Would you like to move to the new format?",
@@ -52,6 +53,7 @@
"Body:": "Body:",
"Bugs": "Bugs",
"Cancel": "Cancel",
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "Cannot be enabled when the folder type is \"{{foldertype}}\".",
"Changelog": "Changelog",
"Clean out after": "Clean out after",
"Cleaning Versions": "Cleaning Versions",

@@ -9,9 +9,15 @@
"Add Folder": "Lisa kaust",
"Add new folder?": "Lisa uus kaust?",
"Address": "Aadress",
"Addresses": "Aadressid",
"All Data": "Kõik andmed",
"All Time": "Kõik ajad",
"Allowed Networks": "Lubatud võrgud",
"Alphabetic": "Tähestikuline",
"Automatic upgrades": "Automaatsed uuendused",
"Be careful!": "Ettevaatust!",
"Cancel": "Loobu",
"Changelog": "Muudatuste nimekiri",
"Close": "Sulge",
"Configured": "Seadistatud",
"Connection Error": "Ühenduse viga",

@@ -27,6 +27,7 @@
|
||||
"Allowed Networks": "Mga Pinapayagang Network",
|
||||
"Alphabetic": "Alpabetiko",
|
||||
"Altered by ignoring deletes.": "Binago sa pamamagitan ng hindi pagpansin sa mga pagtanggal.",
|
||||
"Always turned on when the folder type is \"{%foldertype%}\".": "Palaging nakabukas kung ang uri ng folder ay nakatakda bilang \"{{foldertype}}\".",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Pinapamahala ng external na command ang file versioning. Kailangan nitong tanggalin ang file mula sa binabahaging folder. Kung may mga space ang path sa application, kailangan itong i-quote.",
|
||||
"Anonymous Usage Reporting": "Anonymous na Pag-uulat ng Paggamit",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "Nagbago ang pormat ng anonymous na ulat ng paggamit. Gusto mo bang lumipat sa bagong pormat?",
|
||||
@@ -52,6 +53,7 @@
|
||||
"Body:": "Body:",
|
||||
"Bugs": "Mga Bug",
|
||||
"Cancel": "Kanselahin",
|
||||
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "Hindi maaaring paganahin kapag ang uri ng folder ay \"{{foldertype}}\".",
|
||||
"Changelog": "Mga Pagbabago",
|
||||
"Clean out after": "Linisin pagkatapos",
|
||||
"Cleaning Versions": "Mga Bersyon ng Paglinis",
|
||||
@@ -311,7 +313,7 @@
|
||||
"Receive Encrypted": "Makatanggap Naka-Encrypt",
|
||||
"Receive Only": "Makatanggap Lamang",
|
||||
"Received data is already encrypted": "Naka-encrypt na ang natanggap na data",
|
||||
"Recent Changes": "Mga Kamakilang Pagbabago",
|
||||
"Recent Changes": "Mga Kamakailang Pagbabago",
|
||||
"Reduced by ignore patterns": "Binabawasan ng mga ignore pattern",
|
||||
"Relay LAN": "Relay na LAN",
|
||||
"Relay WAN": "Relay na WAN",
|
||||
|
||||
@@ -26,6 +26,7 @@
"Allow Anonymous Usage Reporting?": "Permiteţi raportarea anonimă de folosire a aplicaţiei?",
"Allowed Networks": "Rețele permise",
"Alphabetic": "Alfabetic",
"Altered by ignoring deletes.": "Modificat prin ignorarea ștergerilor.",
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "O comandă externă gestionează versiunea. Trebuie să elimine fișierul din mapa partajat. Dacă calea către aplicație conține spații, ar trebui să fie pusă între ghilimele.",
"Anonymous Usage Reporting": "Raport Anonim despre Folosirea Aplicației",
"Anonymous usage report format has changed. Would you like to move to the new format?": "Formatul raportului de utilizare anonim s-a schimbat. Doriți să vă mutați în noul format?",

@@ -27,6 +27,7 @@
|
||||
"Allowed Networks": "Разрешённые сети",
|
||||
"Alphabetic": "По алфавиту",
|
||||
"Altered by ignoring deletes.": "Изменено, игнорируя удаления.",
|
||||
"Always turned on when the folder type is \"{%foldertype%}\".": "Всегда включено для папок с типом «{{foldertype}}».",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Для версионирования используется внешняя программа. Ей нужно удалить файл из общей папки. Если путь к приложению содержит пробелы, его нужно взять в кавычки.",
|
||||
"Anonymous Usage Reporting": "Анонимный отчет об использовании",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "Формат анонимных отчётов изменился. Хотите переключиться на новый формат?",
|
||||
@@ -52,6 +53,7 @@
|
||||
"Body:": "Тело:",
|
||||
"Bugs": "Ошибки",
|
||||
"Cancel": "Отмена",
|
||||
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "Не может быть включено для папок с типом «{{foldertype}}».",
|
||||
"Changelog": "Журнал изменений",
|
||||
"Clean out after": "Очистить после",
|
||||
"Cleaning Versions": "Очистка версий",
|
||||
@@ -171,8 +173,8 @@
|
||||
"Folder Path": "Путь к папке",
|
||||
"Folder Status": "Статус папки",
|
||||
"Folder Type": "Тип папки",
|
||||
"Folder type \"{%receiveEncrypted%}\" can only be set when adding a new folder.": "Тип папки «{{receiveEncrypted}}» может быть указан только при создании новой папки.",
|
||||
"Folder type \"{%receiveEncrypted%}\" cannot be changed after adding the folder. You need to remove the folder, delete or decrypt the data on disk, and add the folder again.": "Тип папки «{{receiveEncrypted}}» не может быть изменён после добавления. Вам необходимо убрать папку, удалить или дешифровать данные на диске, а затем снова добавить папку.",
|
||||
"Folder type \"{%receiveEncrypted%}\" can only be set when adding a new folder.": "Тип папки «{{receiveEncrypted}}» может быть выбран только при добавлении новой папки.",
|
||||
"Folder type \"{%receiveEncrypted%}\" cannot be changed after adding the folder. You need to remove the folder, delete or decrypt the data on disk, and add the folder again.": "Тип папки «{{receiveEncrypted}}» не может быть изменён после добавления. Вам необходимо убрать папку, удалить или дешифровать данные на диске и затем снова её добавить.",
|
||||
"Folders": "Папки",
|
||||
"For the following folders an error occurred while starting to watch for changes. It will be retried every minute, so the errors might go away soon. If they persist, try to fix the underlying issue and ask for help if you can't.": "Для следующих папок произошла ошибка при запуске отслеживания изменений. Попытки будут повторяться раз в минуту, и ошибки скоро могут быть устранены. Если этого не произойдёт, попробуйте разобраться в причинах и попросите поддержки, если у вас не получится.",
|
||||
"Forever": "Вечно",
|
||||
|
||||
36 gui/default/assets/lang/lang-sr.json Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"A device with that ID is already added.": "Уређај са тим идентификатором је већ додат.",
|
||||
"A negative number of days doesn't make sense.": "Негативан број дана нема смисла.",
|
||||
"A new major version may not be compatible with previous versions.": "Нова верзија можда неће радити са претходним верзијама.",
|
||||
"API Key": "АПИ кључ",
|
||||
"About": "Информације",
|
||||
"Action": "Радња",
|
||||
"Actions": "Радње",
|
||||
"Active filter rules": "Активна правила филтера",
|
||||
"Add": "Додај",
|
||||
"Add Device": "Додај уређај",
|
||||
"Add Folder": "Додај фасциклу",
|
||||
"Add Remote Device": "Додаај удаљени уређај",
|
||||
"Add devices from the introducer to our device list, for mutually shared folders.": "Додај уређаје од иницијатора на нашу листу уређаја, за међусобно дељене фасцикле.",
|
||||
"Add filter entry": "Додај ставку филтера",
|
||||
"Add ignore patterns": "Додај правила за игнорисање",
|
||||
"Add new folder?": "Додај нову фасциклу?",
|
||||
"Additionally the full rescan interval will be increased (times 60, i.e. new default of 1h). You can also configure it manually for every folder later after choosing No.": "Додатно, интервал потпуног поновног скенирања ће бити повећан (60 пута, тј. нови подразумевани интервал од 1 сат). Такође можете ручно да га подесите за сваку фасциклу касније након што изаберете Не.",
|
||||
"Address": "Адреса",
|
||||
"Addresses": "Адресе",
|
||||
"Advanced": "Напредно",
|
||||
"Advanced Configuration": "Напредна конфигурација",
|
||||
"All Data": "Сви подаци",
|
||||
"All Time": "Све време",
|
||||
"All folders shared with this device must be protected by a password, such that all sent data is unreadable without the given password.": "Све фасцикле које се деле са овим уређајем морају бити заштићене лозинком, тако да сви послати подаци не могу бити прочитани без дате лозинке.",
|
||||
"Allow Anonymous Usage Reporting?": "Дозволити анонимно слање података о коришћењу?",
|
||||
"Allowed Networks": "Дозвољене мреже",
|
||||
"Alphabetic": "Абецедним редом",
|
||||
"Altered by ignoring deletes.": "Промењено због игнорисања брисања.",
|
||||
"Always turned on when the folder type is \"{%foldertype%}\".": "Увек укључено када је тип фасцикле „{{foldertype}}\".",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Екстерна команда управља верзионирањем. Она мора да уклони фајл из дељене фасцикле. Ако путања до апликације садржи размаке, треба да буде под наводницима.",
|
||||
"Anonymous Usage Reporting": "Анонимно слање података о употреби",
|
||||
"Anonymous usage report format has changed. Would you like to move to the new format?": "Формат анонимног слања података о коришћењу је промењен. Желите ли да пређете на нови формат?",
|
||||
"Applied to LAN": "Важи за локалну мрежу",
|
||||
"Apply": "Примени"
|
||||
}
|
||||
@@ -27,6 +27,7 @@
"Allowed Networks": "Tillåtna nätverk",
"Alphabetic": "Alfabetisk",
"Altered by ignoring deletes.": "Ändrad genom att ignorera borttagningar.",
"Always turned on when the folder type is \"{%foldertype%}\".": "Alltid på när mapptypen är \"{{foldertype}}\".",
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Ett externt kommando hanterar versionen. Det måste ta bort filen från den delade mappen. Om sökvägen till applikationen innehåller mellanslag bör den citeras.",
"Anonymous Usage Reporting": "Anonym användarstatistiksrapportering",
"Anonymous usage report format has changed. Would you like to move to the new format?": "Anonymt användningsrapportformat har ändrats. Vill du flytta till det nya formatet?",
@@ -52,6 +53,7 @@
"Body:": "Meddelande:",
"Bugs": "Felrapporter",
"Cancel": "Avbryt",
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "Kan inte aktiveras när mapptypen är \"{{foldertype}}\".",
"Changelog": "Ändringslogg",
"Clean out after": "Rensa efteråt",
"Cleaning Versions": "Rensningsversioner",

@@ -30,7 +30,7 @@
|
||||
<h4 class="text-center" translate>The Syncthing Authors</h4>
|
||||
<div class="row">
|
||||
<div class="col-md-12" id="contributor-list">
|
||||
Jakob Borg, Audrius Butkevicius, Jesse Lucas, Simon Frei, Tomasz Wilczyński, Alexander Graf, Alexandre Viau, Anderson Mesquita, André Colomb, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Emil Lundberg, Eric P, Evgeny Kuznetsov, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ross Smith II, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Wulf Weich, bt90, greatroar, Aaron Bieber, Adam Piggott, Adel Qalieh, Alan Pope, Alberto Donato, Aleksey Vasenev, Alessandro G., Alex Ionescu, Alex Lindeman, Alex Xu, Alexander Seiler, Alexandre Alves, Aman Gupta, Anatoli Babenia, Andreas Sommer, Andrew Dunham, Andrew Meyer, Andrew Rabert, Andrey D, Anjan Momi, Anthony Goeckner, Antoine Lamielle, Anur, Aranjedeath, Arkadiusz Tymiński, Aroun, Arthur Axel fREW Schmidt, Artur Zubilewicz, Aurélien Rainone, BAHADIR YILMAZ, Bart De Vries, Beat Reichenbach, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benedikt Heine, Benedikt Morbach, Benjamin Nater, Benno Fünfstück, Benny Ng, Boqin Qin, Boris Rybalkin, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Catfriend1, Cathryne Linenweaver, Cedric Staniewski, Chih-Hsuan Yen, Choongkyu, Chris Howie, Chris Joel, Chris Tonkinson, Christian Kujau, Christian Prescott, Colin Kennedy, Cromefire_, Cyprien Devillez, Dale Visser, Dan, Daniel Barczyk, Daniel Bergmann, Daniel Martí, Daniel Padrta, Darshil Chanpura, David Rimmer, DeflateAwning, Denis A., Dennis Wilson, DerRockWolf, Devon G. Redekopp, Dimitri Papadopoulos Orfanos, Dmitry Saveliev, Domenic Horner, Dominik Heidler, Elias Jarlebring, Elliot Huffman, Emil Hessman, Eng Zer Jun, Eric Lesiuta, Erik Meitner, Evan Spensley, Federico Castagnini, Felix, Felix Ableitner, Felix Lampe, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gahl Saraf, Gilli Sigurdsson, Gleb Sinyavskiy, Graham Miln, Greg, Gusted, Han Boetes, HansK-p, Harrison Jones, Heiko Zuerker, Hireworks, Hugo Locurcio, Iain Barnett, Ian Johnson, Ikko Ashimine, Ilya Brin, Iskander Sharipov, Jaakko Hannikainen, Jacek Szafarkiewicz, Jack Croft, Jacob, Jake Peterson, James O'Beirne, James Patterson, Jaroslav Lichtblau, Jaroslav Malec, Jaspitta, Jauder Ho, Jaya Chithra, Jaya Kumar, Jeffery To, Jens Diemer, Jerry Jacobs, Jochen Voss, Johan Andersson, Johan Vromans, John Rinehart, Jonas Thelemann, Jonathan, Jonathan Cross, Jonta, Jose Manuel Delicado, Julian Lehrhuber, Jörg Thalheim, Jędrzej Kula, K.B.Dharun Krishna, Kalle Laine, Kapil Sareen, Karol Różycki, Kebin Liu, Keith Harrison, Keith Turner, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin Bushiri, Kevin White, Jr., Kurt Fitzner, LSmithx2, Lars Lehtonen, Laurent Arnoud, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Lukas Lihotzki, Luke Hamburg, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Marcus B Spencer, Marcus Legendre, Mario Majila, Mark Pulford, Martchus, Martin Polehla, Mateusz Naściszewski, Mateusz Ż, Matic Potočnik, Matt Burke, Matt Robenolt, Matteo Ruina, Maurizio Tomasi, Max, Max Schulze, MaximAL, Maxime Thirouin, Maximilian, MichaIng, Michael Jephcote, Michael Rienstra, Michael Tilli, Migelo, Mike Boone, MikeLund, MikolajTwarog, Mingxuan Lin, Naveen, Nicholas Rishel, Nick Busey, Nico Stapelbroek, Nicolas Braud-Santoni, Nicolas Perraut, Niels Peter Roest, Nils Jakobi, NinoM4ster, Nitroretro, NoLooseEnds, Oliver Freyermuth, Otiel, Oyebanji Jacob Mayowa, Pablo, Pascal Jungblut, Paul Brit, Pawel Palenica, Paweł Rozlach, Peter Badida, Peter Dave Hello, Peter Hoeg, Peter Marquardt, Phani Rithvij, Phil Davis, Phill 
Luby, Pier Paolo Ramon, Piotr Bejda, Pramodh KP, Quentin Hibon, Rahmi Pruitt, Richard Hartmann, Robert Carosi, Roberto Santalla, Robin Schoonover, Roman Zaynetdinov, Ruslan Yevdokymov, Ryan Qian, Sacheendra Talluri, Scott Klupfel, Sertonix, Severin von Wnuck-Lipinski, Shaarad Dalvi, Simon Mwepu, Simon Pickup, Sly_tom_cat, Sonu Kumar Saw, Stefan Kuntz, Steven Eckhoff, Suhas Gundimeda, Sven Bachmann, Sébastien WENSKE, Taylor Khan, Terrance, Thomas, Thomas Hipp, Tim Abell, Tim Howes, Tim Nordenfur, Tobias Frölich, Tobias Klauser, Tobias Nygren, Tobias Tom, Tom Jakubowski, Tommy Thorn, Tommy van der Vorst, Tully Robinson, Tyler Brazier, Tyler Kropp, Unrud, Veeti Paananen, Victor Buinsky, Vik, Vil Brekin, Vladimir Rusinov, WangXi, Will Rouesnel, William A. Kennington III, Xavier O., Yannic A., andresvia, andyleap, boomsquared, chenrui, chucic, cjc7373, cui fliter, d-volution, dashangcun, derekriemer, desbma, diemade, digital, entity0xfe, georgespatton, ghjklw, guangwu, gudvinr, ignacy123, janost, jaseg, jelle van der Waa, jtagcat, klemens, kylosus, luchenhan, luzpaz, marco-m, mathias4833, maxice8, mclang, mv1005, nf, orangekame3, otbutz, overkill, perewa, polyfloyd, red_led, rubenbe, sec65, vapatel2, villekalliomaki, wangguoliang, wouter bolsterlee, xarx00, xjtdy888, 佛跳墙, 落心
|
||||
Jakob Borg, Audrius Butkevicius, Jesse Lucas, Simon Frei, Tomasz Wilczyński, Alexander Graf, Alexandre Viau, Anderson Mesquita, André Colomb, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Emil Lundberg, Eric P, Evgeny Kuznetsov, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ross Smith II, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Wulf Weich, bt90, greatroar, Aaron Bieber, Adam Piggott, Adel Qalieh, Alan Pope, Alberto Donato, Aleksey Vasenev, Alessandro G., Alex Ionescu, Alex Lindeman, Alex Xu, Alexander Seiler, Alexandre Alves, Aman Gupta, Anatoli Babenia, Andreas Sommer, Andrew Dunham, Andrew Meyer, Andrew Rabert, Andrey D, Anjan Momi, Anthony Goeckner, Antoine Lamielle, Anur, Aranjedeath, Arkadiusz Tymiński, Aroun, Arthur Axel fREW Schmidt, Artur Zubilewicz, Ashish Bhate, Aurélien Rainone, BAHADIR YILMAZ, Bart De Vries, Beat Reichenbach, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benedikt Heine, Benedikt Morbach, Benjamin Nater, Benno Fünfstück, Benny Ng, Boqin Qin, Boris Rybalkin, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Catfriend1, Cathryne Linenweaver, Cedric Staniewski, Chih-Hsuan Yen, Choongkyu, Chris Howie, Chris Joel, Chris Tonkinson, Christian Kujau, Christian Prescott, Colin Kennedy, Cromefire_, Cyprien Devillez, Dale Visser, Dan, Daniel Barczyk, Daniel Bergmann, Daniel Martí, Daniel Padrta, Darshil Chanpura, David Rimmer, DeflateAwning, Denis A., Dennis Wilson, DerRockWolf, Devon G. Redekopp, Dimitri Papadopoulos Orfanos, Dmitry Saveliev, Domenic Horner, Dominik Heidler, Elias Jarlebring, Elliot Huffman, Emil Hessman, Eng Zer Jun, Eric Lesiuta, Erik Meitner, Evan Spensley, Federico Castagnini, Felix, Felix Ableitner, Felix Lampe, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gahl Saraf, Gilli Sigurdsson, Gleb Sinyavskiy, Graham Miln, Greg, Gusted, Han Boetes, HansK-p, Harrison Jones, Hazem Krimi, Heiko Zuerker, Hireworks, Hugo Locurcio, Iain Barnett, Ian Johnson, Ikko Ashimine, Ilya Brin, Iskander Sharipov, Jaakko Hannikainen, Jacek Szafarkiewicz, Jack Croft, Jacob, Jake Peterson, James O'Beirne, James Patterson, Jaroslav Lichtblau, Jaroslav Malec, Jaspitta, Jauder Ho, Jaya Chithra, Jaya Kumar, Jeffery To, Jens Diemer, Jerry Jacobs, Jochen Voss, Johan Andersson, Johan Vromans, John Rinehart, Jonas Thelemann, Jonathan, Jonathan Cross, Jonta, Jose Manuel Delicado, Julian Lehrhuber, Jörg Thalheim, Jędrzej Kula, K.B.Dharun Krishna, Kalle Laine, Kapil Sareen, Karol Różycki, Kebin Liu, Keith Harrison, Keith Turner, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin Bushiri, Kevin White, Jr., Kurt Fitzner, LSmithx2, Lars Lehtonen, Laurent Arnoud, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Lukas Lihotzki, Luke Hamburg, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcel Meyer, Marcin Dziadus, Marcus B Spencer, Marcus Legendre, Mario Majila, Mark Pulford, Martchus, Martin Polehla, Mateusz Naściszewski, Mateusz Ż, Matic Potočnik, Matt Burke, Matt Robenolt, Matteo Ruina, Maurizio Tomasi, Max, Max Schulze, MaximAL, Maxime Thirouin, Maximilian, MichaIng, Michael Jephcote, Michael Rienstra, Michael Tilli, Migelo, Mike Boone, MikeLund, MikolajTwarog, Mingxuan Lin, Naveen, Nicholas Rishel, Nick Busey, Nico Stapelbroek, Nicolas Braud-Santoni, Nicolas Perraut, Niels Peter Roest, Nils Jakobi, NinoM4ster, Nitroretro, NoLooseEnds, Oliver Freyermuth, Otiel, Oyebanji Jacob Mayowa, Pablo, Pascal Jungblut, Paul Brit, Paul Donald, Pawel Palenica, Paweł Rozlach, Peter Badida, Peter Dave Hello, Peter 
Hoeg, Peter Marquardt, Phani Rithvij, Phil Davis, Phill Luby, Pier Paolo Ramon, Piotr Bejda, Pramodh KP, Quentin Hibon, Rahmi Pruitt, Richard Hartmann, Robert Carosi, Roberto Santalla, Robin Schoonover, Roman Zaynetdinov, Ruslan Yevdokymov, Ryan Qian, Sacheendra Talluri, Scott Klupfel, Sertonix, Severin von Wnuck-Lipinski, Shaarad Dalvi, Simon Mwepu, Simon Pickup, Sly_tom_cat, Sonu Kumar Saw, Stefan Kuntz, Steven Eckhoff, Suhas Gundimeda, Sven Bachmann, Sébastien WENSKE, Taylor Khan, Terrance, TheCreeper, Thomas, Thomas Hipp, Tim Abell, Tim Howes, Tim Nordenfur, Tobias Frölich, Tobias Klauser, Tobias Nygren, Tobias Tom, Tom Jakubowski, Tommy Thorn, Tommy van der Vorst, Tully Robinson, Tyler Brazier, Tyler Kropp, Unrud, Veeti Paananen, Victor Buinsky, Vik, Vil Brekin, Vladimir Rusinov, WangXi, Will Rouesnel, William A. Kennington III, Xavier O., Yannic A., andresvia, andyleap, boomsquared, chenrui, chucic, cjc7373, cui fliter, d-volution, dashangcun, derekriemer, desbma, diemade, digital, domain, entity0xfe, georgespatton, ghjklw, guangwu, gudvinr, ignacy123, janost, jaseg, jelle van der Waa, jtagcat, klemens, kylosus, luchenhan, luzpaz, marco-m, mathias4833, maxice8, mclang, mv1005, nf, orangekame3, otbutz, overkill, perewa, polyfloyd, pullmerge, red_led, rubenbe, sec65, vapatel2, villekalliomaki, wangguoliang, wouter bolsterlee, xarx00, xjtdy888, 佛跳墙, 落心
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -38,45 +38,70 @@ Jakob Borg, Audrius Butkevicius, Jesse Lucas, Simon Frei, Tomasz Wilczyński, Al
|
||||
<div id="about-includes" class="tab-pane">
|
||||
<p translate>Syncthing includes the following software or portions thereof:</p>
|
||||
<ul class="list-unstyled two-columns" id="copyright-notices">
|
||||
<li><a href="http://getbootstrap.com/">Bootstrap</a>, Copyright © 2011-2016 Twitter, Inc.</li>
|
||||
<li><a href="https://getbootstrap.com/">Bootstrap</a>, Copyright © 2011-2016 Twitter, Inc.</li>
|
||||
<li><a href="https://angularjs.org/">AngularJS</a>, Copyright © 2010-2014, 2016 Google, Inc.</li>
|
||||
<li><a href="http://www.daterangepicker.com/">Date Range Picker</a>, Copyright © 2012-2018 Dan Grossman.</li>
|
||||
<li><a href="https://www.daterangepicker.com/">Date Range Picker</a>, Copyright © 2012-2018 Dan Grossman.</li>
|
||||
<li><a href="https://github.com/mar10/fancytree">JQuery Fancytree Plugin</a>, Copyright © 2008-2018 Martin Wendt.</li>
|
||||
<li><a href="https://fontawesome.com/">Font Awesome</a>Copyright © 2024 Fonticons, Inc.</li>
|
||||
<li><a href="https://forkaweso.me/Fork-Awesome/">Fork Awesome</a>, Copyright © 2018 Dave Gandy & Fork Awesome.</li>
|
||||
<li><a href="http://jquery.com/">jQuery JavaScript Library</a>, Copyright © jQuery Foundation and other contributors.</li>
|
||||
<li><a href="http://momentjs.com/">moment.js</a>, Copyright © JS Foundation and other contributors.</li>
|
||||
<li><a href="https://evanhahn.github.io/HumanizeDuration.js/">HumanDuration.js</a>, Copyright © 2013-2024 Evan Hahn, portions copyright © 2024 Ross Smith II.</li>
|
||||
<li><a href="https://jquery.com/">jQuery JavaScript Library</a>, Copyright © jQuery Foundation and other contributors.</li>
|
||||
<li><a href="https://leafletjs.com/">leaflet.js</a>, Copyright © 2010-2025 Volodymyr Agafonkin, Copyright © 2010-2011 CloudMade.</li>
|
||||
<li><a href="https://momentjs.com/">moment.js</a>, Copyright © JS Foundation and other contributors.</li>
|
||||
<li><a href="https://golang.org/">The Go Programming Language</a>, Copyright © 2009 The Go Authors.</li>
|
||||
<li><a href="https://prometheus.io/">Prometheus</a>, Copyright © 2012-2015 The Prometheus Authors.</li>
|
||||
<li><a href="https://github.com/AudriusButkevicius/go-nat-pmp">AudriusButkevicius/go-nat-pmp</a>, Copyright © 2013 John Howard Palevich.</li>
|
||||
<li><a href="https://github.com/AudriusButkevicius/recli">AudriusButkevicius/recli</a>, Copyright © 2019 Audrius Butkevicius.</li>
|
||||
<li><a href="https://github.com/Azure/go-ntlmssp">Azure/go-ntlmssp</a>, Copyright © 2016 Microsoft.</li>
|
||||
<li><a href="https://github.com/alecthomas/kong">alecthomas/kong</a>, Copyright © 2018 Alec Thomas.</li>
|
||||
<li><a href="https://github.com/beorn7/perks">beorn7/perks</a>, Copyright © 2013 Blake Mizerany.</li>
|
||||
<li><a href="https://github.com/pierrec/lz4">pierrec/lz4</a>, Copyright © 2015 Pierre Curto.</li>
|
||||
<li><a href="https://github.com/calmh/du">calmh/du</a>, Public domain.</li>
|
||||
<li><a href="https://github.com/calmh/incontainer">calmh/incontainer</a>, Copyright © 2022 calmh.</li>
|
||||
<li><a href="https://github.com/calmh/xdr">calmh/xdr</a>, Copyright © 2014 Jakob Borg.</li>
|
||||
<li><a href="https://github.com/ccding/go-stun">ccding/go-stun</a>, Copyright © 2016 Cong Ding.</li>
|
||||
<li><a href="https://github.com/cespare/xxhash/v2">cespare/xxhash/v2</a>, Copyright © 2016 Caleb Spare.</li>
|
||||
<li><a href="https://github.com/chmduquesne/rollinghash">chmduquesne/rollinghash</a>, Copyright © 2015 Christophe-Marie Duquesne.</li>
|
||||
<li><a href="https://github.com/d4l3k/messagediff">d4l3k/messagediff</a>, Copyright © 2015 Tristan Rice.</li>
|
||||
<li><a href="https://github.com/cpuguy83/go-md2man/v2">cpuguy83/go-md2man/v2</a>, Copyright © 2014 Brian Goff.</li>
|
||||
<li><a href="https://github.com/davecgh/go-spew">davecgh/go-spew</a>, Copyright © 2012-2016 Dave Collins.</li>
|
||||
<li><a href="https://github.com/go-asn1-ber/asn1-ber">go-asn1-ber/asn1-ber</a>, Copyright © 2011-2015 Michael Mitton (mmitton@gmail.com).</li>
|
||||
<li><a href="https://github.com/go-ldap/ldap">go-ldap/ldap</a>, Copyright © 2011-2015 Michael Mitton (mmitton@gmail.com).</li>
|
||||
<li><a href="https://github.com/uber-go/automaxprocs">go.uber.org/automaxprocs</a>, Copyright © 2017 Uber Technologies, Inc.</li>
|
||||
<li><a href="https://github.com/gobwas/glob">gobwas/glob</a>, Copyright © 2016 Sergey Kamardin.</li>
|
||||
<li><a href="https://github.com/golang/groupcache">golang/groupcache</a>, Copyright © 2013 Google Inc.</li>
|
||||
<li><a href="https://github.com/golang/protobuf">golang/protobuf</a>, Copyright © 2010 The Go Authors.</li>
|
||||
<li><a href="https://github.com/gofrs/flock">gofrs/flock</a>, Copyright © 2018-2025, The Gofrs.</li>
|
||||
<li><a href="https://github.com/golang/snappy">golang/snappy</a>, Copyright © 2011 The Snappy-Go Authors.</li>
|
||||
<li><a href="https://github.com/protocolbuffers/protobuf-go">google.golang.org/protobuf</a>, Copyright © 2018 The Go Authors.</li>
|
||||
<li><a href="https://github.com/google/uuid">google/uuid</a>, Copyright © 2009,2014 Google Inc.</li>
|
||||
<li><a href="https://gopkg.in/yaml.v3">gopkg.in/yaml.v3</a>, Copyright © 2025, the gopkg.in/yaml.v3 authors.</li>
|
||||
<li><a href="https://github.com/greatroar/blobloom">greatroar/blobloom</a>, Copyright © 2020-2024 the Blobloom authors.</li>
|
||||
<li><a href="https://github.com/hashicorp/errwrap">hashicorp/errwrap</a>, Copyright © 2014 HashiCorp, Inc.</li>
|
||||
<li><a href="https://github.com/hashicorp/go-multierror">hashicorp/go-multierror</a>, Copyright © 2014 HashiCorp, Inc.</li>
|
||||
<li><a href="https://github.com/hashicorp/golang-lru">hashicorp/golang-lru</a>, Copyright © 2014 HashiCorp, Inc.</li>
|
||||
<li><a href="https://github.com/jackpal/gateway">jackpal/gateway</a>, Copyright © 2010 Jack Palevich.</li>
|
||||
<li><a href="https://github.com/jackpal/go-nat-pmp">jackpal/go-nat-pmp</a>, Copyright 2013 John Howard Palevich.</li>
|
||||
<li><a href="https://github.com/julienschmidt/httprouter">julienschmidt/httprouter</a>, Copyright © 2013, Julien Schmidt.</li>
|
||||
<li><a href="https://github.com/kballard/go-shellquote">kballard/go-shellquote</a>, Copyright © 2014 Kevin Ballard.</li>
|
||||
<li><a href="https://github.com/mattn/go-isatty">mattn/go-isatty</a>, Copyright © Yasuhiro MATSUMOTO.</li>
|
||||
<li><a href="https://github.com/matttproud/golang_protobuf_extensions">matttproud/golang_protobuf_extensions</a>, Copyright © 2012 Matt T. Proud.</li>
|
||||
<li><a href="https://github.com/oschwald/geoip2-golang">oschwald/geoip2-golang</a>, Copyright © 2015, Gregory J. Oschwald.</li>
|
||||
<li><a href="https://github.com/oschwald/maxminddb-golang">oschwald/maxminddb-golang</a>, Copyright © 2015, Gregory J. Oschwald.</li>
|
||||
<li><a href="https://github.com/petermattis/goid">petermattis/goid</a>, Copyright © 2015-2016 Peter Mattis.</li>
|
||||
<li><a href="https://github.com/miscreant/miscreant.go">miscreant/miscreant.go</a>, Copyright © 2017-2019 The Miscreant Developers.</li>
|
||||
<li><a href="https://github.com/munnerz/goautoneg">munnerz/goautoneg</a>, Copyright © 2011, Open Knowledge Foundation Ltd.</li>
|
||||
<li><a href="https://github.com/pierrec/lz4">pierrec/lz4</a>, Copyright © 2015 Pierre Curto.</li>
|
||||
<li><a href="https://github.com/pkg/errors">pkg/errors</a>, Copyright © 2015, Dave Cheney.</li>
|
||||
<li><a href="https://github.com/pmezard/go-difflib">pmezard/go-difflib</a>, Copyright © 2013, Patrick Mezard.</li>
|
||||
<li><a href="https://github.com/posener/complete">posener/complete</a>, Copyright © 2017 Eyal Posener.</li>
|
||||
<li><a href="https://github.com/prometheus/client_golang">prometheus/client_golang</a>, Copyright 2012-2015 The Prometheus Authors.</li>
|
||||
<li><a href="https://github.com/prometheus/client_model">prometheus/client_model</a>, Copyright © 2025, the prometheus/client_model authors.</li>
|
||||
<li><a href="https://github.com/prometheus/common">prometheus/common</a>, Copyright © 2025, the prometheus/common authors.</li>
|
||||
<li><a href="https://github.com/prometheus/procfs">prometheus/procfs</a>, Copyright © 2025, the prometheus/procfs authors.</li>
|
||||
<li><a href="https://github.com/quic-go/quic-go">quic-go/quic-go</a>, Copyright © 2016 the quic-go authors & Google, Inc.</li>
|
||||
<li><a href="https://github.com/rcrowley/go-metrics">rcrowley/go-metrics</a>, Copyright © 2012 Richard Crowley.</li>
|
||||
<li><a href="https://github.com/sasha-s/go-deadlock">sasha-s/go-deadlock</a>, Copyright © 2016 sasha-s.</li>
|
||||
<li><a href="https://github.com/syncthing/notify">syncthing/notify</a>, Copyright © 2014-2015 The Notify Authors.</li>
|
||||
<li><a href="https://github.com/riywo/loginshell">riywo/loginshell</a>, Copyright © 2019 Ryosuke IWANAGA.</li>
|
||||
<li><a href="https://github.com/russross/blackfriday/v2">russross/blackfriday/v2</a>, Copyright © 2011 Russ Ross.</li>
|
||||
<li><a href="https://github.com/shirou/gopsutil">shirou/gopsutil</a>, Copyright © 2014, WAKAYAMA Shirou.</li>
|
||||
<li><a href="https://github.com/stretchr/objx">stretchr/objx</a>, Copyright © 2014 Stretchr, Inc.</li>
|
||||
<li><a href="https://github.com/stretchr/testify">stretchr/testify</a>, Copyright © 2012-2020 Mat Ryer, Tyler Bunnell and contributors.</li>
|
||||
<li><a href="https://github.com/syndtr/goleveldb">syndtr/goleveldb</a>, Copyright © 2012 Suryandaru Triandana.</li>
|
||||
<li><a href="https://github.com/thejerf/suture">thejerf/suture</a>, Copyright © 2014-2015 Barracuda Networks, Inc.</li>
|
||||
<li><a href="https://github.com/urfave/cli">urfave/cli</a>, Copyright © 2016 Jeremy Saenz & Contributors.</li>
|
||||
<li><a href="https://github.com/tklauser/go-sysconf">tklauser/go-sysconf</a>, Copyright © 2018-2022, Tobias Klauser.</li>
|
||||
<li><a href="https://github.com/tklauser/numcpus">tklauser/numcpus</a>, Copyright © 2018-2024 Tobias Klauser.</li>
|
||||
<li><a href="https://github.com/urfave/cli">urfave/cli</a>, Copyright © 2016 Jeremy Saenz & Contributors.</li>
|
||||
<li><a href="https://github.com/vitrun/qart">vitrun/qart</a>, Copyright © 2010-2011 The Go Authors.</li>
|
||||
<li><a href="https://gopkg.in/asn1-ber.v1">gopkg.in/asn1-ber.v1</a>, Copyright © 2011-2015 Michael Mitton, portions Copyright © 2015-2016 go-asn1-ber Authors.</li>
|
||||
<li><a href="https://gopkg.in/ldap.v2">gopkg.in/ldap.v2</a>, Copyright © 2011-2015 Michael Mitton, portions Copyright © 2015-2016 go-ldap Authors.</li>
|
||||
<li><a href="https://golang.org">The Go Programming Language</a>, Copyright © 2009 The Go Authors.</li>
|
||||
<li>Font Awesome by Dave Gandy - <a href="http://fontawesome.io/">http://fontawesome.io</a></li>
|
||||
<li><a href="https://github.com/willabides/kongplete">willabides/kongplete</a>, Copyright © 2020 WillAbides.</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -16,6 +16,14 @@ angular.module('syncthing.core')
|
||||
},
|
||||
link: function (scope, element, attrs) {
|
||||
|
||||
$(element).on('click', function (event) {
|
||||
const closestTabAnchor = event.target.closest('a[data-toggle="tab"]');
|
||||
|
||||
if (closestTabAnchor && closestTabAnchor.href.includes('#')) {
|
||||
event.preventDefault();
|
||||
}
|
||||
});
|
||||
|
||||
// before modal show animation
|
||||
$(element).on('show.bs.modal', function () {
|
||||
|
||||
|
||||
@@ -1,27 +1,39 @@
|
||||
angular.module('syncthing.core')
|
||||
.filter('uncamel', function () {
|
||||
const reservedStrings = [
|
||||
'IDs', 'ID', // substrings must come AFTER longer keywords containing them
|
||||
'URL', 'UR',
|
||||
'API', 'QUIC', 'TCP', 'UDP', 'NAT', 'LAN', 'WAN',
|
||||
'KiB', 'MiB', 'GiB', 'TiB'
|
||||
];
|
||||
return function (input) {
|
||||
input = input.replace(/(.)([A-Z][a-z]+)/g, '$1 $2').replace(/([a-z0-9])([A-Z])/g, '$1 $2');
|
||||
var parts = input.split(' ');
|
||||
var lastPart = parts.splice(-1)[0];
|
||||
if (!input || typeof input !== 'string') return '';
|
||||
const placeholders = {};
|
||||
let counter = 0;
|
||||
reservedStrings.forEach(word => {
|
||||
const placeholder = `__RSV${counter}__`;
|
||||
const re = new RegExp(word, 'g');
|
||||
input = input.replace(re, placeholder);
|
||||
placeholders[placeholder] = word;
|
||||
counter++;
|
||||
});
|
||||
input = input.replace(/([a-z0-9])([A-Z])/g, '$1 $2');
|
||||
Object.entries(placeholders).forEach(([ph, word]) => {
|
||||
input = input.replace(new RegExp(ph, 'g'), ` ${word} `);
|
||||
});
|
||||
let parts = input.split(' ');
|
||||
const lastPart = parts.pop();
|
||||
switch (lastPart) {
|
||||
case "S":
|
||||
parts.push('(seconds)');
|
||||
break;
|
||||
case "M":
|
||||
parts.push('(minutes)');
|
||||
break;
|
||||
case "H":
|
||||
parts.push('(hours)');
|
||||
break;
|
||||
case "Ms":
|
||||
parts.push('(milliseconds)');
|
||||
break;
|
||||
default:
|
||||
parts.push(lastPart);
|
||||
break;
|
||||
case 'S': parts.push('(seconds)'); break;
|
||||
case 'M': parts.push('(minutes)'); break;
|
||||
case 'H': parts.push('(hours)'); break;
|
||||
case 'Ms': parts.push('(milliseconds)'); break;
|
||||
default: parts.push(lastPart); break;
|
||||
}
|
||||
input = parts.join(' ');
|
||||
return input.charAt(0).toUpperCase() + input.slice(1);
|
||||
parts = parts.map(part => {
|
||||
const match = reservedStrings.find(w => w.toUpperCase() === part.toUpperCase());
|
||||
return match || part.charAt(0).toUpperCase() + part.slice(1);
|
||||
});
|
||||
return parts.join(' ').replace(/\s+/g, ' ').trim();
|
||||
};
|
||||
});
|
||||
|
||||
80
internal/blob/azureblob/azureblob.go
Normal file
@@ -0,0 +1,80 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package azureblob

import (
	"context"
	"io"
	"time"

	stblob "github.com/syncthing/syncthing/internal/blob"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

var _ stblob.Store = (*BlobStore)(nil)

type BlobStore struct {
	client    *azblob.Client
	container string
}

func NewBlobStore(accountName, accountKey, containerName string) (*BlobStore, error) {
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return nil, err
	}
	url := "https://" + accountName + ".blob.core.windows.net/"
	sc, err := azblob.NewClientWithSharedKeyCredential(url, credential, &azblob.ClientOptions{})
	if err != nil {
		return nil, err
	}
	// This errors when the container already exists, which we ignore.
	_, _ = sc.CreateContainer(context.Background(), containerName, &container.CreateOptions{})
	return &BlobStore{
		client:    sc,
		container: containerName,
	}, nil
}

func (a *BlobStore) Upload(ctx context.Context, key string, data io.Reader) error {
	_, err := a.client.UploadStream(ctx, a.container, key, data, &blockblob.UploadStreamOptions{})
	return err
}

func (a *BlobStore) Download(ctx context.Context, key string, w stblob.Writer) error {
	resp, err := a.client.DownloadStream(ctx, a.container, key, &blob.DownloadStreamOptions{})
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	_, err = io.Copy(w, resp.Body)
	return err
}

func (a *BlobStore) LatestKey(ctx context.Context) (string, error) {
	opts := &azblob.ListBlobsFlatOptions{}
	pager := a.client.NewListBlobsFlatPager(a.container, opts)
	var latest string
	var lastModified time.Time
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return "", err
		}
		for _, blob := range page.Segment.BlobItems {
			if latest == "" || blob.Properties.LastModified.After(lastModified) {
				latest = *blob.Name
				lastModified = *blob.Properties.LastModified
			}
		}
	}
	return latest, nil
}
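Below is a minimal construction-and-upload sketch for the new store, not part of the change itself. Since the package is internal it would have to live inside the syncthing module; the account name, key, container name and file path are placeholders.

package main

import (
	"context"
	"log"
	"os"

	"github.com/syncthing/syncthing/internal/blob/azureblob"
)

func main() {
	// Placeholder credentials; NewBlobStore also creates the container,
	// ignoring the error when it already exists.
	store, err := azureblob.NewBlobStore("myaccount", "bXktYWNjb3VudC1rZXk=", "syncthing-backup")
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("backup.db") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := store.Upload(context.Background(), "backup-latest.db", f); err != nil {
		log.Fatal(err)
	}
}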
23
internal/blob/interface.go
Normal file
@@ -0,0 +1,23 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package blob

import (
	"context"
	"io"
)

type Store interface {
	Upload(ctx context.Context, key string, r io.Reader) error
	Download(ctx context.Context, key string, w Writer) error
	LatestKey(ctx context.Context) (string, error)
}

type Writer interface {
	io.Writer
	io.WriterAt
}
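The Store interface is what lets the Azure and S3 backends be swapped for one another. Here is a sketch of a helper written purely against it (restoreLatest is a hypothetical name, and in-module use is again assumed); *os.File works as the destination because it satisfies blob.Writer, i.e. both io.Writer and io.WriterAt.

package example

import (
	"context"
	"errors"
	"os"

	"github.com/syncthing/syncthing/internal/blob"
)

// restoreLatest downloads the most recently modified blob into a local file,
// regardless of which Store implementation is behind the interface.
func restoreLatest(ctx context.Context, store blob.Store, path string) error {
	key, err := store.LatestKey(ctx)
	if err != nil {
		return err
	}
	if key == "" {
		return errors.New("store is empty")
	}
	out, err := os.Create(path)
	if err != nil {
		return err
	}
	defer out.Close()
	return store.Download(ctx, key, out)
}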
@@ -7,6 +7,7 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
@@ -15,8 +16,11 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/syncthing/syncthing/internal/blob"
|
||||
)
|
||||
|
||||
var _ blob.Store = (*Session)(nil)
|
||||
|
||||
type Session struct {
|
||||
bucket string
|
||||
s3sess *session.Session
|
||||
@@ -26,9 +30,10 @@ type Object = s3.Object
|
||||
|
||||
func NewSession(endpoint, region, bucket, accessKeyID, secretKey string) (*Session, error) {
|
||||
sess, err := session.NewSession(&aws.Config{
|
||||
Region: aws.String(region),
|
||||
Endpoint: aws.String(endpoint),
|
||||
Credentials: credentials.NewStaticCredentials(accessKeyID, secretKey, ""),
|
||||
Region: aws.String(region),
|
||||
Endpoint: aws.String(endpoint),
|
||||
Credentials: credentials.NewStaticCredentials(accessKeyID, secretKey, ""),
|
||||
S3ForcePathStyle: aws.Bool(true),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -39,7 +44,7 @@ func NewSession(endpoint, region, bucket, accessKeyID, secretKey string) (*Sessi
|
||||
}, nil
|
||||
}
|
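The added S3ForcePathStyle flag is what makes the session usable against self-hosted S3-compatible services, which typically expect path-style URLs. A hedged sketch of constructing such a session follows; the import path internal/blob/s3 is assumed from context, and the endpoint, bucket and credentials are placeholders.

package example

import (
	"log"

	"github.com/syncthing/syncthing/internal/blob"
	"github.com/syncthing/syncthing/internal/blob/s3"
)

func newSelfHostedStore() blob.Store {
	// Placeholder endpoint and credentials, e.g. a local MinIO instance.
	sess, err := s3.NewSession("http://localhost:9000", "us-east-1",
		"syncthing-backup", "ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
	if err != nil {
		log.Fatal(err)
	}
	// After this change *s3.Session implements blob.Store, so it can be used
	// wherever the Azure store can.
	return sess
}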
||||
|
||||
func (s *Session) Upload(r io.Reader, key string) error {
|
||||
func (s *Session) Upload(_ context.Context, key string, r io.Reader) error {
|
||||
uploader := s3manager.NewUploader(s.s3sess)
|
||||
_, err := uploader.Upload(&s3manager.UploadInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
@@ -49,7 +54,31 @@ func (s *Session) Upload(r io.Reader, key string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Session) List(fn func(*Object) bool) error {
|
||||
func (s *Session) Download(_ context.Context, key string, w blob.Writer) error {
|
||||
downloader := s3manager.NewDownloader(s.s3sess)
|
||||
_, err := downloader.Download(w, &s3.GetObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Session) LatestKey(_ context.Context) (string, error) {
|
||||
var latestKey string
|
||||
var lastModified time.Time
|
||||
if err := s.list(func(obj *Object) bool {
|
||||
if latestKey == "" || obj.LastModified.After(lastModified) {
|
||||
latestKey = *obj.Key
|
||||
lastModified = *obj.LastModified
|
||||
}
|
||||
return true
|
||||
}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return latestKey, nil
|
||||
}
|
||||
|
||||
func (s *Session) list(fn func(*Object) bool) error {
|
||||
svc := s3.New(s.s3sess)
|
||||
|
||||
opts := &s3.ListObjectsV2Input{
|
||||
@@ -75,27 +104,3 @@ func (s *Session) List(fn func(*Object) bool) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Session) LatestKey() (string, error) {
|
||||
var latestKey string
|
||||
var lastModified time.Time
|
||||
if err := s.List(func(obj *Object) bool {
|
||||
if latestKey == "" || obj.LastModified.After(lastModified) {
|
||||
latestKey = *obj.Key
|
||||
lastModified = *obj.LastModified
|
||||
}
|
||||
return true
|
||||
}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return latestKey, nil
|
||||
}
|
||||
|
||||
func (s *Session) Download(w io.WriterAt, key string) error {
|
||||
downloader := s3manager.NewDownloader(s.s3sess)
|
||||
_, err := downloader.Download(w, &s3.GetObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
return err
|
||||
}
|
||||
@@ -8,6 +8,7 @@ package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
@@ -24,7 +25,7 @@ import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -750,7 +751,7 @@ func (*service) getSystemVersion(w http.ResponseWriter, _ *http.Request) {
|
||||
func (*service) getSystemDebug(w http.ResponseWriter, _ *http.Request) {
|
||||
names := l.Facilities()
|
||||
enabled := l.FacilityDebugging()
|
||||
sort.Strings(enabled)
|
||||
slices.Sort(enabled)
|
||||
sendJSON(w, map[string]interface{}{
|
||||
"facilities": names,
|
||||
"enabled": enabled,
|
||||
@@ -1535,8 +1536,8 @@ func (*service) getLang(w http.ResponseWriter, r *http.Request) {
|
||||
langs = append(langs, code)
|
||||
}
|
||||
// Reorder by descending q value
|
||||
sort.SliceStable(langs, func(i, j int) bool {
|
||||
return weights[langs[i]] > weights[langs[j]]
|
||||
slices.SortStableFunc(langs, func(i, j string) int {
|
||||
return cmp.Compare(weights[j], weights[i])
|
||||
})
|
||||
sendJSON(w, langs)
|
||||
}
|
||||
@@ -1822,8 +1823,8 @@ func browseFiles(ffs fs.Filesystem, search string) []string {
|
||||
}
|
||||
|
||||
// sort to return matches in deterministic order (don't depend on file system order)
|
||||
sort.Strings(exactMatches)
|
||||
sort.Strings(caseInsMatches)
|
||||
slices.Sort(exactMatches)
|
||||
slices.Sort(caseInsMatches)
|
||||
return append(exactMatches, caseInsMatches...)
|
||||
}
|
||||
|
||||
@@ -1920,7 +1921,7 @@ func dirNames(dir string) []string {
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(dirs)
|
||||
slices.Sort(dirs)
|
||||
return dirs
|
||||
}
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
ldap "github.com/go-ldap/ldap/v3"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
)
|
||||
|
||||
@@ -27,15 +28,54 @@ const (
|
||||
randomTokenLength = 64
|
||||
)
|
||||
|
||||
func emitLoginAttempt(success bool, username, address string, evLogger events.Logger) {
|
||||
evLogger.Log(events.LoginAttempt, map[string]interface{}{
|
||||
func emitLoginAttempt(success bool, username string, r *http.Request, evLogger events.Logger) {
|
||||
remoteAddress, proxy := remoteAddress(r)
|
||||
evData := map[string]any{
|
||||
"success": success,
|
||||
"username": username,
|
||||
"remoteAddress": address,
|
||||
})
|
||||
if !success {
|
||||
l.Infof("Wrong credentials supplied during API authorization from %s", address)
|
||||
"remoteAddress": remoteAddress,
|
||||
}
|
||||
if proxy != "" {
|
||||
evData["proxy"] = proxy
|
||||
}
|
||||
evLogger.Log(events.LoginAttempt, evData)
|
||||
|
||||
if success {
|
||||
return
|
||||
}
|
||||
if proxy != "" {
|
||||
l.Infof("Wrong credentials supplied during API authorization from %s proxied by %s", remoteAddress, proxy)
|
||||
} else {
|
||||
l.Infof("Wrong credentials supplied during API authorization from %s", remoteAddress)
|
||||
}
|
||||
}
|
||||
|
||||
func remoteAddress(r *http.Request) (remoteAddr, proxy string) {
|
||||
remoteAddr = r.RemoteAddr
|
||||
remoteIP := osutil.IPFromString(r.RemoteAddr)
|
||||
|
||||
// parse X-Forwarded-For only if the proxy connects via unix socket, localhost or a LAN IP
|
||||
var localProxy bool
|
||||
if remoteIP != nil {
|
||||
remoteAddr = remoteIP.String()
|
||||
localProxy = remoteIP.IsLoopback() || remoteIP.IsPrivate() || remoteIP.IsLinkLocalUnicast()
|
||||
} else if remoteAddr == "@" {
|
||||
localProxy = true
|
||||
}
|
||||
|
||||
if !localProxy {
|
||||
return
|
||||
}
|
||||
|
||||
forwardedAddr, _, _ := strings.Cut(r.Header.Get("X-Forwarded-For"), ",")
|
||||
forwardedAddr = strings.TrimSpace(forwardedAddr)
|
||||
forwardedIP := osutil.IPFromString(forwardedAddr)
|
||||
|
||||
if forwardedIP != nil {
|
||||
proxy = remoteAddr
|
||||
remoteAddr = forwardedIP.String()
|
||||
}
|
||||
return
|
||||
}
|
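A sketch of how the new proxy-aware address extraction could be exercised from a test in the same package. It assumes, as the middleware itself does, that osutil.IPFromString copes with host:port input; the addresses are documentation examples.

package api

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestRemoteAddressSketch(t *testing.T) {
	// Request arriving via a loopback proxy: X-Forwarded-For is trusted and
	// its first hop becomes the reported remote address.
	r := httptest.NewRequest(http.MethodGet, "/", nil)
	r.RemoteAddr = "127.0.0.1:54321"
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	if addr, proxy := remoteAddress(r); addr != "203.0.113.7" || proxy != "127.0.0.1" {
		t.Errorf("got addr=%q proxy=%q", addr, proxy)
	}

	// Request arriving directly from a public address: the header is ignored.
	r.RemoteAddr = "198.51.100.9:54321"
	if addr, proxy := remoteAddress(r); addr != "198.51.100.9" || proxy != "" {
		t.Errorf("got addr=%q proxy=%q", addr, proxy)
	}
}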
||||
|
||||
func antiBruteForceSleep() {
|
||||
@@ -51,7 +91,7 @@ func forbidden(w http.ResponseWriter) {
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
}
|
||||
|
||||
func isNoAuthPath(path string) bool {
|
||||
func isNoAuthPath(path string, metricsWithoutAuth bool) bool {
|
||||
// Local variable instead of module var to prevent accidental mutation
|
||||
noAuthPaths := []string{
|
||||
"/",
|
||||
@@ -60,6 +100,10 @@ func isNoAuthPath(path string) bool {
|
||||
"/rest/svc/lang", // Required to load language settings on login page
|
||||
}
|
||||
|
||||
if metricsWithoutAuth {
|
||||
noAuthPaths = append(noAuthPaths, "/metrics")
|
||||
}
|
||||
|
||||
// Local variable instead of module var to prevent accidental mutation
|
||||
noAuthPrefixes := []string{
|
||||
// Static assets
|
||||
@@ -115,7 +159,7 @@ func (m *basicAuthAndSessionMiddleware) ServeHTTP(w http.ResponseWriter, r *http
|
||||
}
|
||||
|
||||
// Exception for static assets and REST calls that don't require authentication.
|
||||
if isNoAuthPath(r.URL.Path) {
|
||||
if isNoAuthPath(r.URL.Path, m.guiCfg.MetricsWithoutAuth) {
|
||||
m.next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
@@ -148,7 +192,7 @@ func (m *basicAuthAndSessionMiddleware) passwordAuthHandler(w http.ResponseWrite
|
||||
return
|
||||
}
|
||||
|
||||
emitLoginAttempt(false, req.Username, r.RemoteAddr, m.evLogger)
|
||||
emitLoginAttempt(false, req.Username, r, m.evLogger)
|
||||
antiBruteForceSleep()
|
||||
forbidden(w)
|
||||
}
|
||||
@@ -171,7 +215,7 @@ func attemptBasicAuth(r *http.Request, guiCfg config.GUIConfiguration, ldapCfg c
|
||||
return usernameFromIso, true
|
||||
}
|
||||
|
||||
emitLoginAttempt(false, username, r.RemoteAddr, evLogger)
|
||||
emitLoginAttempt(false, username, r, evLogger)
|
||||
antiBruteForceSleep()
|
||||
return "", false
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func (m *csrfManager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if isNoAuthPath(r.URL.Path) {
|
||||
if isNoAuthPath(r.URL.Path, false) {
|
||||
// REST calls that don't require authentication also do not
|
||||
// need a CSRF token.
|
||||
m.next.ServeHTTP(w, r)
|
||||
|
||||
@@ -117,8 +117,8 @@ func (m *tokenManager) saveLocked() {
|
||||
for token, expiry := range m.tokens.Tokens {
|
||||
tokens = append(tokens, tokenExpiry{token, expiry})
|
||||
}
|
||||
slices.SortFunc(tokens, func(i, j tokenExpiry) int {
|
||||
return int(i.expiry - j.expiry)
|
||||
slices.SortFunc(tokens, func(a, b tokenExpiry) int {
|
||||
return int(a.expiry - b.expiry)
|
||||
})
|
||||
// Remove the oldest tokens.
|
||||
for _, token := range tokens[:len(tokens)-m.maxItems] {
|
||||
@@ -189,7 +189,7 @@ func (m *tokenCookieManager) createSession(username string, persistent bool, w h
|
||||
Path: "/",
|
||||
})
|
||||
|
||||
emitLoginAttempt(true, username, r.RemoteAddr, m.evLogger)
|
||||
emitLoginAttempt(true, username, r, m.evLogger)
|
||||
}
|
||||
|
||||
func (m *tokenCookieManager) hasValidSession(r *http.Request) bool {
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -109,7 +109,7 @@ func TagsList() []string {
|
||||
tags = append(tags, Extra)
|
||||
}
|
||||
|
||||
sort.Strings(tags)
|
||||
slices.Sort(tags)
|
||||
return tags
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -235,6 +235,10 @@ func ReadJSON(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
|
||||
func (cfg Configuration) Copy() Configuration {
|
||||
newCfg := cfg
|
||||
|
||||
// Deep copy Defaults
|
||||
newCfg.Defaults.Folder = cfg.Defaults.Folder.Copy()
|
||||
newCfg.Defaults.Device = cfg.Defaults.Device.Copy()
|
||||
|
||||
// Deep copy FolderConfigurations
|
||||
newCfg.Folders = make([]FolderConfiguration, len(cfg.Folders))
|
||||
for i := range newCfg.Folders {
|
||||
@@ -333,8 +337,8 @@ func (cfg *Configuration) prepareDeviceList() map[protocol.DeviceID]*DeviceConfi
|
||||
// - sorted by ID
|
||||
// Happens before preparing folders as that needs a correct device list.
|
||||
cfg.Devices = ensureNoDuplicateOrEmptyIDDevices(cfg.Devices)
|
||||
sort.Slice(cfg.Devices, func(a, b int) bool {
|
||||
return cfg.Devices[a].DeviceID.Compare(cfg.Devices[b].DeviceID) == -1
|
||||
slices.SortFunc(cfg.Devices, func(a, b DeviceConfiguration) int {
|
||||
return a.DeviceID.Compare(b.DeviceID)
|
||||
})
|
||||
|
||||
// Build a list of available devices
|
||||
@@ -376,8 +380,8 @@ func (cfg *Configuration) prepareFolders(myID protocol.DeviceID, existingDevices
|
||||
}
|
||||
}
|
||||
// Ensure that the folder list is sorted by ID
|
||||
sort.Slice(cfg.Folders, func(a, b int) bool {
|
||||
return cfg.Folders[a].ID < cfg.Folders[b].ID
|
||||
slices.SortFunc(cfg.Folders, func(a, b FolderConfiguration) int {
|
||||
return strings.Compare(a.ID, b.ID)
|
||||
})
|
||||
return sharedFolders, nil
|
||||
}
|
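The same mechanical translation recurs throughout this change set: a sort.Slice less-function becomes a slices.SortFunc (or SortStableFunc) three-way comparison, with cmp.Compare or strings.Compare doing the work, and descending order obtained by swapping the operands. A standalone illustration of the pattern, not project code:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type folderCfg struct{ ID string }

func main() {
	folders := []folderCfg{{"music"}, {"default"}, {"photos"}}

	// Before: sort.Slice(folders, func(i, j int) bool { return folders[i].ID < folders[j].ID })
	slices.SortFunc(folders, func(a, b folderCfg) int {
		return cmp.Compare(a.ID, b.ID)
	})
	fmt.Println(folders) // [{default} {music} {photos}]

	// Descending order, as in the language weighting above: swap the operands.
	weights := map[string]float64{"en": 0.8, "de": 1.0, "sv": 0.5}
	langs := []string{"en", "de", "sv"}
	slices.SortStableFunc(langs, func(i, j string) int {
		return cmp.Compare(weights[j], weights[i])
	})
	fmt.Println(langs) // [de en sv]
}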
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -92,6 +92,8 @@ func TestDefaultValues(t *testing.T) {
|
||||
RawStunServers: []string{"default"},
|
||||
AnnounceLANAddresses: true,
|
||||
FeatureFlags: []string{},
|
||||
AuditEnabled: false,
|
||||
AuditFile: "",
|
||||
ConnectionPriorityTCPLAN: 10,
|
||||
ConnectionPriorityQUICLAN: 20,
|
||||
ConnectionPriorityTCPWAN: 30,
|
||||
@@ -295,6 +297,8 @@ func TestOverriddenValues(t *testing.T) {
|
||||
StunKeepaliveMinS: 900,
|
||||
RawStunServers: []string{"foo"},
|
||||
FeatureFlags: []string{"feature"},
|
||||
AuditEnabled: true,
|
||||
AuditFile: "nggyu",
|
||||
ConnectionPriorityTCPLAN: 40,
|
||||
ConnectionPriorityQUICLAN: 45,
|
||||
ConnectionPriorityTCPWAN: 50,
|
||||
@@ -907,7 +911,7 @@ func TestV14ListenAddressesMigration(t *testing.T) {
|
||||
t.Error("Configuration was not converted")
|
||||
}
|
||||
|
||||
sort.Strings(tc[2])
|
||||
slices.Sort(tc[2])
|
||||
if !reflect.DeepEqual(cfg.Options.RawListenAddresses, tc[2]) {
|
||||
t.Errorf("Migration error; actual %#v != expected %#v", cfg.Options.RawListenAddresses, tc[2])
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
@@ -100,8 +100,8 @@ func sortedObservedFolderSlice(input map[string]ObservedFolder) []ObservedFolder
|
||||
for _, folder := range input {
|
||||
output = append(output, folder)
|
||||
}
|
||||
sort.Slice(output, func(i, j int) bool {
|
||||
return output[i].Time.Before(output[j].Time)
|
||||
slices.SortFunc(output, func(a, b ObservedFolder) int {
|
||||
return a.Time.Compare(b.Time)
|
||||
})
|
||||
return output
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -291,8 +291,8 @@ func (f *FolderConfiguration) prepare(myID protocol.DeviceID, existingDevices ma
|
||||
f.Devices = ensureDevicePresent(f.Devices, myID)
|
||||
f.Devices = ensureNoUntrustedTrustingSharing(f, f.Devices, existingDevices)
|
||||
|
||||
sort.Slice(f.Devices, func(a, b int) bool {
|
||||
return f.Devices[a].DeviceID.Compare(f.Devices[b].DeviceID) == -1
|
||||
slices.SortFunc(f.Devices, func(a, b FolderDeviceConfiguration) int {
|
||||
return a.DeviceID.Compare(b.DeviceID)
|
||||
})
|
||||
|
||||
if f.RescanIntervalS > MaxRescanIntervalS {
|
||||
|
||||
@@ -25,6 +25,7 @@ type GUIConfiguration struct {
|
||||
User string `json:"user" xml:"user,omitempty"`
|
||||
Password string `json:"password" xml:"password,omitempty"`
|
||||
AuthMode AuthMode `json:"authMode" xml:"authMode,omitempty"`
|
||||
MetricsWithoutAuth bool `json:"metricsWithoutAuth" xml:"metricsWithoutAuth" default:"false"`
|
||||
RawUseTLS bool `json:"useTLS" xml:"tls,attr"`
|
||||
APIKey string `json:"apiKey" xml:"apikey,omitempty"`
|
||||
InsecureAdminAccess bool `json:"insecureAdminAccess" xml:"insecureAdminAccess,omitempty"`
|
||||
|
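With the new metricsWithoutAuth GUI option enabled, the /metrics endpoint can be scraped without credentials while the rest of the API stays behind authentication. A hedged sketch of such a scrape; the default local GUI address is an assumption, not part of this change.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// No API key or session cookie is sent; this only works when
	// gui.metricsWithoutAuth is enabled on the target instance.
	resp, err := http.Get("http://127.0.0.1:8384/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", body)
}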
||||
62
lib/config/metrics.go
Normal file
@@ -0,0 +1,62 @@
|
||||
// Copyright (C) 2025 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// RegisterInfoMetrics registers Prometheus metrics for the given config
|
||||
// wrapper.
|
||||
func RegisterInfoMetrics(cfg Wrapper) {
|
||||
prometheus.DefaultRegisterer.MustRegister(prometheus.CollectorFunc((&folderInfoMetric{cfg}).Collect))
|
||||
prometheus.DefaultRegisterer.MustRegister(prometheus.CollectorFunc((&folderDeviceMetric{cfg}).Collect))
|
||||
}
|
||||
|
||||
type folderInfoMetric struct {
|
||||
cfg Wrapper
|
||||
}
|
||||
|
||||
var folderInfoMetricDesc = prometheus.NewDesc(
|
||||
"syncthing_config_folder_info",
|
||||
"Provides additional information labels on folders",
|
||||
[]string{"folder", "label", "type", "path", "paused"},
|
||||
nil,
|
||||
)
|
||||
|
||||
func (m *folderInfoMetric) Collect(ch chan<- prometheus.Metric) {
|
||||
for _, folder := range m.cfg.FolderList() {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
folderInfoMetricDesc,
|
||||
prometheus.GaugeValue, 1,
|
||||
folder.ID, folder.Label, folder.Type.String(), folder.Path, strconv.FormatBool(folder.Paused),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
type folderDeviceMetric struct {
|
||||
cfg Wrapper
|
||||
}
|
||||
|
||||
var folderDeviceMetricDesc = prometheus.NewDesc(
|
||||
"syncthing_config_device_info",
|
||||
"Provides additional information labels on devices",
|
||||
[]string{"device", "name", "introducer", "paused", "untrusted"},
|
||||
nil,
|
||||
)
|
||||
|
||||
func (m *folderDeviceMetric) Collect(ch chan<- prometheus.Metric) {
|
||||
for _, device := range m.cfg.DeviceList() {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
folderDeviceMetricDesc,
|
||||
prometheus.GaugeValue, 1,
|
||||
device.DeviceID.String(), device.Name, strconv.FormatBool(device.Introducer), strconv.FormatBool(device.Paused), strconv.FormatBool(device.Untrusted),
|
||||
)
|
||||
}
|
||||
}
|
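A sketch of wiring the new collectors into a standard client_golang handler, assuming a config.Wrapper is already available; the function name, variable names and listen address are placeholders.

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/syncthing/syncthing/lib/config"
)

func serveConfigMetrics(cfgWrapper config.Wrapper) error {
	// Registers the syncthing_config_folder_info and
	// syncthing_config_device_info collectors on the default registry.
	config.RegisterInfoMetrics(cfgWrapper)
	return http.ListenAndServe("127.0.0.1:9090", promhttp.Handler())
}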
||||
@@ -7,11 +7,12 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
@@ -65,8 +66,8 @@ type migrationSet []migration
|
||||
func (ms migrationSet) apply(cfg *Configuration) {
|
||||
// Make sure we apply the migrations in target version order regardless
|
||||
// of how it was defined.
|
||||
sort.Slice(ms, func(a, b int) bool {
|
||||
return ms[a].targetVersion < ms[b].targetVersion
|
||||
slices.SortFunc(ms, func(a, b migration) int {
|
||||
return cmp.Compare(a.targetVersion, b.targetVersion)
|
||||
})
|
||||
|
||||
// Apply all migrations.
|
||||
@@ -349,7 +350,7 @@ func migrateToConfigV14(cfg *Configuration) {
|
||||
cfg.Options.DeprecatedRelayServers = nil
|
||||
|
||||
// For consistency
|
||||
sort.Strings(cfg.Options.RawListenAddresses)
|
||||
slices.Sort(cfg.Options.RawListenAddresses)
|
||||
|
||||
var newAddrs []string
|
||||
for _, addr := range cfg.Options.RawGlobalAnnServers {
|
||||
|
||||
@@ -68,22 +68,21 @@ type OptionsConfiguration struct {
|
||||
AnnounceLANAddresses bool `json:"announceLANAddresses" xml:"announceLANAddresses" default:"true"`
|
||||
SendFullIndexOnUpgrade bool `json:"sendFullIndexOnUpgrade" xml:"sendFullIndexOnUpgrade"`
|
||||
FeatureFlags []string `json:"featureFlags" xml:"featureFlag"`
|
||||
AuditEnabled bool `json:"auditEnabled" xml:"auditEnabled" default:"false" restart:"true"`
|
||||
AuditFile string `json:"auditFile" xml:"auditFile" restart:"true"`
|
||||
// The number of connections at which we stop trying to connect to more
|
||||
// devices, zero meaning no limit. Does not affect incoming connections.
|
||||
ConnectionLimitEnough int `json:"connectionLimitEnough" xml:"connectionLimitEnough"`
|
||||
// The maximum number of connections which we will allow in total, zero
|
||||
// meaning no limit. Affects incoming connections and prevents
|
||||
// attempting outgoing connections.
|
||||
ConnectionLimitMax int `json:"connectionLimitMax" xml:"connectionLimitMax"`
|
||||
// When set, this allows TLS 1.2 on sync connections, where we otherwise
|
||||
// default to TLS 1.3+ only.
|
||||
InsecureAllowOldTLSVersions bool `json:"insecureAllowOldTLSVersions" xml:"insecureAllowOldTLSVersions"`
|
||||
ConnectionPriorityTCPLAN int `json:"connectionPriorityTcpLan" xml:"connectionPriorityTcpLan" default:"10"`
|
||||
ConnectionPriorityQUICLAN int `json:"connectionPriorityQuicLan" xml:"connectionPriorityQuicLan" default:"20"`
|
||||
ConnectionPriorityTCPWAN int `json:"connectionPriorityTcpWan" xml:"connectionPriorityTcpWan" default:"30"`
|
||||
ConnectionPriorityQUICWAN int `json:"connectionPriorityQuicWan" xml:"connectionPriorityQuicWan" default:"40"`
|
||||
ConnectionPriorityRelay int `json:"connectionPriorityRelay" xml:"connectionPriorityRelay" default:"50"`
|
||||
ConnectionPriorityUpgradeThreshold int `json:"connectionPriorityUpgradeThreshold" xml:"connectionPriorityUpgradeThreshold" default:"0"`
|
||||
ConnectionLimitMax int `json:"connectionLimitMax" xml:"connectionLimitMax"`
|
||||
ConnectionPriorityTCPLAN int `json:"connectionPriorityTcpLan" xml:"connectionPriorityTcpLan" default:"10"`
|
||||
ConnectionPriorityQUICLAN int `json:"connectionPriorityQuicLan" xml:"connectionPriorityQuicLan" default:"20"`
|
||||
ConnectionPriorityTCPWAN int `json:"connectionPriorityTcpWan" xml:"connectionPriorityTcpWan" default:"30"`
|
||||
ConnectionPriorityQUICWAN int `json:"connectionPriorityQuicWan" xml:"connectionPriorityQuicWan" default:"40"`
|
||||
ConnectionPriorityRelay int `json:"connectionPriorityRelay" xml:"connectionPriorityRelay" default:"50"`
|
||||
ConnectionPriorityUpgradeThreshold int `json:"connectionPriorityUpgradeThreshold" xml:"connectionPriorityUpgradeThreshold" default:"0"`
|
||||
// Legacy deprecated
|
||||
DeprecatedUPnPEnabled bool `json:"-" xml:"upnpEnabled,omitempty"` // Deprecated: Do not use.
|
||||
DeprecatedUPnPLeaseM int `json:"-" xml:"upnpLeaseMinutes,omitempty"` // Deprecated: Do not use.
|
||||
@@ -188,7 +187,7 @@ func (opts OptionsConfiguration) StunServers() []string {
|
||||
case "default":
|
||||
_, records, err := net.LookupSRV("stun", "udp", "syncthing.net")
|
||||
if err != nil {
|
||||
l.Warnln("Unable to resolve primary STUN servers via DNS:", err)
|
||||
l.Debugf("Unable to resolve primary STUN servers via DNS:", err)
|
||||
}
|
||||
|
||||
for _, record := range records {
|
||||
|
||||
2
lib/config/testdata/overridenvalues.xml
vendored
@@ -45,6 +45,8 @@
|
||||
<unackedNotificationID>asdfasdf</unackedNotificationID>
|
||||
<announceLANAddresses>false</announceLANAddresses>
|
||||
<featureFlag>feature</featureFlag>
|
||||
<auditEnabled>true</auditEnabled>
|
||||
<auditFile>nggyu</auditFile>
|
||||
<connectionPriorityTcpLan>40</connectionPriorityTcpLan>
|
||||
<connectionPriorityQuicLan>45</connectionPriorityQuicLan>
|
||||
<connectionPriorityTcpWan>50</connectionPriorityTcpWan>
|
||||
|
||||
@@ -9,7 +9,8 @@ package config
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/structutil"
|
||||
)
|
||||
@@ -84,8 +85,8 @@ func (c *VersioningConfiguration) toInternal() internalVersioningConfiguration {
|
||||
for k, v := range c.Params {
|
||||
tmp.Params = append(tmp.Params, internalParam{k, v})
|
||||
}
|
||||
sort.Slice(tmp.Params, func(a, b int) bool {
|
||||
return tmp.Params[a].Key < tmp.Params[b].Key
|
||||
slices.SortFunc(tmp.Params, func(a, b internalParam) int {
|
||||
return strings.Compare(a.Key, b.Key)
|
||||
})
|
||||
return tmp
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"net"
|
||||
"net/url"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
stdsync "sync"
|
||||
"time"
|
||||
@@ -445,7 +444,7 @@ func (s *service) handleHellos(ctx context.Context) error {
|
||||
// connections are limited.
|
||||
rd, wr := s.limiter.getLimiters(remoteID, c, c.IsLocal())
|
||||
|
||||
protoConn := protocol.NewConnection(remoteID, rd, wr, c, s.model, c, deviceCfg.Compression.ToProtocol(), s.cfg.FolderPasswords(remoteID), s.keyGen)
|
||||
protoConn := protocol.NewConnection(remoteID, rd, wr, c, s.model, c, deviceCfg.Compression.ToProtocol(), s.keyGen)
|
||||
s.accountAddedConnection(protoConn, hello, s.cfg.Options().ConnectionPriorityUpgradeThreshold)
|
||||
go func() {
|
||||
<-protoConn.Closed()
|
||||
@@ -1151,7 +1150,7 @@ func (s *service) dialParallel(ctx context.Context, deviceID protocol.DeviceID,
|
||||
}
|
||||
|
||||
// Sort the priorities so that we dial lowest first (which means highest...)
|
||||
sort.Ints(priorities)
|
||||
slices.Sort(priorities)
|
||||
|
||||
sema := semaphore.MultiSemaphore{semaphore.New(dialMaxParallelPerDevice), parentSema}
|
||||
for _, prio := range priorities {
|
||||
|
||||
@@ -8,7 +8,7 @@ package db
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
"sort"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
@@ -71,8 +71,8 @@ func TestMetaDevices(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check that we got the two devices we expect
|
||||
sort.Slice(devs, func(a, b int) bool {
|
||||
return devs[a].Compare(devs[b]) == -1
|
||||
slices.SortFunc(devs, func(a, b protocol.DeviceID) int {
|
||||
return a.Compare(b)
|
||||
})
|
||||
if devs[0] != d1 {
|
||||
t.Error("first device should be d1")
|
||||
|
||||
@@ -11,7 +11,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -102,16 +103,8 @@ func needList(t testing.TB, s *db.FileSet, n protocol.DeviceID) []protocol.FileI
|
||||
|
||||
type fileList []protocol.FileInfo
|
||||
|
||||
func (l fileList) Len() int {
|
||||
return len(l)
|
||||
}
|
||||
|
||||
func (l fileList) Less(a, b int) bool {
|
||||
return l[a].Name < l[b].Name
|
||||
}
|
||||
|
||||
func (l fileList) Swap(a, b int) {
|
||||
l[a], l[b] = l[b], l[a]
|
||||
func compareByName(a, b protocol.FileInfo) int {
|
||||
return strings.Compare(a.Name, b.Name)
|
||||
}
|
||||
|
||||
func (l fileList) String() string {
|
||||
@@ -218,7 +211,7 @@ func TestGlobalSet(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
g := fileList(globalList(t, m))
|
||||
sort.Sort(g)
|
||||
slices.SortFunc(g, compareByName)
|
||||
|
||||
if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
|
||||
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
|
||||
@@ -255,7 +248,7 @@ func TestGlobalSet(t *testing.T) {
|
||||
}
|
||||
|
||||
h := fileList(haveList(t, m, protocol.LocalDeviceID))
|
||||
sort.Sort(h)
|
||||
slices.SortFunc(h, compareByName)
|
||||
|
||||
if fmt.Sprint(h) != fmt.Sprint(localTot) {
|
||||
t.Errorf("Have incorrect (local);\n A: %v !=\n E: %v", h, localTot)
|
||||
@@ -292,14 +285,14 @@ func TestGlobalSet(t *testing.T) {
|
||||
}
|
||||
|
||||
h = fileList(haveList(t, m, remoteDevice0))
|
||||
sort.Sort(h)
|
||||
slices.SortFunc(h, compareByName)
|
||||
|
||||
if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
|
||||
t.Errorf("Have incorrect (remote);\n A: %v !=\n E: %v", h, remoteTot)
|
||||
}
|
||||
|
||||
n := fileList(needList(t, m, protocol.LocalDeviceID))
|
||||
sort.Sort(n)
|
||||
slices.SortFunc(n, compareByName)
|
||||
|
||||
if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
|
||||
t.Errorf("Need incorrect (local);\n A: %v !=\n E: %v", n, expectedLocalNeed)
|
||||
@@ -308,7 +301,7 @@ func TestGlobalSet(t *testing.T) {
|
||||
checkNeed(t, m, protocol.LocalDeviceID, expectedLocalNeed)
|
||||
|
||||
n = fileList(needList(t, m, remoteDevice0))
|
||||
sort.Sort(n)
|
||||
slices.SortFunc(n, compareByName)
|
||||
|
||||
if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
|
||||
t.Errorf("Need incorrect (remote);\n A: %v !=\n E: %v", n, expectedRemoteNeed)
|
||||
@@ -428,14 +421,14 @@ func TestGlobalSet(t *testing.T) {
|
||||
check()
|
||||
|
||||
h := fileList(haveList(t, m, remoteDevice1))
|
||||
sort.Sort(h)
|
||||
slices.SortFunc(h, compareByName)
|
||||
|
||||
if fmt.Sprint(h) != fmt.Sprint(secRemote) {
|
||||
t.Errorf("Have incorrect (secRemote);\n A: %v !=\n E: %v", h, secRemote)
|
||||
}
|
||||
|
||||
n := fileList(needList(t, m, remoteDevice1))
|
||||
sort.Sort(n)
|
||||
slices.SortFunc(n, compareByName)
|
||||
|
||||
if fmt.Sprint(n) != fmt.Sprint(expectedSecRemoteNeed) {
|
||||
t.Errorf("Need incorrect (secRemote);\n A: %v !=\n E: %v", n, expectedSecRemoteNeed)
|
||||
@@ -475,7 +468,7 @@ func TestNeedWithInvalid(t *testing.T) {
|
||||
replace(s, remoteDevice1, remote1Have)
|
||||
|
||||
need := fileList(needList(t, s, protocol.LocalDeviceID))
|
||||
sort.Sort(need)
|
||||
slices.SortFunc(need, compareByName)
|
||||
|
||||
if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
|
||||
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, expectedNeed)
|
||||
@@ -503,7 +496,7 @@ func TestUpdateToInvalid(t *testing.T) {
|
||||
replace(s, protocol.LocalDeviceID, localHave)
|
||||
|
||||
have := fileList(haveList(t, s, protocol.LocalDeviceID))
|
||||
sort.Sort(have)
|
||||
slices.SortFunc(have, compareByName)
|
||||
|
||||
if fmt.Sprint(have) != fmt.Sprint(localHave) {
|
||||
t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
|
||||
@@ -519,8 +512,8 @@ func TestUpdateToInvalid(t *testing.T) {
|
||||
|
||||
s.Update(protocol.LocalDeviceID, append(fileList{}, localHave[1], localHave[4]))
|
||||
|
||||
have = fileList(haveList(t, s, protocol.LocalDeviceID))
|
||||
sort.Sort(have)
|
||||
have = haveList(t, s, protocol.LocalDeviceID)
|
||||
slices.SortFunc(have, compareByName)
|
||||
|
||||
if fmt.Sprint(have) != fmt.Sprint(localHave) {
|
||||
t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
|
||||
@@ -605,7 +598,7 @@ func TestGlobalReset(t *testing.T) {
|
||||
|
||||
replace(m, protocol.LocalDeviceID, local)
|
||||
g := globalList(t, m)
|
||||
sort.Sort(fileList(g))
|
||||
slices.SortFunc(g, compareByName)
|
||||
|
||||
if diff, equal := messagediff.PrettyDiff(local, g); !equal {
|
||||
t.Errorf("Global incorrect;\nglobal: %v\n!=\nlocal: %v\ndiff:\n%s", g, local, diff)
|
||||
@@ -615,7 +608,7 @@ func TestGlobalReset(t *testing.T) {
|
||||
replace(m, remoteDevice0, nil)
|
||||
|
||||
g = globalList(t, m)
|
||||
sort.Sort(fileList(g))
|
||||
slices.SortFunc(g, compareByName)
|
||||
|
||||
if diff, equal := messagediff.PrettyDiff(local, g); !equal {
|
||||
t.Errorf("Global incorrect;\nglobal: %v\n!=\nlocal: %v\ndiff:\n%s", g, local, diff)
|
||||
@@ -653,8 +646,8 @@ func TestNeed(t *testing.T) {
|
||||
|
||||
need := needList(t, m, protocol.LocalDeviceID)
|
||||
|
||||
sort.Sort(fileList(need))
|
||||
sort.Sort(fileList(shouldNeed))
|
||||
slices.SortFunc(need, compareByName)
|
||||
slices.SortFunc(shouldNeed, compareByName)
|
||||
|
||||
if fmt.Sprint(need) != fmt.Sprint(shouldNeed) {
|
||||
t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed)
|
||||
|
||||
@@ -8,7 +8,7 @@ package db
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
@@ -147,6 +147,6 @@ func (i *smallIndex) Values() []string {
|
||||
}
|
||||
i.mut.Unlock()
|
||||
|
||||
sort.Strings(vals)
|
||||
slices.Sort(vals)
|
||||
return vals
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/thejerf/suture/v4"
|
||||
@@ -159,7 +159,7 @@ func (m *manager) Lookup(ctx context.Context, deviceID protocol.DeviceID) (addre
|
||||
m.mut.RUnlock()
|
||||
|
||||
addresses = stringutil.UniqueTrimmedStrings(addresses)
|
||||
sort.Strings(addresses)
|
||||
slices.Sort(addresses)
|
||||
|
||||
l.Debugln("lookup results for", deviceID)
|
||||
l.Debugln(" addresses: ", addresses)
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
@@ -218,7 +218,7 @@ func TestDirNames(t *testing.T) {
|
||||
"a",
|
||||
"bC",
|
||||
}
|
||||
sort.Strings(testCases)
|
||||
slices.Sort(testCases)
|
||||
|
||||
for _, sub := range testCases {
|
||||
if err := os.Mkdir(filepath.Join(dir, sub), 0o777); err != nil {
|
||||
@@ -229,7 +229,7 @@ func TestDirNames(t *testing.T) {
|
||||
if dirs, err := fs.DirNames("."); err != nil || len(dirs) != len(testCases) {
|
||||
t.Errorf("%s %s %s", err, dirs, testCases)
|
||||
} else {
|
||||
sort.Strings(dirs)
|
||||
slices.Sort(dirs)
|
||||
for i := range dirs {
|
||||
if dirs[i] != testCases[i] {
|
||||
t.Errorf("%s != %s", dirs[i], testCases[i])
|
||||
@@ -321,8 +321,8 @@ func TestGlob(t *testing.T) {
|
||||
|
||||
for _, testCase := range testCases {
|
||||
results, err := fs.Glob(testCase.pattern)
|
||||
sort.Strings(results)
|
||||
sort.Strings(testCase.matches)
|
||||
slices.Sort(results)
|
||||
slices.Sort(testCase.matches)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -628,8 +628,7 @@ func TestXattr(t *testing.T) {
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
sort.Slice(attrs, func(i, j int) bool { return attrs[i].Name < attrs[j].Name })
|
||||
|
||||
slices.SortFunc(attrs, func(a, b protocol.Xattr) int { return strings.Compare(a.Name, b.Name) })
|
||||
// Set the xattrs, read them back and compare
|
||||
if err := tfs.SetXattr("/test", attrs, testXattrFilter{}); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
@@ -12,7 +12,7 @@ package fs
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
@@ -69,7 +69,7 @@ func listXattr(path string) ([]string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(attrs)
|
||||
slices.Sort(attrs)
|
||||
return attrs, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ package fs
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
@@ -38,6 +38,6 @@ func listXattr(path string) ([]string, error) {
|
||||
buf = buf[:size]
|
||||
attrs := compact(strings.Split(string(buf), "\x00"))
|
||||
|
||||
sort.Strings(attrs)
|
||||
slices.Sort(attrs)
|
||||
return attrs, nil
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -344,7 +344,7 @@ func fakefsForBenchmark(nfiles int, latency time.Duration) (Filesystem, []string
|
||||
return nil, nil, errors.New("didn't find enough stuff")
|
||||
}
|
||||
|
||||
sort.Strings(paths)
|
||||
slices.Sort(paths)
|
||||
|
||||
return fsys, paths, nil
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -369,8 +369,8 @@ func assertDir(t *testing.T, fs Filesystem, directory string, filenames []string
|
||||
if path.Clean(directory) == "/" {
|
||||
filenames = append(filenames, ".stfolder")
|
||||
}
|
||||
sort.Strings(filenames)
|
||||
sort.Strings(got)
|
||||
slices.Sort(filenames)
|
||||
slices.Sort(got)
|
||||
|
||||
if len(filenames) != len(got) {
|
||||
t.Errorf("want %s, got %s", filenames, got)
|
||||
|
||||
@@ -33,6 +33,7 @@ const (
|
||||
AuditLog LocationEnum = "auditLog"
|
||||
GUIAssets LocationEnum = "guiAssets"
|
||||
DefFolder LocationEnum = "defFolder"
|
||||
LockFile LocationEnum = "lockFile"
|
||||
)
|
||||
|
||||
type BaseDirEnum string
|
||||
@@ -124,6 +125,7 @@ var locationTemplates = map[LocationEnum]string{
|
||||
AuditLog: "${data}/audit-%{timestamp}.log",
|
||||
GUIAssets: "${config}/gui",
|
||||
DefFolder: "${userHome}/Sync",
|
||||
LockFile: "${data}/syncthing.lock",
|
||||
}
|
||||
|
||||
var locations = make(map[LocationEnum]string)
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"slices"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
@@ -52,8 +52,8 @@ type standardBlockPullReorderer struct {
|
||||
|
||||
func newStandardBlockPullReorderer(id protocol.DeviceID, otherDevices []protocol.DeviceID) *standardBlockPullReorderer {
|
||||
allDevices := append(otherDevices, id)
|
||||
sort.Slice(allDevices, func(i, j int) bool {
|
||||
return allDevices[i].Compare(allDevices[j]) == -1
|
||||
slices.SortFunc(allDevices, func(a, b protocol.DeviceID) int {
|
||||
return a.Compare(b)
|
||||
})
|
||||
// Find our index
|
||||
myIndex := -1
|
||||
|
||||
@@ -8,7 +8,7 @@ package model
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
@@ -65,8 +65,8 @@ func Test_inOrderBlockPullReorderer_Reorder(t *testing.T) {
|
||||
func Test_standardBlockPullReorderer_Reorder(t *testing.T) {
|
||||
// Order the devices, so we know their ordering ahead of time.
|
||||
devices := []protocol.DeviceID{myID, device1, device2}
|
||||
sort.Slice(devices, func(i, j int) bool {
|
||||
return devices[i].Compare(devices[j]) == -1
|
||||
slices.SortFunc(devices, func(a, b protocol.DeviceID) int {
|
||||
return a.Compare(b)
|
||||
})
|
||||
|
||||
blocks := func(i ...int) []protocol.BlockInfo {
@@ -12,7 +12,8 @@ import (
"fmt"
"math/rand"
"path/filepath"
"sort"
"slices"
"strings"
"time"

"github.com/syncthing/syncthing/lib/config"

@@ -1200,7 +1201,9 @@ func (f *folder) Errors() []FileError {
errors := make([]FileError, scanLen+len(f.pullErrors))
copy(errors[:scanLen], f.scanErrors)
copy(errors[scanLen:], f.pullErrors)
sort.Sort(fileErrorList(errors))
slices.SortFunc(errors, func(a, b FileError) int {
return strings.Compare(a.Path, b.Path)
})
return errors
}

@@ -1341,7 +1344,7 @@ func unifySubs(dirs []string, exists func(dir string) bool) []string {
if len(dirs) == 0 {
return nil
}
sort.Strings(dirs)
slices.Sort(dirs)
if dirs[0] == "" || dirs[0] == "." || dirs[0] == string(fs.PathSeparator) {
return nil
}

@@ -8,7 +8,8 @@ package model

import (
"fmt"
"sort"
"slices"
"strings"

"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"

@@ -112,7 +113,9 @@ func (f *receiveEncryptedFolder) revertHandleDirs(dirs []string, snap *db.Snapsh
go f.pullScannerRoutine(scanChan)
defer close(scanChan)

sort.Sort(sort.Reverse(sort.StringSlice(dirs)))
slices.SortFunc(dirs, func(a, b string) int {
return strings.Compare(b, a)
})
for _, dir := range dirs {
if err := f.deleteDirOnDisk(dir, snap, scanChan); err != nil {
f.newScanError(dir, fmt.Errorf("deleting unexpected dir: %w", err))

@@ -7,7 +7,8 @@
package model

import (
"sort"
"slices"
"strings"
"time"

"github.com/syncthing/syncthing/lib/config"

@@ -207,7 +208,9 @@ func (q *deleteQueue) handle(fi protocol.FileInfo, snap *db.Snapshot) (bool, err

func (q *deleteQueue) flush(snap *db.Snapshot) ([]string, error) {
// Process directories from the leaves inward.
sort.Sort(sort.Reverse(sort.StringSlice(q.dirs)))
slices.SortFunc(q.dirs, func(a, b string) int {
return strings.Compare(b, a)
})

var firstError error
var deleted []string
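Both reverse sorts above keep the same contract: paths are ordered descending so children are handled before their parents (leaves inward). A small standalone sketch of the idiom, with placeholder paths:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	dirs := []string{"a", "a/b", "a/b/c", "z"}

	// Swapping the arguments to strings.Compare yields a descending sort,
	// the slices equivalent of sort.Sort(sort.Reverse(sort.StringSlice(dirs))).
	slices.SortFunc(dirs, func(a, b string) int {
		return strings.Compare(b, a)
	})

	fmt.Println(dirs) // [z a/b/c a/b a] (children before parents)
}
```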
@@ -14,7 +14,7 @@ import (
"fmt"
"io"
"path/filepath"
"sort"
"slices"
"strconv"
"strings"
"time"

@@ -1867,7 +1867,9 @@ func (f *sendReceiveFolder) moveForConflict(name, lastModBy string, scanChan cha
if f.MaxConflicts > -1 {
matches := existingConflicts(name, f.mtimefs)
if len(matches) > f.MaxConflicts {
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
slices.SortFunc(matches, func(a, b string) int {
return strings.Compare(b, a)
})
for _, match := range matches[f.MaxConflicts:] {
if gerr := f.mtimefs.Remove(match); gerr != nil {
l.Debugln(f, "removing extra conflict", gerr)

@@ -2206,20 +2208,6 @@ type FileError struct {
Err string `json:"error"`
}

type fileErrorList []FileError

func (l fileErrorList) Len() int {
return len(l)
}

func (l fileErrorList) Less(a, b int) bool {
return l[a].Path < l[b].Path
}

func (l fileErrorList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}

func conflictName(name, lastModBy string) string {
ext := filepath.Ext(name)
return name[:len(name)-len(ext)] + time.Now().Format(".sync-conflict-20060102-150405-") + lastModBy + ext

@@ -8,6 +8,7 @@ package model

import (
"path/filepath"
"slices"
"testing"

"github.com/d4l3k/messagediff"

@@ -117,20 +118,11 @@ func unifySubsCases() []unifySubsCase {
return cases
}

func unifyExists(f string, tc unifySubsCase) bool {
for _, e := range tc.exists {
if f == e {
return true
}
}
return false
}

func TestUnifySubs(t *testing.T) {
cases := unifySubsCases()
for i, tc := range cases {
exists := func(f string) bool {
return unifyExists(f, tc)
return slices.Contains(tc.exists, f)
}
out := unifySubs(tc.in, exists)
if diff, equal := messagediff.PrettyDiff(tc.out, out); !equal {

@@ -146,7 +138,7 @@ func BenchmarkUnifySubs(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, tc := range cases {
exists := func(f string) bool {
return unifyExists(f, tc)
return slices.Contains(tc.exists, f)
}
unifySubs(tc.in, exists)
}
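This hunk and several later ones swap a hand-rolled membership loop (such as the removed `unifyExists` helper) for `slices.Contains`. A brief sketch of the equivalence, using a throwaway slice:

```go
package main

import (
	"fmt"
	"slices"
)

// containsLoop is the old, hand-rolled pattern.
func containsLoop(haystack []string, needle string) bool {
	for _, e := range haystack {
		if e == needle {
			return true
		}
	}
	return false
}

func main() {
	exists := []string{"foo", "bar", "baz"}

	// The generic helper expresses the same check in one call.
	fmt.Println(containsLoop(exists, "bar"), slices.Contains(exists, "bar")) // true true
	fmt.Println(containsLoop(exists, "qux"), slices.Contains(exists, "qux")) // false false
}
```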
@@ -31,17 +31,17 @@ func TestIndexhandlerConcurrency(t *testing.T) {
ci := &protomock.ConnectionInfo{}

m1 := &mocks.Model{}
c1 := protocol.NewConnection(protocol.EmptyDeviceID, ar, bw, testutil.NoopCloser{}, m1, ci, protocol.CompressionNever, nil, nil)
c1 := protocol.NewConnection(protocol.EmptyDeviceID, ar, bw, testutil.NoopCloser{}, m1, ci, protocol.CompressionNever, nil)
c1.Start()
defer c1.Close(io.EOF)

m2 := &mocks.Model{}
c2 := protocol.NewConnection(protocol.EmptyDeviceID, br, aw, testutil.NoopCloser{}, m2, ci, protocol.CompressionNever, nil, nil)
c2 := protocol.NewConnection(protocol.EmptyDeviceID, br, aw, testutil.NoopCloser{}, m2, ci, protocol.CompressionNever, nil)
c2.Start()
defer c2.Close(io.EOF)

c1.ClusterConfig(&protocol.ClusterConfig{})
c2.ClusterConfig(&protocol.ClusterConfig{})
c1.ClusterConfig(&protocol.ClusterConfig{}, nil)
c2.ClusterConfig(&protocol.ClusterConfig{}, nil)
c1.Index(ctx, &protocol.Index{Folder: "foo"})
c2.Index(ctx, &protocol.Index{Folder: "foo"})

@@ -21,6 +21,7 @@ import (
"path/filepath"
"reflect"
"runtime"
"slices"
"strings"
stdsync "sync"
"sync/atomic"

@@ -1628,8 +1629,7 @@ func (m *model) sendClusterConfig(ids []protocol.DeviceID) {
// Generating cluster-configs acquires the mutex.
for _, conn := range ccConns {
cm, passwords := m.generateClusterConfig(conn.DeviceID())
conn.SetFolderPasswords(passwords)
go conn.ClusterConfig(cm)
go conn.ClusterConfig(cm, passwords)
}
}

@@ -1804,11 +1804,9 @@ func (m *model) handleAutoAccepts(deviceID protocol.DeviceID, folder protocol.Fo
l.Infof("Failed to auto-accept folder %s from %s due to path conflict", folder.Description(), deviceID)
return config.FolderConfiguration{}, false
} else {
for _, device := range cfg.DeviceIDs() {
if device == deviceID {
// Already shared nothing todo.
return config.FolderConfiguration{}, false
}
if slices.Contains(cfg.DeviceIDs(), deviceID) {
// Already shared nothing todo.
return config.FolderConfiguration{}, false
}
if cfg.Type == config.FolderTypeReceiveEncrypted {
if len(ccDeviceInfos.remote.EncryptionPasswordToken) == 0 && len(ccDeviceInfos.local.EncryptionPasswordToken) == 0 {

@@ -2385,8 +2383,14 @@ func (m *model) scheduleConnectionPromotion() {
// be called after adding new connections, and after closing a primary
// device connection.
func (m *model) promoteConnections() {
// Slice of actions to take on connections after releasing the main
// mutex. We do this so that we do not perform blocking network actions
// inside the loop, and also to avoid a possible deadlock with calling
// Start() on connections that are already executing a Close() with a
// callback into the model...
var postLockActions []func()

m.mut.Lock()
defer m.mut.Unlock()

for deviceID, connIDs := range m.deviceConnIDs {
cm, passwords := m.generateClusterConfigRLocked(deviceID)

@@ -2399,11 +2403,12 @@ func (m *model) promoteConnections() {
// on where we get ClusterConfigs from the peer.)
conn := m.connections[connIDs[0]]
l.Debugf("Promoting connection to %s at %s", deviceID.Short(), conn)
if conn.Statistics().StartedAt.IsZero() {
conn.SetFolderPasswords(passwords)
conn.Start()
}
conn.ClusterConfig(cm)
postLockActions = append(postLockActions, func() {
if conn.Statistics().StartedAt.IsZero() {
conn.Start()
}
conn.ClusterConfig(cm, passwords)
})
m.promotedConnID[deviceID] = connIDs[0]
}

@@ -2412,12 +2417,19 @@ func (m *model) promoteConnections() {
for _, connID := range connIDs[1:] {
conn := m.connections[connID]
if conn.Statistics().StartedAt.IsZero() {
conn.SetFolderPasswords(passwords)
conn.Start()
conn.ClusterConfig(&protocol.ClusterConfig{Secondary: true})
postLockActions = append(postLockActions, func() {
conn.Start()
conn.ClusterConfig(&protocol.ClusterConfig{Secondary: true}, passwords)
})
}
}
}

m.mut.Unlock()

for _, action := range postLockActions {
action()
}
}

func (m *model) DownloadProgress(conn protocol.Connection, p *protocol.DownloadProgress) error {
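The `promoteConnections` change collects the network calls as closures while the mutex is held and only runs them after the unlock, which avoids blocking inside the lock and the Start/Close deadlock described in the comment. A generic, hedged sketch of that collect-under-lock, run-after-unlock pattern (the `registry` type and its fields are illustrative, not the model's real structure):

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mut   sync.Mutex
	conns []string // stand-in for connection handles
}

// promote gathers the potentially blocking work while holding the lock,
// then runs it only after the lock has been released.
func (r *registry) promote() {
	var postLockActions []func()

	r.mut.Lock()
	for _, c := range r.conns {
		c := c // capture the loop variable for the closure
		postLockActions = append(postLockActions, func() {
			// Blocking or re-entrant work goes here, outside the lock.
			fmt.Println("promoting", c)
		})
	}
	r.mut.Unlock()

	for _, action := range postLockActions {
		action()
	}
}

func main() {
	r := &registry{conns: []string{"conn-1", "conn-2"}}
	r.promote()
}
```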
@@ -17,7 +17,7 @@ import (
"os"
"path/filepath"
"runtime/pprof"
"sort"
"slices"
"strconv"
"strings"
"sync"

@@ -1253,10 +1253,8 @@ func TestAutoAcceptPausedWhenFolderConfigChanged(t *testing.T) {
} else if fcfg.Path != idOther {
t.Error("folder path changed")
} else {
for _, dev := range fcfg.DeviceIDs() {
if dev == device1 {
return
}
if slices.Contains(fcfg.DeviceIDs(), device1) {
return
}
t.Error("device missing")
}

@@ -1302,10 +1300,8 @@ func TestAutoAcceptPausedWhenFolderConfigNotChanged(t *testing.T) {
} else if fcfg.Path != idOther {
t.Error("folder path changed")
} else {
for _, dev := range fcfg.DeviceIDs() {
if dev == device1 {
return
}
if slices.Contains(fcfg.DeviceIDs(), device1) {
return
}
t.Error("device missing")
}

@@ -2973,7 +2969,7 @@ func TestConnCloseOnRestart(t *testing.T) {
nw := &testutil.NoopRW{}
ci := &protocolmocks.ConnectionInfo{}
ci.ConnectionIDReturns(srand.String(16))
m.AddConnection(protocol.NewConnection(device1, br, nw, testutil.NoopCloser{}, m, ci, protocol.CompressionNever, nil, m.keyGen), protocol.Hello{})
m.AddConnection(protocol.NewConnection(device1, br, nw, testutil.NoopCloser{}, m, ci, protocol.CompressionNever, m.keyGen), protocol.Hello{})
m.mut.RLock()
if len(m.closed) != 1 {
t.Fatalf("Expected just one conn (len(m.closed) == %v)", len(m.closed))

@@ -3632,11 +3628,11 @@ func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSec
cc1 := make(chan struct{}, 1)
cc2 := make(chan struct{}, 1)
fc1 := newFakeConnection(device1, m)
fc1.ClusterConfigCalls(func(_ *protocol.ClusterConfig) {
fc1.ClusterConfigCalls(func(_ *protocol.ClusterConfig, _ map[string]string) {
cc1 <- struct{}{}
})
fc2 := newFakeConnection(device2, m)
fc2.ClusterConfigCalls(func(_ *protocol.ClusterConfig) {
fc2.ClusterConfigCalls(func(_ *protocol.ClusterConfig, _ map[string]string) {
cc2 <- struct{}{}
})
m.AddConnection(fc1, protocol.Hello{})

@@ -4095,8 +4091,8 @@ func equalStringsInAnyOrder(a, b []string) bool {
if len(a) != len(b) {
return false
}
sort.Strings(a)
sort.Strings(b)
slices.Sort(a)
slices.Sort(b)
for i := range a {
if a[i] != b[i] {
return false

@@ -7,7 +7,8 @@
package model

import (
"sort"
"cmp"
"slices"
"time"

"github.com/syncthing/syncthing/lib/rand"

@@ -157,40 +158,34 @@ func (q *jobQueue) SortSmallestFirst() {
q.mut.Lock()
defer q.mut.Unlock()

sort.Sort(smallestFirst(q.queued))
slices.SortFunc(q.queued, func(a, b jobQueueEntry) int {
return cmp.Compare(a.size, b.size)
})
}

func (q *jobQueue) SortLargestFirst() {
q.mut.Lock()
defer q.mut.Unlock()

sort.Sort(sort.Reverse(smallestFirst(q.queued)))
slices.SortFunc(q.queued, func(a, b jobQueueEntry) int {
return cmp.Compare(b.size, a.size)
})
}

func (q *jobQueue) SortOldestFirst() {
q.mut.Lock()
defer q.mut.Unlock()

sort.Sort(oldestFirst(q.queued))
slices.SortFunc(q.queued, func(a, b jobQueueEntry) int {
return cmp.Compare(a.modified, b.modified)
})
}

func (q *jobQueue) SortNewestFirst() {
q.mut.Lock()
defer q.mut.Unlock()

sort.Sort(sort.Reverse(oldestFirst(q.queued)))
slices.SortFunc(q.queued, func(a, b jobQueueEntry) int {
return cmp.Compare(b.modified, a.modified)
})
}

// The usual sort.Interface boilerplate

type smallestFirst []jobQueueEntry

func (q smallestFirst) Len() int { return len(q) }
func (q smallestFirst) Less(a, b int) bool { return q[a].size < q[b].size }
func (q smallestFirst) Swap(a, b int) { q[a], q[b] = q[b], q[a] }

type oldestFirst []jobQueueEntry

func (q oldestFirst) Len() int { return len(q) }
func (q oldestFirst) Less(a, b int) bool { return q[a].modified < q[b].modified }
func (q oldestFirst) Swap(a, b int) { q[a], q[b] = q[b], q[a] }
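The `jobQueue` hunk drops two `sort.Interface` implementations in favour of inline comparators built on `cmp.Compare`; swapping the comparator's arguments flips the direction. A short sketch with a placeholder entry type:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type entry struct {
	name string
	size int64
}

func main() {
	queued := []entry{{"b", 20}, {"a", 5}, {"c", 10}}

	// Smallest first: ascending by size.
	slices.SortFunc(queued, func(a, b entry) int { return cmp.Compare(a.size, b.size) })
	fmt.Println(queued) // [{a 5} {c 10} {b 20}]

	// Largest first: the same comparator with the arguments swapped.
	slices.SortFunc(queued, func(a, b entry) int { return cmp.Compare(b.size, a.size) })
	fmt.Println(queued) // [{b 20} {c 10} {a 5}]
}
```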
@@ -1240,7 +1240,7 @@ func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
}
return nil
})
fc.ClusterConfigCalls(func(cc *protocol.ClusterConfig) {
fc.ClusterConfigCalls(func(cc *protocol.ClusterConfig, _ map[string]string) {
select {
case ccChan <- cc:
case <-done:

@@ -15,32 +15,6 @@ import (
"syscall"
)

const ioprioClassShift = 13

type ioprioClass int

const (
ioprioClassRT ioprioClass = iota + 1
ioprioClassBE
ioprioClassIdle
)

const (
ioprioWhoProcess = iota + 1
ioprioWhoPGRP
ioprioWhoUser
)

func ioprioSet(class ioprioClass, value int) error {
res, _, err := syscall.Syscall(syscall.SYS_IOPRIO_SET,
uintptr(ioprioWhoProcess), 0,
uintptr(class)<<ioprioClassShift|uintptr(value))
if res == 0 {
return nil
}
return err
}

// SetLowPriority lowers the process CPU scheduling priority, and possibly
// I/O priority depending on the platform and OS.
func SetLowPriority() error {

@@ -89,14 +63,13 @@ func SetLowPriority() error {
}
}

// For any new process, the default is to be assigned the IOPRIO_CLASS_BE
// scheduling class. This class directly maps the BE prio level to the
// niceness of a process, determined as: io_nice = (cpu_nice + 20) / 5.
// For example, a niceness of 11 results in an I/O priority of B6.
// https://www.kernel.org/doc/Documentation/block/ioprio.txt
if err := syscall.Setpriority(syscall.PRIO_PGRP, pidSelf, wantNiceLevel); err != nil {
return fmt.Errorf("set niceness: %w", err)
}

// Best effort, somewhere to the end of the scale (0 through 7 being the
// range).
if err := ioprioSet(ioprioClassBE, 5); err != nil {
return fmt.Errorf("set I/O priority: %w", err)
}
return nil
}
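The comment retained above cites the kernel mapping io_nice = (cpu_nice + 20) / 5, which is why the explicit ioprio syscall removed in the earlier hunk is no longer needed once the niceness is set. A tiny sketch of the arithmetic, with arbitrary sample niceness values:

```go
package main

import "fmt"

// ioNiceFromCPUNice mirrors the mapping cited from the kernel's ioprio
// documentation: the best-effort I/O level derived from CPU niceness.
func ioNiceFromCPUNice(cpuNice int) int {
	return (cpuNice + 20) / 5 // integer division; 0..7 for nice -20..19
}

func main() {
	for _, nice := range []int{0, 9, 11, 19} {
		fmt.Printf("cpu nice %2d -> best-effort I/O level %d\n", nice, ioNiceFromCPUNice(nice))
	}
	// nice 11 -> level 6, matching the example in the comment above.
}
```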
@@ -8,6 +8,7 @@ package osutil

import (
"net"
"strings"
)

// GetInterfaceAddrs returns the IP networks of all interfaces that are up.

@@ -46,6 +47,17 @@ func GetInterfaceAddrs(includePtP bool) ([]*net.IPNet, error) {
return nets, nil
}

func IPFromString(addr string) net.IP {
// strip the port
host, _, err := net.SplitHostPort(addr)
if err != nil {
host = addr
}
// strip IPv6 zone identifier
host, _, _ = strings.Cut(host, "%")
return net.ParseIP(host)
}

func IPFromAddr(addr net.Addr) (net.IP, error) {
switch a := addr.(type) {
case *net.TCPAddr:

@@ -135,3 +135,35 @@ func TestRenameOrCopy(t *testing.T) {
}
}
}

func TestIPFromString(t *testing.T) {
t.Parallel()

cases := []struct {
in string
out string
}{
{"192.168.178.1", "192.168.178.1"},
{"192.168.178.1:8384", "192.168.178.1"},
{"fe80::20c:29ff:fe9a:46d2", "fe80::20c:29ff:fe9a:46d2"},
{"[fe80::20c:29ff:fe9a:46d2]:8384", "fe80::20c:29ff:fe9a:46d2"},
{"[fe80::20c:29ff:fe9a:46d2%eno1]:8384", "fe80::20c:29ff:fe9a:46d2"},
{"google.com", ""},
{"1.1.1.1.1", ""},
{"", ""},
}

for _, c := range cases {
ip := osutil.IPFromString(c.in)
var address string
if ip != nil {
address = ip.String()
} else {
address = ""
}

if c.out != address {
t.Fatalf("result should be %s != %s", c.out, address)
}
}
}

@@ -64,14 +64,14 @@ func benchmarkRequestsTLS(b *testing.B, conn0, conn1 net.Conn) {

func benchmarkRequestsConnPair(b *testing.B, conn0, conn1 net.Conn) {
// Start up Connections on them
c0 := NewConnection(LocalDeviceID, conn0, conn0, testutil.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, nil, testKeyGen)
c0 := NewConnection(LocalDeviceID, conn0, conn0, testutil.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, testKeyGen)
c0.Start()
c1 := NewConnection(LocalDeviceID, conn1, conn1, testutil.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, nil, testKeyGen)
c1 := NewConnection(LocalDeviceID, conn1, conn1, testutil.NoopCloser{}, new(fakeModel), new(mockedConnectionInfo), CompressionMetadata, testKeyGen)
c1.Start()

// Satisfy the assertions in the protocol by sending an initial cluster config
c0.ClusterConfig(&ClusterConfig{})
c1.ClusterConfig(&ClusterConfig{})
c0.ClusterConfig(&ClusterConfig{}, nil)
c1.ClusterConfig(&ClusterConfig{}, nil)

// Report some useful stats and reset the timer for the actual test
b.ReportAllocs()

@@ -849,9 +849,13 @@ func unixOwnershipEqual(a, b *UnixData) bool {
if a == nil || b == nil {
return false
}
ownerEqual := a.OwnerName == "" || b.OwnerName == "" || a.OwnerName == b.OwnerName
groupEqual := a.GroupName == "" || b.GroupName == "" || a.GroupName == b.GroupName
return a.UID == b.UID && a.GID == b.GID && ownerEqual && groupEqual
if a.UID == b.UID && a.GID == b.GID {
return true
}
if a.OwnerName == b.OwnerName && a.GroupName == b.GroupName {
return true
}
return false
}

func windowsOwnershipEqual(a, b *WindowsData) bool {

@@ -196,6 +196,42 @@ func TestIsEquivalent(t *testing.T) {
b: FileInfo{Type: FileInfoTypeFile, SymlinkTarget: []byte("b")},
eq: true,
},
// Unix Ownership should be the same
{
a: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1000, GID: 1000}}},
b: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1000, GID: 1000}}},
eq: true,
},
// ... but matching ID is enough
{
a: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1000, GID: 1000}}},
b: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "B", GroupName: "B", UID: 1000, GID: 1000}}},
eq: true,
},
// ... or matching name
{
a: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1000, GID: 1000}}},
b: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1001, GID: 1001}}},
eq: true,
},
// ... or empty name
{
a: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1000, GID: 1000}}},
b: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "", GroupName: "", UID: 1000, GID: 1000}}},
eq: true,
},
// ... but not different ownership
{
a: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1000, GID: 1000}}},
b: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "B", GroupName: "B", UID: 1001, GID: 1001}}},
eq: false,
},
// or missing ownership
{
a: FileInfo{Platform: PlatformData{Unix: &UnixData{OwnerName: "A", GroupName: "A", UID: 1000, GID: 1000}}},
b: FileInfo{Platform: PlatformData{}},
eq: false,
},
}

if build.IsWindows {
@@ -50,10 +50,10 @@ type encryptedModel struct {
keyGen *KeyGenerator
}

func newEncryptedModel(model rawModel, folderKeys *folderKeyRegistry, keyGen *KeyGenerator) encryptedModel {
func newEncryptedModel(model rawModel, keyGen *KeyGenerator) encryptedModel {
return encryptedModel{
model: model,
folderKeys: folderKeys,
folderKeys: newFolderKeyRegistry(),
keyGen: keyGen,
}
}

@@ -187,10 +187,6 @@ func (e encryptedConnection) Start() {
e.conn.Start()
}

func (e encryptedConnection) SetFolderPasswords(passwords map[string]string) {
e.folderKeys.setPasswords(passwords)
}

func (e encryptedConnection) DeviceID() DeviceID {
return e.conn.DeviceID()
}

@@ -262,8 +258,9 @@ func (e encryptedConnection) DownloadProgress(ctx context.Context, dp *DownloadP
// No need to send these
}

func (e encryptedConnection) ClusterConfig(config *ClusterConfig) {
e.conn.ClusterConfig(config)
func (e encryptedConnection) ClusterConfig(config *ClusterConfig, passwords map[string]string) {
e.folderKeys.setPasswords(e.keyGen, passwords)
e.conn.ClusterConfig(config, passwords)
}

func (e encryptedConnection) Close(err error) {

@@ -680,15 +677,13 @@ func IsEncryptedParent(pathComponents []string) bool {
}

type folderKeyRegistry struct {
keyGen *KeyGenerator
keys map[string]*[keySize]byte // folder ID -> key
mut sync.RWMutex
keys map[string]*[keySize]byte // folder ID -> key
mut sync.RWMutex
}

func newFolderKeyRegistry(keyGen *KeyGenerator, passwords map[string]string) *folderKeyRegistry {
func newFolderKeyRegistry() *folderKeyRegistry {
return &folderKeyRegistry{
keyGen: keyGen,
keys: keysFromPasswords(keyGen, passwords),
keys: make(map[string]*[keySize]byte),
}
}

@@ -699,8 +694,8 @@ func (r *folderKeyRegistry) get(folder string) (*[keySize]byte, bool) {
return key, ok
}

func (r *folderKeyRegistry) setPasswords(passwords map[string]string) {
func (r *folderKeyRegistry) setPasswords(keyGen *KeyGenerator, passwords map[string]string) {
r.mut.Lock()
r.keys = keysFromPasswords(r.keyGen, passwords)
r.keys = keysFromPasswords(keyGen, passwords)
r.mut.Unlock()
}

@@ -26,10 +26,11 @@ type Connection struct {
closedReturnsOnCall map[int]struct {
result1 <-chan struct{}
}
ClusterConfigStub func(*protocol.ClusterConfig)
ClusterConfigStub func(*protocol.ClusterConfig, map[string]string)
clusterConfigMutex sync.RWMutex
clusterConfigArgsForCall []struct {
arg1 *protocol.ClusterConfig
arg2 map[string]string
}
ConnectionIDStub func() string
connectionIDMutex sync.RWMutex

@@ -145,11 +146,6 @@ type Connection struct {
result1 []byte
result2 error
}
SetFolderPasswordsStub func(map[string]string)
setFolderPasswordsMutex sync.RWMutex
setFolderPasswordsArgsForCall []struct {
arg1 map[string]string
}
StartStub func()
startMutex sync.RWMutex
startArgsForCall []struct {

@@ -283,16 +279,17 @@ func (fake *Connection) ClosedReturnsOnCall(i int, result1 <-chan struct{}) {
}{result1}
}

func (fake *Connection) ClusterConfig(arg1 *protocol.ClusterConfig) {
func (fake *Connection) ClusterConfig(arg1 *protocol.ClusterConfig, arg2 map[string]string) {
fake.clusterConfigMutex.Lock()
fake.clusterConfigArgsForCall = append(fake.clusterConfigArgsForCall, struct {
arg1 *protocol.ClusterConfig
}{arg1})
arg2 map[string]string
}{arg1, arg2})
stub := fake.ClusterConfigStub
fake.recordInvocation("ClusterConfig", []interface{}{arg1})
fake.recordInvocation("ClusterConfig", []interface{}{arg1, arg2})
fake.clusterConfigMutex.Unlock()
if stub != nil {
fake.ClusterConfigStub(arg1)
fake.ClusterConfigStub(arg1, arg2)
}
}

@@ -302,17 +299,17 @@ func (fake *Connection) ClusterConfigCallCount() int {
return len(fake.clusterConfigArgsForCall)
}

func (fake *Connection) ClusterConfigCalls(stub func(*protocol.ClusterConfig)) {
func (fake *Connection) ClusterConfigCalls(stub func(*protocol.ClusterConfig, map[string]string)) {
fake.clusterConfigMutex.Lock()
defer fake.clusterConfigMutex.Unlock()
fake.ClusterConfigStub = stub
}

func (fake *Connection) ClusterConfigArgsForCall(i int) *protocol.ClusterConfig {
func (fake *Connection) ClusterConfigArgsForCall(i int) (*protocol.ClusterConfig, map[string]string) {
fake.clusterConfigMutex.RLock()
defer fake.clusterConfigMutex.RUnlock()
argsForCall := fake.clusterConfigArgsForCall[i]
return argsForCall.arg1
return argsForCall.arg1, argsForCall.arg2
}

func (fake *Connection) ConnectionID() string {

@@ -908,38 +905,6 @@ func (fake *Connection) RequestReturnsOnCall(i int, result1 []byte, result2 erro
}{result1, result2}
}

func (fake *Connection) SetFolderPasswords(arg1 map[string]string) {
fake.setFolderPasswordsMutex.Lock()
fake.setFolderPasswordsArgsForCall = append(fake.setFolderPasswordsArgsForCall, struct {
arg1 map[string]string
}{arg1})
stub := fake.SetFolderPasswordsStub
fake.recordInvocation("SetFolderPasswords", []interface{}{arg1})
fake.setFolderPasswordsMutex.Unlock()
if stub != nil {
fake.SetFolderPasswordsStub(arg1)
}
}

func (fake *Connection) SetFolderPasswordsCallCount() int {
fake.setFolderPasswordsMutex.RLock()
defer fake.setFolderPasswordsMutex.RUnlock()
return len(fake.setFolderPasswordsArgsForCall)
}

func (fake *Connection) SetFolderPasswordsCalls(stub func(map[string]string)) {
fake.setFolderPasswordsMutex.Lock()
defer fake.setFolderPasswordsMutex.Unlock()
fake.SetFolderPasswordsStub = stub
}

func (fake *Connection) SetFolderPasswordsArgsForCall(i int) map[string]string {
fake.setFolderPasswordsMutex.RLock()
defer fake.setFolderPasswordsMutex.RUnlock()
argsForCall := fake.setFolderPasswordsArgsForCall[i]
return argsForCall.arg1
}

func (fake *Connection) Start() {
fake.startMutex.Lock()
fake.startArgsForCall = append(fake.startArgsForCall, struct {

@@ -1207,8 +1172,6 @@ func (fake *Connection) Invocations() map[string][][]interface{} {
defer fake.remoteAddrMutex.RUnlock()
fake.requestMutex.RLock()
defer fake.requestMutex.RUnlock()
fake.setFolderPasswordsMutex.RLock()
defer fake.setFolderPasswordsMutex.RUnlock()
fake.startMutex.RLock()
defer fake.startMutex.RUnlock()
fake.statisticsMutex.RLock()

@@ -129,7 +129,9 @@ type Connection interface {
// Send a Cluster Configuration message to the peer device. The message
// in the parameter may be altered by the connection and should not be
// used further by the caller.
ClusterConfig(config *ClusterConfig)
// For any folder that must be encrypted for the connected device, the
// password must be provided.
ClusterConfig(config *ClusterConfig, passwords map[string]string)

// Send a Download Progress message to the peer device. The message in
// the parameter may be altered by the connection and should not be used

@@ -137,7 +139,6 @@ type Connection interface {
DownloadProgress(ctx context.Context, dp *DownloadProgress)

Start()
SetFolderPasswords(passwords map[string]string)
Close(err error)
DeviceID() DeviceID
Statistics() Statistics

@@ -215,7 +216,7 @@ const (
// Should not be modified in production code, just for testing.
var CloseTimeout = 10 * time.Second

func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, closer io.Closer, model Model, connInfo ConnectionInfo, compress Compression, passwords map[string]string, keyGen *KeyGenerator) Connection {
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, closer io.Closer, model Model, connInfo ConnectionInfo, compress Compression, keyGen *KeyGenerator) Connection {
// We create the wrapper for the model first, as it needs to be passed
// in at the lowest level in the stack. At the end of construction,
// before returning, we add the connection to cwm so that it can be used

@@ -225,7 +226,7 @@ func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, closer
// Encryption / decryption is first (outermost) before conversion to
// native path formats.
nm := makeNative(cwm)
em := newEncryptedModel(nm, newFolderKeyRegistry(keyGen, passwords), keyGen)
em := newEncryptedModel(nm, keyGen)

// We do the wire format conversion first (outermost) so that the
// metadata is in wire format when it reaches the encryption step.
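Taken together, these hunks remove `SetFolderPasswords` from the `Connection` interface and make folder passwords travel with the cluster config call itself. A hedged sketch of the caller-facing shape of the new API, using stand-in types rather than the real `protocol` package:

```go
package main

import "fmt"

// clusterConfig and conn are placeholders for protocol.ClusterConfig and
// protocol.Connection, used only to show the shape of the new call.
type clusterConfig struct{ folders []string }

type conn interface {
	Start()
	// Passwords ride along with the cluster config; nil means there are
	// no encrypted folders for this peer.
	ClusterConfig(cc *clusterConfig, passwords map[string]string)
}

type loggingConn struct{}

func (loggingConn) Start() { fmt.Println("started") }
func (loggingConn) ClusterConfig(cc *clusterConfig, passwords map[string]string) {
	fmt.Printf("cluster config with %d folders, %d passwords\n", len(cc.folders), len(passwords))
}

func main() {
	var c conn = loggingConn{}
	c.Start()
	c.ClusterConfig(&clusterConfig{folders: []string{"default"}}, map[string]string{"default": "s3cret"})
}
```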
@@ -265,10 +266,22 @@ func newRawConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, clo
}

// Start creates the goroutines for sending and receiving of messages. It must
// be called exactly once after creating a connection.
// be called once after creating a connection. It should only be called once,
// subsequent calls will have no effect.
func (c *rawConnection) Start() {
c.startStopMut.Lock()
defer c.startStopMut.Unlock()

select {
case <-c.started:
return
case <-c.closed:
// we have already closed the connection before starting processing
// on it.
return
default:
}

c.loopWG.Add(5)
go func() {
c.readerLoop()

@@ -291,6 +304,7 @@ func (c *rawConnection) Start() {
c.pingReceiver()
c.loopWG.Done()
}()

c.startTime = time.Now().Truncate(time.Second)
close(c.started)
}
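`Start` is now guarded so that repeated calls, or a call after the connection was already closed, are no-ops: the mutex serialises Start and Close, and the `started`/`closed` channels record which state has been reached. A reduced sketch of that guard with invented field names:

```go
package main

import (
	"fmt"
	"sync"
)

type worker struct {
	mut     sync.Mutex
	started chan struct{}
	closed  chan struct{}
}

func newWorker() *worker {
	return &worker{started: make(chan struct{}), closed: make(chan struct{})}
}

func (w *worker) Start() {
	w.mut.Lock()
	defer w.mut.Unlock()

	select {
	case <-w.started:
		return // already started; nothing to do
	case <-w.closed:
		return // closed before ever starting; don't spin up goroutines
	default:
	}

	fmt.Println("starting loops")
	close(w.started)
}

func main() {
	w := newWorker()
	w.Start()
	w.Start() // the second call is a no-op
}
```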
@@ -369,7 +383,7 @@ func (c *rawConnection) Request(ctx context.Context, req *Request) ([]byte, erro
}

// ClusterConfig sends the cluster configuration message to the peer.
func (c *rawConnection) ClusterConfig(config *ClusterConfig) {
func (c *rawConnection) ClusterConfig(config *ClusterConfig, _ map[string]string) {
select {
case c.clusterConfigBox <- config:
case <-c.closed:

@@ -950,9 +964,9 @@ func (c *rawConnection) Close(err error) {

// internalClose is called if there is an unexpected error during normal operation.
func (c *rawConnection) internalClose(err error) {
c.startStopMut.Lock()
defer c.startStopMut.Unlock()
c.closeOnce.Do(func() {
c.startStopMut.Lock()

l.Debugf("close connection to %s at %s due to %v", c.deviceID.Short(), c.ConnectionInfo, err)
if cerr := c.closer.Close(); cerr != nil {
l.Debugf("failed to close underlying conn %s at %s %v:", c.deviceID.Short(), c.ConnectionInfo, cerr)

@@ -974,6 +988,10 @@ func (c *rawConnection) internalClose(err error) {
<-c.dispatcherLoopStopped
}

c.startStopMut.Unlock()

// We don't want to call into the model while holding the
// startStopMut.
c.model.Closed(err)
})
}

@@ -35,14 +35,14 @@ func TestPing(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()

c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutil.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutil.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, testKeyGen))
c0.Start()
defer closeAndWait(c0, ar, bw)
c1 := getRawConnection(NewConnection(c1ID, br, aw, testutil.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c1 := getRawConnection(NewConnection(c1ID, br, aw, testutil.NoopCloser{}, newTestModel(), new(mockedConnectionInfo), CompressionAlways, testKeyGen))
c1.Start()
defer closeAndWait(c1, ar, bw)
c0.ClusterConfig(&ClusterConfig{})
c1.ClusterConfig(&ClusterConfig{})
c0.ClusterConfig(&ClusterConfig{}, nil)
c1.ClusterConfig(&ClusterConfig{}, nil)

if ok := c0.ping(); !ok {
t.Error("c0 ping failed")

@@ -61,14 +61,14 @@ func TestClose(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()

c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutil.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutil.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionAlways, testKeyGen))
c0.Start()
defer closeAndWait(c0, ar, bw)
c1 := NewConnection(c1ID, br, aw, testutil.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen)
c1 := NewConnection(c1ID, br, aw, testutil.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionAlways, testKeyGen)
c1.Start()
defer closeAndWait(c1, ar, bw)
c0.ClusterConfig(&ClusterConfig{})
c1.ClusterConfig(&ClusterConfig{})
c0.ClusterConfig(&ClusterConfig{}, nil)
c1.ClusterConfig(&ClusterConfig{}, nil)

c0.internalClose(errManual)

@@ -106,7 +106,7 @@ func TestCloseOnBlockingSend(t *testing.T) {
m := newTestModel()

rw := testutil.NewBlockingRW()
c := getRawConnection(NewConnection(c0ID, rw, rw, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c := getRawConnection(NewConnection(c0ID, rw, rw, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, testKeyGen))
c.Start()
defer closeAndWait(c, rw)

@@ -114,7 +114,7 @@ func TestCloseOnBlockingSend(t *testing.T) {

wg.Add(1)
go func() {
c.ClusterConfig(&ClusterConfig{})
c.ClusterConfig(&ClusterConfig{}, nil)
wg.Done()
}()

@@ -157,14 +157,14 @@ func TestCloseRace(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()

c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutil.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionNever, nil, testKeyGen))
c0 := getRawConnection(NewConnection(c0ID, ar, bw, testutil.NoopCloser{}, m0, new(mockedConnectionInfo), CompressionNever, testKeyGen))
c0.Start()
defer closeAndWait(c0, ar, bw)
c1 := NewConnection(c1ID, br, aw, testutil.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionNever, nil, testKeyGen)
c1 := NewConnection(c1ID, br, aw, testutil.NoopCloser{}, m1, new(mockedConnectionInfo), CompressionNever, testKeyGen)
c1.Start()
defer closeAndWait(c1, ar, bw)
c0.ClusterConfig(&ClusterConfig{})
c1.ClusterConfig(&ClusterConfig{})
c0.ClusterConfig(&ClusterConfig{}, nil)
c1.ClusterConfig(&ClusterConfig{}, nil)

c1.Index(context.Background(), &Index{Folder: "default"})
select {

@@ -197,7 +197,7 @@ func TestClusterConfigFirst(t *testing.T) {
m := newTestModel()

rw := testutil.NewBlockingRW()
c := getRawConnection(NewConnection(c0ID, rw, &testutil.NoopRW{}, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c := getRawConnection(NewConnection(c0ID, rw, &testutil.NoopRW{}, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, testKeyGen))
c.Start()
defer closeAndWait(c, rw)

@@ -208,7 +208,7 @@ func TestClusterConfigFirst(t *testing.T) {
// Allow some time for c.writerLoop to set up after c.Start
}

c.ClusterConfig(&ClusterConfig{})
c.ClusterConfig(&ClusterConfig{}, nil)

done := make(chan struct{})
if ok := c.send(context.Background(), &bep.Ping{}, done); !ok {

@@ -249,7 +249,7 @@ func TestCloseTimeout(t *testing.T) {
m := newTestModel()

rw := testutil.NewBlockingRW()
c := getRawConnection(NewConnection(c0ID, rw, rw, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c := getRawConnection(NewConnection(c0ID, rw, rw, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, testKeyGen))
c.Start()
defer closeAndWait(c, rw)

@@ -531,7 +531,7 @@ func TestClusterConfigAfterClose(t *testing.T) {
m := newTestModel()

rw := testutil.NewBlockingRW()
c := getRawConnection(NewConnection(c0ID, rw, rw, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c := getRawConnection(NewConnection(c0ID, rw, rw, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, testKeyGen))
c.Start()
defer closeAndWait(c, rw)

@@ -539,7 +539,7 @@ func TestClusterConfigAfterClose(t *testing.T) {

done := make(chan struct{})
go func() {
c.ClusterConfig(&ClusterConfig{})
c.ClusterConfig(&ClusterConfig{}, nil)
close(done)
}()

@@ -555,7 +555,7 @@ func TestDispatcherToCloseDeadlock(t *testing.T) {
// the model callbacks (ClusterConfig).
m := newTestModel()
rw := testutil.NewBlockingRW()
c := getRawConnection(NewConnection(c0ID, rw, &testutil.NoopRW{}, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
c := getRawConnection(NewConnection(c0ID, rw, &testutil.NoopRW{}, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, testKeyGen))
m.ccFn = func(*ClusterConfig) {
c.Close(errManual)
}
@@ -10,7 +10,7 @@ import (
"fmt"
"net/http"
"net/url"
"sort"
"slices"
"sync"
"time"

@@ -166,7 +166,7 @@ func relayAddressesOrder(ctx context.Context, input []string) []string {
ids = append(ids, id)
}

sort.Ints(ids)
slices.Sort(ids)

addresses := make([]string, 0, len(input))
for _, id := range ids {

@@ -16,7 +16,8 @@ import (
"os"
"path/filepath"
rdebug "runtime/debug"
"sort"
"slices"
"strings"
"sync"
"testing"

@@ -145,7 +146,7 @@ func TestWalk(t *testing.T) {
}
tmp = append(tmp, f.File)
}
sort.Sort(fileList(tmp))
slices.SortFunc(fileList(tmp), compareByName)
files := fileList(tmp).testfiles()

if diff, equal := messagediff.PrettyDiff(testdata, files); !equal {

@@ -584,23 +585,15 @@ func walkDir(fs fs.Filesystem, dir string, cfiler CurrentFiler, matcher *ignore.
tmp = append(tmp, f.File)
}
}
sort.Sort(fileList(tmp))
slices.SortFunc(fileList(tmp), compareByName)

return tmp
}

type fileList []protocol.FileInfo

func (l fileList) Len() int {
return len(l)
}

func (l fileList) Less(a, b int) bool {
return l[a].Name < l[b].Name
}

func (l fileList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
func compareByName(a, b protocol.FileInfo) int {
return strings.Compare(a.Name, b.Name)
}

func (l fileList) testfiles() testfileList {

@@ -825,7 +818,7 @@ func TestIssue4841(t *testing.T) {
}
files = append(files, f.File)
}
sort.Sort(fileList(files))
slices.SortFunc(fileList(files), compareByName)

if len(files) != 1 {
t.Fatalf("Expected 1 file, got %d: %v", len(files), files)

@@ -15,7 +15,7 @@ import (
"net/http"
"os"
"runtime"
"sort"
"slices"
"strings"
"sync"
"time"

@@ -252,13 +252,7 @@ func (a *App) startup() error {
// The TLS configuration is used for both the listening socket and outgoing
// connections.

var tlsCfg *tls.Config
if a.cfg.Options().InsecureAllowOldTLSVersions {
l.Infoln("TLS 1.2 is allowed on sync connections. This is less than optimally secure.")
tlsCfg = tlsutil.SecureDefaultWithTLS12()
} else {
tlsCfg = tlsutil.SecureDefaultTLS13()
}
tlsCfg := tlsutil.SecureDefaultTLS13()
tlsCfg.Certificates = []tls.Certificate{a.cert}
tlsCfg.NextProtos = []string{bepProtocolName}
tlsCfg.ClientAuth = tls.RequestClientCert
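With the `InsecureAllowOldTLSVersions` branch removed, sync connections always get the TLS 1.3-only default. As a rough illustration only (this is not the implementation of Syncthing's `tlsutil.SecureDefaultTLS13` helper), a comparable baseline using the standard `crypto/tls` package might look like:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// tls13Baseline builds a minimal TLS 1.3-only config skeleton, roughly in
// the spirit of a "secure default" helper.
func tls13Baseline(cert tls.Certificate, nextProto string) *tls.Config {
	return &tls.Config{
		MinVersion:   tls.VersionTLS13,
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{nextProto},
		ClientAuth:   tls.RequestClientCert,
	}
}

func main() {
	// A real certificate would come from tls.LoadX509KeyPair or similar;
	// an empty one and a placeholder ALPN name are used here only so the
	// example compiles and runs.
	cfg := tls13Baseline(tls.Certificate{}, "example/1")
	fmt.Println(cfg.MinVersion == tls.VersionTLS13) // true
}
```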
@@ -443,8 +437,8 @@ func printServiceTree(w io.Writer, sup supervisor, level int) {
printService(w, sup, level)

svcs := sup.Services()
sort.Slice(svcs, func(a, b int) bool {
return fmt.Sprint(svcs[a]) < fmt.Sprint(svcs[b])
slices.SortFunc(svcs, func(a, b suture.Service) int {
return strings.Compare(fmt.Sprint(a), fmt.Sprint(b))
})

for _, svc := range svcs {

@@ -190,6 +190,11 @@ type Report struct {
Country string `json:"country" metric:"location,gaugeVec:country"`
CountryCode string `json:"countryCode" metric:"location,gaugeVec:countryCode"`
MajorVersion string `json:"majorVersion" metric:"reports_by_major_total,gaugeVec:version"`

// Once more to create a metric on OS, arch, distribution
DistDist string `json:"distDist" metric:"distribution,gaugeVec:distribution"`
DistOS string `json:"distOS" metric:"distribution,gaugeVec:os"`
DistArch string `json:"distArch" metric:"distribution,gaugeVec:arch"`
}

func New() *Report {

@@ -16,7 +16,7 @@ import (
"net/http"
"os"
"runtime"
"sort"
"slices"
"strings"
"sync"
"time"

@@ -162,7 +162,7 @@ func (s *Service) reportData(ctx context.Context, urVersion int, preview bool) (
l.Warnf("Unhandled versioning type for usage reports: %s", cfg.Versioning.Type)
}
}
sort.Ints(report.RescanIntvs)
slices.Sort(report.RescanIntvs)

for _, cfg := range s.cfg.Devices() {
if cfg.Introducer {

@@ -295,7 +295,7 @@ func (s *Service) reportData(ctx context.Context, urVersion int, preview bool) (
report.FolderUsesV3.SyncOwnership++
}
}
sort.Ints(report.FolderUsesV3.FsWatcherDelays)
slices.Sort(report.FolderUsesV3.FsWatcherDelays)

for _, cfg := range s.cfg.Devices() {
if cfg.Untrusted {

@@ -8,7 +8,8 @@ package versioner

import (
"path/filepath"
"sort"
"slices"
"strings"

"github.com/syncthing/syncthing/lib/fs"
)

@@ -37,7 +38,9 @@ func (t emptyDirTracker) emptyDirs() []string {
for dir := range t {
empty = append(empty, dir)
}
sort.Sort(sort.Reverse(sort.StringSlice(empty)))
slices.SortFunc(empty, func(a, b string) int {
return strings.Compare(b, a)
})
return empty
}