chore: switch database engine to sqlite (fixes #9954) (#9965)

Switch the database from LevelDB to SQLite, for greater stability and
simpler code.

Co-authored-by: Tommy van der Vorst <tommy@pixelspark.nl>
Co-authored-by: bt90 <btom1990@googlemail.com>
This commit is contained in:
Jakob Borg
2025-03-29 12:50:08 +00:00
committed by GitHub
parent b1c8f88a44
commit 025905fcdf
146 changed files with 8315 additions and 11984 deletions

View File

@@ -13,8 +13,6 @@ env:
GO_VERSION: "~1.24.0"
# Optimize compatibility on the slow architectures.
GO386: softfloat
GOARM: "5"
GOMIPS: softfloat
# Avoid hilarious amounts of obscuring log output when running tests.
@@ -24,6 +22,8 @@ env:
BUILD_USER: builder
BUILD_HOST: github.syncthing.net
TAGS: "netgo osusergo sqlite_omit_load_extension"
# A note on actions and third party code... The actions under actions/ (like
# `uses: actions/checkout`) are maintained by GitHub, and we need to trust
# GitHub to maintain their code and infrastructure or we're in deep shit in
@@ -85,6 +85,7 @@ jobs:
LOKI_USER: ${{ secrets.LOKI_USER }}
LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }}
LOKI_LABELS: "go=${{ matrix.go }},runner=${{ matrix.runner }},repo=${{ github.repository }},ref=${{ github.ref }}"
CGO_ENABLED: "1"
#
# Meta checks for formatting, copyright, etc
@@ -136,17 +137,8 @@ jobs:
package-windows:
name: Package for Windows
runs-on: windows-latest
runs-on: ubuntu-latest
steps:
- name: Set git to use LF
# Without this, the checkout will happen with CRLF line endings,
# which is fine for the source code but messes up tests that depend
# on data on disk being as expected. Ideally, those tests should be
# fixed, but not today.
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -158,17 +150,14 @@ jobs:
cache: false
check-latest: true
- name: Get actual Go version
run: |
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: mlugg/setup-zig@v1
- uses: actions/cache@v4
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-windows-${{ hashFiles('**/go.sum') }}
- name: Install dependencies
run: |
@@ -176,15 +165,14 @@ jobs:
- name: Create packages
run: |
$targets = 'syncthing', 'stdiscosrv', 'strelaysrv'
$archs = 'amd64', 'arm', 'arm64', '386'
foreach ($arch in $archs) {
foreach ($tgt in $targets) {
go run build.go -goarch $arch zip $tgt
}
}
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -tags "${{env.TAGS}}" -goos windows -goarch amd64 -cc "zig cc -target x86_64-windows" zip $tgt
go run build.go -tags "${{env.TAGS}}" -goos windows -goarch 386 -cc "zig cc -target x86-windows" zip $tgt
go run build.go -tags "${{env.TAGS}}" -goos windows -goarch arm64 -cc "zig cc -target aarch64-windows" zip $tgt
# go run build.go -tags "${{env.TAGS}}" -goos windows -goarch arm -cc "zig cc -target thumb-windows" zip $tgt # fails with linker errors
done
env:
CGO_ENABLED: "0"
CGO_ENABLED: "1"
- name: Archive artifacts
uses: actions/upload-artifact@v4
@@ -194,7 +182,7 @@ jobs:
codesign-windows:
name: Codesign for Windows
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
if: github.repository_owner == 'syncthing' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
environment: release
runs-on: windows-latest
needs:
@@ -269,6 +257,8 @@ jobs:
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: mlugg/setup-zig@v1
- uses: actions/cache@v4
with:
path: |
@@ -278,14 +268,25 @@ jobs:
- name: Create packages
run: |
archs=$(go tool dist list | grep linux | sed 's#linux/##')
for goarch in $archs ; do
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -goarch "$goarch" tar "$tgt"
done
sudo apt-get install -y gcc-mips64-linux-gnuabi64 gcc-mips64el-linux-gnuabi64
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch amd64 -cc "zig cc -target x86_64-linux-musl" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch 386 -cc "zig cc -target x86-linux-musl" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch arm -cc "zig cc -target arm-linux-musleabi" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch arm64 -cc "zig cc -target aarch64-linux-musl" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch mips -cc "zig cc -target mips-linux-musleabi" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch mipsle -cc "zig cc -target mipsel-linux-musleabi" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch mips64 -cc mips64-linux-gnuabi64-gcc tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch mips64le -cc mips64el-linux-gnuabi64-gcc tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch riscv64 -cc "zig cc -target riscv64-linux-musl" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch s390x -cc "zig cc -target s390x-linux-musl" tar "$tgt"
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch loong64 -cc "zig cc -target loongarch64-linux-musl" tar "$tgt"
# go run build.go -tags "${{env.TAGS}}" -goos linux -goarch ppc64 -cc "zig cc -target powerpc64-linux-musl" tar "$tgt" # fails with linkmode not supported
go run build.go -tags "${{env.TAGS}}" -goos linux -goarch ppc64le -cc "zig cc -target powerpc64le-linux-musl" tar "$tgt"
done
env:
CGO_ENABLED: "0"
CGO_ENABLED: "1"
EXTRA_LDFLAGS: "-linkmode=external -extldflags=-static"
- name: Archive artifacts
uses: actions/upload-artifact@v4
@@ -303,6 +304,8 @@ jobs:
name: Package for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
environment: release
env:
CODESIGN_IDENTITY: ${{ secrets.CODESIGN_IDENTITY }}
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
@@ -329,6 +332,7 @@ jobs:
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}
- name: Import signing certificate
if: env.CODESIGN_IDENTITY != ''
run: |
# Set up a run-specific keychain, making it available for the
# `codesign` tool.
@@ -356,7 +360,7 @@ jobs:
- name: Create package (amd64)
run: |
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -goarch amd64 zip "$tgt"
go run build.go -tags "${{env.TAGS}}" -goarch amd64 zip "$tgt"
done
env:
CGO_ENABLED: "1"
@@ -372,7 +376,7 @@ jobs:
EOT
chmod 755 xgo.sh
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -gocmd ./xgo.sh -goarch arm64 zip "$tgt"
go run build.go -tags "${{env.TAGS}}" -gocmd ./xgo.sh -goarch arm64 zip "$tgt"
done
env:
CGO_ENABLED: "1"
@@ -401,7 +405,7 @@ jobs:
notarize-macos:
name: Notarize for macOS
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
if: github.repository_owner == 'syncthing' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
environment: release
needs:
- package-macos
@@ -483,7 +487,7 @@ jobs:
goarch="${plat#*/}"
echo "::group ::$plat"
for tgt in syncthing stdiscosrv strelaysrv ; do
if ! go run build.go -goos "$goos" -goarch "$goarch" tar "$tgt" 2>/dev/null; then
if ! go run build.go -goos "$goos" -goarch "$goarch" tar "$tgt" ; then
echo "::warning ::Failed to build $tgt for $plat"
fi
done
@@ -545,7 +549,7 @@ jobs:
sign-for-upgrade:
name: Sign for upgrade
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
if: github.repository_owner == 'syncthing' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
environment: release
needs:
- codesign-windows
@@ -663,6 +667,8 @@ jobs:
run: |
gem install fpm
- uses: mlugg/setup-zig@v1
- uses: actions/cache@v4
with:
path: |
@@ -670,15 +676,17 @@ jobs:
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-debian-${{ hashFiles('**/go.sum') }}
- name: Package for Debian
- name: Package for Debian (CGO)
run: |
for arch in amd64 i386 armhf armel arm64 ; do
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -no-upgrade -installsuffix=no-upgrade -goarch "$arch" deb "$tgt"
done
for tgt in syncthing stdiscosrv strelaysrv ; do
go run build.go -no-upgrade -installsuffix=no-upgrade -tags "${{env.TAGS}}" -goos linux -goarch amd64 -cc "zig cc -target x86_64-linux-musl" deb "$tgt"
go run build.go -no-upgrade -installsuffix=no-upgrade -tags "${{env.TAGS}}" -goos linux -goarch arm -cc "zig cc -target arm-linux-musleabi" deb "$tgt"
go run build.go -no-upgrade -installsuffix=no-upgrade -tags "${{env.TAGS}}" -goos linux -goarch arm64 -cc "zig cc -target aarch64-linux-musl" deb "$tgt"
done
env:
BUILD_USER: debian
CGO_ENABLED: "1"
EXTRA_LDFLAGS: "-linkmode=external -extldflags=-static"
- name: Archive artifacts
uses: actions/upload-artifact@v4
@@ -692,7 +700,7 @@ jobs:
publish-nightly:
name: Publish nightly build
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && startsWith(github.ref, 'refs/heads/release-nightly')
if: github.repository_owner == 'syncthing' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && startsWith(github.ref, 'refs/heads/release-nightly')
environment: release
needs:
- sign-for-upgrade
@@ -742,7 +750,7 @@ jobs:
publish-release-files:
name: Publish release files
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/tags/v'))
if: github.repository_owner == 'syncthing' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/tags/v'))
environment: release
needs:
- sign-for-upgrade
@@ -809,7 +817,7 @@ jobs:
publish-apt:
name: Publish APT
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
if: github.repository_owner == 'syncthing' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/release' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v1'))
environment: release
needs:
- package-debian
@@ -836,7 +844,9 @@ jobs:
- name: Prepare packages
run: |
kind=stable
if [[ $VERSION == *-rc.[0-9] ]] ; then
if [[ $VERSION == v2* ]] ; then
kind=v2
elif [[ $VERSION == *-rc.[0-9] ]] ; then
kind=candidate
elif [[ $VERSION == *-* ]] ; then
kind=nightly
@@ -888,8 +898,10 @@ jobs:
docker-syncthing:
name: Build and push Docker images
runs-on: ubuntu-latest
if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch') && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/release' || github.ref == 'refs/heads/infrastructure' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v'))
if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
environment: docker
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
permissions:
contents: read
packages: write
@@ -902,13 +914,13 @@ jobs:
include:
- pkg: syncthing
dockerfile: Dockerfile
image: syncthing/syncthing
image: syncthing
- pkg: strelaysrv
dockerfile: Dockerfile.strelaysrv
image: syncthing/relaysrv
image: relaysrv
- pkg: stdiscosrv
dockerfile: Dockerfile.stdiscosrv
image: syncthing/discosrv
image: discosrv
steps:
- uses: actions/checkout@v4
with:
@@ -926,6 +938,8 @@ jobs:
go version
echo "GO_VERSION=$(go version | sed 's#^.*go##;s# .*##')" >> $GITHUB_ENV
- uses: mlugg/setup-zig@v1
- uses: actions/cache@v4
with:
path: |
@@ -933,33 +947,34 @@ jobs:
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-docker-${{ matrix.pkg }}-${{ hashFiles('**/go.sum') }}
- name: Build binaries
- name: Build binaries (CGO)
run: |
for arch in amd64 arm64 arm; do
go run build.go -goos linux -goarch "$arch" -no-upgrade build ${{ matrix.pkg }}
mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-"$arch"
done
env:
CGO_ENABLED: "0"
BUILD_USER: docker
# amd64
go run build.go -goos linux -goarch amd64 -tags "${{env.TAGS}}" -cc "zig cc -target x86_64-linux-musl" -no-upgrade build ${{ matrix.pkg }}
mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-amd64
- name: Check if we will be able to push images
run: |
if [[ "${{ secrets.DOCKERHUB_TOKEN }}" != "" ]]; then
echo "DOCKER_PUSH=true" >> $GITHUB_ENV;
fi
# arm64
go run build.go -goos linux -goarch arm64 -tags "${{env.TAGS}}" -cc "zig cc -target aarch64-linux-musl" -no-upgrade build ${{ matrix.pkg }}
mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-arm64
# arm
go run build.go -goos linux -goarch arm -tags "${{env.TAGS}}" -cc "zig cc -target arm-linux-musleabi" -no-upgrade build ${{ matrix.pkg }}
mv ${{ matrix.pkg }} ${{ matrix.pkg }}-linux-arm
env:
CGO_ENABLED: "1"
BUILD_USER: docker
EXTRA_LDFLAGS: "-linkmode=external -extldflags=-static"
- name: Login to Docker Hub
uses: docker/login-action@v3
if: env.DOCKER_PUSH == 'true'
if: env.DOCKERHUB_USERNAME != ''
with:
registry: docker.io
username: ${{ secrets.DOCKERHUB_USERNAME }}
username: ${{ env.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
if: env.DOCKER_PUSH == 'true'
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -972,18 +987,31 @@ jobs:
run: |
version=$(go run build.go version)
version=${version#v}
repo=ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}
ref="${{github.ref_name}}"
ref=${ref//\//-} # slashes to dashes
# List of tags for ghcr.io
if [[ $version == @([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]).@([0-9]|[0-9][0-9]) ]] ; then
echo Release version, pushing to :latest and version tags
major=${version%.*.*}
minor=${version%.*}
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:$major,ghcr.io/${{ matrix.image }}:$major,docker.io/${{ matrix.image }}:$minor,ghcr.io/${{ matrix.image }}:$minor,docker.io/${{ matrix.image }}:latest,ghcr.io/${{ matrix.image }}:latest
tags=$repo:$version,$repo:$major,$repo:$minor,$repo:latest
elif [[ $version == *-rc.@([0-9]|[0-9][0-9]) ]] ; then
echo Release candidate, pushing to :rc and version tags
tags=docker.io/${{ matrix.image }}:$version,ghcr.io/${{ matrix.image }}:$version,docker.io/${{ matrix.image }}:rc,ghcr.io/${{ matrix.image }}:rc
tags=$repo:$version,$repo:rc
elif [[ $ref == "main" ]] ; then
tags=$repo:edge
else
echo Development version, pushing to :edge
tags=docker.io/${{ matrix.image }}:edge,ghcr.io/${{ matrix.image }}:edge
tags=$repo:$ref
fi
# If we have a Docker Hub secret, also push to there.
if [[ $DOCKERHUB_USERNAME != "" ]] ; then
dockerhubtags="${tags//ghcr.io\/syncthing/docker.io\/syncthing}"
tags="$tags,$dockerhubtags"
fi
echo Pushing to $tags
echo "DOCKER_TAGS=$tags" >> $GITHUB_ENV
echo "VERSION=$version" >> $GITHUB_ENV
@@ -993,8 +1021,8 @@ jobs:
context: .
file: ${{ matrix.dockerfile }}
platforms: linux/amd64,linux/arm64,linux/arm/7
push: ${{ env.DOCKER_PUSH == 'true' }}
tags: ${{ env.DOCKER_TAGS }}
push: true
labels: |
org.opencontainers.image.version=${{ env.VERSION }}
org.opencontainers.image.revision=${{ github.sha }}

View File

@@ -3,6 +3,7 @@ linters:
disable:
- cyclop
- depguard
- err113
- exhaustive
- exhaustruct
- funlen
@@ -12,6 +13,7 @@ linters:
- gocognit
- goconst
- gocyclo
- godot
- godox
- gofmt
- goimports
@@ -21,15 +23,19 @@ linters:
- ireturn
- lll
- maintidx
- musttag
- nestif
- nlreturn
- nonamedreturns
- paralleltest
- prealloc
- protogetter
- scopelint
- tagalign
- tagliatelle
- testpackage
- varnamelen
- wrapcheck
- wsl
issues:

View File

@@ -288,10 +288,10 @@ func runCommand(cmd string, target target) {
build(target, tags)
case "test":
test(strings.Fields(extraTags), "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
test(strings.Fields(extraTags), "github.com/syncthing/syncthing/internal/...", "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
case "bench":
bench(strings.Fields(extraTags), "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
bench(strings.Fields(extraTags), "github.com/syncthing/syncthing/internal/...", "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
case "integration":
integration(false)
@@ -474,7 +474,7 @@ func install(target target, tags []string) {
defer shouldCleanupSyso(sysoPath)
}
args := []string{"install", "-v"}
args := []string{"install"}
args = appendParameters(args, tags, target.buildPkgs...)
runPrint(goCmd, args...)
}
@@ -502,7 +502,7 @@ func build(target target, tags []string) {
defer shouldCleanupSyso(sysoPath)
}
args := []string{"build", "-v"}
args := []string{"build"}
if buildOut != "" {
args = append(args, "-o", buildOut)
}
@@ -514,13 +514,6 @@ func setBuildEnvVars() {
os.Setenv("GOOS", goos)
os.Setenv("GOARCH", goarch)
os.Setenv("CC", cc)
if os.Getenv("CGO_ENABLED") == "" {
switch goos {
case "darwin", "solaris":
default:
os.Setenv("CGO_ENABLED", "0")
}
}
}
func appendParameters(args []string, tags []string, pkgs ...string) []string {
@@ -736,12 +729,9 @@ func shouldBuildSyso(dir string) (string, error) {
sysoPath := filepath.Join(dir, "cmd", "syncthing", "resource.syso")
// See https://github.com/josephspurrier/goversioninfo#command-line-flags
armOption := ""
if strings.Contains(goarch, "arm") {
armOption = "-arm=true"
}
if _, err := runError("goversioninfo", "-o", sysoPath, armOption); err != nil {
arm := strings.HasPrefix(goarch, "arm")
a64 := strings.Contains(goarch, "64")
if _, err := runError("goversioninfo", "-o", sysoPath, fmt.Sprintf("-arm=%v", arm), fmt.Sprintf("-64=%v", a64)); err != nil {
return "", errors.New("failed to create " + sysoPath + ": " + err.Error())
}

View File

@@ -41,5 +41,4 @@ func (p *profileCommand) Run(ctx Context) error {
// debugCommand groups the debug-oriented CLI subcommands under one
// parent command; kong dispatches to the selected field's Run method.
type debugCommand struct {
	File    fileCommand    `cmd:"" help:"Show information about a file (or directory/symlink)"`
	Profile profileCommand `cmd:"" help:"Save a profile to help figuring out what Syncthing does"`
	Index   indexCommand   `cmd:"" help:"Show information about the index (database)"`
}

View File

@@ -1,32 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package cli
import (
"github.com/alecthomas/kong"
)
// indexCommand groups the read-only inspection subcommands for the
// index database (dump, size accounting, consistency check). The
// selected subcommand is dispatched by name in Run.
type indexCommand struct {
	Dump     struct{} `cmd:"" help:"Print the entire db"`
	DumpSize struct{} `cmd:"" help:"Print the db size of different categories of information"`
	Check    struct{} `cmd:"" help:"Check the database for inconsistencies"`
	Account  struct{} `cmd:"" help:"Print key and value size statistics per key type"`
}
// Run dispatches the selected index subcommand to its handler. An
// unrecognized selection is a no-op and returns nil.
func (*indexCommand) Run(kongCtx *kong.Context) error {
	handlers := map[string]func() error{
		"dump":      indexDump,
		"dump-size": indexDumpSize,
		"check":     indexCheck,
		"account":   indexAccount,
	}
	if handler, ok := handlers[kongCtx.Selected().Name]; ok {
		return handler()
	}
	return nil
}

View File

@@ -1,62 +0,0 @@
// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package cli
import (
"fmt"
"os"
"text/tabwriter"
)
// indexAccount prints key and data size statistics per class
func indexAccount() error {
	ldb, err := getDB()
	if err != nil {
		return err
	}
	it, err := ldb.NewPrefixIterator(nil)
	if err != nil {
		return err
	}

	// One slot per possible key-type byte (the first byte of each key).
	var items, keyBytes, dataBytes, largest [256]int
	for it.Next() {
		kt := it.Key()[0]
		kl := len(it.Key())
		dl := len(it.Value())
		items[kt]++
		keyBytes[kt] += kl
		dataBytes[kt] += dl
		if total := kl + dl; total > largest[kt] {
			largest[kt] = total
		}
	}

	tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', tabwriter.AlignRight)
	var sumItems, sumData, sumKeys int
	for kt := range keyBytes {
		if keyBytes[kt] == 0 {
			continue
		}
		// yes metric kilobytes 🤘
		fmt.Fprintf(tw, "0x%02x:\t%d items,\t%d KB keys +\t%d KB data,\t%d B +\t%d B avg,\t%d B max\t\n", kt, items[kt], keyBytes[kt]/1000, dataBytes[kt]/1000, keyBytes[kt]/items[kt], dataBytes[kt]/items[kt], largest[kt])
		sumItems += items[kt]
		sumData += dataBytes[kt]
		sumKeys += keyBytes[kt]
	}
	fmt.Fprintf(tw, "Total\t%d items,\t%d KB keys +\t%d KB data.\t\n", sumItems, sumKeys/1000, sumData/1000)
	tw.Flush()
	return nil
}

View File

@@ -1,162 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package cli
import (
"encoding/binary"
"fmt"
"time"
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/gen/bep"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/protocol"
)
// indexDump prints every record in the index database to stdout, one
// record per line, decoded according to the key-type byte that prefixes
// each key. Records with an unknown key type are hex-dumped. This is a
// debugging aid; output format is human-oriented and not stable.
func indexDump() error {
	ldb, err := getDB()
	if err != nil {
		return err
	}
	it, err := ldb.NewPrefixIterator(nil)
	if err != nil {
		return err
	}
	for it.Next() {
		key := it.Key()
		// The first key byte selects the record type; the layout of the
		// rest of the key depends on it.
		switch key[0] {
		case db.KeyTypeDevice:
			// Key: folder index (4B BE), device index (4B BE), name.
			folder := binary.BigEndian.Uint32(key[1:])
			device := binary.BigEndian.Uint32(key[1+4:])
			name := nulString(key[1+4+4:])
			fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
			var f bep.FileInfo
			err := proto.Unmarshal(it.Value(), &f)
			if err != nil {
				return err
			}
			fmt.Printf(" V:%v\n", &f)
		case db.KeyTypeGlobal:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			var flv dbproto.VersionList
			// NOTE(review): unmarshal error deliberately ignored; a
			// corrupt value prints as an empty version list.
			proto.Unmarshal(it.Value(), &flv)
			fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, &flv)
		case db.KeyTypeBlock:
			// Key: folder index (4B BE), 32-byte block hash, name.
			folder := binary.BigEndian.Uint32(key[1:])
			hash := key[1+4 : 1+4+32]
			name := nulString(key[1+4+32:])
			fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
		case db.KeyTypeDeviceStatistic:
			fmt.Printf("[dstat] K:%x V:%x\n", key, it.Value())
		case db.KeyTypeFolderStatistic:
			fmt.Printf("[fstat] K:%x V:%x\n", key, it.Value())
		case db.KeyTypeVirtualMtime:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			val := it.Value()
			// Value is two marshalled timestamps of equal length:
			// real mtime followed by virtual mtime. Unmarshal errors
			// are ignored; a bad value prints as zero times.
			var realTime, virtualTime time.Time
			realTime.UnmarshalBinary(val[:len(val)/2])
			virtualTime.UnmarshalBinary(val[len(val)/2:])
			fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, realTime, virtualTime)
		case db.KeyTypeFolderIdx:
			key := binary.BigEndian.Uint32(key[1:])
			fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())
		case db.KeyTypeDeviceIdx:
			key := binary.BigEndian.Uint32(key[1:])
			val := it.Value()
			// An empty value means the device entry was cleared.
			device := "<nil>"
			if len(val) > 0 {
				dev, err := protocol.DeviceIDFromBytes(val)
				if err != nil {
					device = fmt.Sprintf("<invalid %d bytes>", len(val))
				} else {
					device = dev.String()
				}
			}
			fmt.Printf("[deviceidx] K:%d V:%s\n", key, device)
		case db.KeyTypeIndexID:
			device := binary.BigEndian.Uint32(key[1:])
			folder := binary.BigEndian.Uint32(key[5:])
			fmt.Printf("[indexid] D:%d F:%d I:%x\n", device, folder, it.Value())
		case db.KeyTypeFolderMeta:
			folder := binary.BigEndian.Uint32(key[1:])
			fmt.Printf("[foldermeta] F:%d", folder)
			var cs dbproto.CountsSet
			if err := proto.Unmarshal(it.Value(), &cs); err != nil {
				fmt.Printf(" (invalid)\n")
			} else {
				fmt.Printf(" V:%v\n", &cs)
			}
		case db.KeyTypeMiscData:
			fmt.Printf("[miscdata] K:%q V:%q\n", key[1:], it.Value())
		case db.KeyTypeSequence:
			folder := binary.BigEndian.Uint32(key[1:])
			seq := binary.BigEndian.Uint64(key[5:])
			fmt.Printf("[sequence] F:%d S:%d V:%q\n", folder, seq, it.Value())
		case db.KeyTypeNeed:
			folder := binary.BigEndian.Uint32(key[1:])
			file := string(key[5:])
			fmt.Printf("[need] F:%d V:%q\n", folder, file)
		case db.KeyTypeBlockList:
			fmt.Printf("[blocklist] H:%x\n", key[1:])
		case db.KeyTypeBlockListMap:
			folder := binary.BigEndian.Uint32(key[1:])
			hash := key[5:37]
			fileName := string(key[37:])
			fmt.Printf("[blocklistmap] F:%d H:%x N:%s\n", folder, hash, fileName)
		case db.KeyTypeVersion:
			fmt.Printf("[version] H:%x", key[1:])
			var v bep.Vector
			err := proto.Unmarshal(it.Value(), &v)
			if err != nil {
				fmt.Printf(" (invalid)\n")
			} else {
				fmt.Printf(" V:%v\n", &v)
			}
		case db.KeyTypePendingFolder:
			device := binary.BigEndian.Uint32(key[1:])
			folder := string(key[5:])
			var of dbproto.ObservedFolder
			// NOTE(review): unmarshal error ignored — best-effort dump.
			proto.Unmarshal(it.Value(), &of)
			fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, &of)
		case db.KeyTypePendingDevice:
			device := "<invalid>"
			dev, err := protocol.DeviceIDFromBytes(key[1:])
			if err == nil {
				device = dev.String()
			}
			var od dbproto.ObservedDevice
			// NOTE(review): unmarshal error ignored — best-effort dump.
			proto.Unmarshal(it.Value(), &od)
			fmt.Printf("[pendingDevice] D:%v V:%v\n", device, &od)
		default:
			// Unknown key type: hex-dump key and value verbatim.
			fmt.Printf("[??? %d]\n  %x\n  %x\n", key[0], key, it.Value())
		}
	}
	return nil
}

View File

@@ -1,88 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package cli
import (
"encoding/binary"
"fmt"
"sort"
"github.com/syncthing/syncthing/lib/db"
)
// indexDumpSize prints one line per database record — a decoded key
// label followed by the value size in bytes — sorted by descending
// value size, as a rough view of where the space goes.
func indexDumpSize() error {
	type entry struct {
		label string
		bytes int
	}

	ldb, err := getDB()
	if err != nil {
		return err
	}
	it, err := ldb.NewPrefixIterator(nil)
	if err != nil {
		return err
	}

	var entries []entry
	for it.Next() {
		key := it.Key()
		var label string
		switch key[0] {
		case db.KeyTypeDevice:
			folder := binary.BigEndian.Uint32(key[1:])
			device := binary.BigEndian.Uint32(key[1+4:])
			name := nulString(key[1+4+4:])
			label = fmt.Sprintf("DEVICE:%d:%d:%s", folder, device, name)
		case db.KeyTypeGlobal:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			label = fmt.Sprintf("GLOBAL:%d:%s", folder, name)
		case db.KeyTypeBlock:
			folder := binary.BigEndian.Uint32(key[1:])
			hash := key[1+4 : 1+4+32]
			name := nulString(key[1+4+32:])
			label = fmt.Sprintf("BLOCK:%d:%x:%s", folder, hash, name)
		case db.KeyTypeDeviceStatistic:
			label = fmt.Sprintf("DEVICESTATS:%s", key[1:])
		case db.KeyTypeFolderStatistic:
			label = fmt.Sprintf("FOLDERSTATS:%s", key[1:])
		case db.KeyTypeVirtualMtime:
			label = fmt.Sprintf("MTIME:%s", key[1:])
		case db.KeyTypeFolderIdx:
			id := binary.BigEndian.Uint32(key[1:])
			label = fmt.Sprintf("FOLDERIDX:%d", id)
		case db.KeyTypeDeviceIdx:
			id := binary.BigEndian.Uint32(key[1:])
			label = fmt.Sprintf("DEVICEIDX:%d", id)
		default:
			label = fmt.Sprintf("UNKNOWN:%x", key)
		}
		entries = append(entries, entry{label, len(it.Value())})
	}

	// Largest values first.
	sort.Slice(entries, func(a, b int) bool {
		return entries[a].bytes > entries[b].bytes
	})
	for _, e := range entries {
		fmt.Println(e.label, e.bytes)
	}
	return nil
}

View File

@@ -1,434 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package cli
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"sort"
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/gen/bep"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/protocol"
)
// fileInfoKey identifies a FileInfo record by its numeric folder and
// device indirection IDs plus the file name, mirroring the layout of
// KeyTypeDevice keys.
type fileInfoKey struct {
	folder uint32
	device uint32
	name   string
}

// globalKey identifies a global version-list record by numeric folder
// ID and file name, mirroring the layout of KeyTypeGlobal keys.
type globalKey struct {
	folder uint32
	name   string
}

// sequenceKey identifies a sequence-index record by numeric folder ID
// and sequence number, mirroring the layout of KeyTypeSequence keys.
type sequenceKey struct {
	folder   uint32
	sequence uint64
}
func indexCheck() (err error) {
ldb, err := getDB()
if err != nil {
return err
}
folders := make(map[uint32]string)
devices := make(map[uint32]string)
deviceToIDs := make(map[string]uint32)
fileInfos := make(map[fileInfoKey]*bep.FileInfo)
globals := make(map[globalKey]*dbproto.VersionList)
sequences := make(map[sequenceKey]string)
needs := make(map[globalKey]struct{})
blocklists := make(map[string]struct{})
versions := make(map[string]*bep.Vector)
usedBlocklists := make(map[string]struct{})
usedVersions := make(map[string]struct{})
var localDeviceKey uint32
success := true
defer func() {
if err == nil {
if success {
fmt.Println("Index check completed successfully.")
} else {
err = errors.New("Inconsistencies found in the index")
}
}
}()
it, err := ldb.NewPrefixIterator(nil)
if err != nil {
return err
}
for it.Next() {
key := it.Key()
switch key[0] {
case db.KeyTypeDevice:
folder := binary.BigEndian.Uint32(key[1:])
device := binary.BigEndian.Uint32(key[1+4:])
name := nulString(key[1+4+4:])
var f bep.FileInfo
err := proto.Unmarshal(it.Value(), &f)
if err != nil {
fmt.Println("Unable to unmarshal FileInfo:", err)
success = false
continue
}
fileInfos[fileInfoKey{folder, device, name}] = &f
case db.KeyTypeGlobal:
folder := binary.BigEndian.Uint32(key[1:])
name := nulString(key[1+4:])
var flv dbproto.VersionList
if err := proto.Unmarshal(it.Value(), &flv); err != nil {
fmt.Println("Unable to unmarshal VersionList:", err)
success = false
continue
}
globals[globalKey{folder, name}] = &flv
case db.KeyTypeFolderIdx:
key := binary.BigEndian.Uint32(it.Key()[1:])
folders[key] = string(it.Value())
case db.KeyTypeDeviceIdx:
key := binary.BigEndian.Uint32(it.Key()[1:])
devices[key] = string(it.Value())
deviceToIDs[string(it.Value())] = key
if bytes.Equal(it.Value(), protocol.LocalDeviceID[:]) {
localDeviceKey = key
}
case db.KeyTypeSequence:
folder := binary.BigEndian.Uint32(key[1:])
seq := binary.BigEndian.Uint64(key[5:])
val := it.Value()
sequences[sequenceKey{folder, seq}] = string(val[9:])
case db.KeyTypeNeed:
folder := binary.BigEndian.Uint32(key[1:])
name := nulString(key[1+4:])
needs[globalKey{folder, name}] = struct{}{}
case db.KeyTypeBlockList:
hash := string(key[1:])
blocklists[hash] = struct{}{}
case db.KeyTypeVersion:
hash := string(key[1:])
var v bep.Vector
if err := proto.Unmarshal(it.Value(), &v); err != nil {
fmt.Println("Unable to unmarshal Vector:", err)
success = false
continue
}
versions[hash] = &v
}
}
if localDeviceKey == 0 {
fmt.Println("Missing key for local device in device index (bailing out)")
success = false
return
}
var missingSeq []sequenceKey
for fk, fi := range fileInfos {
if fk.name != fi.Name {
fmt.Printf("Mismatching FileInfo name, %q (key) != %q (actual)\n", fk.name, fi.Name)
success = false
}
folder := folders[fk.folder]
if folder == "" {
fmt.Printf("Unknown folder ID %d for FileInfo %q\n", fk.folder, fk.name)
success = false
continue
}
if devices[fk.device] == "" {
fmt.Printf("Unknown device ID %d for FileInfo %q, folder %q\n", fk.folder, fk.name, folder)
success = false
}
if fk.device == localDeviceKey {
sk := sequenceKey{fk.folder, uint64(fi.Sequence)}
name, ok := sequences[sk]
if !ok {
fmt.Printf("Sequence entry missing for FileInfo %q, folder %q, seq %d\n", fi.Name, folder, fi.Sequence)
missingSeq = append(missingSeq, sk)
success = false
continue
}
if name != fi.Name {
fmt.Printf("Sequence entry refers to wrong name, %q (seq) != %q (FileInfo), folder %q, seq %d\n", name, fi.Name, folder, fi.Sequence)
success = false
}
}
if len(fi.Blocks) == 0 && len(fi.BlocksHash) != 0 {
key := string(fi.BlocksHash)
if _, ok := blocklists[key]; !ok {
fmt.Printf("Missing block list for file %q, block list hash %x\n", fi.Name, fi.BlocksHash)
success = false
} else {
usedBlocklists[key] = struct{}{}
}
}
if fi.VersionHash != nil {
key := string(fi.VersionHash)
if _, ok := versions[key]; !ok {
fmt.Printf("Missing version vector for file %q, version hash %x\n", fi.Name, fi.VersionHash)
success = false
} else {
usedVersions[key] = struct{}{}
}
}
_, ok := globals[globalKey{fk.folder, fk.name}]
if !ok {
fmt.Printf("Missing global for file %q\n", fi.Name)
success = false
continue
}
}
// Aggregate the ranges of missing sequence entries, print them
sort.Slice(missingSeq, func(a, b int) bool {
if missingSeq[a].folder != missingSeq[b].folder {
return missingSeq[a].folder < missingSeq[b].folder
}
return missingSeq[a].sequence < missingSeq[b].sequence
})
var folder uint32
var startSeq, prevSeq uint64
for _, sk := range missingSeq {
if folder != sk.folder || sk.sequence != prevSeq+1 {
if folder != 0 {
fmt.Printf("Folder %d missing %d sequence entries: #%d - #%d\n", folder, prevSeq-startSeq+1, startSeq, prevSeq)
}
startSeq = sk.sequence
folder = sk.folder
}
prevSeq = sk.sequence
}
if folder != 0 {
fmt.Printf("Folder %d missing %d sequence entries: #%d - #%d\n", folder, prevSeq-startSeq+1, startSeq, prevSeq)
}
for gk, vl := range globals {
folder := folders[gk.folder]
if folder == "" {
fmt.Printf("Unknown folder ID %d for VersionList %q\n", gk.folder, gk.name)
success = false
}
checkGlobal := func(i int, device []byte, version protocol.Vector, invalid, deleted bool) {
dev, ok := deviceToIDs[string(device)]
if !ok {
fmt.Printf("VersionList %q, folder %q refers to unknown device %q\n", gk.name, folder, device)
success = false
}
fi, ok := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
if !ok {
fmt.Printf("VersionList %q, folder %q, entry %d refers to unknown FileInfo\n", gk.name, folder, i)
success = false
}
fiv := fi.Version
if fi.VersionHash != nil {
fiv = versions[string(fi.VersionHash)]
}
if !protocol.VectorFromWire(fiv).Equal(version) {
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, version, fi.Version)
success = false
}
ffi := protocol.FileInfoFromDB(fi)
if ffi.IsInvalid() != invalid {
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, ffi.IsInvalid())
success = false
}
if ffi.IsDeleted() != deleted {
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, ffi.IsDeleted())
success = false
}
}
for i, fv := range vl.Versions {
ver := protocol.VectorFromWire(fv.Version)
for _, device := range fv.Devices {
checkGlobal(i, device, ver, false, fv.Deleted)
}
for _, device := range fv.InvalidDevices {
checkGlobal(i, device, ver, true, fv.Deleted)
}
}
// If we need this file we should have a need entry for it. False
// positives from needsLocally for deleted files, where we might
// legitimately lack an entry if we never had it, and ignored files.
if needsLocally(vl) {
_, ok := needs[gk]
if !ok {
fv, _ := vlGetGlobal(vl)
devB, _ := fvFirstDevice(fv)
dev := deviceToIDs[string(devB)]
fi := protocol.FileInfoFromDB(fileInfos[fileInfoKey{gk.folder, dev, gk.name}])
if !fi.IsDeleted() && !fi.IsIgnored() {
fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
}
}
}
}
seenSeq := make(map[fileInfoKey]uint64)
for sk, name := range sequences {
folder := folders[sk.folder]
if folder == "" {
fmt.Printf("Unknown folder ID %d for sequence entry %d, %q\n", sk.folder, sk.sequence, name)
success = false
continue
}
if prev, ok := seenSeq[fileInfoKey{folder: sk.folder, name: name}]; ok {
fmt.Printf("Duplicate sequence entry for %q, folder %q, seq %d (prev %d)\n", name, folder, sk.sequence, prev)
success = false
}
seenSeq[fileInfoKey{folder: sk.folder, name: name}] = sk.sequence
fi, ok := fileInfos[fileInfoKey{sk.folder, localDeviceKey, name}]
if !ok {
fmt.Printf("Missing FileInfo for sequence entry %d, folder %q, %q\n", sk.sequence, folder, name)
success = false
continue
}
if fi.Sequence != int64(sk.sequence) {
fmt.Printf("Sequence mismatch for %q, folder %q, %d (key) != %d (FileInfo)\n", name, folder, sk.sequence, fi.Sequence)
success = false
}
}
for nk := range needs {
folder := folders[nk.folder]
if folder == "" {
fmt.Printf("Unknown folder ID %d for need entry %q\n", nk.folder, nk.name)
success = false
continue
}
vl, ok := globals[nk]
if !ok {
fmt.Printf("Missing global for need entry %q, folder %q\n", nk.name, folder)
success = false
continue
}
if !needsLocally(vl) {
fmt.Printf("Need entry for file we don't need, %q, folder %q\n", nk.name, folder)
success = false
}
}
if d := len(blocklists) - len(usedBlocklists); d > 0 {
fmt.Printf("%d block list entries out of %d needs GC\n", d, len(blocklists))
}
if d := len(versions) - len(usedVersions); d > 0 {
fmt.Printf("%d version entries out of %d needs GC\n", d, len(versions))
}
return nil
}
// needsLocally reports whether the local device needs the file described by
// the given version list, comparing the local device's FileVersion (if any)
// against the current global version.
func needsLocally(vl *dbproto.VersionList) bool {
	global, haveGlobal := vlGetGlobal(vl)
	if !haveGlobal {
		// No global version exists at all; nothing to need.
		return false
	}
	local, haveLocal := vlGet(vl, protocol.LocalDeviceID[:])
	return db.Need(global, haveLocal, protocol.VectorFromWire(local.Version))
}
// vlGet returns the FileVersion that contains the given device, and whether
// such an entry was found at all. When not found, an empty (non-nil)
// FileVersion is returned rather than nil.
func vlGet(vl *dbproto.VersionList, device []byte) (*dbproto.FileVersion, bool) {
	if _, idx, _, found := vlFindDevice(vl, device); found {
		return vl.Versions[idx], true
	}
	return &dbproto.FileVersion{}, false
}
// vlGetGlobal returns the current global FileVersion. The returned
// FileVersion may be invalid, if all FileVersions are invalid. Returns false
// only if the VersionList is empty.
func vlGetGlobal(vl *dbproto.VersionList) (*dbproto.FileVersion, bool) {
	idx := vlFindGlobal(vl)
	if idx < 0 {
		return nil, false
	}
	return vl.Versions[idx], true
}
// vlFindGlobal returns the index of the first version that isn't invalid,
// or if all versions are invalid just the first one (i.e. 0), or -1 if
// there are no versions at all.
func vlFindGlobal(vl *dbproto.VersionList) int {
	if len(vl.Versions) == 0 {
		return -1
	}
	for idx, fv := range vl.Versions {
		if !fvIsInvalid(fv) {
			return idx
		}
	}
	// Every version is invalid; the first one stands in as the global.
	return 0
}
// vlFindDevice reports whether the device was found among the invalid
// devices (true) or the valid ones (false), the index of the containing
// FileVersion, the index within that device slice, and whether the device
// was found at all.
func vlFindDevice(vl *dbproto.VersionList, device []byte) (bool, int, int, bool) {
	for verIdx, fv := range vl.Versions {
		if devIdx := deviceIndex(fv.Devices, device); devIdx >= 0 {
			return false, verIdx, devIdx, true
		}
		if devIdx := deviceIndex(fv.InvalidDevices, device); devIdx >= 0 {
			return true, verIdx, devIdx, true
		}
	}
	return false, -1, -1, false
}
// deviceIndex returns the position of device within devices, or -1 when it
// is not present. Comparison is by byte content, not identity.
func deviceIndex(devices [][]byte, device []byte) int {
	for idx := range devices {
		if bytes.Equal(devices[idx], device) {
			return idx
		}
	}
	return -1
}
// fvFirstDevice returns the first device announcing this FileVersion,
// preferring valid devices over invalid ones, and whether any device is
// listed at all.
func fvFirstDevice(fv *dbproto.FileVersion) ([]byte, bool) {
	if devs := fv.Devices; len(devs) > 0 {
		return devs[0], true
	}
	if devs := fv.InvalidDevices; len(devs) > 0 {
		return devs[0], true
	}
	return nil, false
}
// fvIsInvalid reports whether the FileVersion is nil or has no valid
// (non-invalid) devices announcing it.
func fvIsInvalid(fv *dbproto.FileVersion) bool {
	if fv == nil {
		return true
	}
	return len(fv.Devices) == 0
}

View File

@@ -17,8 +17,6 @@ import (
"path/filepath"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/locations"
)
func responseToBArray(response *http.Response) ([]byte, error) {
@@ -133,10 +131,6 @@ func prettyPrintResponse(response *http.Response) error {
return prettyPrintJSON(data)
}
func getDB() (backend.Backend, error) {
return backend.OpenLevelDBRO(locations.Get(locations.Database))
}
func nulString(bs []byte) string {
for i := range bs {
if bs[i] == 0 {

View File

@@ -22,6 +22,7 @@ import (
"path"
"path/filepath"
"regexp"
"runtime"
"runtime/pprof"
"sort"
"strconv"
@@ -38,10 +39,10 @@ import (
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
"github.com/syncthing/syncthing/cmd/syncthing/decrypt"
"github.com/syncthing/syncthing/cmd/syncthing/generate"
"github.com/syncthing/syncthing/internal/db"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/dialer"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
@@ -139,42 +140,41 @@ var entrypoint struct {
// serveOptions are the options for the `syncthing serve` command.
type serveOptions struct {
cmdutil.CommonOptions
AllowNewerConfig bool `help:"Allow loading newer than current config version"`
Audit bool `help:"Write events to audit file"`
AuditFile string `name:"auditfile" placeholder:"PATH" help:"Specify audit file (use \"-\" for stdout, \"--\" for stderr)"`
BrowserOnly bool `help:"Open GUI in browser"`
DataDir string `name:"data" placeholder:"PATH" env:"STDATADIR" help:"Set data directory (database and logs)"`
DeviceID bool `help:"Show the device ID"`
GenerateDir string `name:"generate" placeholder:"PATH" help:"Generate key and config in specified dir, then exit"` // DEPRECATED: replaced by subcommand!
GUIAddress string `name:"gui-address" placeholder:"URL" help:"Override GUI address (e.g. \"http://192.0.2.42:8443\")"`
GUIAPIKey string `name:"gui-apikey" placeholder:"API-KEY" help:"Override GUI API key"`
LogFile string `name:"logfile" default:"${logFile}" placeholder:"PATH" help:"Log file name (see below)"`
LogFlags int `name:"logflags" default:"${logFlags}" placeholder:"BITS" help:"Select information in log line prefix (see below)"`
LogMaxFiles int `placeholder:"N" default:"${logMaxFiles}" name:"log-max-old-files" help:"Number of old files to keep (zero to keep only current)"`
LogMaxSize int `placeholder:"BYTES" default:"${logMaxSize}" help:"Maximum size of any file (zero to disable log rotation)"`
NoBrowser bool `help:"Do not start browser"`
NoRestart bool `env:"STNORESTART" help:"Do not restart Syncthing when exiting due to API/GUI command, upgrade, or crash"`
NoUpgrade bool `env:"STNOUPGRADE" help:"Disable automatic upgrades"`
Paths bool `help:"Show configuration paths"`
Paused bool `help:"Start with all devices and folders paused"`
Unpaused bool `help:"Start with all devices and folders unpaused"`
Upgrade bool `help:"Perform upgrade"`
UpgradeCheck bool `help:"Check for available upgrade"`
UpgradeTo string `placeholder:"URL" help:"Force upgrade directly from specified URL"`
Verbose bool `help:"Print verbose log output"`
Version bool `help:"Show version"`
AllowNewerConfig bool `help:"Allow loading newer than current config version"`
Audit bool `help:"Write events to audit file"`
AuditFile string `name:"auditfile" placeholder:"PATH" help:"Specify audit file (use \"-\" for stdout, \"--\" for stderr)"`
BrowserOnly bool `help:"Open GUI in browser"`
DataDir string `name:"data" placeholder:"PATH" env:"STDATADIR" help:"Set data directory (database and logs)"`
DeviceID bool `help:"Show the device ID"`
GenerateDir string `name:"generate" placeholder:"PATH" help:"Generate key and config in specified dir, then exit"` // DEPRECATED: replaced by subcommand!
GUIAddress string `name:"gui-address" placeholder:"URL" help:"Override GUI address (e.g. \"http://192.0.2.42:8443\")"`
GUIAPIKey string `name:"gui-apikey" placeholder:"API-KEY" help:"Override GUI API key"`
LogFile string `name:"logfile" default:"${logFile}" placeholder:"PATH" help:"Log file name (see below)"`
LogFlags int `name:"logflags" default:"${logFlags}" placeholder:"BITS" help:"Select information in log line prefix (see below)"`
LogMaxFiles int `placeholder:"N" default:"${logMaxFiles}" name:"log-max-old-files" help:"Number of old files to keep (zero to keep only current)"`
LogMaxSize int `placeholder:"BYTES" default:"${logMaxSize}" help:"Maximum size of any file (zero to disable log rotation)"`
NoBrowser bool `help:"Do not start browser"`
NoRestart bool `env:"STNORESTART" help:"Do not restart Syncthing when exiting due to API/GUI command, upgrade, or crash"`
NoUpgrade bool `env:"STNOUPGRADE" help:"Disable automatic upgrades"`
Paths bool `help:"Show configuration paths"`
Paused bool `help:"Start with all devices and folders paused"`
Unpaused bool `help:"Start with all devices and folders unpaused"`
Upgrade bool `help:"Perform upgrade"`
UpgradeCheck bool `help:"Check for available upgrade"`
UpgradeTo string `placeholder:"URL" help:"Force upgrade directly from specified URL"`
Verbose bool `help:"Print verbose log output"`
Version bool `help:"Show version"`
DBMaintenanceInterval time.Duration `env:"STDBMAINTINTERVAL" help:"Database maintenance interval" default:"8h"`
// Debug options below
DebugDBIndirectGCInterval time.Duration `env:"STGCINDIRECTEVERY" help:"Database indirection GC interval"`
DebugDBRecheckInterval time.Duration `env:"STRECHECKDBEVERY" help:"Database metadata recalculation interval"`
DebugGUIAssetsDir string `placeholder:"PATH" help:"Directory to load GUI assets from" env:"STGUIASSETS"`
DebugPerfStats bool `env:"STPERFSTATS" help:"Write running performance statistics to perf-$pid.csv (Unix only)"`
DebugProfileBlock bool `env:"STBLOCKPROFILE" help:"Write block profiles to block-$pid-$timestamp.pprof every 20 seconds"`
DebugProfileCPU bool `help:"Write a CPU profile to cpu-$pid.pprof on exit" env:"STCPUPROFILE"`
DebugProfileHeap bool `env:"STHEAPPROFILE" help:"Write heap profiles to heap-$pid-$timestamp.pprof each time heap usage increases"`
DebugProfilerListen string `placeholder:"ADDR" env:"STPROFILER" help:"Network profiler listen address"`
DebugResetDatabase bool `name:"reset-database" help:"Reset the database, forcing a full rescan and resync"`
DebugResetDeltaIdxs bool `name:"reset-deltas" help:"Reset delta index IDs, forcing a full index exchange"`
DebugGUIAssetsDir string `placeholder:"PATH" help:"Directory to load GUI assets from" env:"STGUIASSETS"`
DebugPerfStats bool `env:"STPERFSTATS" help:"Write running performance statistics to perf-$pid.csv (Unix only)"`
DebugProfileBlock bool `env:"STBLOCKPROFILE" help:"Write block profiles to block-$pid-$timestamp.pprof every 20 seconds"`
DebugProfileCPU bool `help:"Write a CPU profile to cpu-$pid.pprof on exit" env:"STCPUPROFILE"`
DebugProfileHeap bool `env:"STHEAPPROFILE" help:"Write heap profiles to heap-$pid-$timestamp.pprof each time heap usage increases"`
DebugProfilerListen string `placeholder:"ADDR" env:"STPROFILER" help:"Network profiler listen address"`
DebugResetDatabase bool `name:"reset-database" help:"Reset the database, forcing a full rescan and resync"`
DebugResetDeltaIdxs bool `name:"reset-deltas" help:"Reset delta index IDs, forcing a full index exchange"`
// Internal options, not shown to users
InternalRestarting bool `env:"STRESTART" hidden:"1"`
@@ -592,8 +592,12 @@ func syncthingMain(options serveOptions) {
})
}
dbFile := locations.Get(locations.Database)
ldb, err := syncthing.OpenDBBackend(dbFile, cfgWrapper.Options().DatabaseTuning)
if err := syncthing.TryMigrateDatabase(); err != nil {
l.Warnln("Failed to migrate old-style database:", err)
os.Exit(1)
}
sdb, err := syncthing.OpenDatabase(locations.Get(locations.Database))
if err != nil {
l.Warnln("Error opening database:", err)
os.Exit(1)
@@ -602,11 +606,11 @@ func syncthingMain(options serveOptions) {
// Check if auto-upgrades is possible, and if yes, and it's enabled do an initial
// upgrade immediately. The auto-upgrade routine can only be started
// later after App is initialised.
autoUpgradePossible := autoUpgradePossible(options)
if autoUpgradePossible && cfgWrapper.Options().AutoUpgradeEnabled() {
// try to do upgrade directly and log the error if relevant.
release, err := initialAutoUpgradeCheck(db.NewMiscDataNamespace(ldb))
miscDB := db.NewMiscDB(sdb)
release, err := initialAutoUpgradeCheck(miscDB)
if err == nil {
err = upgrade.To(release)
}
@@ -617,7 +621,7 @@ func syncthingMain(options serveOptions) {
l.Infoln("Initial automatic upgrade:", err)
}
} else {
l.Infof("Upgraded to %q, exiting now.", release.Tag)
l.Infof("Upgraded to %q, should exit now.", release.Tag)
os.Exit(svcutil.ExitUpgrade.AsInt())
}
}
@@ -629,24 +633,17 @@ func syncthingMain(options serveOptions) {
}
appOpts := syncthing.Options{
NoUpgrade: options.NoUpgrade,
ProfilerAddr: options.DebugProfilerListen,
ResetDeltaIdxs: options.DebugResetDeltaIdxs,
Verbose: options.Verbose,
DBRecheckInterval: options.DebugDBRecheckInterval,
DBIndirectGCInterval: options.DebugDBIndirectGCInterval,
NoUpgrade: options.NoUpgrade,
ProfilerAddr: options.DebugProfilerListen,
ResetDeltaIdxs: options.DebugResetDeltaIdxs,
Verbose: options.Verbose,
DBMaintenanceInterval: options.DBMaintenanceInterval,
}
if options.Audit {
appOpts.AuditWriter = auditWriter(options.AuditFile)
}
if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil {
appOpts.DBRecheckInterval = dur
}
if dur, err := time.ParseDuration(os.Getenv("STGCINDIRECTEVERY")); err == nil {
appOpts.DBIndirectGCInterval = dur
}
app, err := syncthing.New(cfgWrapper, ldb, evLogger, cert, appOpts)
app, err := syncthing.New(cfgWrapper, sdb, evLogger, cert, appOpts)
if err != nil {
l.Warnln("Failed to start Syncthing:", err)
os.Exit(svcutil.ExitError.AsInt())
@@ -692,6 +689,7 @@ func syncthingMain(options serveOptions) {
pprof.StopCPUProfile()
}
runtime.KeepAlive(lf) // ensure lock is still held to this point
os.Exit(int(status))
}
@@ -833,7 +831,7 @@ func autoUpgrade(cfg config.Wrapper, app *syncthing.App, evLogger events.Logger)
}
}
func initialAutoUpgradeCheck(misc *db.NamespacedKV) (upgrade.Release, error) {
func initialAutoUpgradeCheck(misc *db.Typed) (upgrade.Release, error) {
if last, ok, err := misc.Time(upgradeCheckKey); err == nil && ok && time.Since(last) < upgradeCheckInterval {
return upgrade.Release{}, errTooEarlyUpgradeCheck
}

11
go.mod
View File

@@ -14,13 +14,14 @@ require (
github.com/go-ldap/ldap/v3 v3.4.10
github.com/gobwas/glob v0.2.3
github.com/gofrs/flock v0.12.1
github.com/greatroar/blobloom v0.8.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/jackpal/gateway v1.0.16
github.com/jackpal/go-nat-pmp v1.0.2
github.com/jmoiron/sqlx v1.4.0
github.com/julienschmidt/httprouter v1.3.0
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/maruel/panicparse/v2 v2.4.0
github.com/mattn/go-sqlite3 v1.14.24
github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2
github.com/maxmind/geoipupdate/v6 v6.1.0
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75
@@ -46,6 +47,7 @@ require (
golang.org/x/time v0.11.0
golang.org/x/tools v0.31.0
google.golang.org/protobuf v1.36.5
modernc.org/sqlite v1.36.0
sigs.k8s.io/yaml v1.4.0
)
@@ -57,6 +59,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.8.2 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
@@ -70,7 +73,9 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/nxadm/tail v1.4.11 // indirect
github.com/onsi/ginkgo/v2 v2.20.2 // indirect
github.com/oschwald/maxminddb-golang v1.13.1 // indirect
@@ -81,6 +86,7 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
@@ -93,6 +99,9 @@ require (
golang.org/x/mod v0.24.0 // indirect
golang.org/x/sync v0.12.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/libc v1.61.13 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.8.2 // indirect
)
// https://github.com/gobwas/glob/pull/55

46
go.sum
View File

@@ -1,3 +1,5 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f h1:GmH5lT+moM7PbAJFBq57nH9WJ+wRnBXr/tyaYWbSAx8=
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f/go.mod h1:Nhfib1j/VFnLrXL9cHgA+/n2O6P5THuWelOnbfPNd78=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
@@ -39,6 +41,8 @@ github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkE
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -58,6 +62,8 @@ github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -89,8 +95,6 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/greatroar/blobloom v0.8.0 h1:I9RlEkfqK9/6f1v9mFmDYegDQ/x0mISCpiNpAm23Pt4=
github.com/greatroar/blobloom v0.8.0/go.mod h1:mjMJ1hh1wjGVfr93QIHJ6FfDNVrA0IELv8OvMHJxHKs=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -126,6 +130,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
@@ -138,10 +144,17 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/maruel/panicparse/v2 v2.4.0 h1:yQKMIbQ0DKfinzVkTkcUzQyQ60UCiNnYfR7PWwTs2VI=
github.com/maruel/panicparse/v2 v2.4.0/go.mod h1:nOY2OKe8csO3F3SA5+hsxot05JLgukrF54B9x88fVp4=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU=
github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ=
github.com/maxmind/geoipupdate/v6 v6.1.0 h1:sdtTHzzQNJlXF5+fd/EoPTucRHyMonYt/Cok8xzzfqA=
@@ -150,6 +163,8 @@ github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 h1:cUVxyR+U
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75/go.mod h1:pBbZyGwC5i16IBkjVKoy/sznA8jPD/K9iedwe1ESE6w=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
@@ -201,6 +216,8 @@ github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzuk
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab h1:ZjX6I48eZSFetPb41dHudEyVr5v953N15TsNZXlkcWY=
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab/go.mod h1:/PfPXh0EntGc3QAAyUaviy4S9tzy4Zp0e2ilq4voC6E=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -328,6 +345,7 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -397,5 +415,29 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0=
modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo=
modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw=
modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8=
modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI=
modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.36.0 h1:EQXNRn4nIS+gfsKeUTymHIz1waxuv5BzU7558dHSfH8=
modernc.org/sqlite v1.36.0/go.mod h1:7MPwH7Z6bREicF9ZVUR78P1IKuxfZ8mRIDHD0iD+8TU=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@@ -59,9 +59,12 @@ Jakob Borg, Audrius Butkevicius, Jesse Lucas, Simon Frei, Tomasz Wilczyński, Al
<li><a href="https://github.com/golang/protobuf">golang/protobuf</a>, Copyright &copy; 2010 The Go Authors.</li>
<li><a href="https://github.com/golang/snappy">golang/snappy</a>, Copyright &copy; 2011 The Snappy-Go Authors.</li>
<li><a href="https://github.com/jackpal/gateway">jackpal/gateway</a>, Copyright &copy; 2010 Jack Palevich.</li>
<li><a href="https://github.com/jmoiron/sqlx">jmoiron/sqlx</a>, Copyright &copy; 2013 Jason Moiron.</li>
<li><a href="https://github.com/kballard/go-shellquote">kballard/go-shellquote</a>, Copyright &copy; 2014 Kevin Ballard.</li>
<li><a href="https://github.com/mattn/go-isatty">mattn/go-isatty</a>, Copyright &copy; Yasuhiro MATSUMOTO.</li>
<li><a href="https://github.com/mattn/go-sqlite3">mattn/go-sqlite3</a>, Copyright &copy; 2014 Yasuhiro Matsumoto</li>
<li><a href="https://github.com/matttproud/golang_protobuf_extensions">matttproud/golang_protobuf_extensions</a>, Copyright &copy; 2012 Matt T. Proud.</li>
<li><a href="https://modernc.org/sqlite">modernc.org/sqlite</a>, Copyright &copy; 2017 The Sqlite Authors</li>
<li><a href="https://github.com/oschwald/geoip2-golang">oschwald/geoip2-golang</a>, Copyright &copy; 2015, Gregory J. Oschwald.</li>
<li><a href="https://github.com/oschwald/maxminddb-golang">oschwald/maxminddb-golang</a>, Copyright &copy; 2015, Gregory J. Oschwald.</li>
<li><a href="https://github.com/petermattis/goid">petermattis/goid</a>, Copyright &copy; 2015-2016 Peter Mattis.</li>

73
internal/db/counts.go Normal file
View File

@@ -0,0 +1,73 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"fmt"
"strings"
"github.com/syncthing/syncthing/lib/protocol"
)
// Counts holds aggregated statistics for a set of files, as tracked per
// device and flag bucket in the database.
type Counts struct {
	Files       int   // number of regular files
	Directories int   // number of directories
	Symlinks    int   // number of symlinks
	Deleted     int   // number of deleted entries of any kind
	Bytes       int64 // total size of the counted files
	Sequence    int64 // zero for the global state
	DeviceID    protocol.DeviceID // device ID for remote devices, or special values for local/global
	LocalFlags  uint32            // the local flag for this count bucket
}
// Add returns the element-wise sum of c and other. Numeric fields and
// sequences are added, local flags are OR:ed together, and the device ID
// of the result is reset to the empty device since a sum no longer
// belongs to a single device.
func (c Counts) Add(other Counts) Counts {
	sum := Counts{
		Files:       c.Files + other.Files,
		Directories: c.Directories + other.Directories,
		Symlinks:    c.Symlinks + other.Symlinks,
		Deleted:     c.Deleted + other.Deleted,
		Bytes:       c.Bytes + other.Bytes,
		Sequence:    c.Sequence + other.Sequence,
		LocalFlags:  c.LocalFlags | other.LocalFlags,
	}
	sum.DeviceID = protocol.EmptyDeviceID
	return sum
}
// TotalItems returns the total number of items in the count, regardless
// of kind (files, directories, symlinks and deleted entries).
func (c Counts) TotalItems() int {
	total := c.Files
	total += c.Directories
	total += c.Symlinks
	total += c.Deleted
	return total
}
// String renders the counts in a compact single-line form, including a
// human-readable expansion of the local flags followed by their hex
// value, or "---" when no flags are set.
func (c Counts) String() string {
	var flags strings.Builder
	for _, f := range []struct {
		mask uint32
		name string
	}{
		{protocol.FlagLocalNeeded, "Need"},
		{protocol.FlagLocalIgnored, "Ignored"},
		{protocol.FlagLocalMustRescan, "Rescan"},
		{protocol.FlagLocalReceiveOnly, "Recvonly"},
		{protocol.FlagLocalUnsupported, "Unsupported"},
	} {
		if c.LocalFlags&f.mask != 0 {
			flags.WriteString(f.name)
		}
	}
	if c.LocalFlags != 0 {
		fmt.Fprintf(&flags, "(%x)", c.LocalFlags)
	}
	if flags.Len() == 0 {
		flags.WriteString("---")
	}
	return fmt.Sprintf("{Device:%v, Files:%d, Dirs:%d, Symlinks:%d, Del:%d, Bytes:%d, Seq:%d, Flags:%s}", c.DeviceID, c.Files, c.Directories, c.Symlinks, c.Deleted, c.Bytes, c.Sequence, flags.String())
}
// Equal compares the numbers only, not sequence/dev/flags.
func (c Counts) Equal(o Counts) bool {
	switch {
	case c.Files != o.Files:
		return false
	case c.Directories != o.Directories:
		return false
	case c.Symlinks != o.Symlinks:
		return false
	case c.Deleted != o.Deleted:
		return false
	case c.Bytes != o.Bytes:
		return false
	}
	return true
}

123
internal/db/interface.go Normal file
View File

@@ -0,0 +1,123 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db // import "github.com/syncthing/syncthing/internal/db/sqlite"
import (
"iter"
"time"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/thejerf/suture/v4"
)
// DB is the main database abstraction: file metadata per folder and
// device, global/local counts, index IDs, mtime overrides, and a generic
// key-value store (the embedded KV interface).
type DB interface {
	// Service returns a suture service that performs periodic database
	// maintenance at the given interval.
	Service(maintenanceInterval time.Duration) suture.Service

	// Basics

	// Update inserts or updates the given files for the folder and device.
	Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error
	// Close closes the database; further calls are invalid.
	Close() error

	// Single files
	GetDeviceFile(folder string, device protocol.DeviceID, file string) (protocol.FileInfo, bool, error)
	GetGlobalAvailability(folder, file string) ([]protocol.DeviceID, error)
	GetGlobalFile(folder string, file string) (protocol.FileInfo, bool, error)

	// File iterators
	//
	// n.b. there is a slight inconsistency in the return types where some
	// return a FileInfo iterator and some a FileMetadata iterator. The
	// latter is more lightweight, and the discrepancy depends on how the
	// functions tend to be used. We can introduce more variations as
	// required.
	//
	// Each iterator is paired with an error function that must be checked
	// after iteration completes.
	AllGlobalFiles(folder string) (iter.Seq[FileMetadata], func() error)
	AllGlobalFilesPrefix(folder string, prefix string) (iter.Seq[FileMetadata], func() error)
	AllLocalBlocksWithHash(hash []byte) (iter.Seq[BlockMapEntry], func() error)
	AllLocalFiles(folder string, device protocol.DeviceID) (iter.Seq[protocol.FileInfo], func() error)
	AllLocalFilesBySequence(folder string, device protocol.DeviceID, startSeq int64, limit int) (iter.Seq[protocol.FileInfo], func() error)
	AllLocalFilesWithPrefix(folder string, device protocol.DeviceID, prefix string) (iter.Seq[protocol.FileInfo], func() error)
	AllLocalFilesWithBlocksHash(folder string, h []byte) (iter.Seq[FileMetadata], func() error)
	AllLocalFilesWithBlocksHashAnyFolder(h []byte) (iter.Seq2[string, FileMetadata], func() error)
	AllNeededGlobalFiles(folder string, device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error)

	// Cleanup
	DropAllFiles(folder string, device protocol.DeviceID) error
	DropDevice(device protocol.DeviceID) error
	DropFilesNamed(folder string, device protocol.DeviceID, names []string) error
	DropFolder(folder string) error

	// Various metadata
	GetDeviceSequence(folder string, device protocol.DeviceID) (int64, error)
	ListFolders() ([]string, error)
	ListDevicesForFolder(folder string) ([]protocol.DeviceID, error)
	RemoteSequences(folder string) (map[protocol.DeviceID]int64, error)

	// Counts
	CountGlobal(folder string) (Counts, error)
	CountLocal(folder string, device protocol.DeviceID) (Counts, error)
	CountNeed(folder string, device protocol.DeviceID) (Counts, error)
	CountReceiveOnlyChanged(folder string) (Counts, error)

	// Index IDs
	DropAllIndexIDs() error
	GetIndexID(folder string, device protocol.DeviceID) (protocol.IndexID, error)
	SetIndexID(folder string, device protocol.DeviceID, id protocol.IndexID) error

	// MtimeFS
	DeleteMtime(folder, name string) error
	GetMtime(folder, name string) (ondisk, virtual time.Time)
	PutMtime(folder, name string, ondisk, virtual time.Time) error

	KV
}
// KV is a generic string-keyed binary key-value store. PrefixKV returns
// an iterator over all entries whose key starts with prefix, paired with
// an error function to be checked after iteration.
type KV interface {
	GetKV(key string) ([]byte, error)
	PutKV(key string, val []byte) error
	DeleteKV(key string) error
	PrefixKV(prefix string) (iter.Seq[KeyValue], func() error)
}

// BlockMapEntry locates one block of a file: the hash of the block list
// it belongs to, and the block's offset, index and size within the file.
type BlockMapEntry struct {
	BlocklistHash []byte
	Offset        int64
	BlockIndex    int
	Size          int
}

// KeyValue is a single entry as returned by PrefixKV.
type KeyValue struct {
	Key   string
	Value []byte
}
// FileMetadata is a lightweight summary of a file entry, used by
// iterators where the full FileInfo (with block lists etc) would be
// unnecessarily heavy.
type FileMetadata struct {
	Name       string
	Sequence   int64
	ModNanos   int64 // modification time, nanoseconds since the epoch
	Size       int64
	LocalFlags int64
	Type       protocol.FileInfoType
	Deleted    bool
	Invalid    bool
}

// ModTime returns the modification time reconstructed from ModNanos.
func (f *FileMetadata) ModTime() time.Time {
	return time.Unix(0, f.ModNanos)
}

// IsReceiveOnlyChanged reports whether the receive-only local flag is set.
func (f *FileMetadata) IsReceiveOnlyChanged() bool {
	return f.LocalFlags&protocol.FlagLocalReceiveOnly != 0
}

// IsDirectory reports whether the entry describes a directory.
func (f *FileMetadata) IsDirectory() bool {
	return f.Type == protocol.FileInfoTypeDirectory
}

// ShouldConflict reports whether any conflict-inducing local flag is set.
func (f *FileMetadata) ShouldConflict() bool {
	return f.LocalFlags&protocol.LocalConflictFlags != 0
}

229
internal/db/metrics.go Normal file
View File

@@ -0,0 +1,229 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"iter"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/protocol"
)
// Prometheus metrics for database operations, labelled by folder and
// operation name ("-" where no folder applies).
var (
	// metricCurrentOperations gauges the number of operations currently
	// in flight.
	metricCurrentOperations = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "db",
		Name:      "operations_current",
		// Help was missing here while present on the sibling metrics;
		// added for consistency and metric discoverability.
		Help: "Number of database operations currently in progress, per folder and operation",
	}, []string{"folder", "operation"})
	metricTotalOperationSeconds = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "db",
		Name:      "operation_seconds_total",
		Help:      "Total time spent in database operations, per folder and operation",
	}, []string{"folder", "operation"})
	metricTotalOperationsCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "db",
		Name:      "operations_total",
		Help:      "Total number of database operations, per folder and operation",
	}, []string{"folder", "operation"})
	metricTotalFilesUpdatedCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "db",
		Name:      "files_updated_total",
		Help:      "Total number of files updated",
	}, []string{"folder"})
)
// MetricsWrap wraps db so that every operation is accounted for in the
// Prometheus metrics above.
func MetricsWrap(db DB) DB {
	return metricsDB{db}
}

// metricsDB embeds the wrapped DB; methods not overridden below pass
// through unmetered.
type metricsDB struct {
	DB
}
// account records the start of a database operation and returns a
// completion function intended to be deferred. On completion it adds the
// elapsed time and operation count, and decrements the in-flight gauge.
func (m metricsDB) account(folder, op string) func() {
	start := time.Now()
	metricCurrentOperations.WithLabelValues(folder, op).Inc()
	return func() {
		// Only record strictly positive durations.
		duration := time.Since(start).Seconds()
		if duration > 0 {
			metricTotalOperationSeconds.WithLabelValues(folder, op).Add(duration)
		}
		metricTotalOperationsCount.WithLabelValues(folder, op).Inc()
		metricCurrentOperations.WithLabelValues(folder, op).Dec()
	}
}
// The wrapper methods below all follow the same pattern: account for the
// operation (timing and in-flight gauge), then delegate to the wrapped
// DB. The folder label is "-" where no folder applies. For methods that
// return iterators, only the call that creates the iterator is timed,
// not the iteration itself.
//
// NOTE(review): a few operation labels differ from the method names
// ("AllLocalFilesPrefix", "IndexIDDropAll", "IndexIDGet", "IndexIDSet");
// presumably legacy names kept for metric continuity — confirm before
// renaming.

func (m metricsDB) AllLocalFilesWithBlocksHash(folder string, h []byte) (iter.Seq[FileMetadata], func() error) {
	defer m.account(folder, "AllLocalFilesWithBlocksHash")()
	return m.DB.AllLocalFilesWithBlocksHash(folder, h)
}

func (m metricsDB) AllLocalFilesWithBlocksHashAnyFolder(h []byte) (iter.Seq2[string, FileMetadata], func() error) {
	defer m.account("-", "AllLocalFilesWithBlocksHashAnyFolder")()
	return m.DB.AllLocalFilesWithBlocksHashAnyFolder(h)
}

func (m metricsDB) AllGlobalFiles(folder string) (iter.Seq[FileMetadata], func() error) {
	defer m.account(folder, "AllGlobalFiles")()
	return m.DB.AllGlobalFiles(folder)
}

func (m metricsDB) AllGlobalFilesPrefix(folder string, prefix string) (iter.Seq[FileMetadata], func() error) {
	defer m.account(folder, "AllGlobalFilesPrefix")()
	return m.DB.AllGlobalFilesPrefix(folder, prefix)
}

func (m metricsDB) AllLocalFiles(folder string, device protocol.DeviceID) (iter.Seq[protocol.FileInfo], func() error) {
	defer m.account(folder, "AllLocalFiles")()
	return m.DB.AllLocalFiles(folder, device)
}

func (m metricsDB) AllLocalFilesWithPrefix(folder string, device protocol.DeviceID, prefix string) (iter.Seq[protocol.FileInfo], func() error) {
	defer m.account(folder, "AllLocalFilesPrefix")()
	return m.DB.AllLocalFilesWithPrefix(folder, device, prefix)
}

func (m metricsDB) AllLocalFilesBySequence(folder string, device protocol.DeviceID, startSeq int64, limit int) (iter.Seq[protocol.FileInfo], func() error) {
	defer m.account(folder, "AllLocalFilesBySequence")()
	return m.DB.AllLocalFilesBySequence(folder, device, startSeq, limit)
}

func (m metricsDB) AllNeededGlobalFiles(folder string, device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error) {
	defer m.account(folder, "AllNeededGlobalFiles")()
	return m.DB.AllNeededGlobalFiles(folder, device, order, limit, offset)
}

func (m metricsDB) GetGlobalAvailability(folder, file string) ([]protocol.DeviceID, error) {
	defer m.account(folder, "GetGlobalAvailability")()
	return m.DB.GetGlobalAvailability(folder, file)
}

func (m metricsDB) AllLocalBlocksWithHash(hash []byte) (iter.Seq[BlockMapEntry], func() error) {
	defer m.account("-", "AllLocalBlocksWithHash")()
	return m.DB.AllLocalBlocksWithHash(hash)
}

func (m metricsDB) Close() error {
	defer m.account("-", "Close")()
	return m.DB.Close()
}

func (m metricsDB) ListDevicesForFolder(folder string) ([]protocol.DeviceID, error) {
	defer m.account(folder, "ListDevicesForFolder")()
	return m.DB.ListDevicesForFolder(folder)
}

func (m metricsDB) RemoteSequences(folder string) (map[protocol.DeviceID]int64, error) {
	defer m.account(folder, "RemoteSequences")()
	return m.DB.RemoteSequences(folder)
}

func (m metricsDB) DropAllFiles(folder string, device protocol.DeviceID) error {
	defer m.account(folder, "DropAllFiles")()
	return m.DB.DropAllFiles(folder, device)
}

func (m metricsDB) DropDevice(device protocol.DeviceID) error {
	defer m.account("-", "DropDevice")()
	return m.DB.DropDevice(device)
}

func (m metricsDB) DropFilesNamed(folder string, device protocol.DeviceID, names []string) error {
	defer m.account(folder, "DropFilesNamed")()
	return m.DB.DropFilesNamed(folder, device, names)
}

func (m metricsDB) DropFolder(folder string) error {
	defer m.account(folder, "DropFolder")()
	return m.DB.DropFolder(folder)
}

func (m metricsDB) DropAllIndexIDs() error {
	defer m.account("-", "IndexIDDropAll")()
	return m.DB.DropAllIndexIDs()
}

func (m metricsDB) ListFolders() ([]string, error) {
	defer m.account("-", "ListFolders")()
	return m.DB.ListFolders()
}

func (m metricsDB) GetGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) {
	defer m.account(folder, "GetGlobalFile")()
	return m.DB.GetGlobalFile(folder, file)
}

func (m metricsDB) CountGlobal(folder string) (Counts, error) {
	defer m.account(folder, "CountGlobal")()
	return m.DB.CountGlobal(folder)
}

func (m metricsDB) GetIndexID(folder string, device protocol.DeviceID) (protocol.IndexID, error) {
	defer m.account(folder, "IndexIDGet")()
	return m.DB.GetIndexID(folder, device)
}

func (m metricsDB) GetDeviceFile(folder string, device protocol.DeviceID, file string) (protocol.FileInfo, bool, error) {
	defer m.account(folder, "GetDeviceFile")()
	return m.DB.GetDeviceFile(folder, device, file)
}

func (m metricsDB) CountLocal(folder string, device protocol.DeviceID) (Counts, error) {
	defer m.account(folder, "CountLocal")()
	return m.DB.CountLocal(folder, device)
}

func (m metricsDB) CountNeed(folder string, device protocol.DeviceID) (Counts, error) {
	defer m.account(folder, "CountNeed")()
	return m.DB.CountNeed(folder, device)
}

func (m metricsDB) CountReceiveOnlyChanged(folder string) (Counts, error) {
	defer m.account(folder, "CountReceiveOnlyChanged")()
	return m.DB.CountReceiveOnlyChanged(folder)
}

func (m metricsDB) GetDeviceSequence(folder string, device protocol.DeviceID) (int64, error) {
	defer m.account(folder, "GetDeviceSequence")()
	return m.DB.GetDeviceSequence(folder, device)
}

func (m metricsDB) SetIndexID(folder string, device protocol.DeviceID, id protocol.IndexID) error {
	defer m.account(folder, "IndexIDSet")()
	return m.DB.SetIndexID(folder, device, id)
}

func (m metricsDB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error {
	defer m.account(folder, "Update")()
	// Also count the number of files touched by this update.
	defer metricTotalFilesUpdatedCount.WithLabelValues(folder).Add(float64(len(fs)))
	return m.DB.Update(folder, device, fs)
}

func (m metricsDB) GetKV(key string) ([]byte, error) {
	defer m.account("-", "GetKV")()
	return m.DB.GetKV(key)
}

func (m metricsDB) PutKV(key string, val []byte) error {
	defer m.account("-", "PutKV")()
	return m.DB.PutKV(key, val)
}

func (m metricsDB) DeleteKV(key string) error {
	defer m.account("-", "DeleteKV")()
	return m.DB.DeleteKV(key)
}

func (m metricsDB) PrefixKV(prefix string) (iter.Seq[KeyValue], func() error) {
	defer m.account("-", "PrefixKV")()
	return m.DB.PrefixKV(prefix)
}

View File

@@ -8,6 +8,7 @@ package db
import (
"fmt"
"strings"
"time"
"google.golang.org/protobuf/proto"
@@ -17,6 +18,14 @@ import (
"github.com/syncthing/syncthing/lib/protocol"
)
type ObservedDB struct {
kv KV
}
func NewObservedDB(kv KV) *ObservedDB {
return &ObservedDB{kv: kv}
}
type ObservedFolder struct {
Time time.Time `json:"time"`
Label string `json:"label"`
@@ -52,39 +61,42 @@ func (o *ObservedDevice) fromWire(w *dbproto.ObservedDevice) {
o.Address = w.GetAddress()
}
func (db *Lowlevel) AddOrUpdatePendingDevice(device protocol.DeviceID, name, address string) error {
key := db.keyer.GeneratePendingDeviceKey(nil, device[:])
func (db *ObservedDB) AddOrUpdatePendingDevice(device protocol.DeviceID, name, address string) error {
key := "device/" + device.String()
od := &dbproto.ObservedDevice{
Time: timestamppb.New(time.Now().Truncate(time.Second)),
Name: name,
Address: address,
}
return db.Put(key, mustMarshal(od))
return db.kv.PutKV(key, mustMarshal(od))
}
func (db *Lowlevel) RemovePendingDevice(device protocol.DeviceID) error {
key := db.keyer.GeneratePendingDeviceKey(nil, device[:])
return db.Delete(key)
func (db *ObservedDB) RemovePendingDevice(device protocol.DeviceID) error {
key := "device/" + device.String()
return db.kv.DeleteKV(key)
}
// PendingDevices enumerates all entries. Invalid ones are dropped from the database
// after a warning log message, as a side-effect.
func (db *Lowlevel) PendingDevices() (map[protocol.DeviceID]ObservedDevice, error) {
iter, err := db.NewPrefixIterator([]byte{KeyTypePendingDevice})
if err != nil {
return nil, err
}
defer iter.Release()
func (db *ObservedDB) PendingDevices() (map[protocol.DeviceID]ObservedDevice, error) {
res := make(map[protocol.DeviceID]ObservedDevice)
for iter.Next() {
keyDev := db.keyer.DeviceFromPendingDeviceKey(iter.Key())
deviceID, err := protocol.DeviceIDFromBytes(keyDev)
it, errFn := db.kv.PrefixKV("device/")
for kv := range it {
_, keyDev, ok := strings.Cut(kv.Key, "/")
if !ok {
if err := db.kv.DeleteKV(kv.Key); err != nil {
return nil, fmt.Errorf("delete invalid pending device: %w", err)
}
continue
}
deviceID, err := protocol.DeviceIDFromString(keyDev)
var protoD dbproto.ObservedDevice
var od ObservedDevice
if err != nil {
goto deleteKey
}
if err = proto.Unmarshal(iter.Value(), &protoD); err != nil {
if err = proto.Unmarshal(kv.Value, &protoD); err != nil {
goto deleteKey
}
od.fromWire(&protoD)
@@ -94,52 +106,37 @@ func (db *Lowlevel) PendingDevices() (map[protocol.DeviceID]ObservedDevice, erro
// Deleting invalid entries is the only possible "repair" measure and
// appropriate for the importance of pending entries. They will come back
// soon if still relevant.
l.Infof("Invalid pending device entry, deleting from database: %x", iter.Key())
if err := db.Delete(iter.Key()); err != nil {
return nil, err
if err := db.kv.DeleteKV(kv.Key); err != nil {
return nil, fmt.Errorf("delete invalid pending device: %w", err)
}
}
return res, nil
return res, errFn()
}
func (db *Lowlevel) AddOrUpdatePendingFolder(id string, of ObservedFolder, device protocol.DeviceID) error {
key, err := db.keyer.GeneratePendingFolderKey(nil, device[:], []byte(id))
if err != nil {
return err
}
return db.Put(key, mustMarshal(of.toWire()))
func (db *ObservedDB) AddOrUpdatePendingFolder(id string, of ObservedFolder, device protocol.DeviceID) error {
key := "folder/" + device.String() + "/" + id
return db.kv.PutKV(key, mustMarshal(of.toWire()))
}
// RemovePendingFolderForDevice removes entries for specific folder / device combinations.
func (db *Lowlevel) RemovePendingFolderForDevice(id string, device protocol.DeviceID) error {
key, err := db.keyer.GeneratePendingFolderKey(nil, device[:], []byte(id))
if err != nil {
return err
}
return db.Delete(key)
func (db *ObservedDB) RemovePendingFolderForDevice(id string, device protocol.DeviceID) error {
key := "folder/" + device.String() + "/" + id
return db.kv.DeleteKV(key)
}
// RemovePendingFolder removes all entries matching a specific folder ID.
func (db *Lowlevel) RemovePendingFolder(id string) error {
iter, err := db.NewPrefixIterator([]byte{KeyTypePendingFolder})
if err != nil {
return fmt.Errorf("creating iterator: %w", err)
}
defer iter.Release()
var iterErr error
for iter.Next() {
if id != string(db.keyer.FolderFromPendingFolderKey(iter.Key())) {
func (db *ObservedDB) RemovePendingFolder(id string) error {
it, errFn := db.kv.PrefixKV("folder/")
for kv := range it {
parts := strings.Split(kv.Key, "/")
if len(parts) != 3 || parts[2] != id {
continue
}
if err = db.Delete(iter.Key()); err != nil {
if iterErr != nil {
l.Debugf("Repeat error removing pending folder: %v", err)
} else {
iterErr = err
}
if err := db.kv.DeleteKV(kv.Key); err != nil {
return fmt.Errorf("delete pending folder: %w", err)
}
}
return iterErr
return errFn()
}
// Consolidated information about a pending folder
@@ -147,41 +144,37 @@ type PendingFolder struct {
OfferedBy map[protocol.DeviceID]ObservedFolder `json:"offeredBy"`
}
func (db *Lowlevel) PendingFolders() (map[string]PendingFolder, error) {
func (db *ObservedDB) PendingFolders() (map[string]PendingFolder, error) {
return db.PendingFoldersForDevice(protocol.EmptyDeviceID)
}
// PendingFoldersForDevice enumerates only entries matching the given device ID, unless it
// is EmptyDeviceID. Invalid ones are dropped from the database after a info log
// message, as a side-effect.
func (db *Lowlevel) PendingFoldersForDevice(device protocol.DeviceID) (map[string]PendingFolder, error) {
var err error
prefixKey := []byte{KeyTypePendingFolder}
func (db *ObservedDB) PendingFoldersForDevice(device protocol.DeviceID) (map[string]PendingFolder, error) {
prefix := "folder/"
if device != protocol.EmptyDeviceID {
prefixKey, err = db.keyer.GeneratePendingFolderKey(nil, device[:], nil)
if err != nil {
return nil, err
}
prefix += device.String() + "/"
}
iter, err := db.NewPrefixIterator(prefixKey)
if err != nil {
return nil, err
}
defer iter.Release()
res := make(map[string]PendingFolder)
for iter.Next() {
keyDev, ok := db.keyer.DeviceFromPendingFolderKey(iter.Key())
deviceID, err := protocol.DeviceIDFromBytes(keyDev)
it, errFn := db.kv.PrefixKV(prefix)
for kv := range it {
parts := strings.Split(kv.Key, "/")
if len(parts) != 3 {
continue
}
keyDev := parts[1]
deviceID, err := protocol.DeviceIDFromString(keyDev)
var protoF dbproto.ObservedFolder
var of ObservedFolder
var folderID string
if !ok || err != nil {
if err != nil {
goto deleteKey
}
if folderID = string(db.keyer.FolderFromPendingFolderKey(iter.Key())); len(folderID) < 1 {
if folderID = parts[2]; len(folderID) < 1 {
goto deleteKey
}
if err = proto.Unmarshal(iter.Value(), &protoF); err != nil {
if err = proto.Unmarshal(kv.Value, &protoF); err != nil {
goto deleteKey
}
if _, ok := res[folderID]; !ok {
@@ -196,10 +189,17 @@ func (db *Lowlevel) PendingFoldersForDevice(device protocol.DeviceID) (map[strin
// Deleting invalid entries is the only possible "repair" measure and
// appropriate for the importance of pending entries. They will come back
// soon if still relevant.
l.Infof("Invalid pending folder entry, deleting from database: %x", iter.Key())
if err := db.Delete(iter.Key()); err != nil {
return nil, err
if err := db.kv.DeleteKV(kv.Key); err != nil {
return nil, fmt.Errorf("delete invalid pending folder: %w", err)
}
}
return res, nil
return res, errFn()
}
func mustMarshal(m proto.Message) []byte {
bs, err := proto.Marshal(m)
if err != nil {
panic(err)
}
return bs
}

View File

@@ -108,29 +108,8 @@ type Iterator interface {
// is empty for a db in memory.
type Backend interface {
Reader
Writer
NewReadTransaction() (ReadTransaction, error)
NewWriteTransaction(hooks ...CommitHook) (WriteTransaction, error)
Close() error
Compact() error
Location() string
}
type Tuning int
const (
// N.b. these constants must match those in lib/config.Tuning!
TuningAuto Tuning = iota
TuningSmall
TuningLarge
)
func Open(path string, tuning Tuning) (Backend, error) {
return OpenLevelDB(path, tuning)
}
func OpenMemory() Backend {
return OpenLevelDBMemory()
}
var (

View File

@@ -0,0 +1,113 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package backend
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/util"
)
// leveldbBackend implements Backend on top of a leveldb
type leveldbBackend struct {
	ldb      *leveldb.DB
	closeWG  *closeWaitGroup // tracks open snapshots so Close can wait for them
	location string          // directory path the database was opened from
}

// newLeveldbBackend wraps an already-opened leveldb database.
func newLeveldbBackend(ldb *leveldb.DB, location string) *leveldbBackend {
	return &leveldbBackend{
		ldb:      ldb,
		closeWG:  &closeWaitGroup{},
		location: location,
	}
}
// NewReadTransaction returns a read-only transaction backed by a leveldb
// snapshot.
func (b *leveldbBackend) NewReadTransaction() (ReadTransaction, error) {
	return b.newSnapshot()
}

// newSnapshot grabs a leveldb snapshot, registering it with the close
// wait group so that Close blocks until it has been released.
func (b *leveldbBackend) newSnapshot() (leveldbSnapshot, error) {
	rel, err := newReleaser(b.closeWG)
	if err != nil {
		return leveldbSnapshot{}, err
	}
	snap, err := b.ldb.GetSnapshot()
	if err != nil {
		// Undo the registration before bailing out.
		rel.Release()
		return leveldbSnapshot{}, wrapLeveldbErr(err)
	}
	res := leveldbSnapshot{snap: snap, rel: rel}
	return res, nil
}
// Close waits for outstanding snapshots to be released, then closes the
// underlying leveldb database.
func (b *leveldbBackend) Close() error {
	b.closeWG.CloseWait()
	err := b.ldb.Close()
	return wrapLeveldbErr(err)
}

// Get returns the value stored under key.
func (b *leveldbBackend) Get(key []byte) ([]byte, error) {
	val, err := b.ldb.Get(key, nil)
	if err != nil {
		return val, wrapLeveldbErr(err)
	}
	return val, nil
}

// NewPrefixIterator returns an iterator over all keys with the given
// prefix.
func (b *leveldbBackend) NewPrefixIterator(prefix []byte) (Iterator, error) {
	it := b.ldb.NewIterator(util.BytesPrefix(prefix), nil)
	return &leveldbIterator{it}, nil
}

// NewRangeIterator returns an iterator over the key range [first, last).
func (b *leveldbBackend) NewRangeIterator(first, last []byte) (Iterator, error) {
	rng := &util.Range{Start: first, Limit: last}
	return &leveldbIterator{b.ldb.NewIterator(rng, nil)}, nil
}

// Location returns the directory path of the database.
func (b *leveldbBackend) Location() string {
	return b.location
}
// leveldbSnapshot implements backend.ReadTransaction
type leveldbSnapshot struct {
	snap *leveldb.Snapshot
	rel  *releaser // unblocks a pending backend Close when released
}
// Get returns the value stored under key, as of the snapshot.
func (l leveldbSnapshot) Get(key []byte) ([]byte, error) {
	val, err := l.snap.Get(key, nil)
	if err != nil {
		return val, wrapLeveldbErr(err)
	}
	return val, nil
}

// NewPrefixIterator returns a snapshot iterator over all keys with the
// given prefix.
func (l leveldbSnapshot) NewPrefixIterator(prefix []byte) (Iterator, error) {
	rng := util.BytesPrefix(prefix)
	return l.snap.NewIterator(rng, nil), nil
}

// NewRangeIterator returns a snapshot iterator over the key range
// [first, last).
func (l leveldbSnapshot) NewRangeIterator(first, last []byte) (Iterator, error) {
	rng := &util.Range{Start: first, Limit: last}
	return l.snap.NewIterator(rng, nil), nil
}

// Release frees the snapshot and unblocks a pending backend Close.
func (l leveldbSnapshot) Release() {
	l.snap.Release()
	l.rel.Release()
}
// leveldbIterator wraps a leveldb iterator, translating its errors into
// the backend package's sentinel errors.
type leveldbIterator struct {
	iterator.Iterator
}

// Error returns the iterator error, wrapped for backend consumption.
func (it *leveldbIterator) Error() error {
	return wrapLeveldbErr(it.Iterator.Error())
}
// wrapLeveldbErr wraps errors so that the backend package can recognize them
func wrapLeveldbErr(err error) error {
	if err == leveldb.ErrClosed {
		return errClosed
	}
	if err == leveldb.ErrNotFound {
		return errNotFound
	}
	return err
}

View File

@@ -0,0 +1,32 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package backend
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
// dbMaxOpenFiles caps the number of file descriptors leveldb keeps open.
const dbMaxOpenFiles = 100

// OpenLevelDBRO attempts to open the database at the given location, read
// only.
func OpenLevelDBRO(location string) (Backend, error) {
	ldb, err := open(location, &opt.Options{
		OpenFilesCacheCapacity: dbMaxOpenFiles,
		ReadOnly:               true,
	})
	if err != nil {
		return nil, err
	}
	return newLeveldbBackend(ldb, location), nil
}

// open opens the leveldb database at location with the given options.
func open(location string, opts *opt.Options) (*leveldb.DB, error) {
	return leveldb.OpenFile(location, opts)
}

View File

@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
package olddb
import (
"encoding/binary"

View File

@@ -0,0 +1,70 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package olddb
import (
"encoding/binary"
"time"
"github.com/syncthing/syncthing/internal/db/olddb/backend"
)
// deprecatedLowlevel is the lowest level database interface. It has a very simple
// purpose: hold the actual backend database, and the in-memory state
// that belong to that database. In the same way that a single on disk
// database can only be opened once, there should be only one deprecatedLowlevel for
// any given backend.
type deprecatedLowlevel struct {
	backend.Backend
	folderIdx *smallIndex // maps folder IDs to compact uint32 indexes
	deviceIdx *smallIndex // maps device IDs to compact uint32 indexes
	keyer     keyer       // builds database keys from the indexes above
}
// NewLowlevel wraps the given backend in a deprecatedLowlevel, setting up
// the folder and device indexes and the keyer derived from them.
func NewLowlevel(backend backend.Backend) (*deprecatedLowlevel, error) {
	folderIdx := newSmallIndex(backend, []byte{KeyTypeFolderIdx})
	deviceIdx := newSmallIndex(backend, []byte{KeyTypeDeviceIdx})
	db := &deprecatedLowlevel{
		Backend:   backend,
		folderIdx: folderIdx,
		deviceIdx: deviceIdx,
	}
	db.keyer = newDefaultKeyer(folderIdx, deviceIdx)
	return db, nil
}
// ListFolders returns the list of folders currently in the database
func (db *deprecatedLowlevel) ListFolders() []string {
	folders := db.folderIdx.Values()
	return folders
}
// IterateMtimes calls fn for each virtual-mtime record in the database,
// decoding the folder, file name and the two stored timestamps. Records
// that fail to decode are silently skipped. Iteration stops early if fn
// returns an error.
func (db *deprecatedLowlevel) IterateMtimes(fn func(folder, name string, ondisk, virtual time.Time) error) error {
	it, err := db.NewPrefixIterator([]byte{KeyTypeVirtualMtime})
	if err != nil {
		return err
	}
	defer it.Release()
	for it.Next() {
		// Key layout after the type byte: four bytes of folder index,
		// then the file name.
		key := it.Key()[1:]
		folderID, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key))
		if !ok {
			continue
		}
		name := key[4:]
		// The value is two concatenated marshalled timestamps of equal
		// length: on-disk mtime followed by virtual mtime.
		val := it.Value()
		half := len(val) / 2
		var ondisk, virtual time.Time
		if err := ondisk.UnmarshalBinary(val[:half]); err != nil {
			continue
		}
		if err := virtual.UnmarshalBinary(val[half:]); err != nil {
			continue
		}
		if err := fn(string(folderID), string(name), ondisk, virtual); err != nil {
			return err
		}
	}
	return it.Error()
}

67
internal/db/olddb/set.go Normal file
View File

@@ -0,0 +1,67 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package olddb
import (
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
)
// deprecatedFileSet provides read access to the files of one folder in
// the legacy database.
type deprecatedFileSet struct {
	folder string
	db     *deprecatedLowlevel
}

// The Iterator is called with either a protocol.FileInfo or a
// FileInfoTruncated (depending on the method) and returns true to
// continue iteration, false to stop.
type Iterator func(f protocol.FileInfo) bool
// NewFileSet returns a file set for the given folder, backed by db.
func NewFileSet(folder string, db *deprecatedLowlevel) (*deprecatedFileSet, error) {
	fs := &deprecatedFileSet{folder: folder, db: db}
	return fs, nil
}
// Snapshot is a consistent point-in-time view of one folder's files,
// backed by a read-only database transaction.
type Snapshot struct {
	folder string
	t      readOnlyTransaction
}
// Snapshot returns a consistent point-in-time view of the file set. The
// caller must call Release on the returned snapshot when done with it.
func (s *deprecatedFileSet) Snapshot() (*Snapshot, error) {
	tran, err := s.db.newReadOnlyTransaction()
	if err != nil {
		return nil, err
	}
	snap := &Snapshot{folder: s.folder, t: tran}
	return snap, nil
}

// Release frees the underlying read transaction.
func (s *Snapshot) Release() {
	s.t.close()
}
// WithHaveSequence calls fn for locally known files starting at sequence
// number startSeq, with names converted to native format.
func (s *Snapshot) WithHaveSequence(startSeq int64, fn Iterator) error {
	folder := []byte(s.folder)
	return s.t.withHaveSequence(folder, startSeq, nativeFileIterator(fn))
}

// nativeFileIterator wraps fn so that file names are converted from wire
// format to native format before fn sees them.
func nativeFileIterator(fn Iterator) Iterator {
	return func(fi protocol.FileInfo) bool {
		fi.Name = osutil.NativeFilename(fi.Name)
		return fn(fi)
	}
}

View File

@@ -4,13 +4,13 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
package olddb
import (
"encoding/binary"
"sort"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/internal/db/olddb/backend"
"github.com/syncthing/syncthing/lib/sync"
)
@@ -74,23 +74,7 @@ func (i *smallIndex) ID(val []byte) (uint32, error) {
return id, nil
}
id := i.nextID
i.nextID++
valStr := string(val)
i.val2id[valStr] = id
i.id2val[id] = valStr
key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
copy(key, i.prefix)
binary.BigEndian.PutUint32(key[len(i.prefix):], id)
if err := i.db.Put(key, val); err != nil {
i.mut.Unlock()
return 0, err
}
i.mut.Unlock()
return id, nil
panic("missing ID")
}
// Val returns the value for the given index number, or (nil, false) if there
@@ -106,33 +90,6 @@ func (i *smallIndex) Val(id uint32) ([]byte, bool) {
return []byte(val), true
}
func (i *smallIndex) Delete(val []byte) error {
i.mut.Lock()
defer i.mut.Unlock()
// Check the reverse mapping to get the ID for the value.
if id, ok := i.val2id[string(val)]; ok {
// Generate the corresponding database key.
key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
copy(key, i.prefix)
binary.BigEndian.PutUint32(key[len(i.prefix):], id)
// Put an empty value into the database. This indicates that the
// entry does not exist any more and prevents the ID from being
// reused in the future.
if err := i.db.Put(key, []byte{}); err != nil {
return err
}
// Delete reverse mapping.
delete(i.id2val, id)
}
// Delete forward mapping.
delete(i.val2id, string(val))
return nil
}
// Values returns the set of values in the index
func (i *smallIndex) Values() []string {
// In principle this method should return [][]byte because all the other

View File

@@ -0,0 +1,193 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package olddb
import (
"fmt"
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/db/olddb/backend"
"github.com/syncthing/syncthing/internal/gen/bep"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/lib/protocol"
)
// A readOnlyTransaction represents a database snapshot.
type readOnlyTransaction struct {
    backend.ReadTransaction
    keyer keyer // generates database keys for the legacy schema
}
// newReadOnlyTransaction opens a new read transaction on the backend
// and wraps it in a readOnlyTransaction.
func (db *deprecatedLowlevel) newReadOnlyTransaction() (readOnlyTransaction, error) {
    backendTran, err := db.NewReadTransaction()
    if err != nil {
        return readOnlyTransaction{}, err
    }
    return db.readOnlyTransactionFromBackendTransaction(backendTran), nil
}
// readOnlyTransactionFromBackendTransaction wraps an existing backend
// read transaction, attaching this database's keyer.
func (db *deprecatedLowlevel) readOnlyTransactionFromBackendTransaction(tran backend.ReadTransaction) readOnlyTransaction {
    t := readOnlyTransaction{
        ReadTransaction: tran,
        keyer:           db.keyer,
    }
    return t
}
// close releases the underlying backend transaction.
func (t readOnlyTransaction) close() {
    t.Release()
}
// getFileByKey loads the full (non-truncated) FileInfo stored under
// key. The boolean result reports whether the key was present.
func (t readOnlyTransaction) getFileByKey(key []byte) (protocol.FileInfo, bool, error) {
    fi, found, err := t.getFileTrunc(key, false)
    if err != nil {
        return protocol.FileInfo{}, false, err
    }
    if !found {
        return protocol.FileInfo{}, false, nil
    }
    return fi, true, nil
}
// getFileTrunc loads the FileInfo stored under key, optionally in the
// truncated (block-less) representation. A missing key, or a missing
// indirected component, is reported as not-found rather than an error.
func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (protocol.FileInfo, bool, error) {
    raw, err := t.Get(key)
    switch {
    case backend.IsNotFound(err):
        return protocol.FileInfo{}, false, nil
    case err != nil:
        return protocol.FileInfo{}, false, err
    }

    fi, err := t.unmarshalTrunc(raw, trunc)
    switch {
    case backend.IsNotFound(err):
        // An indirected block list or version vector was missing.
        return protocol.FileInfo{}, false, nil
    case err != nil:
        return protocol.FileInfo{}, false, err
    }
    return fi, true, nil
}
// unmarshalTrunc decodes bs into a FileInfo, resolving indirected
// fields. With trunc set, the cheaper truncated representation
// (without the block list) is decoded instead.
func (t readOnlyTransaction) unmarshalTrunc(bs []byte, trunc bool) (protocol.FileInfo, error) {
    if !trunc {
        var full bep.FileInfo
        if err := proto.Unmarshal(bs, &full); err != nil {
            return protocol.FileInfo{}, err
        }
        if err := t.fillFileInfo(&full); err != nil {
            return protocol.FileInfo{}, err
        }
        return protocol.FileInfoFromDB(&full), nil
    }

    var truncated dbproto.FileInfoTruncated
    if err := proto.Unmarshal(bs, &truncated); err != nil {
        return protocol.FileInfo{}, err
    }
    if err := t.fillTruncated(&truncated); err != nil {
        return protocol.FileInfo{}, err
    }
    return protocol.FileInfoFromDBTruncated(&truncated), nil
}
// blocksIndirectionError wraps an error encountered while resolving an
// indirected block list, so callers can distinguish it from other
// unmarshalling failures via errors.As.
type blocksIndirectionError struct {
    err error
}

func (e *blocksIndirectionError) Error() string {
    return fmt.Sprintf("filling Blocks: %v", e.err)
}

// Unwrap exposes the underlying error for errors.Is/errors.As chains.
func (e *blocksIndirectionError) Unwrap() error {
    return e.err
}
// fillFileInfo follows the (possible) indirection of blocks and version
// vector and fills out fi in place. Failures while loading the block
// list are wrapped in a blocksIndirectionError.
func (t readOnlyTransaction) fillFileInfo(fi *bep.FileInfo) error {
    var key []byte

    if len(fi.BlocksHash) != 0 && len(fi.Blocks) == 0 {
        // The block list is stored separately; fetch and decode it.
        key = t.keyer.GenerateBlockListKey(key, fi.BlocksHash)
        raw, err := t.Get(key)
        if err != nil {
            return &blocksIndirectionError{err}
        }
        var blocks dbproto.BlockList
        if err := proto.Unmarshal(raw, &blocks); err != nil {
            return err
        }
        fi.Blocks = blocks.Blocks
    }

    if len(fi.VersionHash) != 0 {
        // The version vector is likewise indirected.
        key = t.keyer.GenerateVersionKey(key, fi.VersionHash)
        raw, err := t.Get(key)
        if err != nil {
            return fmt.Errorf("filling Version: %w", err)
        }
        var vec bep.Vector
        if err := proto.Unmarshal(raw, &vec); err != nil {
            return err
        }
        fi.Version = &vec
    }

    return nil
}
// fillTruncated follows the (possible) indirection of the version
// vector and fills it into fi in place.
func (t readOnlyTransaction) fillTruncated(fi *dbproto.FileInfoTruncated) error {
    if len(fi.VersionHash) == 0 {
        // Nothing is indirected.
        return nil
    }

    key := t.keyer.GenerateVersionKey(nil, fi.VersionHash)
    raw, err := t.Get(key)
    if err != nil {
        return err
    }
    var vec bep.Vector
    if err := proto.Unmarshal(raw, &vec); err != nil {
        return err
    }
    fi.Version = &vec
    return nil
}
// withHaveSequence iterates over local files in sequence order,
// starting at startSeq, calling fn for each. Iteration stops early when
// fn returns false.
func (t *readOnlyTransaction) withHaveSequence(folder []byte, startSeq int64, fn Iterator) error {
    from, err := t.keyer.GenerateSequenceKey(nil, folder, startSeq)
    if err != nil {
        return err
    }
    to, err := t.keyer.GenerateSequenceKey(nil, folder, maxInt64)
    if err != nil {
        return err
    }

    it, err := t.NewRangeIterator(from, to)
    if err != nil {
        return err
    }
    defer it.Release()

    for it.Next() {
        fi, found, err := t.getFileByKey(it.Value())
        if err != nil {
            return err
        }
        if !found {
            // The sequence entry pointed at a missing file record;
            // skip it.
            continue
        }
        if !fn(fi) {
            return nil
        }
    }
    return it.Error()
}

77
internal/db/sqlite/db.go Normal file
View File

@@ -0,0 +1,77 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"sync"
"time"
"github.com/jmoiron/sqlx"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/thejerf/suture/v4"
)
// DB is the SQLite-backed database implementation.
type DB struct {
    sql            *sqlx.DB
    localDeviceIdx int64 // devices-table index of the local device; interpolated into queries as {{.LocalDeviceIdx}}

    // updateLock is held for the duration of Close; presumably it also
    // serializes write transactions — TODO confirm against the update path.
    updateLock sync.Mutex

    // statements caches prepared statements; guarded by statementsMut
    // and closed in Close.
    statementsMut sync.RWMutex
    statements    map[string]*sqlx.Stmt

    // tplInput holds values available to the {{...}} statement
    // templates (e.g. flag constants) — see stmt().
    tplInput map[string]any
}
var _ db.DB = (*DB)(nil)
// Close closes all cached prepared statements and then the underlying
// SQL database, holding both locks to exclude concurrent use.
func (s *DB) Close() error {
    s.updateLock.Lock()
    defer s.updateLock.Unlock()
    s.statementsMut.Lock()
    defer s.statementsMut.Unlock()

    for _, prepared := range s.statements {
        prepared.Close()
    }
    return wrap(s.sql.Close())
}
// Service returns a suture service that runs database maintenance at
// the given interval.
func (s *DB) Service(maintenanceInterval time.Duration) suture.Service {
    return newService(s, maintenanceInterval)
}
// ListFolders returns the IDs of all known folders, sorted.
func (s *DB) ListFolders() ([]string, error) {
    var folderIDs []string
    err := s.stmt(`
        SELECT folder_id FROM folders
        ORDER BY folder_id
    `).Select(&folderIDs)
    return folderIDs, wrap(err)
}
// ListDevicesForFolder returns the IDs of remote devices that have at
// least one file entry in the given folder, sorted by device ID.
func (s *DB) ListDevicesForFolder(folder string) ([]protocol.DeviceID, error) {
    var idStrs []string
    err := s.stmt(`
        SELECT d.device_id FROM counts s
        INNER JOIN folders o ON o.idx = s.folder_idx
        INNER JOIN devices d ON d.idx = s.device_idx
        WHERE o.folder_id = ? AND s.count > 0 AND s.device_idx != {{.LocalDeviceIdx}}
        GROUP BY d.device_id
        ORDER BY d.device_id
    `).Select(&idStrs, folder)
    if err != nil {
        return nil, wrap(err)
    }

    devs := make([]protocol.DeviceID, len(idStrs))
    for idx, idStr := range idStrs {
        dev, err := protocol.DeviceIDFromString(idStr)
        if err != nil {
            return nil, wrap(err)
        }
        devs[idx] = dev
    }
    return devs, nil
}

View File

@@ -0,0 +1,243 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"context"
"fmt"
"testing"
"time"
"github.com/syncthing/syncthing/internal/timeutil"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
)
var globalFi protocol.FileInfo
// BenchmarkUpdate measures common database operations at increasing
// database sizes (10k, 20k, 40k, ... local files, doubling each round
// up to 200k), growing the database and garbage collecting between
// rounds.
func BenchmarkUpdate(b *testing.B) {
    db, err := OpenTemp()
    if err != nil {
        b.Fatal(err)
    }
    b.Cleanup(func() {
        if err := db.Close(); err != nil {
            b.Fatal(err)
        }
    })

    svc := db.Service(time.Hour).(*Service)

    fs := make([]protocol.FileInfo, 100)
    seed := 0
    size := 10000
    for size < 200_000 {
        // Run maintenance so each round starts from a compacted state.
        t0 := time.Now()
        if err := svc.periodic(context.Background()); err != nil {
            b.Fatal(err)
        }
        b.Log("garbage collect in", time.Since(t0))

        // Grow the database to the target size in batches of 1000
        // random local files. (The inner fs deliberately shadows the
        // outer one.)
        for {
            local, err := db.CountLocal(folderID, protocol.LocalDeviceID)
            if err != nil {
                b.Fatal(err)
            }
            if local.Files >= size {
                break
            }
            fs := make([]protocol.FileInfo, 1000)
            for i := range fs {
                fs[i] = genFile(rand.String(24), 64, 0)
            }
            if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
                b.Fatal(err)
            }
        }

        // Insert batches of 100 brand-new local files.
        b.Run(fmt.Sprintf("Insert100Loc@%d", size), func(b *testing.B) {
            for range b.N {
                for i := range fs {
                    fs[i] = genFile(rand.String(24), 64, 0)
                }
                if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
                    b.Fatal(err)
                }
            }
            b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
        })

        // Update existing files with new block lists and versions.
        b.Run(fmt.Sprintf("RepBlocks100@%d", size), func(b *testing.B) {
            for range b.N {
                for i := range fs {
                    fs[i].Blocks = genBlocks(fs[i].Name, seed, 64)
                    fs[i].Version = fs[i].Version.Update(42)
                }
                seed++
                if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
                    b.Fatal(err)
                }
            }
            b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
        })

        // Update existing files with a new version only (blocks
        // unchanged).
        b.Run(fmt.Sprintf("RepSame100@%d", size), func(b *testing.B) {
            for range b.N {
                for i := range fs {
                    fs[i].Version = fs[i].Version.Update(42)
                }
                if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
                    b.Fatal(err)
                }
            }
            b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
        })

        // Insert the same files as a remote device, with fresh
        // sequence numbers.
        b.Run(fmt.Sprintf("Insert100Rem@%d", size), func(b *testing.B) {
            for range b.N {
                for i := range fs {
                    fs[i].Blocks = genBlocks(fs[i].Name, seed, 64)
                    fs[i].Version = fs[i].Version.Update(42)
                    fs[i].Sequence = timeutil.StrictlyMonotonicNanos()
                }
                if err := db.Update(folderID, protocol.DeviceID{42}, fs); err != nil {
                    b.Fatal(err)
                }
            }
            b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
        })

        // Point lookups of the global version of known files.
        b.Run(fmt.Sprintf("GetGlobal100@%d", size), func(b *testing.B) {
            for range b.N {
                for i := range fs {
                    _, ok, err := db.GetGlobalFile(folderID, fs[i].Name)
                    if err != nil {
                        b.Fatal(err)
                    }
                    if !ok {
                        b.Fatal("should exist")
                    }
                }
            }
            b.ReportMetric(float64(b.N)*100.0/b.Elapsed().Seconds(), "files/s")
        })

        // Iterate the last ~100 local files by sequence; the result is
        // sunk into globalFi to defeat dead-code elimination.
        b.Run(fmt.Sprintf("LocalSequenced@%d", size), func(b *testing.B) {
            count := 0
            for range b.N {
                cur, err := db.GetDeviceSequence(folderID, protocol.LocalDeviceID)
                if err != nil {
                    b.Fatal(err)
                }
                it, errFn := db.AllLocalFilesBySequence(folderID, protocol.LocalDeviceID, cur-100, 0)
                for f := range it {
                    count++
                    globalFi = f
                }
                if err := errFn(); err != nil {
                    b.Fatal(err)
                }
            }
            b.ReportMetric(float64(count)/b.Elapsed().Seconds(), "files/s")
        })

        b.Run(fmt.Sprintf("GetDeviceSequenceLoc@%d", size), func(b *testing.B) {
            for range b.N {
                _, err := db.GetDeviceSequence(folderID, protocol.LocalDeviceID)
                if err != nil {
                    b.Fatal(err)
                }
            }
        })

        b.Run(fmt.Sprintf("GetDeviceSequenceRem@%d", size), func(b *testing.B) {
            for range b.N {
                _, err := db.GetDeviceSequence(folderID, protocol.DeviceID{42})
                if err != nil {
                    b.Fatal(err)
                }
            }
        })

        // Full remote-need iteration.
        b.Run(fmt.Sprintf("RemoteNeed@%d", size), func(b *testing.B) {
            count := 0
            for range b.N {
                it, errFn := db.AllNeededGlobalFiles(folderID, protocol.DeviceID{42}, config.PullOrderAlphabetic, 0, 0)
                for f := range it {
                    count++
                    globalFi = f
                }
                if err := errFn(); err != nil {
                    b.Fatal(err)
                }
            }
            b.ReportMetric(float64(count)/b.Elapsed().Seconds(), "files/s")
        })

        // Local need limited to the 100 largest files.
        b.Run(fmt.Sprintf("LocalNeed100Largest@%d", size), func(b *testing.B) {
            count := 0
            for range b.N {
                it, errFn := db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderLargestFirst, 100, 0)
                for f := range it {
                    globalFi = f
                    count++
                }
                if err := errFn(); err != nil {
                    b.Fatal(err)
                }
            }
            b.ReportMetric(float64(count)/b.Elapsed().Seconds(), "files/s")
        })

        size <<= 1
    }
}
// TestBenchmarkDropAllRemote times DropAllFiles on a database holding
// 15k local and 15k identical remote files. It is written as a test
// rather than a benchmark, presumably because the drop is destructive
// and cannot be repeated in a b.N loop — the elapsed time is merely
// logged.
func TestBenchmarkDropAllRemote(t *testing.T) {
    if testing.Short() {
        t.Skip("slow test")
    }
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // Grow the database in batches of 1000 files, inserted for both
    // the remote device {42} and the local device.
    fs := make([]protocol.FileInfo, 1000)
    seq := 0
    for {
        local, err := db.CountLocal(folderID, protocol.LocalDeviceID)
        if err != nil {
            t.Fatal(err)
        }
        if local.Files >= 15_000 {
            break
        }
        for i := range fs {
            seq++
            fs[i] = genFile(rand.String(24), 64, seq)
        }
        if err := db.Update(folderID, protocol.DeviceID{42}, fs); err != nil {
            t.Fatal(err)
        }
        if err := db.Update(folderID, protocol.LocalDeviceID, fs); err != nil {
            t.Fatal(err)
        }
    }

    // Time the actual drop.
    t0 := time.Now()
    if err := db.DropAllFiles(folderID, protocol.DeviceID{42}); err != nil {
        t.Fatal(err)
    }
    d := time.Since(t0)
    t.Log("drop all took", d)
}

View File

@@ -0,0 +1,137 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/lib/protocol"
)
// countsRow holds one row from the counts table or an aggregated
// counts query: the number and total size of entries sharing a given
// (type, deleted, local_flags) combination.
type countsRow struct {
    Type       protocol.FileInfoType
    Count      int
    Size       int64
    Deleted    bool
    LocalFlags int64 `db:"local_flags"`
}
// CountLocal returns the file counts for the given device in the given
// folder, excluding ignored entries.
func (s *DB) CountLocal(folder string, device protocol.DeviceID) (db.Counts, error) {
    var rows []countsRow
    err := s.stmt(`
        SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
        INNER JOIN folders o ON o.idx = s.folder_idx
        INNER JOIN devices d ON d.idx = s.device_idx
        WHERE o.folder_id = ? AND d.device_id = ? AND s.local_flags & {{.FlagLocalIgnored}} = 0
    `).Select(&rows, folder, device.String())
    if err != nil {
        return db.Counts{}, wrap(err)
    }
    return summarizeCounts(rows), nil
}
// CountNeed returns the need counts for the given device in the given
// folder. The local device uses the precomputed need flag; remotes
// require comparing against the global state.
func (s *DB) CountNeed(folder string, device protocol.DeviceID) (db.Counts, error) {
    if device != protocol.LocalDeviceID {
        return s.needSizeRemote(folder, device)
    }
    return s.needSizeLocal(folder)
}
// CountGlobal returns the counts for global entries in the folder.
// Ignored and receive-only changed files are excluded from the global
// count (legacy expectation? it's a bit weird since those files can in
// fact be global and you can get them with GetGlobal etc.)
func (s *DB) CountGlobal(folder string) (db.Counts, error) {
    var rows []countsRow
    if err := s.stmt(`
        SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
        INNER JOIN folders o ON o.idx = s.folder_idx
        WHERE o.folder_id = ? AND s.local_flags & {{.FlagLocalGlobal}} != 0 AND s.local_flags & {{or .FlagLocalReceiveOnly .FlagLocalIgnored}} = 0
    `).Select(&rows, folder); err != nil {
        return db.Counts{}, wrap(err)
    }
    return summarizeCounts(rows), nil
}
// CountReceiveOnlyChanged returns the counts for entries flagged as
// receive-only changed in the given folder.
func (s *DB) CountReceiveOnlyChanged(folder string) (db.Counts, error) {
    var res []countsRow
    // The local_flags column is qualified with the `s.` alias for
    // consistency with the other counts queries (and to stay
    // unambiguous should a joined table ever gain a column of the
    // same name).
    err := s.stmt(`
        SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
        INNER JOIN folders o ON o.idx = s.folder_idx
        WHERE o.folder_id = ? AND s.local_flags & {{.FlagLocalReceiveOnly}} != 0
    `).Select(&res, folder)
    if err != nil {
        return db.Counts{}, wrap(err)
    }
    return summarizeCounts(res), nil
}
// needSizeLocal returns the local need counts: the sum of entries with
// the need bit set.
func (s *DB) needSizeLocal(folder string) (db.Counts, error) {
    var rows []countsRow
    if err := s.stmt(`
        SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
        INNER JOIN folders o ON o.idx = s.folder_idx
        WHERE o.folder_id = ? AND s.local_flags & {{.FlagLocalNeeded}} != 0
    `).Select(&rows, folder); err != nil {
        return db.Counts{}, wrap(err)
    }
    return summarizeCounts(rows), nil
}
// needSizeRemote returns the need counts for a remote device: global
// entries the device lacks, plus deleted global entries the device
// still has undeleted.
func (s *DB) needSizeRemote(folder string, device protocol.DeviceID) (db.Counts, error) {
    var res []countsRow
    // See neededGlobalFilesRemote for commentary as that is the same query without summing
    if err := s.stmt(`
        SELECT g.type, count(*) as count, sum(g.size) as size, g.local_flags, g.deleted FROM files g
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND NOT g.deleted AND NOT g.invalid AND NOT EXISTS (
            SELECT 1 FROM FILES f
            INNER JOIN devices d ON d.idx = f.device_idx
            WHERE f.name = g.name AND f.version = g.version AND f.folder_idx = g.folder_idx AND d.device_id = ?
        )
        GROUP BY g.type, g.local_flags, g.deleted
        UNION ALL
        SELECT g.type, count(*) as count, sum(g.size) as size, g.local_flags, g.deleted FROM files g
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND g.deleted AND NOT g.invalid AND EXISTS (
            SELECT 1 FROM FILES f
            INNER JOIN devices d ON d.idx = f.device_idx
            WHERE f.name = g.name AND f.folder_idx = g.folder_idx AND d.device_id = ? AND NOT f.deleted
        )
        GROUP BY g.type, g.local_flags, g.deleted
    `).Select(&res, folder, device.String(),
        folder, device.String()); err != nil {
        return db.Counts{}, wrap(err)
    }
    return summarizeCounts(res), nil
}
// summarizeCounts folds the per-type rows into a single db.Counts.
// Deleted entries count only towards Deleted; other entries accumulate
// per-type counts plus their size in Bytes.
func summarizeCounts(rows []countsRow) db.Counts {
    total := db.Counts{
        DeviceID: protocol.LocalDeviceID,
    }
    for _, row := range rows {
        if row.Deleted {
            total.Deleted += row.Count
            continue
        }
        switch row.Type {
        case protocol.FileInfoTypeFile:
            total.Files += row.Count
            total.Bytes += row.Size
        case protocol.FileInfoTypeDirectory:
            total.Directories += row.Count
            total.Bytes += row.Size
        case protocol.FileInfoTypeSymlink:
            total.Symlinks += row.Count
            total.Bytes += row.Size
        }
    }
    return total
}

View File

@@ -0,0 +1,189 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"database/sql"
"errors"
"fmt"
"iter"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/itererr"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
)
// GetGlobalFile returns the global version of the named file, if one
// exists. The file name is normalized before lookup.
func (s *DB) GetGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) {
    file = osutil.NormalizedFilename(file)

    var row indirectFI
    err := s.stmt(`
        SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
        INNER JOIN files f on fi.sequence = f.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE o.folder_id = ? AND f.name = ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
    `).Get(&row, folder, file)
    switch {
    case errors.Is(err, sql.ErrNoRows):
        return protocol.FileInfo{}, false, nil
    case err != nil:
        return protocol.FileInfo{}, false, wrap(err)
    }

    fi, err := row.FileInfo()
    if err != nil {
        return protocol.FileInfo{}, false, wrap(err)
    }
    return fi, true, nil
}
// GetGlobalAvailability returns the remote devices that have the
// global version of the named file, sorted by device ID.
func (s *DB) GetGlobalAvailability(folder, file string) ([]protocol.DeviceID, error) {
    file = osutil.NormalizedFilename(file)

    var idStrs []string
    err := s.stmt(`
        SELECT d.device_id FROM files f
        INNER JOIN devices d ON d.idx = f.device_idx
        INNER JOIN folders o ON o.idx = f.folder_idx
        INNER JOIN files g ON f.folder_idx = g.folder_idx AND g.version = f.version AND g.name = f.name
        WHERE o.folder_id = ? AND g.name = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND f.device_idx != {{.LocalDeviceIdx}}
        ORDER BY d.device_id
    `).Select(&idStrs, folder, file)
    switch {
    case errors.Is(err, sql.ErrNoRows):
        return nil, nil
    case err != nil:
        return nil, wrap(err)
    }

    devs := make([]protocol.DeviceID, 0, len(idStrs))
    for _, idStr := range idStrs {
        dev, err := protocol.DeviceIDFromString(idStr)
        if err != nil {
            return nil, wrap(err)
        }
        devs = append(devs, dev)
    }
    return devs, nil
}
// AllGlobalFiles iterates over the metadata of all global entries in
// the folder, ordered by name.
func (s *DB) AllGlobalFiles(folder string) (iter.Seq[db.FileMetadata], func() error) {
    rows, errFn := iterStructs[db.FileMetadata](s.stmt(`
        SELECT f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE o.folder_id = ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
        ORDER BY f.name
    `).Queryx(folder))
    return itererr.Map(rows, errFn, func(meta db.FileMetadata) (db.FileMetadata, error) {
        // Names are stored normalized; hand back the native form.
        meta.Name = osutil.NativeFilename(meta.Name)
        return meta, nil
    })
}
// AllGlobalFilesPrefix iterates over the metadata of global entries
// whose name starts with prefix, ordered by name. An empty prefix is
// equivalent to AllGlobalFiles.
func (s *DB) AllGlobalFilesPrefix(folder string, prefix string) (iter.Seq[db.FileMetadata], func() error) {
    if prefix == "" {
        return s.AllGlobalFiles(folder)
    }

    // Express the prefix match as a half-open name range.
    prefix = osutil.NormalizedFilename(prefix)
    end := prefixEnd(prefix)

    rows, errFn := iterStructs[db.FileMetadata](s.stmt(`
        SELECT f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE o.folder_id = ? AND f.name >= ? AND f.name < ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
        ORDER BY f.name
    `).Queryx(folder, prefix, end))
    return itererr.Map(rows, errFn, func(meta db.FileMetadata) (db.FileMetadata, error) {
        meta.Name = osutil.NativeFilename(meta.Name)
        return meta, nil
    })
}
// AllNeededGlobalFiles iterates over the global files needed by the
// given device, in the requested pull order, optionally limited and
// offset for pagination. Limit/offset are ignored when zero.
func (s *DB) AllNeededGlobalFiles(folder string, device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error) {
    // Build the ORDER BY / LIMIT / OFFSET tail of the query.
    var selectOpts string
    switch order {
    case config.PullOrderRandom:
        selectOpts = "ORDER BY RANDOM()"
    case config.PullOrderAlphabetic:
        selectOpts = "ORDER BY g.name ASC"
    case config.PullOrderSmallestFirst:
        selectOpts = "ORDER BY g.size ASC"
    case config.PullOrderLargestFirst:
        selectOpts = "ORDER BY g.size DESC"
    case config.PullOrderOldestFirst:
        selectOpts = "ORDER BY g.modified ASC"
    case config.PullOrderNewestFirst:
        selectOpts = "ORDER BY g.modified DESC"
    }
    if limit > 0 {
        selectOpts += fmt.Sprintf(" LIMIT %d", limit)
    }
    if offset > 0 {
        selectOpts += fmt.Sprintf(" OFFSET %d", offset)
    }

    if device != protocol.LocalDeviceID {
        return s.neededGlobalFilesRemote(folder, device, selectOpts)
    }
    return s.neededGlobalFilesLocal(folder, selectOpts)
}
// neededGlobalFilesLocal returns the files the local device needs:
// all the non-ignored entries with the need bit set. selectOpts is the
// ORDER BY/LIMIT/OFFSET tail built by AllNeededGlobalFiles.
func (s *DB) neededGlobalFilesLocal(folder, selectOpts string) (iter.Seq[protocol.FileInfo], func() error) {
    // Select all the non-ignored files with the need bit set.
    it, errFn := iterStructs[indirectFI](s.stmt(`
        SELECT fi.fiprotobuf, bl.blprotobuf, g.name, g.size, g.modified FROM fileinfos fi
        INNER JOIN files g on fi.sequence = g.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = g.blocklist_hash
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalIgnored}} = 0 AND g.local_flags & {{.FlagLocalNeeded}} != 0
    ` + selectOpts).Queryx(folder))
    return itererr.Map(it, errFn, indirectFI.FileInfo)
}
// neededGlobalFilesRemote returns the files the given remote device
// needs. selectOpts is the ORDER BY/LIMIT/OFFSET tail built by
// AllNeededGlobalFiles. (needSizeRemote runs the same two-part query,
// aggregated instead of row-by-row.)
func (s *DB) neededGlobalFilesRemote(folder string, device protocol.DeviceID, selectOpts string) (iter.Seq[protocol.FileInfo], func() error) {
    // Select:
    //
    // - all the valid, non-deleted global files that don't have a corresponding
    //   remote file with the same version.
    //
    // - all the valid, deleted global files that have a corresponding non-deleted
    //   remote file (of any version)
    it, errFn := iterStructs[indirectFI](s.stmt(`
        SELECT fi.fiprotobuf, bl.blprotobuf, g.name, g.size, g.modified FROM fileinfos fi
        INNER JOIN files g on fi.sequence = g.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = g.blocklist_hash
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND NOT g.deleted AND NOT g.invalid AND NOT EXISTS (
            SELECT 1 FROM FILES f
            INNER JOIN devices d ON d.idx = f.device_idx
            WHERE f.name = g.name AND f.version = g.version AND f.folder_idx = g.folder_idx AND d.device_id = ?
        )
        UNION ALL
        SELECT fi.fiprotobuf, bl.blprotobuf, g.name, g.size, g.modified FROM fileinfos fi
        INNER JOIN files g on fi.sequence = g.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = g.blocklist_hash
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND g.deleted AND NOT g.invalid AND EXISTS (
            SELECT 1 FROM FILES f
            INNER JOIN devices d ON d.idx = f.device_idx
            WHERE f.name = g.name AND f.folder_idx = g.folder_idx AND d.device_id = ? AND NOT f.deleted
        )
    `+selectOpts).Queryx(
        folder, device.String(),
        folder, device.String(),
    ))
    return itererr.Map(it, errFn, indirectFI.FileInfo)
}

View File

@@ -0,0 +1,493 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"slices"
"testing"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/protocol"
)
// TestNeed verifies the basic local and remote need calculation: files
// where the other side has a newer version appear in the need list.
func TestNeed(t *testing.T) {
    // Was t.Helper(), which is meaningless (and misleading) at the top
    // of a test function; every sibling test calls t.Parallel() here.
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // Some local files
    var v protocol.Vector
    baseV := v.Update(1)
    newerV := baseV.Update(42)
    files := []protocol.FileInfo{
        genFile("test1", 1, 0), // remote need
        genFile("test2", 2, 0), // local need
        genFile("test3", 3, 0), // global
    }
    files[0].Version = baseV
    files[1].Version = baseV
    files[2].Version = newerV
    err = db.Update(folderID, protocol.LocalDeviceID, files)
    if err != nil {
        t.Fatal(err)
    }

    // Some remote files
    remote := []protocol.FileInfo{
        genFile("test2", 2, 100), // global
        genFile("test3", 3, 101), // remote need
        genFile("test4", 4, 102), // local need
    }
    remote[0].Version = newerV
    remote[1].Version = baseV
    remote[2].Version = newerV
    err = db.Update(folderID, protocol.DeviceID{42}, remote)
    if err != nil {
        t.Fatal(err)
    }

    // A couple are needed locally
    localNeed := fiNames(mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderAlphabetic, 0, 0)))
    if !slices.Equal(localNeed, []string{"test2", "test4"}) {
        t.Log(localNeed)
        t.Fatal("bad local need")
    }

    // Another couple are needed remotely
    remoteNeed := fiNames(mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.DeviceID{42}, config.PullOrderAlphabetic, 0, 0)))
    if !slices.Equal(remoteNeed, []string{"test1", "test3"}) {
        t.Log(remoteNeed)
        t.Fatal("bad remote need")
    }
}
// TestDropRecalcsGlobal verifies, for each of the drop operations,
// that removing a device's files causes the global version to be
// recalculated.
func TestDropRecalcsGlobal(t *testing.T) {
    // When we drop a device we may get a new global
    t.Parallel()
    t.Run("DropAllFiles", func(t *testing.T) {
        t.Parallel()
        testDropWithDropper(t, func(t *testing.T, db *DB) {
            t.Helper()
            if err := db.DropAllFiles(folderID, protocol.DeviceID{42}); err != nil {
                t.Fatal(err)
            }
        })
    })
    t.Run("DropDevice", func(t *testing.T) {
        t.Parallel()
        testDropWithDropper(t, func(t *testing.T, db *DB) {
            t.Helper()
            if err := db.DropDevice(protocol.DeviceID{42}); err != nil {
                t.Fatal(err)
            }
        })
    })
    t.Run("DropFilesNamed", func(t *testing.T) {
        t.Parallel()
        testDropWithDropper(t, func(t *testing.T, db *DB) {
            t.Helper()
            // "test42" does not exist in the fixture; presumably this
            // also exercises that unknown names are ignored — TODO
            // confirm.
            if err := db.DropFilesNamed(folderID, protocol.DeviceID{42}, []string{"test1", "test42"}); err != nil {
                t.Fatal(err)
            }
        })
    })
}
// testDropWithDropper sets up a database where the remote device {42}
// holds the global version of "test1", invokes dropper to remove that
// device's files, and verifies the local "test1" becomes the new
// global. Sizes rely on genFile(name, n, seq) producing a file of
// n*128 KiB, per the assertions below.
func testDropWithDropper(t *testing.T, dropper func(t *testing.T, db *DB)) {
    t.Helper()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // Some local files
    err = db.Update(folderID, protocol.LocalDeviceID, []protocol.FileInfo{
        genFile("test1", 1, 0),
        genFile("test2", 2, 0),
    })
    if err != nil {
        t.Fatal(err)
    }

    // Some remote files
    remote := []protocol.FileInfo{
        genFile("test1", 3, 0),
    }
    remote[0].Version = remote[0].Version.Update(42)
    err = db.Update(folderID, protocol.DeviceID{42}, remote)
    if err != nil {
        t.Fatal(err)
    }

    // Remote test1 wins as the global, verify.
    count, err := db.CountGlobal(folderID)
    if err != nil {
        t.Fatal(err)
    }
    if count.Bytes != (2+3)*128<<10 {
        t.Log(count)
        t.Fatal("bad global size to begin with")
    }
    if g, ok, err := db.GetGlobalFile(folderID, "test1"); err != nil || !ok {
        t.Fatal("missing global to begin with")
    } else if g.Size != 3*128<<10 {
        t.Fatal("remote test1 should be the global")
    }

    // Now remove that remote device
    dropper(t, db)

    // Our test1 should now be the global
    count, err = db.CountGlobal(folderID)
    if err != nil {
        t.Fatal(err)
    }
    if count.Bytes != (1+2)*128<<10 {
        t.Log(count)
        t.Fatal("bad global size after drop")
    }
    if g, ok, err := db.GetGlobalFile(folderID, "test1"); err != nil || !ok {
        t.Fatal("missing global after drop")
    } else if g.Size != 1*128<<10 {
        t.Fatal("local test1 should be the global")
    }
}
// TestNeedDeleted verifies that a remotely deleted file we still have
// locally is counted as a needed deletion (Deleted, not Bytes).
func TestNeedDeleted(t *testing.T) {
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // Some local files
    err = db.Update(folderID, protocol.LocalDeviceID, []protocol.FileInfo{
        genFile("test1", 1, 0),
        genFile("test2", 2, 0),
    })
    if err != nil {
        t.Fatal(err)
    }

    // A remote deleted file
    remote := []protocol.FileInfo{
        genFile("test1", 1, 101),
    }
    remote[0].SetDeleted(42)
    err = db.Update(folderID, protocol.DeviceID{42}, remote)
    if err != nil {
        t.Fatal(err)
    }

    // We need the one deleted file; deletions carry no bytes.
    s, err := db.CountNeed(folderID, protocol.LocalDeviceID)
    if err != nil {
        t.Fatal(err)
    }
    if s.Bytes != 0 || s.Deleted != 1 {
        t.Log(s)
        t.Error("bad need")
    }
}
// TestDontNeedIgnored verifies that a remote file we have ignored
// locally is excluded from both the need counts and the need list.
func TestDontNeedIgnored(t *testing.T) {
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // A remote file
    files := []protocol.FileInfo{
        genFile("test1", 1, 103),
    }
    err = db.Update(folderID, protocol.DeviceID{42}, files)
    if err != nil {
        t.Fatal(err)
    }

    // Which we've ignored locally
    files[0].SetIgnored()
    err = db.Update(folderID, protocol.LocalDeviceID, files)
    if err != nil {
        t.Fatal(err)
    }

    // We don't need it
    s, err := db.CountNeed(folderID, protocol.LocalDeviceID)
    if err != nil {
        t.Fatal(err)
    }
    if s.Bytes != 0 || s.Files != 0 {
        t.Log(s)
        t.Error("bad need")
    }

    // It shouldn't show up in the need list
    names := mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderAlphabetic, 0, 0))
    if len(names) != 0 {
        t.Log(names)
        t.Error("need no files")
    }
}
// TestRemoveDontNeedLocalIgnored verifies that a locally ignored file
// is not reported as needed by a remote device that lacks it.
func TestRemoveDontNeedLocalIgnored(t *testing.T) {
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // A local ignored file
    file := genFile("test1", 1, 103)
    file.SetIgnored()
    files := []protocol.FileInfo{file}
    err = db.Update(folderID, protocol.LocalDeviceID, files)
    if err != nil {
        t.Fatal(err)
    }

    // Which the remote doesn't have (no update)

    // They don't need it
    s, err := db.CountNeed(folderID, protocol.DeviceID{42})
    if err != nil {
        t.Fatal(err)
    }
    if s.Bytes != 0 || s.Files != 0 {
        t.Log(s)
        t.Error("bad need")
    }

    // It shouldn't show up in their need list
    names := mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.DeviceID{42}, config.PullOrderAlphabetic, 0, 0))
    if len(names) != 0 {
        t.Log(names)
        t.Error("need no files")
    }
}
// TestLocalDontNeedDeletedMissing verifies that a remotely deleted
// file we never had locally is not reported as needed.
func TestLocalDontNeedDeletedMissing(t *testing.T) {
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // A remote deleted file
    file := genFile("test1", 1, 103)
    file.SetDeleted(42)
    files := []protocol.FileInfo{file}
    err = db.Update(folderID, protocol.DeviceID{42}, files)
    if err != nil {
        t.Fatal(err)
    }

    // Which we don't have (no local update)

    // We don't need it
    s, err := db.CountNeed(folderID, protocol.LocalDeviceID)
    if err != nil {
        t.Fatal(err)
    }
    if s.Bytes != 0 || s.Files != 0 || s.Deleted != 0 {
        t.Log(s)
        t.Error("bad need")
    }

    // It shouldn't show up in the need list
    names := mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderAlphabetic, 0, 0))
    if len(names) != 0 {
        t.Log(names)
        t.Error("need no files")
    }
}
// TestRemoteDontNeedDeletedMissing is the mirror image of
// TestLocalDontNeedDeletedMissing: a locally deleted file the remote
// never had is not reported as needed by the remote.
func TestRemoteDontNeedDeletedMissing(t *testing.T) {
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // A local deleted file
    file := genFile("test1", 1, 103)
    file.SetDeleted(42)
    files := []protocol.FileInfo{file}
    err = db.Update(folderID, protocol.LocalDeviceID, files)
    if err != nil {
        t.Fatal(err)
    }

    // Which the remote doesn't have (no local update)

    // They don't need it
    s, err := db.CountNeed(folderID, protocol.DeviceID{42})
    if err != nil {
        t.Fatal(err)
    }
    if s.Bytes != 0 || s.Files != 0 || s.Deleted != 0 {
        t.Log(s)
        t.Error("bad need")
    }

    // It shouldn't show up in their need list
    names := mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.DeviceID{42}, config.PullOrderAlphabetic, 0, 0))
    if len(names) != 0 {
        t.Log(names)
        t.Error("need no files")
    }
}
// TestNeedRemoteSymlinkAndDir verifies that remote symlinks and
// directories are counted and listed as needed locally.
func TestNeedRemoteSymlinkAndDir(t *testing.T) {
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // Two remote "specials", a symlink and a directory. Vector.Update
    // returns the updated vector; the previous bare `v.Update(1)` call
    // discarded it, leaving v empty (cf. `baseV := v.Update(1)` in
    // TestNeed).
    var v protocol.Vector
    v = v.Update(1)
    files := []protocol.FileInfo{
        {Name: "sym", Type: protocol.FileInfoTypeSymlink, Sequence: 100, Version: v, Blocks: genBlocks("symlink", 0, 1)},
        {Name: "dir", Type: protocol.FileInfoTypeDirectory, Sequence: 101, Version: v},
    }
    err = db.Update(folderID, protocol.DeviceID{42}, files)
    if err != nil {
        t.Fatal(err)
    }

    // We need them
    s, err := db.CountNeed(folderID, protocol.LocalDeviceID)
    if err != nil {
        t.Fatal(err)
    }
    if s.Directories != 1 || s.Symlinks != 1 {
        t.Log(s)
        t.Error("bad need")
    }

    // They should be in the need list
    names := mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderAlphabetic, 0, 0))
    if len(names) != 2 {
        t.Log(names)
        t.Error("bad need")
    }
}
// TestNeedPagination verifies that the limit and offset arguments to
// AllNeededGlobalFiles paginate the (alphabetically ordered) need list
// correctly.
func TestNeedPagination(t *testing.T) {
    t.Parallel()
    db, err := OpenTemp()
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() {
        if err := db.Close(); err != nil {
            t.Fatal(err)
        }
    })

    // Several remote files. (A previous `var v protocol.Vector;
    // v.Update(1)` here was dead code: Update's return value was
    // discarded and v was never used.)
    files := []protocol.FileInfo{
        genFile("test0", 1, 100),
        genFile("test1", 1, 101),
        genFile("test2", 1, 102),
        genFile("test3", 1, 103),
        genFile("test4", 1, 104),
        genFile("test5", 1, 105),
        genFile("test6", 1, 106),
        genFile("test7", 1, 107),
        genFile("test8", 1, 108),
        genFile("test9", 1, 109),
    }
    err = db.Update(folderID, protocol.DeviceID{42}, files)
    if err != nil {
        t.Fatal(err)
    }

    // We should get the first two
    names := fiNames(mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderAlphabetic, 2, 0)))
    if !slices.Equal(names, []string{"test0", "test1"}) {
        t.Log(names)
        t.Error("bad need")
    }

    // We should get the next three
    names = fiNames(mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderAlphabetic, 3, 2)))
    if !slices.Equal(names, []string{"test2", "test3", "test4"}) {
        t.Log(names)
        t.Error("bad need")
    }

    // We should get the last five
    names = fiNames(mustCollect[protocol.FileInfo](t)(db.AllNeededGlobalFiles(folderID, protocol.LocalDeviceID, config.PullOrderAlphabetic, 5, 5)))
    if !slices.Equal(names, []string{"test5", "test6", "test7", "test8", "test9"}) {
        t.Log(names)
        t.Error("bad need")
    }
}

View File

@@ -0,0 +1,163 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"database/sql"
"encoding/hex"
"errors"
"fmt"
"github.com/syncthing/syncthing/internal/itererr"
"github.com/syncthing/syncthing/lib/protocol"
)
// GetIndexID returns the index ID for the folder/device pair. For the
// local device an ID is created on demand if none exists yet; for remote
// devices a missing ID is reported as zero without creating one.
func (s *DB) GetIndexID(folder string, device protocol.DeviceID) (protocol.IndexID, error) {
	// Try a fast read-only query to begin with. If it does not find the ID
	// we'll do the full thing under a lock.
	var indexID string
	if err := s.stmt(`
		SELECT i.index_id FROM indexids i
		INNER JOIN folders o ON o.idx = i.folder_idx
		INNER JOIN devices d ON d.idx = i.device_idx
		WHERE o.folder_id = ? AND d.device_id = ?
	`).Get(&indexID, folder, device.String()); err == nil && indexID != "" {
		idx, err := indexIDFromHex(indexID)
		return idx, wrap(err, "select")
	}
	if device != protocol.LocalDeviceID {
		// For non-local devices we do not create the index ID, so return
		// zero anyway if we don't have one.
		return 0, nil
	}
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	// We are now operating only for the local device ID
	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return 0, wrap(err)
	}
	// Re-check under the lock: another goroutine may have created the ID
	// between the read-only probe above and us taking the lock. A missing
	// row (sql.ErrNoRows) is fine and leaves indexID empty.
	if err := s.stmt(`
		SELECT index_id FROM indexids WHERE folder_idx = ? AND device_idx = {{.LocalDeviceIdx}}
	`).Get(&indexID, folderIdx); err != nil && !errors.Is(err, sql.ErrNoRows) {
		return 0, wrap(err, "select local")
	}
	if indexID == "" {
		// Generate a new index ID. Some trickiness in the query as we need
		// to find the max sequence of local files if there already exist
		// any.
		id := protocol.NewIndexID()
		if _, err := s.stmt(`
			INSERT INTO indexids (folder_idx, device_idx, index_id, sequence)
			SELECT ?, {{.LocalDeviceIdx}}, ?, COALESCE(MAX(sequence), 0) FROM files
			WHERE folder_idx = ? AND device_idx = {{.LocalDeviceIdx}}
			ON CONFLICT DO UPDATE SET index_id = ?
		`).Exec(folderIdx, indexIDToHex(id), folderIdx, indexIDToHex(id)); err != nil {
			return 0, wrap(err, "insert")
		}
		return id, nil
	}
	return indexIDFromHex(indexID)
}
// SetIndexID stores the index ID for the folder/device pair, resetting the
// associated sequence to zero.
func (s *DB) SetIndexID(folder string, device protocol.DeviceID, id protocol.IndexID) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	fIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err, "folder idx")
	}
	dIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err, "device idx")
	}
	// Replace any existing row for the pair.
	_, err = s.stmt(`
		INSERT OR REPLACE INTO indexids (folder_idx, device_idx, index_id, sequence) values (?, ?, ?, 0)
	`).Exec(fIdx, dIdx, indexIDToHex(id))
	if err != nil {
		return wrap(err, "insert")
	}
	return nil
}
// DropAllIndexIDs clears the entire indexids table; IDs are regenerated on
// demand by GetIndexID or set explicitly by SetIndexID.
func (s *DB) DropAllIndexIDs() error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	if _, err := s.stmt(`DELETE FROM indexids`).Exec(); err != nil {
		return wrap(err)
	}
	return nil
}
// GetDeviceSequence returns the stored sequence number for the
// folder/device pair, or zero when no indexid row exists (an unknown
// device is not an error).
func (s *DB) GetDeviceSequence(folder string, device protocol.DeviceID) (int64, error) {
	var seq sql.NullInt64
	err := s.stmt(`
		SELECT sequence FROM indexids i
		INNER JOIN folders o ON o.idx = i.folder_idx
		INNER JOIN devices d ON d.idx = i.device_idx
		WHERE o.folder_id = ? AND d.device_id = ?
	`).Get(&seq, folder, device.String())
	switch {
	case errors.Is(err, sql.ErrNoRows):
		// No row at all: report sequence zero.
		return 0, nil
	case err != nil:
		return 0, wrap(err)
	case !seq.Valid:
		// Row exists but the sequence is NULL; treat as zero.
		return 0, nil
	}
	return seq.Int64, nil
}
// RemoteSequences returns the stored sequence number for every non-local
// device that has an indexid entry in the given folder.
func (s *DB) RemoteSequences(folder string) (map[protocol.DeviceID]int64, error) {
	type devSeq struct {
		Device string
		Seq    int64
	}
	rows, errFn := iterStructs[devSeq](s.stmt(`
		SELECT d.device_id AS device, i.sequence AS seq FROM indexids i
		INNER JOIN folders o ON o.idx = i.folder_idx
		INNER JOIN devices d ON d.idx = i.device_idx
		WHERE o.folder_id = ? AND i.device_idx != {{.LocalDeviceIdx}}
	`).Queryx(folder))

	seqs := make(map[protocol.DeviceID]int64)
	for r, err := range itererr.Zip(rows, errFn) {
		if err != nil {
			return nil, wrap(err)
		}
		dev, err := protocol.DeviceIDFromString(r.Device)
		if err != nil {
			return nil, wrap(err, "device ID")
		}
		seqs[dev] = r.Seq
	}
	return seqs, nil
}
// indexIDFromHex decodes a hex string (as stored in the indexids table)
// into a protocol.IndexID.
func indexIDFromHex(s string) (protocol.IndexID, error) {
	var id protocol.IndexID
	bs, err := hex.DecodeString(s)
	if err != nil {
		return 0, fmt.Errorf("indexIDFromHex: %q: %w", s, err)
	}
	if err := id.Unmarshal(bs); err != nil {
		return 0, fmt.Errorf("indexIDFromHex: %q: %w", s, err)
	}
	return id, nil
}
// indexIDToHex encodes an index ID to the hex string form stored in the
// indexids table. The Marshal error is deliberately discarded; presumably
// marshalling an IndexID cannot fail — confirm against its implementation.
func indexIDToHex(i protocol.IndexID) string {
	bs, _ := i.Marshal()
	return hex.EncodeToString(bs)
}

View File

@@ -0,0 +1,81 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"testing"
"github.com/syncthing/syncthing/lib/protocol"
)
// TestIndexIDs exercises index ID generation for the local device and
// explicit get/set for remote devices.
func TestIndexIDs(t *testing.T) {
	t.Parallel()

	db, err := OpenTemp()
	if err != nil {
		// Fix: previously t.Fatal() discarded the error value.
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := db.Close(); err != nil {
			t.Fatal(err)
		}
	})

	t.Run("LocalDeviceID", func(t *testing.T) {
		t.Parallel()

		// The local device gets an ID generated on first access...
		localID, err := db.GetIndexID("foo", protocol.LocalDeviceID)
		if err != nil {
			t.Fatal(err)
		}
		if localID == 0 {
			t.Fatal("should have been generated")
		}
		// ...which is stable across accesses...
		again, err := db.GetIndexID("foo", protocol.LocalDeviceID)
		if err != nil {
			t.Fatal(err)
		}
		if again != localID {
			t.Fatal("should get same again")
		}
		// ...and distinct per folder.
		other, err := db.GetIndexID("bar", protocol.LocalDeviceID)
		if err != nil {
			t.Fatal(err)
		}
		if other == localID {
			t.Fatal("should not get same for other folder")
		}
	})

	t.Run("OtherDeviceID", func(t *testing.T) {
		t.Parallel()

		// Remote devices get no auto-generated ID; zero means unknown.
		localID, err := db.GetIndexID("foo", protocol.DeviceID{42})
		if err != nil {
			t.Fatal(err)
		}
		if localID != 0 {
			t.Fatal("should have been zero")
		}
		// An explicitly set ID is returned as-is.
		newID := protocol.NewIndexID()
		if err := db.SetIndexID("foo", protocol.DeviceID{42}, newID); err != nil {
			t.Fatal(err)
		}
		again, err := db.GetIndexID("foo", protocol.DeviceID{42})
		if err != nil {
			t.Fatal(err)
		}
		if again != newID {
			t.Log(again, newID)
			t.Fatal("should get the ID we set")
		}
	})
}

View File

@@ -0,0 +1,78 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"iter"
"github.com/jmoiron/sqlx"
"github.com/syncthing/syncthing/internal/db"
)
// GetKV returns the value stored for the given key. A missing key
// surfaces as a wrapped sql.ErrNoRows.
func (s *DB) GetKV(key string) ([]byte, error) {
	var val []byte
	err := s.stmt(`
		SELECT value FROM kv
		WHERE key = ?
	`).Get(&val, key)
	if err != nil {
		return nil, wrap(err)
	}
	return val, nil
}
// PutKV stores the value for the given key, replacing any existing value.
func (s *DB) PutKV(key string, val []byte) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	if _, err := s.stmt(`
		INSERT OR REPLACE INTO kv (key, value)
		VALUES (?, ?)
	`).Exec(key, val); err != nil {
		return wrap(err)
	}
	return nil
}
// DeleteKV removes the given key. Deleting a nonexistent key is not an
// error.
func (s *DB) DeleteKV(key string) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	if _, err := s.stmt(`
		DELETE FROM kv WHERE key = ?
	`).Exec(key); err != nil {
		return wrap(err)
	}
	return nil
}
// PrefixKV iterates all key-value pairs whose key starts with prefix (all
// pairs when prefix is empty). The returned error function must be called
// after consuming the sequence; it reports both setup and iteration
// errors.
func (s *DB) PrefixKV(prefix string) (iter.Seq[db.KeyValue], func() error) {
	var rows *sqlx.Rows
	var err error
	if prefix == "" {
		rows, err = s.stmt(`SELECT key, value FROM kv`).Queryx()
	} else {
		// prefixEnd yields the smallest string greater than every string
		// with the prefix, turning the prefix match into a range scan.
		end := prefixEnd(prefix)
		rows, err = s.stmt(`
			SELECT key, value FROM kv
			WHERE key >= ? AND key < ?
		`).Queryx(prefix, end)
	}
	if err != nil {
		// Query failed: empty sequence, error fn reports the failure.
		return func(_ func(db.KeyValue) bool) {}, func() error { return err }
	}
	// Note that err is captured by both closures below: scan/iteration
	// failures set it inside the iterator and the final errFn call
	// observes it.
	return func(yield func(db.KeyValue) bool) {
		defer rows.Close()
		for rows.Next() {
			var key string
			var val []byte
			if err = rows.Scan(&key, &val); err != nil {
				return
			}
			if !yield(db.KeyValue{Key: key, Value: val}) {
				return
			}
		}
		err = rows.Err()
	}, func() error {
		return err
	}
}

View File

@@ -0,0 +1,126 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"database/sql"
"errors"
"fmt"
"iter"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/itererr"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
)
// GetDeviceFile looks up a single file as announced by the given device.
// The boolean reports whether the file was found; a missing file is not an
// error.
func (s *DB) GetDeviceFile(folder string, device protocol.DeviceID, file string) (protocol.FileInfo, bool, error) {
	file = osutil.NormalizedFilename(file)

	var ind indirectFI
	err := s.stmt(`
		SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
		INNER JOIN files f on fi.sequence = f.sequence
		LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
		INNER JOIN devices d ON f.device_idx = d.idx
		INNER JOIN folders o ON f.folder_idx = o.idx
		WHERE o.folder_id = ? AND d.device_id = ? AND f.name = ?
	`).Get(&ind, folder, device.String(), file)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return protocol.FileInfo{}, false, nil
	case err != nil:
		return protocol.FileInfo{}, false, wrap(err)
	}
	fi, err := ind.FileInfo()
	if err != nil {
		return protocol.FileInfo{}, false, wrap(err, "indirect")
	}
	return fi, true, nil
}
// AllLocalFiles streams every file announced by the given device in the
// folder, joining in the blocklist so the FileInfos come back complete.
func (s *DB) AllLocalFiles(folder string, device protocol.DeviceID) (iter.Seq[protocol.FileInfo], func() error) {
	rows, errFn := iterStructs[indirectFI](s.stmt(`
		SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
		INNER JOIN files f on fi.sequence = f.sequence
		LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
		INNER JOIN folders o ON o.idx = f.folder_idx
		INNER JOIN devices d ON d.idx = f.device_idx
		WHERE o.folder_id = ? AND d.device_id = ?
	`).Queryx(folder, device.String()))
	return itererr.Map(rows, errFn, indirectFI.FileInfo)
}
// AllLocalFilesBySequence streams files announced by the given device with
// sequence >= startSeq, in sequence order. A non-positive limit means no
// limit.
func (s *DB) AllLocalFilesBySequence(folder string, device protocol.DeviceID, startSeq int64, limit int) (iter.Seq[protocol.FileInfo], func() error) {
	limitClause := ""
	if limit > 0 {
		limitClause = fmt.Sprintf(" LIMIT %d", limit)
	}
	rows, errFn := iterStructs[indirectFI](s.stmt(`
		SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
		INNER JOIN files f on fi.sequence = f.sequence
		LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
		INNER JOIN folders o ON o.idx = f.folder_idx
		INNER JOIN devices d ON d.idx = f.device_idx
		WHERE o.folder_id = ? AND d.device_id = ? AND f.sequence >= ?
		ORDER BY f.sequence`+limitClause).Queryx(
		folder, device.String(), startSeq))
	return itererr.Map(rows, errFn, indirectFI.FileInfo)
}
// AllLocalFilesWithPrefix streams all files announced by the given device
// whose name starts with prefix. An empty prefix returns all files.
func (s *DB) AllLocalFilesWithPrefix(folder string, device protocol.DeviceID, prefix string) (iter.Seq[protocol.FileInfo], func() error) {
	if prefix == "" {
		return s.AllLocalFiles(folder, device)
	}
	prefix = osutil.NormalizedFilename(prefix)
	// prefixEnd yields the smallest string greater than every string with
	// the prefix, turning the prefix match into an index-friendly range.
	end := prefixEnd(prefix)
	// Use the cached prepared statement via s.stmt, consistent with the
	// sibling queries; the previous s.sql.Queryx re-prepared the query on
	// every call.
	it, errFn := iterStructs[indirectFI](s.stmt(`
		SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
		INNER JOIN files f on fi.sequence = f.sequence
		LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
		INNER JOIN folders o ON o.idx = f.folder_idx
		INNER JOIN devices d ON d.idx = f.device_idx
		WHERE o.folder_id = ? AND d.device_id = ? AND f.name >= ? AND f.name < ?
	`).Queryx(folder, device.String(), prefix, end))
	return itererr.Map(it, errFn, indirectFI.FileInfo)
}
// AllLocalFilesWithBlocksHash streams metadata for all local-device files
// in the folder whose block list has the given hash.
func (s *DB) AllLocalFilesWithBlocksHash(folder string, h []byte) (iter.Seq[db.FileMetadata], func() error) {
	return iterStructs[db.FileMetadata](s.stmt(`
		SELECT f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
		INNER JOIN folders o ON o.idx = f.folder_idx
		WHERE o.folder_id = ? AND f.device_idx = {{.LocalDeviceIdx}} AND f.blocklist_hash = ?
	`).Queryx(folder, h))
}
// AllLocalFilesWithBlocksHashAnyFolder is like AllLocalFilesWithBlocksHash
// but searches every folder, yielding the folder ID alongside each file's
// metadata.
func (s *DB) AllLocalFilesWithBlocksHashAnyFolder(h []byte) (iter.Seq2[string, db.FileMetadata], func() error) {
	type row struct {
		FolderID string `db:"folder_id"`
		db.FileMetadata
	}
	it, errFn := iterStructs[row](s.stmt(`
		SELECT o.folder_id, f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
		INNER JOIN folders o ON o.idx = f.folder_idx
		WHERE f.device_idx = {{.LocalDeviceIdx}} AND f.blocklist_hash = ?
	`).Queryx(h))
	return itererr.Map2(it, errFn, func(r row) (string, db.FileMetadata, error) {
		return r.FolderID, r.FileMetadata, nil
	})
}
// AllLocalBlocksWithHash streams block map entries for every local-device
// file (in any folder) containing a block with the given hash.
func (s *DB) AllLocalBlocksWithHash(hash []byte) (iter.Seq[db.BlockMapEntry], func() error) {
	// We involve the files table in this select because deletion of blocks
	// & blocklists is deferred (garbage collected) while the files list is
	// not. This filters out blocks that are in fact deleted.
	return iterStructs[db.BlockMapEntry](s.stmt(`
		SELECT f.blocklist_hash as blocklisthash, b.idx as blockindex, b.offset, b.size FROM files f
		LEFT JOIN blocks b ON f.blocklist_hash = b.blocklist_hash
		WHERE f.device_idx = {{.LocalDeviceIdx}} AND b.hash = ?
	`).Queryx(hash))
}

View File

@@ -0,0 +1,202 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"testing"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/itererr"
"github.com/syncthing/syncthing/lib/protocol"
)
// TestBlocks verifies block hash lookups and the reverse mapping from
// block hash back to owning files.
func TestBlocks(t *testing.T) {
	t.Parallel()

	// Renamed from "db" to avoid shadowing the imported internal/db
	// package, consistent with the sibling tests.
	sdb, err := OpenTemp()
	if err != nil {
		// Fix: previously t.Fatal() discarded the error value.
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := sdb.Close(); err != nil {
			t.Fatal(err)
		}
	})

	// Two files sharing some of their block hashes.
	files := []protocol.FileInfo{
		{
			Name: "file1",
			Blocks: []protocol.BlockInfo{
				{Hash: []byte{1, 2, 3}, Offset: 0, Size: 42},
				{Hash: []byte{2, 3, 4}, Offset: 42, Size: 42},
				{Hash: []byte{3, 4, 5}, Offset: 84, Size: 42},
			},
		},
		{
			Name: "file2",
			Blocks: []protocol.BlockInfo{
				{Hash: []byte{2, 3, 4}, Offset: 0, Size: 42},
				{Hash: []byte{3, 4, 5}, Offset: 42, Size: 42},
				{Hash: []byte{4, 5, 6}, Offset: 84, Size: 42},
			},
		},
	}
	// NOTE(review): the folder is inserted as the literal "test" but
	// compared against folderID below; presumably folderID == "test".
	if err := sdb.Update("test", protocol.LocalDeviceID, files); err != nil {
		t.Fatal(err)
	}

	// Search for blocks
	vals, err := itererr.Collect(sdb.AllLocalBlocksWithHash([]byte{1, 2, 3}))
	if err != nil {
		t.Fatal(err)
	}
	if len(vals) != 1 {
		t.Log(vals)
		t.Fatal("expected one hit")
	} else if vals[0].BlockIndex != 0 || vals[0].Offset != 0 || vals[0].Size != 42 {
		t.Log(vals[0])
		t.Fatal("bad entry")
	}

	// Get FileInfos for those blocks
	found := 0
	it, errFn := sdb.AllLocalFilesWithBlocksHashAnyFolder(vals[0].BlocklistHash)
	for folder, fileInfo := range it {
		if folder != folderID {
			t.Fatal("should be same folder")
		}
		if fileInfo.Name != "file1" {
			t.Fatal("should be file1")
		}
		found++
	}
	if err := errFn(); err != nil {
		t.Fatal(err)
	}
	if found != 1 {
		t.Fatal("should find one file")
	}

	// Get the other blocks
	vals, err = itererr.Collect(sdb.AllLocalBlocksWithHash([]byte{3, 4, 5}))
	if err != nil {
		t.Fatal(err)
	}
	if len(vals) != 2 {
		t.Log(vals)
		t.Fatal("expected two hits")
	}
	// if vals[0].Index != 2 || vals[0].Offset != 84 || vals[0].Size != 42 {
	// 	t.Log(vals[0])
	// 	t.Fatal("bad entry 1")
	// }
	// if vals[1].Index != 1 || vals[1].Offset != 42 || vals[1].Size != 42 {
	// 	t.Log(vals[1])
	// 	t.Fatal("bad entry 2")
	// }
}
// TestBlocksDeleted verifies that replacing a file's blocks makes the old
// block hashes unfindable (deferred GC notwithstanding) and the new ones
// findable.
func TestBlocksDeleted(t *testing.T) {
	t.Parallel()

	sdb, err := OpenTemp()
	if err != nil {
		// Fix: previously t.Fatal() discarded the error value here and
		// below.
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := sdb.Close(); err != nil {
			t.Fatal(err)
		}
	})

	// Insert a file
	file := genFile("foo", 1, 0)
	if err := sdb.Update(folderID, protocol.LocalDeviceID, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}

	// We should find one entry for the block hash
	search := file.Blocks[0].Hash
	es := mustCollect[db.BlockMapEntry](t)(sdb.AllLocalBlocksWithHash(search))
	if len(es) != 1 {
		t.Fatal("expected one hit")
	}

	// Update the file with a new block hash
	file.Blocks = genBlocks("foo", 42, 1)
	if err := sdb.Update(folderID, protocol.LocalDeviceID, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}

	// Searching for the old hash should yield no hits
	if hits := mustCollect[db.BlockMapEntry](t)(sdb.AllLocalBlocksWithHash(search)); len(hits) != 0 {
		t.Log(hits)
		t.Error("expected no hits")
	}
	// Searching for the new hash should yield one hits
	if hits := mustCollect[db.BlockMapEntry](t)(sdb.AllLocalBlocksWithHash(file.Blocks[0].Hash)); len(hits) != 1 {
		t.Log(hits)
		t.Error("expected one hit")
	}
}
// TestRemoteSequence verifies that RemoteSequences reports the highest
// sequence per remote device and excludes the local device.
func TestRemoteSequence(t *testing.T) {
	t.Parallel()

	sdb, err := OpenTemp()
	if err != nil {
		// Fix: previously t.Fatal() discarded the error value here and
		// below.
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := sdb.Close(); err != nil {
			t.Fatal(err)
		}
	})

	// Insert a local file; must not show up in the remote sequences.
	file := genFile("foo", 1, 0)
	if err := sdb.Update(folderID, protocol.LocalDeviceID, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}

	// Insert several remote files
	file = genFile("foo1", 1, 42)
	if err := sdb.Update(folderID, protocol.DeviceID{42}, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}
	if err := sdb.Update(folderID, protocol.DeviceID{43}, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}
	file = genFile("foo2", 1, 43)
	if err := sdb.Update(folderID, protocol.DeviceID{43}, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}
	if err := sdb.Update(folderID, protocol.DeviceID{44}, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}
	file = genFile("foo3", 1, 44)
	if err := sdb.Update(folderID, protocol.DeviceID{44}, []protocol.FileInfo{file}); err != nil {
		t.Fatal(err)
	}

	// Verify remote sequences
	seqs, err := sdb.RemoteSequences(folderID)
	if err != nil {
		t.Fatal(err)
	}
	if len(seqs) != 3 || seqs[protocol.DeviceID{42}] != 42 ||
		seqs[protocol.DeviceID{43}] != 43 ||
		seqs[protocol.DeviceID{44}] != 44 {
		t.Log(seqs)
		t.Error("bad seqs")
	}
}

View File

@@ -0,0 +1,54 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"time"
)
// GetMtime returns the stored on-disk and virtual mtimes for the given
// file. Zero times are returned when no pair is stored (or on any query
// error).
func (s *DB) GetMtime(folder, name string) (ondisk, virtual time.Time) {
	var row struct {
		Ondisk  int64
		Virtual int64
	}
	err := s.stmt(`
		SELECT m.ondisk, m.virtual FROM mtimes m
		INNER JOIN folders o ON o.idx = m.folder_idx
		WHERE o.folder_id = ? AND m.name = ?
	`).Get(&row, folder, name)
	if err != nil {
		return time.Time{}, time.Time{}
	}
	// Times are stored as unix nanoseconds.
	return time.Unix(0, row.Ondisk), time.Unix(0, row.Virtual)
}
// PutMtime stores an on-disk/virtual mtime pair for the given file,
// replacing any existing pair. Times are stored as unix nanoseconds.
func (s *DB) PutMtime(folder, name string, ondisk, virtual time.Time) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	fIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	if _, err := s.stmt(`
		INSERT OR REPLACE INTO mtimes (folder_idx, name, ondisk, virtual)
		VALUES (?, ?, ?, ?)
	`).Exec(fIdx, name, ondisk.UnixNano(), virtual.UnixNano()); err != nil {
		return wrap(err)
	}
	return nil
}
// DeleteMtime removes the stored mtime pair for the given file, if any.
func (s *DB) DeleteMtime(folder, name string) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	fIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	if _, err := s.stmt(`
		DELETE FROM mtimes
		WHERE folder_idx = ? AND name = ?
	`).Exec(fIdx, name); err != nil {
		return wrap(err)
	}
	return nil
}

View File

@@ -0,0 +1,54 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"testing"
"time"
)
// TestMtimePairs verifies storing, reading and deleting mtime pairs.
func TestMtimePairs(t *testing.T) {
	t.Parallel()

	db, err := OpenTemp()
	if err != nil {
		// Fix: previously t.Fatal() discarded the error value.
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := db.Close(); err != nil {
			t.Fatal(err)
		}
	})

	t0 := time.Now().Truncate(time.Second)
	t1 := t0.Add(1234567890)

	// Set a pair
	if err := db.PutMtime("foo", "bar", t0, t1); err != nil {
		t.Fatal(err)
	}

	// Check it
	gt0, gt1 := db.GetMtime("foo", "bar")
	if !gt0.Equal(t0) || !gt1.Equal(t1) {
		t.Log(t0, gt0)
		t.Log(t1, gt1)
		// Fix: this was t.Log, so a mismatch never failed the test.
		t.Error("bad times")
	}

	// Delete it
	if err := db.DeleteMtime("foo", "bar"); err != nil {
		t.Fatal(err)
	}

	// Check it
	gt0, gt1 = db.GetMtime("foo", "bar")
	if !gt0.IsZero() || !gt1.IsZero() {
		t.Log(gt0, gt1)
		// Fix: was t.Log here as well.
		t.Error("bad times")
	}
}

View File

@@ -0,0 +1,203 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"database/sql"
"os"
"path/filepath"
"strconv"
"strings"
"text/template"
"github.com/jmoiron/sqlx"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/protocol"
)
// maxDBConns caps the SQLite connection pool size.
const maxDBConns = 128

// Open opens the database at the given path, configured for normal
// concurrent operation (WAL journaling, pooled connections).
func Open(path string) (*DB, error) {
	// Open the database with options to enable foreign keys and recursive
	// triggers (needed for the delete+insert triggers on row replace).
	sqlDB, err := sqlx.Open(dbDriver, "file:"+path+"?"+commonOptions)
	if err != nil {
		return nil, wrap(err)
	}
	sqlDB.SetMaxOpenConns(maxDBConns)
	// WAL mode allows concurrent readers alongside a single writer.
	if _, err := sqlDB.Exec(`PRAGMA journal_mode = WAL`); err != nil {
		return nil, wrap(err, "PRAGMA journal_mode")
	}
	if _, err := sqlDB.Exec(`PRAGMA optimize = 0x10002`); err != nil {
		// https://www.sqlite.org/pragma.html#pragma_optimize
		return nil, wrap(err, "PRAGMA optimize")
	}
	// Keep the WAL file from growing without bound.
	if _, err := sqlDB.Exec(`PRAGMA journal_size_limit = 6144000`); err != nil {
		// https://www.powersync.com/blog/sqlite-optimizations-for-ultra-high-performance
		return nil, wrap(err, "PRAGMA journal_size_limit")
	}
	return openCommon(sqlDB)
}
// Open the database with options suitable for the migration inserts. This
// is not a safe mode of operation for normal processing, use only for bulk
// inserts with a close afterwards.
//
// Specifically: a single connection, no foreign key enforcement, no
// journal and no fsync — a crash mid-migration can corrupt the database,
// which is acceptable only because the migration can be redone from
// scratch.
func OpenForMigration(path string) (*DB, error) {
	sqlDB, err := sqlx.Open(dbDriver, "file:"+path+"?"+commonOptions)
	if err != nil {
		return nil, wrap(err, "open")
	}
	sqlDB.SetMaxOpenConns(1)
	if _, err := sqlDB.Exec(`PRAGMA foreign_keys = 0`); err != nil {
		return nil, wrap(err, "PRAGMA foreign_keys")
	}
	if _, err := sqlDB.Exec(`PRAGMA journal_mode = OFF`); err != nil {
		return nil, wrap(err, "PRAGMA journal_mode")
	}
	if _, err := sqlDB.Exec(`PRAGMA synchronous = 0`); err != nil {
		return nil, wrap(err, "PRAGMA synchronous")
	}
	return openCommon(sqlDB)
}
// OpenTemp opens a fresh database in a newly created temporary directory,
// for use in tests.
func OpenTemp() (*DB, error) {
	// SQLite has a memory mode, but it works differently with concurrency
	// compared to what we need with the WAL mode. So, no memory databases
	// for now.
	dir, err := os.MkdirTemp("", "syncthing-db")
	if err != nil {
		return nil, wrap(err)
	}
	dbPath := filepath.Join(dir, "db")
	l.Debugln("Test DB in", dbPath)
	return Open(dbPath)
}
// openCommon applies settings and schema setup common to all open modes
// and constructs the DB wrapper.
func openCommon(sqlDB *sqlx.DB) (*DB, error) {
	if _, err := sqlDB.Exec(`PRAGMA auto_vacuum = INCREMENTAL`); err != nil {
		return nil, wrap(err, "PRAGMA auto_vacuum")
	}
	if _, err := sqlDB.Exec(`PRAGMA default_temp_store = MEMORY`); err != nil {
		return nil, wrap(err, "PRAGMA default_temp_store")
	}
	if _, err := sqlDB.Exec(`PRAGMA temp_store = MEMORY`); err != nil {
		return nil, wrap(err, "PRAGMA temp_store")
	}
	db := &DB{
		sql:        sqlDB,
		statements: make(map[string]*sqlx.Stmt),
	}
	// Create tables etc. if they do not already exist.
	if err := db.runScripts("sql/schema/*"); err != nil {
		return nil, wrap(err)
	}
	// If a schema version is already recorded, apply the migration scripts
	// numbered higher than it (scripts are named "NNN-..."). The read
	// error is deliberately ignored: version zero means a fresh database
	// with no migrations to run.
	ver, _ := db.getAppliedSchemaVersion()
	if ver.SchemaVersion > 0 {
		filter := func(scr string) bool {
			scr = filepath.Base(scr)
			nstr, _, ok := strings.Cut(scr, "-")
			if !ok {
				return false
			}
			n, err := strconv.ParseInt(nstr, 10, 32)
			if err != nil {
				return false
			}
			return int(n) > ver.SchemaVersion
		}
		if err := db.runScripts("sql/migrations/*", filter); err != nil {
			return nil, wrap(err)
		}
	}
	// Touch device IDs that should always exist and have a low index
	// numbers, and will never change
	// NOTE(review): the error is ignored; a failure would leave
	// localDeviceIdx at zero — confirm this cannot happen in practice.
	db.localDeviceIdx, _ = db.deviceIdxLocked(protocol.LocalDeviceID)
	// Set the current schema version, if not already set
	if err := db.setAppliedSchemaVersion(currentSchemaVersion); err != nil {
		return nil, wrap(err)
	}
	// Values expanded into the SQL statement templates by stmt().
	db.tplInput = map[string]any{
		"FlagLocalUnsupported": protocol.FlagLocalUnsupported,
		"FlagLocalIgnored":     protocol.FlagLocalIgnored,
		"FlagLocalMustRescan":  protocol.FlagLocalMustRescan,
		"FlagLocalReceiveOnly": protocol.FlagLocalReceiveOnly,
		"FlagLocalGlobal":      protocol.FlagLocalGlobal,
		"FlagLocalNeeded":      protocol.FlagLocalNeeded,
		"LocalDeviceIdx":       db.localDeviceIdx,
		"SyncthingVersion":     build.LongVersion,
	}
	return db, nil
}
// tplFuncs are the helper functions available inside SQL statement
// templates. "or" computes the bitwise OR of its integer arguments, used
// to combine flag constants.
var tplFuncs = template.FuncMap{
	"or": func(vs ...int) int {
		res := vs[0]
		for i := 1; i < len(vs); i++ {
			res |= vs[i]
		}
		return res
	},
}
// stmt returns a prepared statement for the given SQL string, after
// applying local template expansions. The statement is cached.
//
// Uses double-checked locking on the statement cache: a shared read lock
// for the common hit path, then the write lock with a re-check on miss.
// Preparation errors are not returned here; they are wrapped in a
// failedStmt and surface when the statement is used.
func (s *DB) stmt(tpl string) stmt {
	tpl = strings.TrimSpace(tpl)

	// Fast concurrent lookup of cached statement
	s.statementsMut.RLock()
	stmt, ok := s.statements[tpl]
	s.statementsMut.RUnlock()
	if ok {
		return stmt
	}

	// On miss, take the full lock, check again
	s.statementsMut.Lock()
	defer s.statementsMut.Unlock()
	stmt, ok = s.statements[tpl]
	if ok {
		return stmt
	}

	// Apply template expansions. A malformed template is a programmer
	// error (the SQL is compiled in), hence the panic.
	var sb strings.Builder
	compTpl := template.Must(template.New("tpl").Funcs(tplFuncs).Parse(tpl))
	if err := compTpl.Execute(&sb, s.tplInput); err != nil {
		panic("bug: bad template: " + err.Error())
	}

	// Prepare and cache. Note that failed statements are intentionally not
	// cached, so preparation is retried on the next call.
	stmt, err := s.sql.Preparex(sb.String())
	if err != nil {
		return failedStmt{err}
	}
	s.statements[tpl] = stmt
	return stmt
}
// stmt is the subset of sqlx statement methods used by this package,
// allowing failedStmt to stand in for a real prepared statement.
type stmt interface {
	Exec(args ...any) (sql.Result, error)
	Get(dest any, args ...any) error
	Queryx(args ...any) (*sqlx.Rows, error)
	Select(dest any, args ...any) error
}

// failedStmt defers a statement preparation error to the point of use, so
// DB.stmt callers need not handle errors at lookup time.
type failedStmt struct {
	err error
}

func (f failedStmt) Exec(_ ...any) (sql.Result, error)   { return nil, f.err }
func (f failedStmt) Get(_ any, _ ...any) error           { return f.err }
func (f failedStmt) Queryx(_ ...any) (*sqlx.Rows, error) { return nil, f.err }
func (f failedStmt) Select(_ any, _ ...any) error        { return f.err }

View File

@@ -0,0 +1,18 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
//go:build cgo
package sqlite
import (
_ "github.com/mattn/go-sqlite3" // register sqlite3 database driver
)
const (
	dbDriver = "sqlite3"
	// DSN options for the mattn/go-sqlite3 driver: presumably _fk enables
	// foreign keys, _rt recursive triggers, _cache_size sets the page
	// cache (negative = KiB), _sync=1 synchronous NORMAL, and
	// _txlock=immediate starts write transactions eagerly — confirm
	// against the driver's DSN documentation.
	commonOptions = "_fk=true&_rt=true&_cache_size=-65536&_sync=1&_txlock=immediate"
)

View File

@@ -0,0 +1,23 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
//go:build !cgo && !wazero
package sqlite
import (
"github.com/syncthing/syncthing/lib/build"
_ "modernc.org/sqlite" // register sqlite database driver
)
const (
	dbDriver = "sqlite"
	// DSN options for the pure-Go modernc.org/sqlite driver, expressed as
	// PRAGMAs: foreign keys, recursive triggers, page cache size
	// (negative = KiB), synchronous NORMAL. Mirrors the cgo driver's
	// options in sqlite3.go.
	commonOptions = "_pragma=foreign_keys(1)&_pragma=recursive_triggers(1)&_pragma=cache_size(-65536)&_pragma=synchronous(1)"
)

// init records that the pure-Go SQLite driver was compiled in; presumably
// this surfaces in version/diagnostic output via lib/build.
func init() {
	build.AddTag("modernc-sqlite")
}

View File

@@ -0,0 +1,44 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import "github.com/jmoiron/sqlx"
// txPreparedStmts wraps an sqlx.Tx with a per-transaction statement cache,
// so repeated Preparex calls with the same query within one transaction
// reuse the prepared statement. All cached statements are closed on
// Commit/Rollback.
type txPreparedStmts struct {
	*sqlx.Tx
	stmts map[string]*sqlx.Stmt // lazily initialised on first Preparex
}

// Preparex returns a cached prepared statement for query, preparing and
// caching it on first use.
func (p *txPreparedStmts) Preparex(query string) (*sqlx.Stmt, error) {
	if p.stmts == nil {
		p.stmts = make(map[string]*sqlx.Stmt)
	}
	stmt, ok := p.stmts[query]
	if ok {
		return stmt, nil
	}
	stmt, err := p.Tx.Preparex(query)
	if err != nil {
		return nil, wrap(err)
	}
	p.stmts[query] = stmt
	return stmt, nil
}

// Commit closes all cached statements and commits the transaction.
func (p *txPreparedStmts) Commit() error {
	for _, s := range p.stmts {
		s.Close()
	}
	return p.Tx.Commit()
}

// Rollback closes all cached statements and rolls back the transaction.
func (p *txPreparedStmts) Rollback() error {
	for _, s := range p.stmts {
		s.Close()
	}
	return p.Tx.Rollback()
}

View File

@@ -0,0 +1,88 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"embed"
"io/fs"
"strings"
"time"
"github.com/syncthing/syncthing/lib/build"
)
// currentSchemaVersion is recorded in the schemamigrations table once the
// schema and applicable migrations have run.
const currentSchemaVersion = 1

//go:embed sql/**
var embedded embed.FS

// runScripts executes all embedded SQL scripts matching glob within a
// single transaction. Optional filter functions can exclude individual
// scripts (used to skip already-applied migrations).
func (s *DB) runScripts(glob string, filter ...func(s string) bool) error {
	scripts, err := fs.Glob(embedded, glob)
	if err != nil {
		return wrap(err)
	}
	tx, err := s.sql.Begin()
	if err != nil {
		return wrap(err)
	}
	// No-op after a successful Commit below.
	defer tx.Rollback() //nolint:errcheck
nextScript:
	for _, scr := range scripts {
		for _, fn := range filter {
			if !fn(scr) {
				l.Debugln("Skipping script", scr)
				continue nextScript
			}
		}
		l.Debugln("Executing script", scr)
		bs, err := fs.ReadFile(embedded, scr)
		if err != nil {
			return wrap(err, scr)
		}
		// SQLite requires one statement per exec, so we split the init
		// files on lines containing only a semicolon and execute them
		// separately. We require it on a separate line because there are
		// also statement-internal semicolons in the triggers.
		for _, stmt := range strings.Split(string(bs), "\n;") {
			if _, err := tx.Exec(stmt); err != nil {
				return wrap(err, stmt)
			}
		}
	}
	return wrap(tx.Commit())
}
// schemaVersion mirrors a row of the schemamigrations table.
type schemaVersion struct {
	SchemaVersion    int
	AppliedAt        int64 // unix nanoseconds
	SyncthingVersion string
}

// AppliedTime returns the applied-at timestamp as a time.Time.
func (s *schemaVersion) AppliedTime() time.Time {
	return time.Unix(0, s.AppliedAt)
}
// setAppliedSchemaVersion records the given schema version together with
// the current time and Syncthing version. Re-recording an existing version
// is a no-op thanks to INSERT OR IGNORE.
func (s *DB) setAppliedSchemaVersion(ver int) error {
	if _, err := s.stmt(`
		INSERT OR IGNORE INTO schemamigrations (schema_version, applied_at, syncthing_version)
		VALUES (?, ?, ?)
	`).Exec(ver, time.Now().UnixNano(), build.LongVersion); err != nil {
		return wrap(err)
	}
	return nil
}
// getAppliedSchemaVersion returns the highest recorded schema version row.
func (s *DB) getAppliedSchemaVersion() (schemaVersion, error) {
	var ver schemaVersion
	if err := s.stmt(`
		SELECT schema_version as schemaversion, applied_at as appliedat, syncthing_version as syncthingversion FROM schemamigrations
		ORDER BY schema_version DESC
		LIMIT 1
	`).Get(&ver); err != nil {
		return schemaVersion{}, wrap(err)
	}
	return ver, nil
}

View File

@@ -0,0 +1,141 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"context"
"time"
"github.com/syncthing/syncthing/internal/db"
)
const (
	// internalMetaPrefix namespaces the service's own KV entries.
	internalMetaPrefix = "dbsvc"
	// lastMaintKey stores the time of the last maintenance run.
	lastMaintKey = "lastMaint"
)

// Service runs periodic database maintenance (garbage collection, ANALYZE,
// vacuuming, WAL checkpointing).
type Service struct {
	sdb                 *DB
	maintenanceInterval time.Duration
	internalMeta        *db.Typed // persistent service-internal metadata
}

// newService returns a maintenance Service for the given database, running
// every maintenanceInterval.
func newService(sdb *DB, maintenanceInterval time.Duration) *Service {
	return &Service{
		sdb:                 sdb,
		maintenanceInterval: maintenanceInterval,
		internalMeta:        db.NewTyped(sdb, internalMetaPrefix),
	}
}
// Serve runs periodic maintenance until the context is cancelled,
// persisting the completion time of each run so restarts resume the
// schedule.
func (s *Service) Serve(ctx context.Context) error {
	// Run periodic maintenance
	// Figure out when we last ran maintenance and schedule accordingly. If
	// it was never, do it now.
	// The read errors are ignored: a missing/unreadable timestamp reads as
	// the zero time, making the first run due after the one-minute grace
	// delay below.
	lastMaint, _, _ := s.internalMeta.Time(lastMaintKey)
	nextMaint := lastMaint.Add(s.maintenanceInterval)
	wait := time.Until(nextMaint)
	if wait < 0 {
		// Overdue: still wait a minute rather than running at startup.
		wait = time.Minute
	}
	l.Debugln("Next periodic run in", wait)
	timer := time.NewTimer(wait)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-timer.C:
		}

		if err := s.periodic(ctx); err != nil {
			return wrap(err)
		}

		timer.Reset(s.maintenanceInterval)
		l.Debugln("Next periodic run in", s.maintenanceInterval)
		// Best effort; failing to record the time only means an earlier
		// run after a restart.
		_ = s.internalMeta.PutTime(lastMaintKey, time.Now())
	}
}
// periodic performs one maintenance pass: blocklist/blocks garbage
// collection followed by best-effort SQLite housekeeping. Holds the update
// lock for the duration, blocking writers.
func (s *Service) periodic(ctx context.Context) error {
	t0 := time.Now()
	l.Debugln("Periodic start")

	s.sdb.updateLock.Lock()
	defer s.sdb.updateLock.Unlock()

	// t1-t0 is how long we waited for the lock; time.Since(t1) the work.
	t1 := time.Now()
	defer func() { l.Debugln("Periodic done in", time.Since(t1), "+", t1.Sub(t0)) }()

	if err := s.garbageCollectBlocklistsAndBlocksLocked(ctx); err != nil {
		return wrap(err)
	}

	// Housekeeping is best effort: errors are deliberately ignored.
	_, _ = s.sdb.sql.ExecContext(ctx, `ANALYZE`)
	_, _ = s.sdb.sql.ExecContext(ctx, `PRAGMA optimize`)
	_, _ = s.sdb.sql.ExecContext(ctx, `PRAGMA incremental_vacuum`)
	_, _ = s.sdb.sql.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`)

	return nil
}
// garbageCollectBlocklistsAndBlocksLocked removes blocklists that no file
// row references and blocks that no remaining blocklist references. The
// caller must hold the update lock.
func (s *Service) garbageCollectBlocklistsAndBlocksLocked(ctx context.Context) error {
	// Remove all blocklists not referred to by any files and, by extension,
	// any blocks not referred to by a blocklist. This is an expensive
	// operation when run normally, especially if there are a lot of blocks
	// to collect.
	//
	// We make this orders of magnitude faster by disabling foreign keys for
	// the transaction and doing the cleanup manually. This requires using
	// an explicit connection and disabling foreign keys before starting the
	// transaction. We make sure to clean up on the way out.

	conn, err := s.sdb.sql.Connx(ctx)
	if err != nil {
		return wrap(err)
	}
	defer conn.Close()

	if _, err := conn.ExecContext(ctx, `PRAGMA foreign_keys = 0`); err != nil {
		return wrap(err)
	}
	// Re-enable foreign keys even if ctx is already cancelled, hence the
	// fresh background context.
	defer func() { //nolint:contextcheck
		_, _ = conn.ExecContext(context.Background(), `PRAGMA foreign_keys = 1`)
	}()

	tx, err := conn.BeginTxx(ctx, nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck

	// Blocklists with no referring file row...
	if res, err := tx.ExecContext(ctx, `
		DELETE FROM blocklists
		WHERE NOT EXISTS (
			SELECT 1 FROM files WHERE files.blocklist_hash = blocklists.blocklist_hash
		)`); err != nil {
		return wrap(err, "delete blocklists")
	} else if shouldDebug() {
		rows, err := res.RowsAffected()
		l.Debugln("Blocklist GC:", rows, err)
	}

	// ...and blocks whose blocklist no longer exists.
	if res, err := tx.ExecContext(ctx, `
		DELETE FROM blocks
		WHERE NOT EXISTS (
			SELECT 1 FROM blocklists WHERE blocklists.blocklist_hash = blocks.blocklist_hash
		)`); err != nil {
		return wrap(err, "delete blocks")
	} else if shouldDebug() {
		rows, err := res.RowsAffected()
		l.Debugln("Blocks GC:", rows, err)
	}

	return wrap(tx.Commit())
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,549 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"cmp"
"context"
"fmt"
"runtime"
"slices"
"strings"
"github.com/jmoiron/sqlx"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/internal/itererr"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sliceutil"
"google.golang.org/protobuf/proto"
)
// Update inserts or replaces the given files for the folder and device in
// a single transaction. For the local device each file is assigned a new
// local sequence number; for remote devices the FileInfo's own sequence is
// recorded as the remote sequence. Block lists are stored indirected by
// their hash, and the global/need state is recalculated per file name.
func (s *DB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	// Statements are prepared once per transaction and reused in the loop.
	//nolint:sqlclosecheck
	insertFileStmt, err := txp.Preparex(`
		INSERT OR REPLACE INTO files (folder_idx, device_idx, remote_sequence, name, type, modified, size, version, deleted, invalid, local_flags, blocklist_hash)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
		RETURNING sequence
	`)
	if err != nil {
		return wrap(err, "prepare insert file")
	}
	//nolint:sqlclosecheck
	insertFileInfoStmt, err := txp.Preparex(`
		INSERT INTO fileinfos (sequence, fiprotobuf)
		VALUES (?, ?)
	`)
	if err != nil {
		return wrap(err, "prepare insert fileinfo")
	}
	//nolint:sqlclosecheck
	insertBlockListStmt, err := txp.Preparex(`
		INSERT OR IGNORE INTO blocklists (blocklist_hash, blprotobuf)
		VALUES (?, ?)
	`)
	if err != nil {
		return wrap(err, "prepare insert blocklist")
	}

	var prevRemoteSeq int64
	for i, f := range fs {
		f.Name = osutil.NormalizedFilename(f.Name)

		// blockshash stays nil (SQL NULL) for files without blocks.
		var blockshash *[]byte
		if len(f.Blocks) > 0 {
			f.BlocksHash = protocol.BlocksHash(f.Blocks)
			blockshash = &f.BlocksHash
		} else {
			f.BlocksHash = nil
		}

		if f.Type == protocol.FileInfoTypeDirectory {
			f.Size = 128 // synthetic directory size
		}

		// Insert the file.
		//
		// If it is a remote file, set remote_sequence otherwise leave it at
		// null. Returns the new local sequence. Note that the duplicate
		// check only catches adjacent repeats in this batch.
		var remoteSeq *int64
		if device != protocol.LocalDeviceID {
			if i > 0 && f.Sequence == prevRemoteSeq {
				return fmt.Errorf("duplicate remote sequence number %d", prevRemoteSeq)
			}
			prevRemoteSeq = f.Sequence
			remoteSeq = &f.Sequence
		}
		var localSeq int64
		if err := insertFileStmt.Get(&localSeq, folderIdx, deviceIdx, remoteSeq, f.Name, f.Type, f.ModTime().UnixNano(), f.Size, f.Version.String(), f.IsDeleted(), f.IsInvalid(), f.LocalFlags, blockshash); err != nil {
			return wrap(err, "insert file")
		}

		if len(f.Blocks) > 0 {
			// Indirect the block list
			blocks := sliceutil.Map(f.Blocks, protocol.BlockInfo.ToWire)
			bs, err := proto.Marshal(&dbproto.BlockList{Blocks: blocks})
			if err != nil {
				return wrap(err, "marshal blocklist")
			}
			if _, err := insertBlockListStmt.Exec(f.BlocksHash, bs); err != nil {
				return wrap(err, "insert blocklist")
			}
			if device == protocol.LocalDeviceID {
				// Insert all blocks
				if err := s.insertBlocksLocked(txp, f.BlocksHash, f.Blocks); err != nil {
					return wrap(err, "insert blocks")
				}
			}
			// Blocks are stored indirected; drop them before marshalling
			// the FileInfo below.
			f.Blocks = nil
		}

		// Insert the fileinfo
		if device == protocol.LocalDeviceID {
			f.Sequence = localSeq
		}
		bs, err := proto.Marshal(f.ToWire(true))
		if err != nil {
			return wrap(err, "marshal fileinfo")
		}
		if _, err := insertFileInfoStmt.Exec(localSeq, bs); err != nil {
			return wrap(err, "insert fileinfo")
		}

		// Update global and need
		if err := s.recalcGlobalForFile(txp, folderIdx, f.Name); err != nil {
			return wrap(err)
		}
	}

	return wrap(tx.Commit())
}
// DropFolder removes the folder row; related data (files etc.) goes with
// it via the cascading foreign keys in the schema.
func (s *DB) DropFolder(folder string) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	stmt := s.stmt(`
		DELETE FROM folders
		WHERE folder_id = ?
	`)
	_, err := stmt.Exec(folder)
	return wrap(err)
}
// DropDevice removes the given remote device and, via cascading foreign
// keys, all its files, then recalculates the global/need state for every
// folder the device had entries in. Panics on the local device ID, which
// must never be dropped.
func (s *DB) DropDevice(device protocol.DeviceID) error {
	if device == protocol.LocalDeviceID {
		panic("bug: cannot drop local device")
	}

	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	// Find all folders where the device is involved, before deleting it.
	var folderIdxs []int64
	if err := tx.Select(&folderIdxs, `
		SELECT folder_idx
		FROM counts
		WHERE device_idx = ? AND count > 0
		GROUP BY folder_idx
	`, deviceIdx); err != nil {
		return wrap(err)
	}

	// Drop the device, which cascades to delete all files etc for it
	if _, err := tx.Exec(`DELETE FROM devices WHERE device_id = ?`, device.String()); err != nil {
		return wrap(err)
	}

	// Recalc the globals for all affected folders
	for _, idx := range folderIdxs {
		if err := s.recalcGlobalForFolder(txp, idx); err != nil {
			return wrap(err)
		}
	}

	return wrap(tx.Commit())
}
// DropAllFiles removes every file entry for the given folder and device.
// If anything was actually removed, the global/need state is recalculated
// for the entire folder.
func (s *DB) DropAllFiles(folder string, device protocol.DeviceID) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	// This is a two part operation, first dropping all the files and then
	// recalculating the global state for the entire folder.
	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	// Drop all the file entries
	result, err := tx.Exec(`
		DELETE FROM files
		WHERE folder_idx = ? AND device_idx = ?
	`, folderIdx, deviceIdx)
	if err != nil {
		return wrap(err)
	}
	if n, err := result.RowsAffected(); err == nil && n == 0 {
		// The delete affected no rows, so we don't need to redo the entire
		// global/need calculation.
		return wrap(tx.Commit())
	}

	// Recalc global for the entire folder
	if err := s.recalcGlobalForFolder(txp, folderIdx); err != nil {
		return wrap(err)
	}

	return wrap(tx.Commit())
}
// DropFilesNamed removes the named file entries for the folder and device
// and recalculates the global/need state for each name. Note that the
// names are normalized in place, mutating the caller's slice.
func (s *DB) DropFilesNamed(folder string, device protocol.DeviceID, names []string) error {
	for i := range names {
		names[i] = osutil.NormalizedFilename(names[i])
	}

	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	// Drop the named files; sqlx.In expands the IN (?) placeholder.
	query, args, err := sqlx.In(`
		DELETE FROM files
		WHERE folder_idx = ? AND device_idx = ? AND name IN (?)
	`, folderIdx, deviceIdx, names)
	if err != nil {
		return wrap(err)
	}
	if _, err := tx.Exec(query, args...); err != nil {
		return wrap(err)
	}

	// Recalc globals for the named files
	for _, name := range names {
		if err := s.recalcGlobalForFile(txp, folderIdx, name); err != nil {
			return wrap(err)
		}
	}

	return wrap(tx.Commit())
}
// insertBlocksLocked records the individual blocks of a block list, keyed
// by block hash and the owning blocklist hash. A no-op for empty input.
func (*DB) insertBlocksLocked(tx *txPreparedStmts, blocklistHash []byte, blocks []protocol.BlockInfo) error {
	if len(blocks) == 0 {
		return nil
	}

	rows := make([]map[string]any, 0, len(blocks))
	for i, block := range blocks {
		rows = append(rows, map[string]any{
			"hash":           block.Hash,
			"blocklist_hash": blocklistHash,
			"idx":            i,
			"offset":         block.Offset,
			"size":           block.Size,
		})
	}
	_, err := tx.NamedExec(`
		INSERT OR IGNORE INTO blocks (hash, blocklist_hash, idx, offset, size)
		VALUES (:hash, :blocklist_hash, :idx, :offset, :size)
	`, rows)
	return wrap(err)
}
// recalcGlobalForFolder recalculates the global/need state for every file
// name in the folder that currently lacks an entry with the global flag.
func (s *DB) recalcGlobalForFolder(txp *txPreparedStmts, folderIdx int64) error {
	// Select files where there is no global, those are the ones we need to
	// recalculate.
	//nolint:sqlclosecheck
	namesStmt, err := txp.Preparex(`
		SELECT f.name FROM files f
		WHERE f.folder_idx = ? AND NOT EXISTS (
			SELECT 1 FROM files g
			WHERE g.folder_idx = ? AND g.name = f.name AND g.local_flags & ? != 0
		)
		GROUP BY name
	`)
	if err != nil {
		return wrap(err)
	}
	rows, err := namesStmt.Queryx(folderIdx, folderIdx, protocol.FlagLocalGlobal)
	if err != nil {
		return wrap(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return wrap(err)
		}
		if err := s.recalcGlobalForFile(txp, folderIdx, name); err != nil {
			return wrap(err)
		}
	}
	return wrap(rows.Err())
}
// recalcGlobalForFile determines the global version among all announced
// instances of the named file and updates the Global/Needed local flags
// accordingly: exactly one row gets the global flag, and it gets the need
// flag only when the local device lacks (an equivalent of) that version.
func (s *DB) recalcGlobalForFile(txp *txPreparedStmts, folderIdx int64, file string) error {
	//nolint:sqlclosecheck
	selStmt, err := txp.Preparex(`
		SELECT name, folder_idx, device_idx, sequence, modified, version, deleted, invalid, local_flags FROM files
		WHERE folder_idx = ? AND name = ?
	`)
	if err != nil {
		return wrap(err)
	}
	es, err := itererr.Collect(iterStructs[fileRow](selStmt.Queryx(folderIdx, file)))
	if err != nil {
		return wrap(err)
	}
	if len(es) == 0 {
		// shouldn't happen
		return nil
	}

	// Sort the entries; the global entry is at the head of the list
	slices.SortFunc(es, fileRow.Compare)

	// The global version is the first one in the list that is not invalid,
	// or just the first one in the list if all are invalid.
	var global fileRow
	globIdx := slices.IndexFunc(es, func(e fileRow) bool { return !e.Invalid })
	if globIdx < 0 {
		globIdx = 0
	}
	global = es[globIdx]

	// We "have" the file if the position in the list of versions is at the
	// global version or better, or if the version is the same as the global
	// file (we might be further down the list due to invalid flags), or if
	// the global is deleted and we don't have it at all...
	localIdx := slices.IndexFunc(es, func(e fileRow) bool { return e.DeviceIdx == s.localDeviceIdx })
	hasLocal := localIdx >= 0 && localIdx <= globIdx || // have a better or equal version
		localIdx >= 0 && es[localIdx].Version.Equal(global.Version.Vector) || // have an equal version but invalid/ignored
		localIdx < 0 && global.Deleted // missing it, but the global is also deleted

	// Set the global flag on the global entry. Set the need flag if the
	// local device needs this file, unless it's invalid.
	global.LocalFlags |= protocol.FlagLocalGlobal
	if hasLocal || global.Invalid {
		global.LocalFlags &= ^protocol.FlagLocalNeeded
	} else {
		global.LocalFlags |= protocol.FlagLocalNeeded
	}
	//nolint:sqlclosecheck
	upStmt, err := txp.Prepare(`
		UPDATE files SET local_flags = ?
		WHERE folder_idx = ? AND device_idx = ? AND sequence = ?
	`)
	if err != nil {
		return wrap(err)
	}
	if _, err := upStmt.Exec(global.LocalFlags, global.FolderIdx, global.DeviceIdx, global.Sequence); err != nil {
		return wrap(err)
	}

	// Clear the need and global flags on all other entries. The flags
	// filter in the WHERE clause avoids no-op row updates (and their
	// trigger cost).
	//nolint:sqlclosecheck
	upStmt, err = txp.Prepare(`
		UPDATE files SET local_flags = local_flags & ?
		WHERE folder_idx = ? AND name = ? AND sequence != ? AND local_flags & ? != 0
	`)
	if err != nil {
		return wrap(err)
	}
	if _, err := upStmt.Exec(^(protocol.FlagLocalNeeded | protocol.FlagLocalGlobal), folderIdx, global.Name, global.Sequence, protocol.FlagLocalNeeded|protocol.FlagLocalGlobal); err != nil {
		return wrap(err)
	}
	return nil
}
// folderIdxLocked returns the database index for the given folder ID,
// first creating the folder row if it does not already exist. The caller
// must hold the update lock.
func (s *DB) folderIdxLocked(folderID string) (int64, error) {
	// INSERT OR IGNORE is a no-op when the folder already exists.
	if _, err := s.stmt(`
		INSERT OR IGNORE INTO folders(folder_id)
		VALUES (?)
	`).Exec(folderID); err != nil {
		return 0, wrap(err)
	}

	var idx int64
	if err := s.stmt(`
		SELECT idx FROM folders
		WHERE folder_id = ?
	`).Get(&idx, folderID); err != nil {
		return 0, wrap(err)
	}
	return idx, nil
}
// deviceIdxLocked returns the database index for the given device ID,
// first creating the device row if it does not already exist. The caller
// must hold the update lock.
func (s *DB) deviceIdxLocked(deviceID protocol.DeviceID) (int64, error) {
	devStr := deviceID.String()
	// INSERT OR IGNORE is a no-op when the device already exists.
	if _, err := s.stmt(`
		INSERT OR IGNORE INTO devices(device_id)
		VALUES (?)
	`).Exec(devStr); err != nil {
		return 0, wrap(err)
	}

	var idx int64
	if err := s.stmt(`
		SELECT idx FROM devices
		WHERE device_id = ?
	`).Get(&idx, devStr); err != nil {
		return 0, wrap(err)
	}
	return idx, nil
}
// wrap returns the error wrapped with the calling function name and
// optional extra context strings as prefix. A nil error wraps to nil, so
// call sites can unconditionally `return wrap(err)`.
func wrap(err error, context ...string) error {
	if err == nil {
		return nil
	}

	prefix := "error"
	pc, _, _, ok := runtime.Caller(1)
	details := runtime.FuncForPC(pc)
	if ok && details != nil {
		// Keep only the bare function name, e.g. "update" out of
		// "github.com/.../sqlite.(*DB).Update".
		prefix = strings.ToLower(details.Name())
		if dotIdx := strings.LastIndex(prefix, "."); dotIdx > 0 {
			prefix = prefix[dotIdx+1:]
		}
	}

	if len(context) > 0 {
		// Trim into a copy; the previous implementation wrote the trimmed
		// strings back into the variadic slice, mutating the caller's
		// slice when one was passed with the spread operator.
		parts := make([]string, len(context))
		for i, c := range context {
			parts[i] = strings.TrimSpace(c)
		}
		extra := strings.Join(parts, ", ")
		return fmt.Errorf("%s (%s): %w", prefix, extra, err)
	}

	return fmt.Errorf("%s: %w", prefix, err)
}
// fileRow is the scan target for rows from the files table during
// global/need recalculation.
type fileRow struct {
	Name       string
	Version    dbVector
	FolderIdx  int64 `db:"folder_idx"`
	DeviceIdx  int64 `db:"device_idx"`
	Sequence   int64
	Modified   int64
	Size       int64
	LocalFlags int64 `db:"local_flags"`
	Deleted    bool
	Invalid    bool
}

// Compare orders file rows so that the winning ("global") candidate sorts
// first: a negative return means e wins over other. Version dominates;
// validity, deletedness, modification time and device order break ties.
func (e fileRow) Compare(other fileRow) int {
	// From FileInfo.WinsConflict
	vc := e.Version.Vector.Compare(other.Version.Vector)
	switch vc {
	case protocol.Equal:
		// Same version: a valid entry beats an invalid one.
		if e.Invalid != other.Invalid {
			if e.Invalid {
				return 1
			}
			return -1
		}
		// Compare the device ID index, lower is better. This is only
		// deterministic to the extent that LocalDeviceID will always be the
		// lowest one, order between remote devices is random (and
		// irrelevant).
		return cmp.Compare(e.DeviceIdx, other.DeviceIdx)
	case protocol.Greater: // we are newer
		return -1
	case protocol.Lesser: // we are older
		return 1
	case protocol.ConcurrentGreater, protocol.ConcurrentLesser: // there is a conflict
		if e.Invalid != other.Invalid {
			if e.Invalid { // we are invalid, we lose
				return 1
			}
			return -1 // they are invalid, we win
		}
		if e.Deleted != other.Deleted {
			if e.Deleted { // we are deleted, we lose
				return 1
			}
			return -1 // they are deleted, we win
		}
		if d := cmp.Compare(e.Modified, other.Modified); d != 0 {
			return -d // positive d means we were newer, so we win (negative return)
		}
		if vc == protocol.ConcurrentGreater {
			return -1 // we have a better device ID, we win
		}
		return 1 // they win
	default:
		return 0
	}
}

View File

@@ -1,17 +1,15 @@
// Copyright (C) 2014 The Syncthing Authors.
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
package sqlite
import (
"github.com/syncthing/syncthing/lib/logger"
)
var l = logger.DefaultLogger.NewFacility("db", "The database layer")
var l = logger.DefaultLogger.NewFacility("sqlite", "SQLite database")
func shouldDebug() bool {
return l.ShouldDebug("db")
}
func shouldDebug() bool { return l.ShouldDebug("sqlite") }

View File

@@ -0,0 +1,8 @@
These SQL scripts are embedded in the binary.
Scripts in `schema/` are run at every startup, in alphanumerical order.
Scripts in `migrations/` are run when a migration is needed; they must begin
with a number that equals the schema version that results from that
migration. Migrations are not run on initial database creation, so the
scripts in `schema/` should create the latest version.

View File

@@ -0,0 +1,7 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- The next migration should be number two.

View File

@@ -0,0 +1,19 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- folders map folder IDs as used by Syncthing to database folder indexes
CREATE TABLE IF NOT EXISTS folders (
    idx INTEGER NOT NULL PRIMARY KEY, -- compact integer referenced by other tables
    folder_id TEXT NOT NULL UNIQUE COLLATE BINARY -- the string folder ID
) STRICT
;

-- devices map device IDs as used by Syncthing to database device indexes
CREATE TABLE IF NOT EXISTS devices (
    idx INTEGER NOT NULL PRIMARY KEY, -- compact integer referenced by other tables
    device_id TEXT NOT NULL UNIQUE COLLATE BINARY -- device ID in string form
) STRICT
;

View File

@@ -0,0 +1,14 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- Schema migrations hold the list of historical migrations applied,
-- one row per applied migration.
CREATE TABLE IF NOT EXISTS schemamigrations (
    schema_version INTEGER NOT NULL,
    applied_at INTEGER NOT NULL, -- unix nanos
    syncthing_version TEXT NOT NULL COLLATE BINARY, -- version that applied this migration
    PRIMARY KEY(schema_version)
) STRICT
;

View File

@@ -0,0 +1,62 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- Files
--
-- The files table contains all files announced by any device. Files present
-- on this device are filed under the LocalDeviceID, not the actual current
-- device ID, for simplicity, consistency and portability. One announced
-- version of each file is considered the "global" version - the latest one,
-- that all other devices strive to replicate. This instance gets the Global
-- flag bit set. There may be other identical instances of this file
-- announced by other devices, but only one instance gets the Global flag;
-- this simplifies accounting. If the current device has the Global version,
-- the LocalDeviceID instance of the file is the one that has the Global
-- bit.
--
-- If the current device does not have that version of the file it gets the
-- Need bit set. Only Global files announced by another device can have the
-- Need bit. This allows for very efficient lookup of files needing handling
-- on this device, which is a common query.
CREATE TABLE IF NOT EXISTS files (
    folder_idx INTEGER NOT NULL,
    device_idx INTEGER NOT NULL, -- actual device ID or LocalDeviceID
    sequence INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, -- our local database sequence, for each and every entry
    remote_sequence INTEGER, -- remote device's sequence number, null for local or synthetic entries
    name TEXT NOT NULL COLLATE BINARY,
    type INTEGER NOT NULL, -- protocol.FileInfoType
    modified INTEGER NOT NULL, -- Unix nanos
    size INTEGER NOT NULL,
    version TEXT NOT NULL COLLATE BINARY, -- version vector in string form
    deleted INTEGER NOT NULL, -- boolean
    invalid INTEGER NOT NULL, -- boolean
    local_flags INTEGER NOT NULL, -- includes the Global and Need bits
    blocklist_hash BLOB, -- null when there are no blocks
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE,
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE
) STRICT
;

-- FileInfos store the actual protobuf object. We do this separately to keep
-- the files rows smaller and more efficient.
CREATE TABLE IF NOT EXISTS fileinfos (
    sequence INTEGER NOT NULL PRIMARY KEY, -- our local database sequence from the files table
    fiprotobuf BLOB NOT NULL,
    FOREIGN KEY(sequence) REFERENCES files(sequence) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED
) STRICT
;

-- There can be only one file per folder, device, and remote sequence number
CREATE UNIQUE INDEX IF NOT EXISTS files_remote_sequence ON files (folder_idx, device_idx, remote_sequence)
    WHERE remote_sequence IS NOT NULL
;

-- There can be only one file per folder, device, and name
CREATE UNIQUE INDEX IF NOT EXISTS files_device_name ON files (folder_idx, device_idx, name)
;

-- We want to be able to look up & iterate files based on just folder and name
CREATE INDEX IF NOT EXISTS files_name_only ON files (folder_idx, name)
;

-- We want to be able to look up & iterate files based on blocks hash
CREATE INDEX IF NOT EXISTS files_blocklist_hash_only ON files (blocklist_hash, device_idx, folder_idx) WHERE blocklist_hash IS NOT NULL
;

View File

@@ -0,0 +1,24 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- indexids holds the index ID and maximum sequence for a given device and folder
CREATE TABLE IF NOT EXISTS indexids (
    device_idx INTEGER NOT NULL,
    folder_idx INTEGER NOT NULL,
    index_id TEXT NOT NULL COLLATE BINARY,
    sequence INTEGER NOT NULL DEFAULT 0, -- highest sequence seen for this device and folder
    PRIMARY KEY(device_idx, folder_idx),
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE,
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE
) STRICT, WITHOUT ROWID
;

-- Keep the sequence up to date on every file insert. Local entries have a
-- null remote_sequence, so the local sequence is used for those. The row
-- is created with an empty index_id; presumably the real index ID is set
-- elsewhere -- confirm against the Go side.
CREATE TRIGGER IF NOT EXISTS indexids_seq AFTER INSERT ON files
BEGIN
    INSERT INTO indexids (folder_idx, device_idx, index_id, sequence)
        VALUES (NEW.folder_idx, NEW.device_idx, "", COALESCE(NEW.remote_sequence, NEW.sequence))
        ON CONFLICT DO UPDATE SET sequence = COALESCE(NEW.remote_sequence, NEW.sequence);
END
;

View File

@@ -0,0 +1,53 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- Counts
--
-- Counts and sizes are maintained for each device, folder, type, flag bits
-- combination.
CREATE TABLE IF NOT EXISTS counts (
    folder_idx INTEGER NOT NULL,
    device_idx INTEGER NOT NULL,
    type INTEGER NOT NULL,
    local_flags INTEGER NOT NULL,
    count INTEGER NOT NULL,
    size INTEGER NOT NULL,
    deleted INTEGER NOT NULL, -- boolean
    PRIMARY KEY(folder_idx, device_idx, type, local_flags, deleted),
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE,
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE
) STRICT, WITHOUT ROWID
;

-- Maintain counts when files are added and removed using triggers
CREATE TRIGGER IF NOT EXISTS counts_insert AFTER INSERT ON files
BEGIN
    INSERT INTO counts (folder_idx, device_idx, type, local_flags, count, size, deleted)
        VALUES (NEW.folder_idx, NEW.device_idx, NEW.type, NEW.local_flags, 1, NEW.size, NEW.deleted)
        ON CONFLICT DO UPDATE SET count = count + 1, size = size + NEW.size;
END
;

CREATE TRIGGER IF NOT EXISTS counts_delete AFTER DELETE ON files
BEGIN
    UPDATE counts SET count = count - 1, size = size - OLD.size
        WHERE folder_idx = OLD.folder_idx AND device_idx = OLD.device_idx AND type = OLD.type AND local_flags = OLD.local_flags AND deleted = OLD.deleted;
END
;

-- When the flags change, move the file between count buckets: add it to
-- the bucket for the new flags (creating the row if needed) and subtract
-- it from the bucket for the old flags.
CREATE TRIGGER IF NOT EXISTS counts_update AFTER UPDATE OF local_flags ON files
WHEN NEW.local_flags != OLD.local_flags
BEGIN
    INSERT INTO counts (folder_idx, device_idx, type, local_flags, count, size, deleted)
        VALUES (NEW.folder_idx, NEW.device_idx, NEW.type, NEW.local_flags, 1, NEW.size, NEW.deleted)
        ON CONFLICT DO UPDATE SET count = count + 1, size = size + NEW.size;
    UPDATE counts SET count = count - 1, size = size - OLD.size
        WHERE folder_idx = OLD.folder_idx AND device_idx = OLD.device_idx AND type = OLD.type AND local_flags = OLD.local_flags AND deleted = OLD.deleted;
END
;

DROP TRIGGER IF EXISTS counts_update_add -- tmp migration
;
DROP TRIGGER IF EXISTS counts_update_del -- tmp migration
;

View File

@@ -0,0 +1,34 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- Block lists
--
-- The block lists are extracted from FileInfos and stored separately. This
-- reduces the database size by reusing the same block list entry for all
-- devices announcing the same file. Doing it for all block lists instead of
-- using a size cutoff simplifies queries. Block lists are garbage collected
-- "manually", not using a trigger as that was too performance impacting.
CREATE TABLE IF NOT EXISTS blocklists (
    blocklist_hash BLOB NOT NULL PRIMARY KEY,
    blprotobuf BLOB NOT NULL -- marshalled dbproto.BlockList
) STRICT
;

-- Blocks
--
-- For all local files we store the blocks individually for quick lookup. A
-- given block can exist in multiple blocklists and at multiple offsets in a
-- blocklist.
CREATE TABLE IF NOT EXISTS blocks (
    hash BLOB NOT NULL,
    blocklist_hash BLOB NOT NULL,
    idx INTEGER NOT NULL, -- position of the block within its blocklist
    offset INTEGER NOT NULL,
    size INTEGER NOT NULL,
    PRIMARY KEY (hash, blocklist_hash, idx),
    FOREIGN KEY(blocklist_hash) REFERENCES blocklists(blocklist_hash) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED
) STRICT
;

View File

@@ -0,0 +1,16 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- Backing for the MtimeFS. Stores, per folder and file name, the mtime
-- observed on disk alongside a "virtual" mtime -- presumably the one
-- Syncthing presents internally; confirm against the MtimeFS implementation.
CREATE TABLE IF NOT EXISTS mtimes (
    folder_idx INTEGER NOT NULL,
    name TEXT NOT NULL,
    ondisk INTEGER NOT NULL, -- unix nanos
    virtual INTEGER NOT NULL, -- unix nanos
    PRIMARY KEY(folder_idx, name),
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE
) STRICT, WITHOUT ROWID
;

View File

@@ -0,0 +1,13 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.
-- Simple KV store. This backs the "miscDB" we use for certain minor pieces
-- of data. Keys are namespaced by the Go-side Typed wrapper ("prefix/key").
CREATE TABLE IF NOT EXISTS kv (
    key TEXT NOT NULL PRIMARY KEY COLLATE BINARY,
    value BLOB NOT NULL
) STRICT
;

117
internal/db/sqlite/util.go Normal file
View File

@@ -0,0 +1,117 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package sqlite
import (
"database/sql/driver"
"errors"
"iter"
"github.com/jmoiron/sqlx"
"github.com/syncthing/syncthing/internal/gen/bep"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"google.golang.org/protobuf/proto"
)
// iterStructs returns an iterator over the given struct type by scanning
// the SQL rows. `rows` is closed when the iterator exits. The returned
// error function reports any error encountered during iteration and must
// only be consulted after the iterator has been exhausted.
func iterStructs[T any](rows *sqlx.Rows, err error) (iter.Seq[T], func() error) {
	if err != nil {
		// Query already failed; yield nothing and surface the error.
		return func(_ func(T) bool) {}, func() error { return err }
	}
	var retErr error
	return func(yield func(T) bool) {
		defer rows.Close()
		for rows.Next() {
			v := new(T)
			if err := rows.StructScan(v); err != nil {
				retErr = err
				break
			}
			// Give the scanned value a chance to post-process itself,
			// when its type implements the optional cleanup hook.
			if cleanuper, ok := any(v).(interface{ cleanup() }); ok {
				cleanuper.cleanup()
			}
			if !yield(*v) {
				return
			}
		}
		// Don't let a late rows error overwrite an earlier scan error.
		if err := rows.Err(); err != nil && retErr == nil {
			retErr = err
		}
	}, func() error { return retErr }
}
// dbVector is a wrapper that allows protocol.Vector values to be serialized
// to and from the database.
type dbVector struct { //nolint:recvcheck
	protocol.Vector
}

// Value implements driver.Valuer, storing the vector in its string form.
func (v dbVector) Value() (driver.Value, error) {
	return v.String(), nil
}

// Scan implements sql.Scanner, parsing the string form produced by Value.
// The empty string scans to the zero vector.
func (v *dbVector) Scan(value any) error {
	str, ok := value.(string)
	if !ok {
		return errors.New("not a string")
	}
	if str == "" {
		v.Vector = protocol.Vector{}
		return nil
	}
	vec, err := protocol.VectorFromString(str)
	if err != nil {
		return wrap(err)
	}
	v.Vector = vec
	return nil
}
// indirectFI constructs a FileInfo from separate marshalled FileInfo and
// BlockList bytes.
type indirectFI struct {
	Name       string // not used, must be present as dest for Need iterator
	FiProtobuf []byte // marshalled bep.FileInfo
	BlProtobuf []byte // marshalled dbproto.BlockList, may be empty
	Size       int64  // not used
	Modified   int64  // not used
}

// FileInfo unmarshals the protobuf fields into a protocol.FileInfo,
// reattaching the indirected block list when present and converting the
// name back to native filename form.
func (i indirectFI) FileInfo() (protocol.FileInfo, error) {
	var fi bep.FileInfo
	if err := proto.Unmarshal(i.FiProtobuf, &fi); err != nil {
		return protocol.FileInfo{}, wrap(err, "unmarshal fileinfo")
	}
	if len(i.BlProtobuf) > 0 {
		var bl dbproto.BlockList
		if err := proto.Unmarshal(i.BlProtobuf, &bl); err != nil {
			return protocol.FileInfo{}, wrap(err, "unmarshal blocklist")
		}
		fi.Blocks = bl.Blocks
	}
	fi.Name = osutil.NativeFilename(fi.Name)
	return protocol.FileInfoFromDB(&fi), nil
}
// prefixEnd returns the smallest string that is lexicographically greater
// than every string having s as a prefix, suitable as an exclusive upper
// bound in range queries (key >= s AND key < prefixEnd(s)).
//
// It panics when no such bound exists: for the empty prefix, and for a
// prefix consisting solely of 0xff bytes. Neither occurs for the key
// prefixes used in this package.
func prefixEnd(s string) string {
	if s == "" {
		panic("bug: cannot represent end prefix for empty string")
	}
	bs := []byte(s)
	for i := len(bs) - 1; i >= 0; i-- {
		if bs[i] < 0xff {
			// Increment this byte and truncate everything after it. The
			// previous implementation kept trailing 0xff bytes, which
			// yields a loose bound admitting keys outside the prefix.
			bs[i]++
			return string(bs[:i+1])
		}
	}
	panic("bug: cannot represent end prefix for all-0xff string")
}

140
internal/db/typed.go Normal file
View File

@@ -0,0 +1,140 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"database/sql"
"encoding/binary"
"errors"
"time"
)
// Typed is a simple key-value store using a specific namespace within a
// lower level KV. All keys are stored as "prefix/key".
type Typed struct {
	db     KV
	prefix string // namespace prepended to every key
}

// NewMiscDB returns a Typed in the "misc" namespace, used for minor
// pieces of data that don't warrant a namespace of their own.
func NewMiscDB(db KV) *Typed {
	return NewTyped(db, "misc")
}

// NewTyped returns a new typed key-value store that lives in the namespace
// specified by the prefix.
func NewTyped(db KV, prefix string) *Typed {
	return &Typed{
		db:     db,
		prefix: prefix,
	}
}
// PutInt64 stores a new int64, big-endian encoded. Any existing value
// (even if of another type) is overwritten.
func (n *Typed) PutInt64(key string, val int64) error {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(val))
	return n.db.PutKV(n.prefixedKey(key), buf)
}
// Int64 returns the stored value interpreted as an int64 and a boolean that
// is false if no value was stored at the key.
func (n *Typed) Int64(key string) (int64, bool, error) {
	valBs, err := n.db.GetKV(n.prefixedKey(key))
	if err != nil {
		return 0, false, filterNotFound(err)
	}
	// A value of another type may have been stored at this key; guard the
	// length so binary.BigEndian.Uint64 can't panic on short input.
	if len(valBs) < 8 {
		return 0, false, errors.New("stored value is too short for int64")
	}
	val := binary.BigEndian.Uint64(valBs)
	return int64(val), true, nil
}
// PutTime stores a new time.Time. Any existing value (even if of another
// type) is overwritten.
func (n *Typed) PutTime(key string, val time.Time) error {
	// MarshalBinary on time.Time never returns an error.
	bs, _ := val.MarshalBinary()
	return n.db.PutKV(n.prefixedKey(key), bs)
}
// Time returns the stored value interpreted as a time.Time and a boolean
// that is false if no value was stored at the key.
func (n *Typed) Time(key string) (time.Time, bool, error) {
	bs, err := n.db.GetKV(n.prefixedKey(key))
	if err != nil {
		return time.Time{}, false, filterNotFound(err)
	}
	var t time.Time
	if err := t.UnmarshalBinary(bs); err != nil {
		return t, false, err
	}
	return t, true, nil
}
// PutString stores a new string. Any existing value (even if of another
// type) is overwritten.
func (n *Typed) PutString(key, val string) error {
	return n.db.PutKV(n.prefixedKey(key), []byte(val))
}

// String returns the stored value interpreted as a string and a boolean
// that is false if no value was stored at the key.
func (n *Typed) String(key string) (string, bool, error) {
	bs, err := n.db.GetKV(n.prefixedKey(key))
	if err == nil {
		return string(bs), true, nil
	}
	return "", false, filterNotFound(err)
}
// PutBytes stores a new byte slice. Any existing value (even if of another type)
// is overwritten. The slice is handed directly to the underlying store
// without copying here; presumably the store copies it — confirm against
// the KV implementation before mutating val after the call.
func (n *Typed) PutBytes(key string, val []byte) error {
	return n.db.PutKV(n.prefixedKey(key), val)
}
// Bytes returns the stored value as a raw byte slice and a boolean that
// is false if no value was stored at the key. The slice is whatever the
// underlying store returned; whether the caller may safely modify it
// depends on the KV implementation.
func (n *Typed) Bytes(key string) ([]byte, bool, error) {
	valBs, err := n.db.GetKV(n.prefixedKey(key))
	if err != nil {
		return nil, false, filterNotFound(err)
	}
	return valBs, true, nil
}
// PutBool stores a new boolean. Any existing value (even if of another type)
// is overwritten. Note the on-disk encoding, kept for compatibility with
// previously stored values: 0x0 means true and 0x1 means false.
func (n *Typed) PutBool(key string, val bool) error {
	b := byte(0x1)
	if val {
		b = 0x0
	}
	return n.db.PutKV(n.prefixedKey(key), []byte{b})
}
// Bool returns the stored value as a boolean and a boolean that
// is false if no value was stored at the key. An empty stored value (e.g.,
// one written as an empty string or byte slice) is treated as absent
// instead of panicking on the out-of-range index access.
func (n *Typed) Bool(key string) (bool, bool, error) {
	valBs, err := n.db.GetKV(n.prefixedKey(key))
	if err != nil {
		return false, false, filterNotFound(err)
	}
	if len(valBs) == 0 {
		return false, false, nil
	}
	// Encoding: 0x0 means true, 0x1 (or anything nonzero) means false.
	return valBs[0] == 0x0, true, nil
}
// Delete deletes the specified key. It is allowed to delete a nonexistent
// key. The key is namespaced with the store's prefix, like for all other
// operations.
func (n *Typed) Delete(key string) error {
	return n.db.DeleteKV(n.prefixedKey(key))
}
// prefixedKey returns the key with the store's namespace prefix and a
// slash prepended.
func (n *Typed) prefixedKey(key string) string {
	return n.prefix + "/" + key
}
func filterNotFound(err error) error {
if errors.Is(err, sql.ErrNoRows) {
return nil
}
return err
}

115
internal/db/typed_test.go Normal file
View File

@@ -0,0 +1,115 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db_test
import (
"testing"
"time"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/db/sqlite"
)
// TestNamespacedInt exercises the Typed key-value store on top of a
// temporary SQLite database: int64, time.Time and string round-trips,
// namespace isolation between two prefixes, and deletion.
func TestNamespacedInt(t *testing.T) {
	t.Parallel()

	ldb, err := sqlite.OpenTemp()
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		ldb.Close()
	})

	// Two stores sharing the same database but using different namespaces;
	// writes to one must not be visible in the other.
	n1 := db.NewTyped(ldb, "foo")
	n2 := db.NewTyped(ldb, "bar")

	t.Run("Int", func(t *testing.T) {
		t.Parallel()

		// Key is missing to start with
		if v, ok, err := n1.Int64("testint"); err != nil {
			t.Error("Unexpected error:", err)
		} else if v != 0 || ok {
			t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
		}

		if err := n1.PutInt64("testint", 42); err != nil {
			t.Fatal(err)
		}

		// It should now exist in n1
		if v, ok, err := n1.Int64("testint"); err != nil {
			t.Error("Unexpected error:", err)
		} else if v != 42 || !ok {
			t.Errorf("Incorrect return v %v != 42 || ok %v != true", v, ok)
		}

		// ... but not in n2, which is in a different namespace
		if v, ok, err := n2.Int64("testint"); err != nil {
			t.Error("Unexpected error:", err)
		} else if v != 0 || ok {
			t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
		}

		if err := n1.Delete("testint"); err != nil {
			t.Fatal(err)
		}

		// It should no longer exist
		if v, ok, err := n1.Int64("testint"); err != nil {
			t.Error("Unexpected error:", err)
		} else if v != 0 || ok {
			t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
		}
	})

	t.Run("Time", func(t *testing.T) {
		t.Parallel()

		// Key is missing to start with: zero time and ok == false.
		if v, ok, err := n1.Time("testtime"); err != nil {
			t.Error("Unexpected error:", err)
		} else if !v.IsZero() || ok {
			t.Errorf("Incorrect return v %v != %v || ok %v != false", v, time.Time{}, ok)
		}

		now := time.Now()

		if err := n1.PutTime("testtime", now); err != nil {
			t.Fatal(err)
		}

		// Compare with time.Equal rather than ==, since the binary
		// round-trip drops the monotonic clock reading.
		if v, ok, err := n1.Time("testtime"); err != nil {
			t.Error("Unexpected error:", err)
		} else if !v.Equal(now) || !ok {
			t.Errorf("Incorrect return v %v != %v || ok %v != true", v, now, ok)
		}
	})

	t.Run("String", func(t *testing.T) {
		t.Parallel()

		// Key is missing to start with.
		if v, ok, err := n1.String("teststring"); err != nil {
			t.Error("Unexpected error:", err)
		} else if v != "" || ok {
			t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
		}

		if err := n1.PutString("teststring", "yo"); err != nil {
			t.Fatal(err)
		}

		if v, ok, err := n1.String("teststring"); err != nil {
			t.Error("Unexpected error:", err)
		} else if v != "yo" || !ok {
			t.Errorf("Incorrect return v %q != \"yo\" || ok %v != true", v, ok)
		}
	})
}

View File

@@ -0,0 +1,83 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package itererr
import "iter"
// Collect returns a slice of the items from the iterator, plus the error if
// any.
func Collect[T any](it iter.Seq[T], errFn func() error) ([]T, error) {
var s []T
for v := range it {
s = append(s, v)
}
return s, errFn()
}
// Zip interleaves the iterator value with the error. The iteration ends
// after a non-nil error.
func Zip[T any](it iter.Seq[T], errFn func() error) iter.Seq2[T, error] {
return func(yield func(T, error) bool) {
for v := range it {
if !yield(v, nil) {
break
}
}
if err := errFn(); err != nil {
var zero T
yield(zero, err)
}
}
}
// Map returns a new iterator by applying the map function, while respecting
// the error function. Additionally, the map function can return an error if
// its own.
func Map[A, B any](i iter.Seq[A], errFn func() error, mapFn func(A) (B, error)) (iter.Seq[B], func() error) {
var retErr error
return func(yield func(B) bool) {
for v := range i {
mapped, err := mapFn(v)
if err != nil {
retErr = err
return
}
if !yield(mapped) {
return
}
}
}, func() error {
if prevErr := errFn(); prevErr != nil {
return prevErr
}
return retErr
}
}
// Map2 returns a new pairwise iterator by applying the map function, while
// respecting the error function. Additionally, the map function can return
// an error of its own, which stops the iteration and is reported by the
// returned error function. An error from the original error function takes
// precedence over a mapping error.
func Map2[A, B, C any](i iter.Seq[A], errFn func() error, mapFn func(A) (B, C, error)) (iter.Seq2[B, C], func() error) {
	var retErr error
	return func(yield func(B, C) bool) {
			for v := range i {
				ma, mb, err := mapFn(v)
				if err != nil {
					// Remember the mapping error for the error function
					// and end the iteration.
					retErr = err
					return
				}
				if !yield(ma, mb) {
					return
				}
			}
		}, func() error {
			if prevErr := errFn(); prevErr != nil {
				return prevErr
			}
			return retErr
		}
}

View File

@@ -1,3 +1,9 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package protoutil
import (

View File

@@ -0,0 +1,27 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package timeutil
import (
"sync/atomic"
"time"
)
var prevNanos atomic.Int64
// StrictlyMonotonicNanos returns the current time in Unix nanoseconds.
// Guaranteed to strictly increase for each call, regardless of the
// underlying OS timer resolution or clock jumps.
func StrictlyMonotonicNanos() int64 {
for {
old := prevNanos.Load()
now := max(time.Now().UnixNano(), old+1)
if prevNanos.CompareAndSwap(old, now) {
return now
}
}
}

View File

@@ -40,10 +40,10 @@ import (
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/connections"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
@@ -91,7 +91,7 @@ type service struct {
startupErr error
listenerAddr net.Addr
exitChan chan *svcutil.FatalErr
miscDB *db.NamespacedKV
miscDB *db.Typed
shutdownTimeout time.Duration
guiErrors logger.Recorder
@@ -106,7 +106,7 @@ type Service interface {
WaitForStart() error
}
func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonName string, m model.Model, defaultSub, diskSub events.BufferedSubscription, evLogger events.Logger, discoverer discover.Manager, connectionsService connections.Service, urService *ur.Service, fss model.FolderSummaryService, errors, systemLog logger.Recorder, noUpgrade bool, miscDB *db.NamespacedKV) Service {
func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonName string, m model.Model, defaultSub, diskSub events.BufferedSubscription, evLogger events.Logger, discoverer discover.Manager, connectionsService connections.Service, urService *ur.Service, fss model.FolderSummaryService, errors, systemLog logger.Recorder, noUpgrade bool, miscDB *db.Typed) Service {
return &service{
id: id,
cfg: cfg,
@@ -984,16 +984,11 @@ func (s *service) getDBFile(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
mtimeMapping, mtimeErr := s.model.GetMtimeMapping(folder, file)
sendJSON(w, map[string]interface{}{
"global": jsonFileInfo(gf),
"local": jsonFileInfo(lf),
"availability": av,
"mtime": map[string]interface{}{
"err": mtimeErr,
"value": mtimeMapping,
},
})
}
@@ -1002,28 +997,14 @@ func (s *service) getDebugFile(w http.ResponseWriter, r *http.Request) {
folder := qs.Get("folder")
file := qs.Get("file")
snap, err := s.model.DBSnapshot(folder)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
mtimeMapping, mtimeErr := s.model.GetMtimeMapping(folder, file)
lf, _ := snap.Get(protocol.LocalDeviceID, file)
gf, _ := snap.GetGlobal(file)
av := snap.Availability(file)
vl := snap.DebugGlobalVersions(file)
lf, _, _ := s.model.CurrentFolderFile(folder, file)
gf, _, _ := s.model.CurrentGlobalFile(folder, file)
av, _ := s.model.Availability(folder, protocol.FileInfo{Name: file}, protocol.BlockInfo{})
sendJSON(w, map[string]interface{}{
"global": jsonFileInfo(gf),
"local": jsonFileInfo(lf),
"availability": av,
"globalVersions": vl.String(),
"mtime": map[string]interface{}{
"err": mtimeErr,
"value": mtimeMapping,
},
"global": jsonFileInfo(gf),
"local": jsonFileInfo(lf),
"availability": av,
})
}

View File

@@ -10,10 +10,9 @@ import (
"testing"
"time"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/db/sqlite"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/events"
)
var guiCfg config.GUIConfiguration
@@ -131,8 +130,14 @@ func (c *mockClock) wind(t time.Duration) {
func TestTokenManager(t *testing.T) {
t.Parallel()
mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
kdb := db.NewNamespacedKV(mdb, "test")
mdb, err := sqlite.OpenTemp()
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
mdb.Close()
})
kdb := db.NewMiscDB(mdb)
clock := &mockClock{now: time.Now()}
// Token manager keeps up to three tokens with a validity time of 24 hours.

View File

@@ -11,7 +11,7 @@ import (
"strings"
"time"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/internal/db"
)
const (
@@ -34,7 +34,7 @@ type apiKeyValidator interface {
// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
// the request with 403. For / and /index.html, set a new CSRF cookie if none
// is currently set.
func newCsrfManager(unique string, prefix string, apiKeyValidator apiKeyValidator, next http.Handler, miscDB *db.NamespacedKV) *csrfManager {
func newCsrfManager(unique string, prefix string, apiKeyValidator apiKeyValidator, next http.Handler, miscDB *db.Typed) *csrfManager {
m := &csrfManager{
unique: unique,
prefix: prefix,

View File

@@ -27,12 +27,12 @@ import (
"github.com/d4l3k/messagediff"
"github.com/thejerf/suture/v4"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/db/sqlite"
"github.com/syncthing/syncthing/lib/assets"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config"
connmocks "github.com/syncthing/syncthing/lib/connections/mocks"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/db/backend"
discovermocks "github.com/syncthing/syncthing/lib/discover/mocks"
"github.com/syncthing/syncthing/lib/events"
eventmocks "github.com/syncthing/syncthing/lib/events/mocks"
@@ -84,8 +84,14 @@ func TestStopAfterBrokenConfig(t *testing.T) {
}
w := config.Wrap("/dev/null", cfg, protocol.LocalDeviceID, events.NoopLogger)
mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
kdb := db.NewMiscDataNamespace(mdb)
mdb, err := sqlite.OpenTemp()
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
mdb.Close()
})
kdb := db.NewMiscDB(mdb)
srv := New(protocol.LocalDeviceID, w, "", "syncthing", nil, nil, nil, events.NoopLogger, nil, nil, nil, nil, nil, nil, false, kdb).(*service)
srv.started = make(chan string)
@@ -217,11 +223,7 @@ type httpTestCase struct {
func TestAPIServiceRequests(t *testing.T) {
t.Parallel()
baseURL, cancel, err := startHTTP(apiCfg)
if err != nil {
t.Fatal(err)
}
t.Cleanup(cancel)
baseURL := startHTTP(t, apiCfg)
cases := []httpTestCase{
// /rest/db
@@ -598,11 +600,7 @@ func TestHTTPLogin(t *testing.T) {
APIKey: testAPIKey,
SendBasicAuthPrompt: sendBasicAuthPrompt,
})
baseURL, cancel, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
t.Cleanup(cancel)
baseURL := startHTTP(t, cfg)
url := baseURL + path
t.Run(fmt.Sprintf("%d path", expectedOkStatus), func(t *testing.T) {
@@ -795,13 +793,9 @@ func TestHTTPLogin(t *testing.T) {
w := initConfig(initialPassword, t)
{
baseURL, cancel, err := startHTTPWithShutdownTimeout(w, shutdownTimeout)
baseURL := startHTTPWithShutdownTimeout(t, w, shutdownTimeout)
cfgPath := baseURL + "/rest/config"
path := baseURL + "/meta.js"
t.Cleanup(cancel)
if err != nil {
t.Fatal(err)
}
resp := httpGetBasicAuth(path, "user", initialPassword)
if resp.StatusCode != http.StatusOK {
@@ -813,12 +807,8 @@ func TestHTTPLogin(t *testing.T) {
httpRequest(http.MethodPut, cfgPath, cfg, "", "", testAPIKey, "", "", "", nil, t)
}
{
baseURL, cancel, err := startHTTP(w)
baseURL := startHTTP(t, w)
path := baseURL + "/meta.js"
t.Cleanup(cancel)
if err != nil {
t.Fatal(err)
}
resp := httpGetBasicAuth(path, "user", initialPassword)
if resp.StatusCode != http.StatusForbidden {
@@ -837,13 +827,9 @@ func TestHTTPLogin(t *testing.T) {
w := initConfig(initialPassword, t)
{
baseURL, cancel, err := startHTTPWithShutdownTimeout(w, shutdownTimeout)
baseURL := startHTTPWithShutdownTimeout(t, w, shutdownTimeout)
cfgPath := baseURL + "/rest/config/gui"
path := baseURL + "/meta.js"
t.Cleanup(cancel)
if err != nil {
t.Fatal(err)
}
resp := httpGetBasicAuth(path, "user", initialPassword)
if resp.StatusCode != http.StatusOK {
@@ -855,12 +841,8 @@ func TestHTTPLogin(t *testing.T) {
httpRequest(http.MethodPut, cfgPath, cfg.GUI, "", "", testAPIKey, "", "", "", nil, t)
}
{
baseURL, cancel, err := startHTTP(w)
baseURL := startHTTP(t, w)
path := baseURL + "/meta.js"
t.Cleanup(cancel)
if err != nil {
t.Fatal(err)
}
resp := httpGetBasicAuth(path, "user", initialPassword)
if resp.StatusCode != http.StatusForbidden {
@@ -885,11 +867,7 @@ func TestHtmlFormLogin(t *testing.T) {
Password: "$2a$10$IdIZTxTg/dCNuNEGlmLynOjqg4B1FvDKuIV5e0BB3pnWVHNb8.GSq", // bcrypt of "räksmörgås" in UTF-8
SendBasicAuthPrompt: false,
})
baseURL, cancel, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
t.Cleanup(cancel)
baseURL := startHTTP(t, cfg)
loginUrl := baseURL + "/rest/noauth/auth/password"
resourceUrl := baseURL + "/meta.js"
@@ -1030,11 +1008,7 @@ func TestApiCache(t *testing.T) {
RawAddress: "127.0.0.1:0",
APIKey: testAPIKey,
})
baseURL, cancel, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
t.Cleanup(cancel)
baseURL := startHTTP(t, cfg)
httpGet := func(url string, bearer string) *http.Response {
return httpGet(url, "", "", "", bearer, nil, t)
@@ -1059,11 +1033,11 @@ func TestApiCache(t *testing.T) {
})
}
func startHTTP(cfg config.Wrapper) (string, context.CancelFunc, error) {
return startHTTPWithShutdownTimeout(cfg, 0)
func startHTTP(t *testing.T, cfg config.Wrapper) string {
return startHTTPWithShutdownTimeout(t, cfg, 0)
}
func startHTTPWithShutdownTimeout(cfg config.Wrapper, shutdownTimeout time.Duration) (string, context.CancelFunc, error) {
func startHTTPWithShutdownTimeout(t *testing.T, cfg config.Wrapper, shutdownTimeout time.Duration) string {
m := new(modelmocks.Model)
assetDir := "../../gui"
eventSub := new(eventmocks.BufferedSubscription)
@@ -1086,12 +1060,18 @@ func startHTTPWithShutdownTimeout(cfg config.Wrapper, shutdownTimeout time.Durat
// Instantiate the API service
urService := ur.New(cfg, m, connections, false)
mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
kdb := db.NewMiscDataNamespace(mdb)
mdb, err := sqlite.OpenTemp()
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
mdb.Close()
})
kdb := db.NewMiscDB(mdb)
svc := New(protocol.LocalDeviceID, cfg, assetDir, "syncthing", m, eventSub, diskEventSub, events.NoopLogger, discoverer, connections, urService, mockedSummary, errorLog, systemLog, false, kdb).(*service)
svc.started = addrChan
if shutdownTimeout > 0*time.Millisecond {
if shutdownTimeout > 0 {
svc.shutdownTimeout = shutdownTimeout
}
@@ -1101,14 +1081,14 @@ func startHTTPWithShutdownTimeout(cfg config.Wrapper, shutdownTimeout time.Durat
})
supervisor.Add(svc)
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
supervisor.ServeBackground(ctx)
// Make sure the API service is listening, and get the URL to use.
addr := <-addrChan
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
cancel()
return "", cancel, fmt.Errorf("weird address from API service: %w", err)
t.Fatal(fmt.Errorf("weird address from API service: %w", err))
}
host, _, _ := net.SplitHostPort(cfg.GUI().RawAddress)
@@ -1117,17 +1097,13 @@ func startHTTPWithShutdownTimeout(cfg config.Wrapper, shutdownTimeout time.Durat
}
baseURL := fmt.Sprintf("http://%s", net.JoinHostPort(host, strconv.Itoa(tcpAddr.Port)))
return baseURL, cancel, nil
return baseURL
}
func TestCSRFRequired(t *testing.T) {
t.Parallel()
baseURL, cancel, err := startHTTP(apiCfg)
if err != nil {
t.Fatal("Unexpected error from getting base URL:", err)
}
t.Cleanup(cancel)
baseURL := startHTTP(t, apiCfg)
cli := &http.Client{
Timeout: time.Minute,
@@ -1245,11 +1221,7 @@ func TestCSRFRequired(t *testing.T) {
func TestRandomString(t *testing.T) {
t.Parallel()
baseURL, cancel, err := startHTTP(apiCfg)
if err != nil {
t.Fatal(err)
}
defer cancel()
baseURL := startHTTP(t, apiCfg)
cli := &http.Client{
Timeout: time.Second,
}
@@ -1304,7 +1276,7 @@ func TestConfigPostOK(t *testing.T) {
]
}`))
resp, err := testConfigPost(cfg)
resp, err := testConfigPost(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -1325,7 +1297,7 @@ func TestConfigPostDupFolder(t *testing.T) {
]
}`))
resp, err := testConfigPost(cfg)
resp, err := testConfigPost(t, cfg)
if err != nil {
t.Fatal(err)
}
@@ -1334,12 +1306,10 @@ func TestConfigPostDupFolder(t *testing.T) {
}
}
func testConfigPost(data io.Reader) (*http.Response, error) {
baseURL, cancel, err := startHTTP(apiCfg)
if err != nil {
return nil, err
}
defer cancel()
func testConfigPost(t *testing.T, data io.Reader) (*http.Response, error) {
t.Helper()
baseURL := startHTTP(t, apiCfg)
cli := &http.Client{
Timeout: time.Second,
}
@@ -1356,11 +1326,7 @@ func TestHostCheck(t *testing.T) {
cfg := newMockedConfig()
cfg.GUIReturns(config.GUIConfiguration{RawAddress: "127.0.0.1:0"})
baseURL, cancel, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
defer cancel()
baseURL := startHTTP(t, cfg)
// A normal HTTP get to the localhost-bound service should succeed
@@ -1419,11 +1385,7 @@ func TestHostCheck(t *testing.T) {
RawAddress: "127.0.0.1:0",
InsecureSkipHostCheck: true,
})
baseURL, cancel, err = startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
defer cancel()
baseURL = startHTTP(t, cfg)
// A request with a suspicious Host header should be allowed
@@ -1445,11 +1407,7 @@ func TestHostCheck(t *testing.T) {
cfg.GUIReturns(config.GUIConfiguration{
RawAddress: "0.0.0.0:0",
})
baseURL, cancel, err = startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
defer cancel()
baseURL = startHTTP(t, cfg)
// A request with a suspicious Host header should be allowed
@@ -1476,11 +1434,7 @@ func TestHostCheck(t *testing.T) {
cfg.GUIReturns(config.GUIConfiguration{
RawAddress: "[::1]:0",
})
baseURL, cancel, err = startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
defer cancel()
baseURL = startHTTP(t, cfg)
// A normal HTTP get to the localhost-bound service should succeed
@@ -1568,11 +1522,7 @@ func TestAddressIsLocalhost(t *testing.T) {
func TestAccessControlAllowOriginHeader(t *testing.T) {
t.Parallel()
baseURL, cancel, err := startHTTP(apiCfg)
if err != nil {
t.Fatal(err)
}
defer cancel()
baseURL := startHTTP(t, apiCfg)
cli := &http.Client{
Timeout: time.Second,
}
@@ -1596,11 +1546,7 @@ func TestAccessControlAllowOriginHeader(t *testing.T) {
func TestOptionsRequest(t *testing.T) {
t.Parallel()
baseURL, cancel, err := startHTTP(apiCfg)
if err != nil {
t.Fatal(err)
}
defer cancel()
baseURL := startHTTP(t, apiCfg)
cli := &http.Client{
Timeout: time.Second,
}
@@ -1632,8 +1578,14 @@ func TestEventMasks(t *testing.T) {
cfg := newMockedConfig()
defSub := new(eventmocks.BufferedSubscription)
diskSub := new(eventmocks.BufferedSubscription)
mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
kdb := db.NewMiscDataNamespace(mdb)
mdb, err := sqlite.OpenTemp()
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
mdb.Close()
})
kdb := db.NewMiscDB(mdb)
svc := New(protocol.LocalDeviceID, cfg, "", "syncthing", nil, defSub, diskSub, events.NoopLogger, nil, nil, nil, nil, nil, nil, false, kdb).(*service)
if mask := svc.getEventMask(""); mask != DefaultEventMask {
@@ -1780,11 +1732,7 @@ func TestConfigChanges(t *testing.T) {
cfgCtx, cfgCancel := context.WithCancel(context.Background())
go w.Serve(cfgCtx)
defer cfgCancel()
baseURL, cancel, err := startHTTP(w)
if err != nil {
t.Fatal("Unexpected error from getting base URL:", err)
}
defer cancel()
baseURL := startHTTP(t, w)
cli := &http.Client{
Timeout: time.Minute,

View File

@@ -14,9 +14,9 @@ import (
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/gen/apiproto"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sync"
@@ -24,7 +24,7 @@ import (
type tokenManager struct {
key string
miscDB *db.NamespacedKV
miscDB *db.Typed
lifetime time.Duration
maxItems int
@@ -35,7 +35,7 @@ type tokenManager struct {
saveTimer *time.Timer
}
func newTokenManager(key string, miscDB *db.NamespacedKV, lifetime time.Duration, maxItems int) *tokenManager {
func newTokenManager(key string, miscDB *db.Typed, lifetime time.Duration, maxItems int) *tokenManager {
var tokens apiproto.TokenSet
if bs, ok, _ := miscDB.Bytes(key); ok {
_ = proto.Unmarshal(bs, &tokens) // best effort
@@ -152,7 +152,7 @@ type tokenCookieManager struct {
tokens *tokenManager
}
func newTokenCookieManager(shortID string, guiCfg config.GUIConfiguration, evLogger events.Logger, miscDB *db.NamespacedKV) *tokenCookieManager {
func newTokenCookieManager(shortID string, guiCfg config.GUIConfiguration, evLogger events.Logger, miscDB *db.Typed) *tokenCookieManager {
return &tokenCookieManager{
cookieName: "sessionid-" + shortID,
shortID: shortID,

View File

@@ -18,7 +18,7 @@ import (
"time"
)
const Codename = "Gold Grasshopper"
const Codename = "Hafnium Hornet"
var (
// Injected by build script
@@ -28,6 +28,9 @@ var (
Stamp = "0"
Tags = ""
// Added to by other packages
extraTags []string
// Set by init()
Date time.Time
IsRelease bool
@@ -43,6 +46,11 @@ var (
"STNORESTART",
"STNOUPGRADE",
}
replaceTags = map[string]string{
"sqlite_omit_load_extension": "",
"osusergo": "",
"netgo": "",
}
)
const versionExtraAllowedChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-. "
@@ -108,8 +116,23 @@ func TagsList() []string {
if Extra != "" {
tags = append(tags, Extra)
}
tags = append(tags, extraTags...)
// Replace any tag values we want to have more user friendly versions,
// or be removed
for i, tag := range tags {
if repl, ok := replaceTags[tag]; ok {
tags[i] = repl
}
}
sort.Strings(tags)
// Remove any empty tags, which will be at the front of the list now
for len(tags) > 0 && tags[0] == "" {
tags = tags[1:]
}
return tags
}
@@ -124,3 +147,8 @@ func filterString(s, allowedChars string) string {
}
return res.String()
}
func AddTag(tag string) {
extraTags = append(extraTags, tag)
LongVersion = LongVersionFor("syncthing")
}

View File

@@ -484,7 +484,7 @@ func TestIssue1262(t *testing.T) {
t.Fatal(err)
}
actual := cfg.Folders()["test"].Filesystem(nil).URI()
actual := cfg.Folders()["test"].Filesystem().URI()
expected := `e:\`
if actual != expected {
@@ -521,7 +521,7 @@ func TestFolderPath(t *testing.T) {
Path: "~/tmp",
}
realPath := folder.Filesystem(nil).URI()
realPath := folder.Filesystem().URI()
if !filepath.IsAbs(realPath) {
t.Error(realPath, "should be absolute")
}

View File

@@ -20,7 +20,6 @@ import (
"github.com/shirou/gopsutil/v4/disk"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol"
)
@@ -119,26 +118,24 @@ func (f FolderConfiguration) Copy() FolderConfiguration {
// Filesystem creates a filesystem for the path and options of this folder.
// The fset parameter may be nil, in which case no mtime handling on top of
// the filesystem is provided.
func (f FolderConfiguration) Filesystem(fset *db.FileSet) fs.Filesystem {
func (f FolderConfiguration) Filesystem(extraOpts ...fs.Option) fs.Filesystem {
// This is intentionally not a pointer method, because things like
// cfg.Folders["default"].Filesystem(nil) should be valid.
opts := make([]fs.Option, 0, 3)
var opts []fs.Option
if f.FilesystemType == FilesystemTypeBasic && f.JunctionsAsDirs {
opts = append(opts, new(fs.OptionJunctionsAsDirs))
}
if !f.CaseSensitiveFS {
opts = append(opts, new(fs.OptionDetectCaseConflicts))
}
if fset != nil {
opts = append(opts, fset.MtimeOption())
}
opts = append(opts, extraOpts...)
return fs.NewFilesystem(f.FilesystemType.ToFS(), f.Path, opts...)
}
func (f FolderConfiguration) ModTimeWindow() time.Duration {
dur := time.Duration(f.RawModTimeWindowS) * time.Second
if f.RawModTimeWindowS < 1 && build.IsAndroid {
if usage, err := disk.Usage(f.Filesystem(nil).URI()); err != nil {
if usage, err := disk.Usage(f.Filesystem().URI()); err != nil {
dur = 2 * time.Second
l.Debugf(`Detecting FS at "%v" on android: Setting mtime window to 2s: err == "%v"`, f.Path, err)
} else if strings.HasPrefix(strings.ToLower(usage.Fstype), "ext2") || strings.HasPrefix(strings.ToLower(usage.Fstype), "ext3") || strings.HasPrefix(strings.ToLower(usage.Fstype), "ext4") {
@@ -162,7 +159,7 @@ func (f *FolderConfiguration) CreateMarker() error {
return nil
}
ffs := f.Filesystem(nil)
ffs := f.Filesystem()
// Create the marker as a directory
err := ffs.Mkdir(DefaultMarkerName, 0o755)
@@ -189,7 +186,7 @@ func (f *FolderConfiguration) CreateMarker() error {
}
func (f *FolderConfiguration) RemoveMarker() error {
ffs := f.Filesystem(nil)
ffs := f.Filesystem()
_ = ffs.Remove(filepath.Join(DefaultMarkerName, f.markerFilename()))
return ffs.Remove(DefaultMarkerName)
}
@@ -209,7 +206,7 @@ func (f *FolderConfiguration) markerContents() []byte {
// CheckPath returns nil if the folder root exists and contains the marker file
func (f *FolderConfiguration) CheckPath() error {
return f.checkFilesystemPath(f.Filesystem(nil), ".")
return f.checkFilesystemPath(f.Filesystem(), ".")
}
func (f *FolderConfiguration) checkFilesystemPath(ffs fs.Filesystem, path string) error {
@@ -252,7 +249,7 @@ func (f *FolderConfiguration) CreateRoot() (err error) {
permBits = 0o700
}
filesystem := f.Filesystem(nil)
filesystem := f.Filesystem()
if _, err = filesystem.Stat("."); fs.IsNotExist(err) {
err = filesystem.MkdirAll(".", permBits)
@@ -363,7 +360,7 @@ func (f *FolderConfiguration) CheckAvailableSpace(req uint64) error {
if val <= 0 {
return nil
}
fs := f.Filesystem(nil)
fs := f.Filesystem()
usage, err := fs.Usage(".")
if err != nil {
return nil //nolint: nilerr

View File

@@ -208,7 +208,7 @@ func migrateToConfigV23(cfg *Configuration) {
// marker name in later versions.
for i := range cfg.Folders {
fs := cfg.Folders[i].Filesystem(nil)
fs := cfg.Folders[i].Filesystem()
// Invalid config posted, or tests.
if fs == nil {
continue
@@ -244,18 +244,18 @@ func migrateToConfigV21(cfg *Configuration) {
switch folder.Versioning.Type {
case "simple", "trashcan":
// Clean out symlinks in the known place
cleanSymlinks(folder.Filesystem(nil), ".stversions")
cleanSymlinks(folder.Filesystem(), ".stversions")
case "staggered":
versionDir := folder.Versioning.Params["versionsPath"]
if versionDir == "" {
// default place
cleanSymlinks(folder.Filesystem(nil), ".stversions")
cleanSymlinks(folder.Filesystem(), ".stversions")
} else if filepath.IsAbs(versionDir) {
// absolute
cleanSymlinks(fs.NewFilesystem(fs.FilesystemTypeBasic, versionDir), ".")
} else {
// relative to folder
cleanSymlinks(folder.Filesystem(nil), versionDir)
cleanSymlinks(folder.Filesystem(), versionDir)
}
}
}

View File

@@ -61,7 +61,6 @@ type OptionsConfiguration struct {
StunKeepaliveStartS int `json:"stunKeepaliveStartS" xml:"stunKeepaliveStartS" default:"180"`
StunKeepaliveMinS int `json:"stunKeepaliveMinS" xml:"stunKeepaliveMinS" default:"20"`
RawStunServers []string `json:"stunServers" xml:"stunServer" default:"default"`
DatabaseTuning Tuning `json:"databaseTuning" xml:"databaseTuning" restart:"true"`
RawMaxCIRequestKiB int `json:"maxConcurrentIncomingRequestKiB" xml:"maxConcurrentIncomingRequestKiB"`
AnnounceLANAddresses bool `json:"announceLANAddresses" xml:"announceLANAddresses" default:"true"`
SendFullIndexOnUpgrade bool `json:"sendFullIndexOnUpgrade" xml:"sendFullIndexOnUpgrade"`

View File

@@ -1,46 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package config
type Tuning int32
const (
TuningAuto Tuning = 0
TuningSmall Tuning = 1
TuningLarge Tuning = 2
)
func (t Tuning) String() string {
switch t {
case TuningAuto:
return "auto"
case TuningSmall:
return "small"
case TuningLarge:
return "large"
default:
return "unknown"
}
}
func (t Tuning) MarshalText() ([]byte, error) {
return []byte(t.String()), nil
}
func (t *Tuning) UnmarshalText(bs []byte) error {
switch string(bs) {
case "auto":
*t = TuningAuto
case "small":
*t = TuningSmall
case "large":
*t = TuningLarge
default:
*t = TuningAuto
}
return nil
}

View File

@@ -1,26 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package config_test
import (
"testing"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db/backend"
)
func TestTuningMatches(t *testing.T) {
if int(config.TuningAuto) != int(backend.TuningAuto) {
t.Error("mismatch for TuningAuto")
}
if int(config.TuningSmall) != int(backend.TuningSmall) {
t.Error("mismatch for TuningSmall")
}
if int(config.TuningLarge) != int(backend.TuningLarge) {
t.Error("mismatch for TuningLarge")
}
}

2
lib/db/.gitignore vendored
View File

@@ -1,2 +0,0 @@
!*.zip
testdata/*.db

View File

@@ -1,76 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package backend
import "testing"
// testBackendBehavior is the generic test suite that must be fulfilled by
// every backend implementation. It should be called by each implementation
// as (part of) their test suite.
// testBackendBehavior is the generic test suite that must be fulfilled by
// every backend implementation. It should be called by each implementation
// as (part of) their test suite.
func testBackendBehavior(t *testing.T, open func() Backend) {
	t.Run("WriteIsolation", func(t *testing.T) { testWriteIsolation(t, open) })
	// Fixed typo in the subtest name ("DeleteNonexisten").
	t.Run("DeleteNonexistent", func(t *testing.T) { testDeleteNonexistent(t, open) })
	t.Run("IteratorClosedDB", func(t *testing.T) { testIteratorClosedDB(t, open) })
}
// testWriteIsolation checks that values written during a transaction are not
// read back within that transaction; our updateGlobal depends on this.
// Unlike the original, every backend error is now checked instead of being
// silently discarded, so a failing backend is reported at the failing call
// rather than as a bogus assertion failure.
func testWriteIsolation(t *testing.T, open func() Backend) {
	db := open()
	defer db.Close()

	// Sanity check: a plain put must be readable back.
	if err := db.Put([]byte("a"), []byte("a")); err != nil {
		t.Fatal(err)
	}
	v, err := db.Get([]byte("a"))
	if err != nil {
		t.Fatal(err)
	}
	if string(v) != "a" {
		t.Fatal("read back should work")
	}

	// Now in a transaction we should still see the old value.
	tx, err := db.NewWriteTransaction()
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Release()
	if err := tx.Put([]byte("a"), []byte("b")); err != nil {
		t.Fatal(err)
	}
	v, err = tx.Get([]byte("a"))
	if err != nil {
		t.Fatal(err)
	}
	if string(v) != "a" {
		t.Fatal("read in transaction should read the old value")
	}
}
// testDeleteNonexistent verifies that deleting a key that was never stored
// is not reported as an error by the backend.
func testDeleteNonexistent(t *testing.T, open func() Backend) {
	db := open()
	defer db.Close()

	if err := db.Delete([]byte("a")); err != nil {
		t.Error(err)
	}
}
// Either creating the iterator or the .Error() method of the returned iterator
// should return an error and IsClosed(err) == true.
func testIteratorClosedDB(t *testing.T, open func() Backend) {
db := open()
_ = db.Put([]byte("a"), []byte("a"))
db.Close()
it, err := db.NewPrefixIterator(nil)
if err != nil {
if !IsClosed(err) {
t.Error("NewPrefixIterator: IsClosed(err) == false:", err)
}
return
}
it.Next()
if err := it.Error(); !IsClosed(err) {
t.Error("Next: IsClosed(err) == false:", err)
}
}

View File

@@ -1,13 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package backend
import (
"github.com/syncthing/syncthing/lib/logger"
)
// l is the package-level logging facility for the database backend.
var l = logger.DefaultLogger.NewFacility("backend", "The database backend")

View File

@@ -1,233 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package backend
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/util"
)
const (
	// Never flush transactions smaller than this, even on Checkpoint().
	// This just needs to be just large enough to avoid flushing
	// transactions when they are super tiny, thus creating millions of tiny
	// transactions unnecessarily.
	// Note: KiB and MiB are shift amounts (10 and 20), so this is 64 KiB.
	dbFlushBatchMin = 64 << KiB
	// Once a transaction reaches this size, flush it unconditionally. This
	// should be large enough to avoid forcing a flush between Checkpoint()
	// calls in loops where we do those, so in principle just large enough
	// to hold a FileInfo plus corresponding version list and metadata
	// updates or two. (1 MiB.)
	dbFlushBatchMax = 1 << MiB
)
// leveldbBackend implements Backend on top of a leveldb
type leveldbBackend struct {
	ldb      *leveldb.DB
	closeWG  *closeWaitGroup // tracks outstanding snapshots/transactions so Close can wait for them
	location string          // path the database was opened from; empty for in-memory
}

// newLeveldbBackend wraps an open leveldb handle in a Backend.
func newLeveldbBackend(ldb *leveldb.DB, location string) *leveldbBackend {
	return &leveldbBackend{
		ldb:      ldb,
		closeWG:  &closeWaitGroup{},
		location: location,
	}
}

// NewReadTransaction returns a read-only transaction backed by a leveldb
// snapshot.
func (b *leveldbBackend) NewReadTransaction() (ReadTransaction, error) {
	return b.newSnapshot()
}

// newSnapshot registers with the close wait group and takes a leveldb
// snapshot. On failure the registration is released again.
func (b *leveldbBackend) newSnapshot() (leveldbSnapshot, error) {
	rel, err := newReleaser(b.closeWG)
	if err != nil {
		return leveldbSnapshot{}, err
	}
	snap, err := b.ldb.GetSnapshot()
	if err != nil {
		rel.Release()
		return leveldbSnapshot{}, wrapLeveldbErr(err)
	}
	return leveldbSnapshot{
		snap: snap,
		rel:  rel,
	}, nil
}

// NewWriteTransaction returns a batched write transaction. Reads within the
// transaction see the database state at creation time, not the transaction's
// own pending writes (see testWriteIsolation). The optional hooks run before
// each flush of the batch.
func (b *leveldbBackend) NewWriteTransaction(hooks ...CommitHook) (WriteTransaction, error) {
	// Two close-wait-group registrations: one for the transaction itself
	// and one inside the embedded snapshot.
	rel, err := newReleaser(b.closeWG)
	if err != nil {
		return nil, err
	}
	snap, err := b.newSnapshot()
	if err != nil {
		rel.Release()
		return nil, err // already wrapped
	}
	return &leveldbTransaction{
		leveldbSnapshot: snap,
		ldb:             b.ldb,
		batch:           new(leveldb.Batch),
		rel:             rel,
		commitHooks:     hooks,
		inFlush:         false,
	}, nil
}

// Close waits for outstanding operations to finish, then closes the
// underlying database.
func (b *leveldbBackend) Close() error {
	b.closeWG.CloseWait()
	return wrapLeveldbErr(b.ldb.Close())
}

// Get returns the value for key, with leveldb errors translated to backend
// errors (e.g. errNotFound).
func (b *leveldbBackend) Get(key []byte) ([]byte, error) {
	val, err := b.ldb.Get(key, nil)
	return val, wrapLeveldbErr(err)
}

// NewPrefixIterator returns an iterator over all keys with the given prefix.
func (b *leveldbBackend) NewPrefixIterator(prefix []byte) (Iterator, error) {
	return &leveldbIterator{b.ldb.NewIterator(util.BytesPrefix(prefix), nil)}, nil
}

// NewRangeIterator returns an iterator over the key range from first to last.
func (b *leveldbBackend) NewRangeIterator(first, last []byte) (Iterator, error) {
	return &leveldbIterator{b.ldb.NewIterator(&util.Range{Start: first, Limit: last}, nil)}, nil
}

// Put stores val under key, outside any transaction.
func (b *leveldbBackend) Put(key, val []byte) error {
	return wrapLeveldbErr(b.ldb.Put(key, val, nil))
}

// Delete removes key, outside any transaction.
func (b *leveldbBackend) Delete(key []byte) error {
	return wrapLeveldbErr(b.ldb.Delete(key, nil))
}

// Compact runs a full-range compaction of the database.
func (b *leveldbBackend) Compact() error {
	// Race is detected during testing when db is closed while compaction
	// is ongoing.
	err := b.closeWG.Add(1)
	if err != nil {
		return err
	}
	defer b.closeWG.Done()
	return wrapLeveldbErr(b.ldb.CompactRange(util.Range{}))
}

// Location returns the path the database was opened from.
func (b *leveldbBackend) Location() string {
	return b.location
}
// leveldbSnapshot implements backend.ReadTransaction on top of a leveldb
// snapshot, releasing its close-wait-group slot when it is released.
type leveldbSnapshot struct {
	snap *leveldb.Snapshot
	rel  *releaser
}

// Get returns the value stored for key in the snapshot, with any leveldb
// error translated to a backend error.
func (ls leveldbSnapshot) Get(key []byte) ([]byte, error) {
	val, err := ls.snap.Get(key, nil)
	if err != nil {
		return val, wrapLeveldbErr(err)
	}
	return val, nil
}

// NewPrefixIterator returns an iterator over all snapshot keys beginning
// with prefix.
func (ls leveldbSnapshot) NewPrefixIterator(prefix []byte) (Iterator, error) {
	it := ls.snap.NewIterator(util.BytesPrefix(prefix), nil)
	return it, nil
}

// NewRangeIterator returns an iterator over the snapshot key range defined
// by first and last.
func (ls leveldbSnapshot) NewRangeIterator(first, last []byte) (Iterator, error) {
	it := ls.snap.NewIterator(&util.Range{Start: first, Limit: last}, nil)
	return it, nil
}

// Release frees the snapshot and signals completion to the close wait group.
func (ls leveldbSnapshot) Release() {
	ls.snap.Release()
	ls.rel.Release()
}
// leveldbTransaction implements backend.WriteTransaction using a batch (not
// an actual leveldb transaction)
type leveldbTransaction struct {
	leveldbSnapshot
	ldb         *leveldb.DB
	batch       *leveldb.Batch // pending writes, flushed when large enough
	rel         *releaser
	commitHooks []CommitHook
	inFlush     bool // guards against commit hooks recursing into flush
}

// Delete queues removal of key, flushing the batch if it has passed the
// hard size limit.
func (t *leveldbTransaction) Delete(key []byte) error {
	t.batch.Delete(key)
	return t.checkFlush(dbFlushBatchMax)
}

// Put queues storing val under key, flushing the batch if it has passed the
// hard size limit.
func (t *leveldbTransaction) Put(key, val []byte) error {
	t.batch.Put(key, val)
	return t.checkFlush(dbFlushBatchMax)
}

// Checkpoint flushes the batch if it is at least the minimum flush size.
func (t *leveldbTransaction) Checkpoint() error {
	return t.checkFlush(dbFlushBatchMin)
}

// Commit flushes any remaining batched writes, then releases the snapshot
// and the transaction's close-wait-group slot.
func (t *leveldbTransaction) Commit() error {
	err := wrapLeveldbErr(t.flush())
	t.leveldbSnapshot.Release()
	t.rel.Release()
	return err
}

// Release discards the transaction; pending batched writes are not flushed.
func (t *leveldbTransaction) Release() {
	t.leveldbSnapshot.Release()
	t.rel.Release()
}

// checkFlush flushes and resets the batch if its size exceeds the given size.
func (t *leveldbTransaction) checkFlush(size int) error {
	// Hooks might put values in the database, which triggers a checkFlush which might trigger a flush,
	// which might trigger the hooks.
	// Don't recurse...
	if t.inFlush || len(t.batch.Dump()) < size {
		return nil
	}
	return t.flush()
}

// flush runs the commit hooks, then writes the batch to the database and
// resets it. Hooks run first so anything they add is included in this write.
func (t *leveldbTransaction) flush() error {
	t.inFlush = true
	defer func() { t.inFlush = false }()
	for _, hook := range t.commitHooks {
		if err := hook(t); err != nil {
			return err
		}
	}
	if t.batch.Len() == 0 {
		return nil
	}
	if err := t.ldb.Write(t.batch, nil); err != nil {
		return wrapLeveldbErr(err)
	}
	t.batch.Reset()
	return nil
}
// leveldbIterator wraps a leveldb iterator so the errors it reports are
// translated to backend errors.
type leveldbIterator struct {
	iterator.Iterator
}

// Error returns the underlying iterator's error, translated to a backend
// error.
func (it *leveldbIterator) Error() error {
	if err := it.Iterator.Error(); err != nil {
		return wrapLeveldbErr(err)
	}
	return nil
}
// wrapLeveldbErr translates leveldb's sentinel errors to the backend
// package's equivalents so callers can recognize them; other errors pass
// through unchanged.
func wrapLeveldbErr(err error) error {
	if err == leveldb.ErrClosed {
		return errClosed
	}
	if err == leveldb.ErrNotFound {
		return errNotFound
	}
	return err
}

View File

@@ -1,231 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package backend
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
const (
	// dbMaxOpenFiles caps leveldb's open file cache.
	dbMaxOpenFiles = 100

	// A large database is > 200 MiB. It's a mostly arbitrary value, but
	// it's also the case that each file is 2 MiB by default and when we
	// have dbMaxOpenFiles of them we will need to start thrashing fd:s.
	// Switching to large database settings causes larger files to be used
	// when compacting, reducing the number.
	dbLargeThreshold = dbMaxOpenFiles * (2 << MiB)

	// KiB and MiB are shift amounts, not sizes: 1 << KiB == 1024,
	// 1 << MiB == 1048576.
	KiB = 10
	MiB = 20
)
// OpenLevelDB attempts to open the database at the given location, and runs
// recovery on it if opening fails. Worst case, if recovery is not possible,
// the database is erased and created from scratch.
func OpenLevelDB(location string, tuning Tuning) (Backend, error) {
	ldb, err := open(location, optsFor(location, tuning))
	if err != nil {
		return nil, err
	}
	return newLeveldbBackend(ldb, location), nil
}
// OpenLevelDBAuto is OpenLevelDB with TuningAuto tuning, i.e. the tuning
// profile is chosen based on the database's current on-disk size.
func OpenLevelDBAuto(location string) (Backend, error) {
	return OpenLevelDB(location, TuningAuto)
}
// OpenLevelDBRO attempts to open the database at the given location, read
// only. No tuning profile applies; only the open-files cache is configured.
func OpenLevelDBRO(location string) (Backend, error) {
	roOpts := &opt.Options{
		OpenFilesCacheCapacity: dbMaxOpenFiles,
		ReadOnly:               true,
	}
	ldb, err := open(location, roOpts)
	if err != nil {
		return nil, err
	}
	return newLeveldbBackend(ldb, location), nil
}
// OpenLevelDBMemory returns a new Backend referencing an in-memory database.
func OpenLevelDBMemory() Backend {
	// Error deliberately ignored: opening in-memory storage does not touch
	// the filesystem and is not expected to fail.
	ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
	return newLeveldbBackend(ldb, "")
}
// optsFor returns the database options to use when opening a database with
// the given location and tuning. Settings can be overridden by debug
// environment variables (STDEBUG_<name>).
func optsFor(location string, tuning Tuning) *opt.Options {
	large := false
	switch tuning {
	case TuningLarge:
		large = true
	case TuningAuto:
		large = dbIsLarge(location)
	}

	var (
		// Set defaults used for small databases.
		defaultBlockCacheCapacity            = 0 // 0 means let leveldb use default
		defaultBlockSize                     = 0
		defaultCompactionTableSize           = 0
		defaultCompactionTableSizeMultiplier = 0
		defaultWriteBuffer                   = 16 << MiB                     // increased from leveldb default of 4 MiB
		defaultCompactionL0Trigger           = opt.DefaultCompactionL0Trigger // explicit because we use it as base for other stuff
	)
	if large {
		// Change the parameters for better throughput at the price of some
		// RAM and larger files. This results in larger batches of writes
		// and compaction at a lower frequency.
		l.Infoln("Using large-database tuning")

		defaultBlockCacheCapacity = 64 << MiB
		defaultBlockSize = 64 << KiB
		defaultCompactionTableSize = 16 << MiB
		defaultCompactionTableSizeMultiplier = 20 // 2.0 after division by ten
		defaultWriteBuffer = 64 << MiB
		defaultCompactionL0Trigger = 8 // number of l0 files
	}

	// The effective L0 compaction trigger is needed several times below;
	// resolve the environment override once.
	l0Trigger := debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)

	return &opt.Options{
		BlockCacheCapacity:            debugEnvValue("BlockCacheCapacity", defaultBlockCacheCapacity),
		BlockCacheEvictRemoved:        debugEnvValue("BlockCacheEvictRemoved", 0) != 0,
		BlockRestartInterval:          debugEnvValue("BlockRestartInterval", 0),
		BlockSize:                     debugEnvValue("BlockSize", defaultBlockSize),
		CompactionExpandLimitFactor:   debugEnvValue("CompactionExpandLimitFactor", 0),
		CompactionGPOverlapsFactor:    debugEnvValue("CompactionGPOverlapsFactor", 0),
		CompactionL0Trigger:           l0Trigger,
		CompactionSourceLimitFactor:   debugEnvValue("CompactionSourceLimitFactor", 0),
		CompactionTableSize:           debugEnvValue("CompactionTableSize", defaultCompactionTableSize),
		CompactionTableSizeMultiplier: float64(debugEnvValue("CompactionTableSizeMultiplier", defaultCompactionTableSizeMultiplier)) / 10.0,
		CompactionTotalSize:           debugEnvValue("CompactionTotalSize", 0),
		CompactionTotalSizeMultiplier: float64(debugEnvValue("CompactionTotalSizeMultiplier", 0)) / 10.0,
		DisableBufferPool:             debugEnvValue("DisableBufferPool", 0) != 0,
		DisableBlockCache:             debugEnvValue("DisableBlockCache", 0) != 0,
		DisableCompactionBackoff:      debugEnvValue("DisableCompactionBackoff", 0) != 0,
		DisableLargeBatchTransaction:  debugEnvValue("DisableLargeBatchTransaction", 0) != 0,
		NoSync:                        debugEnvValue("NoSync", 0) != 0,
		NoWriteMerge:                  debugEnvValue("NoWriteMerge", 0) != 0,
		OpenFilesCacheCapacity:        debugEnvValue("OpenFilesCacheCapacity", dbMaxOpenFiles),
		WriteBuffer:                   debugEnvValue("WriteBuffer", defaultWriteBuffer),
		// The write slowdown and pause can be overridden, but even if they
		// are not and the compaction trigger is overridden we need to
		// adjust so that we don't pause writes for L0 compaction before we
		// even *start* L0 compaction...
		WriteL0SlowdownTrigger: debugEnvValue("WriteL0SlowdownTrigger", 2*l0Trigger),
		// Bug fix: this previously looked up the "WriteL0SlowdownTrigger"
		// environment variable (copy-paste), making the pause trigger
		// impossible to override independently.
		WriteL0PauseTrigger: debugEnvValue("WriteL0PauseTrigger", 3*l0Trigger),
	}
}
// open opens the leveldb at location with the given options, attempting
// recovery if the database is corrupted and, as a last resort, deleting and
// recreating it from scratch.
func open(location string, opts *opt.Options) (*leveldb.DB, error) {
	db, err := leveldb.OpenFile(location, opts)
	if leveldbIsCorrupted(err) {
		// First try leveldb's own recovery.
		db, err = leveldb.RecoverFile(location, opts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := os.RemoveAll(location); err != nil {
			return nil, &errorSuggestion{err, "failed to delete corrupted database"}
		}
		db, err = leveldb.OpenFile(location, opts)
	}
	if err != nil {
		// A remaining non-corruption error here is presumably a lock held
		// by another process — hence the suggestion text.
		return nil, &errorSuggestion{err, "is another instance of Syncthing running?"}
	}
	// Debug hook: force a full compaction at startup.
	if debugEnvValue("CompactEverything", 0) != 0 {
		if err := db.CompactRange(util.Range{}); err != nil {
			l.Warnln("Compacting database:", err)
		}
	}
	return db, nil
}
// debugEnvValue returns the integer value of the environment variable
// STDEBUG_<key>, or the given default if the variable is unset or not a
// valid integer.
func debugEnvValue(key string, def int) int {
	if v, err := strconv.ParseInt(os.Getenv("STDEBUG_"+key), 10, 63); err == nil {
		return int(v)
	}
	return def
}
// leveldbIsCorrupted is a "better" version of leveldb's errors.IsCorrupted:
// it also treats any error whose text mentions "corrupted" as corruption.
func leveldbIsCorrupted(err error) bool {
	if err == nil {
		return false
	}
	return errors.IsCorrupted(err) || strings.Contains(err.Error(), "corrupted")
}
// dbIsLarge reports whether the estimated size of the database at location
// is large enough to warrant optimization for large databases.
func dbIsLarge(location string) bool {
	if ^uint(0)>>63 == 0 {
		// We're compiled for a 32 bit architecture. We've seen trouble with
		// large settings there.
		// (https://forum.syncthing.net/t/many-small-ldb-files-with-database-tuning/13842)
		return false
	}

	entries, err := os.ReadDir(location)
	if err != nil {
		// Unreadable or missing directory: assume not large.
		return false
	}

	var total int64
	for _, entry := range entries {
		// The LOG file's size doesn't count.
		if entry.Name() == "LOG" {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			continue
		}
		total += info.Size()
	}
	return total > dbLargeThreshold
}
// errorSuggestion pairs an underlying error with a human-readable hint about
// what the user might do about it.
type errorSuggestion struct {
	inner      error
	suggestion string
}

// Error renders the inner error followed by the suggestion in parentheses.
func (e *errorSuggestion) Error() string {
	return e.inner.Error() + " (" + e.suggestion + ")"
}

View File

@@ -1,13 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package backend
import "testing"
// TestLevelDBBackendBehavior runs the shared backend behavior suite against
// an in-memory leveldb backend.
func TestLevelDBBackendBehavior(t *testing.T) {
	testBackendBehavior(t, OpenLevelDBMemory)
}

View File

@@ -1,344 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db_test
import (
"fmt"
"testing"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/protocol"
)
// Shared benchmark fixtures, populated once by lazyInitBenchFiles.
var files, oneFile, firstHalf, secondHalf, changed100, unchanged100 []protocol.FileInfo
// lazyInitBenchFiles populates the shared benchmark fixtures exactly once:
// 1000 files, the two halves, a single-file slice, and the changed/unchanged
// hundred-file slices used by the Update benchmarks.
func lazyInitBenchFiles() {
	if files != nil {
		return
	}

	files = make([]protocol.FileInfo, 0, 1000)
	for i := 0; i < 1000; i++ {
		files = append(files, protocol.FileInfo{
			Name:    fmt.Sprintf("file%d", i),
			Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}},
			Blocks:  genBlocks(i),
		})
	}

	middle := len(files) / 2
	firstHalf = files[:middle]
	secondHalf = files[middle:]
	oneFile = firstHalf[middle-1 : middle]
	// Bug fix: the original used := here, declaring locals that shadowed
	// the package-level unchanged100/changed100 and leaving the globals
	// nil for every benchmark that uses them.
	unchanged100 = files[100:200]
	changed100 = append([]protocol.FileInfo{}, unchanged100...)
	for i := range changed100 {
		changed100[i].Version = changed100[i].Version.Copy().Update(myID)
	}
}
// getBenchFileSet returns an in-memory database and a FileSet pre-populated
// with the full fixture list for remoteDevice0 and the first half for the
// local device.
func getBenchFileSet(b testing.TB) (*db.Lowlevel, *db.FileSet) {
	lazyInitBenchFiles()
	ldb := newLowlevelMemory(b)
	// "test)" (with the stray paren) is the fixture folder name used
	// consistently by these benchmarks.
	benchS := newFileSet(b, "test)", ldb)
	replace(benchS, remoteDevice0, files)
	replace(benchS, protocol.LocalDeviceID, firstHalf)
	return ldb, benchS
}
func BenchmarkReplaceAll(b *testing.B) {
ldb := newLowlevelMemory(b)
defer ldb.Close()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m := newFileSet(b, "test)", ldb)
replace(m, protocol.LocalDeviceID, files)
}
b.ReportAllocs()
}
// BenchmarkUpdateOneChanged measures updating a single file, alternating
// between a changed and an unchanged version so every other iteration is a
// real change.
func BenchmarkUpdateOneChanged(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	changed := make([]protocol.FileInfo, 1)
	changed[0] = oneFile[0]
	changed[0].Version = changed[0].Version.Copy().Update(myID)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if i%2 == 0 {
			benchS.Update(protocol.LocalDeviceID, changed)
		} else {
			benchS.Update(protocol.LocalDeviceID, oneFile)
		}
	}
	b.ReportAllocs()
}
// BenchmarkUpdate100Changed measures updating 100 local files, alternating
// changed and unchanged versions each iteration.
func BenchmarkUpdate100Changed(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if i%2 == 0 {
			benchS.Update(protocol.LocalDeviceID, changed100)
		} else {
			benchS.Update(protocol.LocalDeviceID, unchanged100)
		}
	}
	b.ReportAllocs()
}
// setup10Remotes adds index data for ten additional remote device IDs
// derived from remoteDevice1's ID string with a varying first character.
// NOTE(review): %v formats the rune as its numeric value (e.g. "74"), not a
// letter, and DeviceIDFromString's error is discarded — so these may all
// parse to the zero DeviceID. Confirm whether %c was intended.
func setup10Remotes(benchS *db.FileSet) {
	idBase := remoteDevice1.String()[1:]
	first := 'J'
	for i := 0; i < 10; i++ {
		id, _ := protocol.DeviceIDFromString(fmt.Sprintf("%v%s", first+rune(i), idBase))
		if i%2 == 0 {
			benchS.Update(id, changed100)
		} else {
			benchS.Update(id, unchanged100)
		}
	}
}
// BenchmarkUpdate100Changed10Remotes is BenchmarkUpdate100Changed with ten
// additional remote devices carrying index data.
func BenchmarkUpdate100Changed10Remotes(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	setup10Remotes(benchS)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if i%2 == 0 {
			benchS.Update(protocol.LocalDeviceID, changed100)
		} else {
			benchS.Update(protocol.LocalDeviceID, unchanged100)
		}
	}
	b.ReportAllocs()
}
// BenchmarkUpdate100ChangedRemote measures updating 100 files attributed to
// remoteDevice0, alternating changed and unchanged versions.
func BenchmarkUpdate100ChangedRemote(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if i%2 == 0 {
			benchS.Update(remoteDevice0, changed100)
		} else {
			benchS.Update(remoteDevice0, unchanged100)
		}
	}
	b.ReportAllocs()
}
// BenchmarkUpdate100ChangedRemote10Remotes measures updating 100 remote
// files while ten additional remote devices carry index data.
func BenchmarkUpdate100ChangedRemote10Remotes(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()

	// Bug fix: despite its name, this benchmark never set up the ten extra
	// remotes, making it identical to BenchmarkUpdate100ChangedRemote.
	// Mirror the sibling BenchmarkUpdate100Changed10Remotes.
	setup10Remotes(benchS)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if i%2 == 0 {
			benchS.Update(remoteDevice0, changed100)
		} else {
			benchS.Update(remoteDevice0, unchanged100)
		}
	}

	b.ReportAllocs()
}
// BenchmarkUpdateOneUnchanged measures the no-op case: updating one file
// with an identical version every iteration.
func BenchmarkUpdateOneUnchanged(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		benchS.Update(protocol.LocalDeviceID, oneFile)
	}
	b.ReportAllocs()
}
// BenchmarkNeedHalf measures iterating the local device's needed files (the
// second half of the fixture set) from a snapshot.
func BenchmarkNeedHalf(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		count := 0
		snap := snapshot(b, benchS)
		snap.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileInfo) bool {
			count++
			return true
		})
		snap.Release()
		if count != len(secondHalf) {
			b.Errorf("wrong length %d != %d", count, len(secondHalf))
		}
	}
	b.ReportAllocs()
}
func BenchmarkNeedHalfRemote(b *testing.B) {
ldb := newLowlevelMemory(b)
defer ldb.Close()
fset := newFileSet(b, "test)", ldb)
replace(fset, remoteDevice0, firstHalf)
replace(fset, protocol.LocalDeviceID, files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := snapshot(b, fset)
snap.WithNeed(remoteDevice0, func(fi protocol.FileInfo) bool {
count++
return true
})
snap.Release()
if count != len(secondHalf) {
b.Errorf("wrong length %d != %d", count, len(secondHalf))
}
}
b.ReportAllocs()
}
// BenchmarkHave measures iterating the local device's own files (the first
// half of the fixture set) from a snapshot.
func BenchmarkHave(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		count := 0
		snap := snapshot(b, benchS)
		snap.WithHave(protocol.LocalDeviceID, func(fi protocol.FileInfo) bool {
			count++
			return true
		})
		snap.Release()
		if count != len(firstHalf) {
			b.Errorf("wrong length %d != %d", count, len(firstHalf))
		}
	}
	b.ReportAllocs()
}
// BenchmarkGlobal measures iterating the global file list (all fixture
// files) from a snapshot.
func BenchmarkGlobal(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		count := 0
		snap := snapshot(b, benchS)
		snap.WithGlobal(func(fi protocol.FileInfo) bool {
			count++
			return true
		})
		snap.Release()
		if count != len(files) {
			b.Errorf("wrong length %d != %d", count, len(files))
		}
	}
	b.ReportAllocs()
}
// BenchmarkNeedHalfTruncated is BenchmarkNeedHalf using the truncated
// (block-less) iteration variant.
func BenchmarkNeedHalfTruncated(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		count := 0
		snap := snapshot(b, benchS)
		snap.WithNeedTruncated(protocol.LocalDeviceID, func(fi protocol.FileInfo) bool {
			count++
			return true
		})
		snap.Release()
		if count != len(secondHalf) {
			b.Errorf("wrong length %d != %d", count, len(secondHalf))
		}
	}
	b.ReportAllocs()
}
// BenchmarkHaveTruncated is BenchmarkHave using the truncated (block-less)
// iteration variant.
func BenchmarkHaveTruncated(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		count := 0
		snap := snapshot(b, benchS)
		snap.WithHaveTruncated(protocol.LocalDeviceID, func(fi protocol.FileInfo) bool {
			count++
			return true
		})
		snap.Release()
		if count != len(firstHalf) {
			b.Errorf("wrong length %d != %d", count, len(firstHalf))
		}
	}
	b.ReportAllocs()
}
// BenchmarkGlobalTruncated is BenchmarkGlobal using the truncated
// (block-less) iteration variant.
func BenchmarkGlobalTruncated(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		count := 0
		snap := snapshot(b, benchS)
		snap.WithGlobalTruncated(func(fi protocol.FileInfo) bool {
			count++
			return true
		})
		snap.Release()
		if count != len(files) {
			b.Errorf("wrong length %d != %d", count, len(files))
		}
	}
	b.ReportAllocs()
}
// BenchmarkNeedCount measures computing the local need size from a snapshot,
// after 100 files have been bumped to newer versions.
func BenchmarkNeedCount(b *testing.B) {
	ldb, benchS := getBenchFileSet(b)
	defer ldb.Close()
	benchS.Update(protocol.LocalDeviceID, changed100)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		snap := snapshot(b, benchS)
		_ = snap.NeedSize(protocol.LocalDeviceID)
		snap.Release()
	}
	b.ReportAllocs()
}

View File

@@ -1,64 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"encoding/binary"
"fmt"
"github.com/syncthing/syncthing/lib/osutil"
)
// BlockFinder looks up, per folder, the files that contain a block with a
// given hash.
type BlockFinder struct {
	db *Lowlevel
}

// NewBlockFinder returns a BlockFinder operating on the given database.
func NewBlockFinder(db *Lowlevel) *BlockFinder {
	return &BlockFinder{
		db: db,
	}
}

// String identifies this BlockFinder instance for logging.
func (f *BlockFinder) String() string {
	return fmt.Sprintf("BlockFinder@%p", f)
}

// Iterate takes an iterator function which iterates over all matching blocks
// for the given hash. The iterator function has to return either true (if
// they are happy with the block) or false to continue iterating for whatever
// reason. The iterator finally returns the result, whether or not a
// satisfying block was eventually found.
func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
	t, err := f.db.newReadOnlyTransaction()
	if err != nil {
		// Database errors are treated as "nothing found".
		return false
	}
	defer t.close()
	var key []byte
	for _, folder := range folders {
		key, err = f.db.keyer.GenerateBlockMapKey(key, []byte(folder), hash, nil)
		if err != nil {
			return false
		}
		iter, err := t.NewPrefixIterator(key)
		if err != nil {
			return false
		}
		// The value holds the block's index within the file; the file name
		// is encoded in the key.
		for iter.Next() && iter.Error() == nil {
			file := string(f.db.keyer.NameFromBlockMapKey(iter.Key()))
			index := int32(binary.BigEndian.Uint32(iter.Value()))
			if iterFn(folder, osutil.NativeFilename(file), index) {
				iter.Release()
				return true
			}
		}
		iter.Release()
	}
	return false
}

View File

@@ -1,260 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"encoding/binary"
"testing"
"github.com/syncthing/syncthing/lib/protocol"
)
// Shared test fixtures: three files with disjoint block ranges (assigned in
// init below) and the folder names used by the block map tests.
var (
	f1, f2, f3 protocol.FileInfo
	folders    = []string{"folder1", "folder2"}
)
// init carves 30 generated blocks into three disjoint ranges of ten for the
// fixture files f1, f2 and f3.
func init() {
	blocks := genBlocks(30)
	f1 = protocol.FileInfo{
		Name:   "f1",
		Blocks: blocks[:10],
	}
	f2 = protocol.FileInfo{
		Name:   "f2",
		Blocks: blocks[10:20],
	}
	f3 = protocol.FileInfo{
		Name:   "f3",
		Blocks: blocks[20:],
	}
}
// setup returns a fresh in-memory database and a BlockFinder over it.
func setup(t testing.TB) (*Lowlevel, *BlockFinder) {
	t.Helper()
	db := newLowlevelMemory(t)
	return db, NewBlockFinder(db)
}
// dbEmpty reports whether the database contains no block map entries.
func dbEmpty(db *Lowlevel) bool {
	iter, err := db.NewPrefixIterator([]byte{KeyTypeBlock})
	if err != nil {
		panic(err)
	}
	defer iter.Release()
	if iter.Next() {
		return false
	}
	return true
}
// addToBlockMap stores block map entries, in one transaction, for every file
// in fs that is not a directory, deleted, or invalid. Each entry maps
// (folder, block hash, file name) to the block's index within the file.
func addToBlockMap(db *Lowlevel, folder []byte, fs []protocol.FileInfo) error {
	t, err := db.newReadWriteTransaction()
	if err != nil {
		return err
	}
	defer t.close()

	var keyBuf []byte
	blockBuf := make([]byte, 4)
	for _, f := range fs {
		if f.IsDirectory() || f.IsDeleted() || f.IsInvalid() {
			continue
		}
		name := []byte(f.Name)
		for i, block := range f.Blocks {
			binary.BigEndian.PutUint32(blockBuf, uint32(i))
			keyBuf, err = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
			if err != nil {
				return err
			}
			if err := t.Put(keyBuf, blockBuf); err != nil {
				return err
			}
		}
	}
	return t.Commit()
}
// discardFromBlockMap removes, in one transaction, the block map entries of
// every file in fs that is not a directory, deleted, or invalid.
func discardFromBlockMap(db *Lowlevel, folder []byte, fs []protocol.FileInfo) error {
	t, err := db.newReadWriteTransaction()
	if err != nil {
		return err
	}
	defer t.close()

	var keyBuf []byte
	for _, ef := range fs {
		if ef.IsDirectory() || ef.IsDeleted() || ef.IsInvalid() {
			continue
		}
		name := []byte(ef.Name)
		for _, block := range ef.Blocks {
			keyBuf, err = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
			if err != nil {
				return err
			}
			if err := t.Delete(keyBuf); err != nil {
				return err
			}
		}
	}
	return t.Commit()
}
// TestBlockMapAddUpdateWipe exercises the block map lifecycle: adding files,
// replacing them with deleted/invalid versions, and dropping the folder.
func TestBlockMapAddUpdateWipe(t *testing.T) {
	db, f := setup(t)
	defer db.Close()
	if !dbEmpty(db) {
		t.Fatal("db not empty")
	}
	folder := []byte("folder1")
	// f3 is made a directory; directories get no block map entries.
	f3.Type = protocol.FileInfoTypeDirectory
	if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil {
		t.Fatal(err)
	}
	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
		if folder != "folder1" || file != "f1" || index != 0 {
			t.Fatal("Mismatch")
		}
		return true
	})
	f.Iterate(folders, f2.Blocks[0].Hash, func(folder, file string, index int32) bool {
		if folder != "folder1" || file != "f2" || index != 0 {
			t.Fatal("Mismatch")
		}
		return true
	})
	f.Iterate(folders, f3.Blocks[0].Hash, func(folder, file string, index int32) bool {
		t.Fatal("Unexpected block")
		return true
	})
	if err := discardFromBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil {
		t.Fatal(err)
	}
	// Re-add with f1 deleted and f2 invalid; neither should get entries.
	f1.Deleted = true
	f2.LocalFlags = protocol.FlagLocalMustRescan // one of the invalid markers
	if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil {
		t.Fatal(err)
	}
	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
		t.Fatal("Unexpected block")
		return false
	})
	f.Iterate(folders, f2.Blocks[0].Hash, func(folder, file string, index int32) bool {
		t.Fatal("Unexpected block")
		return false
	})
	// NOTE(review): f3 is still a directory here, so this callback may never
	// run, making the assertion vacuous — confirm the intent.
	f.Iterate(folders, f3.Blocks[0].Hash, func(folder, file string, index int32) bool {
		if folder != "folder1" || file != "f3" || index != 0 {
			t.Fatal("Mismatch")
		}
		return true
	})
	if err := db.dropFolder(folder); err != nil {
		t.Fatal(err)
	}
	if !dbEmpty(db) {
		t.Fatal("db not empty")
	}
	// Should not add
	if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2}); err != nil {
		t.Fatal(err)
	}
	if !dbEmpty(db) {
		t.Fatal("db not empty")
	}
	// Restore the fixtures mutated above for later tests.
	// NOTE(review): f3.Type is left as directory — confirm no later test
	// relies on it being a regular file again.
	f1.Deleted = false
	f1.LocalFlags = 0
	f2.Deleted = false
	f2.LocalFlags = 0
	f3.Deleted = false
	f3.LocalFlags = 0
}
// TestBlockFinderLookup verifies that Iterate visits matches across folders
// in order, and that a file re-added as deleted no longer matches.
func TestBlockFinderLookup(t *testing.T) {
	db, f := setup(t)
	defer db.Close()
	folder1 := []byte("folder1")
	folder2 := []byte("folder2")
	if err := addToBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil {
		t.Fatal(err)
	}
	if err := addToBlockMap(db, folder2, []protocol.FileInfo{f1}); err != nil {
		t.Fatal(err)
	}
	counter := 0
	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
		counter++
		switch counter {
		case 1:
			if folder != "folder1" || file != "f1" || index != 0 {
				t.Fatal("Mismatch")
			}
		case 2:
			if folder != "folder2" || file != "f1" || index != 0 {
				t.Fatal("Mismatch")
			}
		default:
			t.Fatal("Unexpected block")
		}
		// Returning false continues to the next match.
		return false
	})
	if counter != 2 {
		t.Fatal("Incorrect count", counter)
	}
	if err := discardFromBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil {
		t.Fatal(err)
	}
	// Re-adding f1 as deleted must not recreate its folder1 entries.
	f1.Deleted = true
	if err := addToBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil {
		t.Fatal(err)
	}
	counter = 0
	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
		counter++
		switch counter {
		case 1:
			if folder != "folder2" || file != "f1" || index != 0 {
				t.Fatal("Mismatch")
			}
		default:
			t.Fatal("Unexpected block")
		}
		return false
	})
	if counter != 1 {
		t.Fatal("Incorrect count")
	}
	// Restore the fixture mutated above.
	f1.Deleted = false
}

View File

@@ -1,701 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
)
// genBlocks generates n deterministic block descriptors for test data; block
// i has size i and a 32-byte hash whose bytes are i+j at position j.
func genBlocks(n int) []protocol.BlockInfo {
	blocks := make([]protocol.BlockInfo, n)
	for i := range blocks {
		hash := make([]byte, 32)
		for j := range hash {
			hash[j] = byte(i + j)
		}
		blocks[i] = protocol.BlockInfo{Size: i, Hash: hash}
	}
	return blocks
}
// myID is the counter ID used for the local device in test version vectors.
const myID = 1

var (
	remoteDevice0, remoteDevice1 protocol.DeviceID
	invalid                      = "invalid"  // name of the file marked invalid in haveUpdate0to3
	slashPrefixed                = "/notgood" // deliberately slash-prefixed name used in haveUpdate0to3
	haveUpdate0to3               map[protocol.DeviceID][]protocol.FileInfo
)
// init resolves the fixture remote device IDs and builds the haveUpdate0to3
// per-device index update fixture used by the set tests.
func init() {
	remoteDevice0, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
	remoteDevice1, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
	haveUpdate0to3 = map[protocol.DeviceID][]protocol.FileInfo{
		protocol.LocalDeviceID: {
			protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
			protocol.FileInfo{Name: slashPrefixed, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		},
		remoteDevice0: {
			protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
			protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), RawInvalid: true},
			protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
		},
		remoteDevice1: {
			protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(7)},
			protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(5), RawInvalid: true},
			protocol.FileInfo{Name: invalid, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1004}}}, Blocks: genBlocks(5), RawInvalid: true},
		},
	}
}
// TestRepairSequence checks that a few hand-crafted messed-up sequence entries get fixed.
func TestRepairSequence(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	folderStr := "test"
	folder := []byte(folderStr)
	id := protocol.LocalDeviceID
	short := protocol.LocalDeviceID.Short()

	files := []protocol.FileInfo{
		{Name: "fine", Blocks: genBlocks(1)},
		{Name: "duplicate", Blocks: genBlocks(2)},
		{Name: "missing", Blocks: genBlocks(3)},
		{Name: "overwriting", Blocks: genBlocks(4)},
		{Name: "inconsistent", Blocks: genBlocks(5)},
		{Name: "inconsistentNotIndirected", Blocks: genBlocks(2)},
	}
	for i, f := range files {
		files[i].Version = f.Version.Update(short)
	}

	trans, err := db.newReadWriteTransaction()
	if err != nil {
		t.Fatal(err)
	}
	defer trans.close()

	// addFile writes both the device file entry and a sequence entry
	// pointing at it, mimicking a normal database update.
	addFile := func(f protocol.FileInfo, seq int64) {
		dk, err := trans.keyer.GenerateDeviceFileKey(nil, folder, id[:], []byte(f.Name))
		if err != nil {
			t.Fatal(err)
		}
		if err := trans.putFile(dk, f); err != nil {
			t.Fatal(err)
		}
		sk, err := trans.keyer.GenerateSequenceKey(nil, folder, seq)
		if err != nil {
			t.Fatal(err)
		}
		if err := trans.Put(sk, dk); err != nil {
			t.Fatal(err)
		}
	}

	// Plain normal entry
	var seq int64 = 1
	files[0].Sequence = 1
	addFile(files[0], seq)

	// Second entry once updated with original sequence still in place
	f := files[1]
	f.Sequence = int64(len(files) + 1)
	addFile(f, f.Sequence)

	// Original sequence entry
	seq++
	sk, err := trans.keyer.GenerateSequenceKey(nil, folder, seq)
	if err != nil {
		t.Fatal(err)
	}
	dk, err := trans.keyer.GenerateDeviceFileKey(nil, folder, id[:], []byte(f.Name))
	if err != nil {
		t.Fatal(err)
	}
	if err := trans.Put(sk, dk); err != nil {
		t.Fatal(err)
	}

	// File later overwritten thus missing sequence entry
	seq++
	files[2].Sequence = seq
	addFile(files[2], seq)

	// File overwriting previous sequence entry (no seq bump)
	seq++
	files[3].Sequence = seq
	addFile(files[3], seq)

	// Inconsistent files
	seq++
	files[4].Sequence = 101
	addFile(files[4], seq)
	seq++
	files[5].Sequence = 102
	addFile(files[5], seq)

	// And a sequence entry pointing at nothing because why not
	sk, err = trans.keyer.GenerateSequenceKey(nil, folder, 100001)
	if err != nil {
		t.Fatal(err)
	}
	dk, err = trans.keyer.GenerateDeviceFileKey(nil, folder, id[:], []byte("nonexisting"))
	if err != nil {
		t.Fatal(err)
	}
	if err := trans.Put(sk, dk); err != nil {
		t.Fatal(err)
	}

	if err := trans.Commit(); err != nil {
		t.Fatal(err)
	}

	// Loading the metadata for the first time means a "re"calculation happens,
	// along which the sequences get repaired too.
	db.gcMut.RLock()
	_, err = db.loadMetadataTracker(folderStr)
	db.gcMut.RUnlock()
	if err != nil {
		t.Fatal(err)
	}

	// Check the db: every device file entry must have a sequence entry
	// pointing back at it.
	ro, err := db.newReadOnlyTransaction()
	if err != nil {
		t.Fatal(err)
	}
	defer ro.close()

	it, err := ro.NewPrefixIterator([]byte{KeyTypeDevice})
	if err != nil {
		t.Fatal(err)
	}
	defer it.Release()
	for it.Next() {
		fi, err := ro.unmarshalTrunc(it.Value(), true)
		if err != nil {
			t.Fatal(err)
		}
		if sk, err = ro.keyer.GenerateSequenceKey(sk, folder, fi.SequenceNo()); err != nil {
			t.Fatal(err)
		}
		dk, err := ro.Get(sk)
		if backend.IsNotFound(err) {
			t.Error("Missing sequence entry for", fi.FileName())
		} else if err != nil {
			t.Fatal(err)
		}
		// Fixed: previously this printed f.FileName() — the stale loop
		// variable captured above — instead of the file being checked.
		if !bytes.Equal(it.Key(), dk) {
			t.Errorf("Wrong key for %v, expected %s, got %s", fi.FileName(), it.Key(), dk)
		}
	}
	if err := it.Error(); err != nil {
		t.Fatal(err)
	}
	it.Release()

	// And every sequence entry must point at a real file whose sequence
	// number matches, with non-empty blocks.
	it, err = ro.NewPrefixIterator([]byte{KeyTypeSequence})
	if err != nil {
		t.Fatal(err)
	}
	defer it.Release()
	for it.Next() {
		fi, ok, err := ro.getFileTrunc(it.Value(), false)
		if err != nil {
			t.Fatal(err)
		}
		seq := ro.keyer.SequenceFromSequenceKey(it.Key())
		if !ok {
			t.Errorf("Sequence entry %v points at nothing", seq)
		} else if fi.SequenceNo() != seq {
			t.Errorf("Inconsistent sequence entry for %v: %v != %v", fi.FileName(), fi.SequenceNo(), seq)
		}
		if len(fi.Blocks) == 0 {
			t.Error("Missing blocks in", fi.FileName())
		}
	}
	if err := it.Error(); err != nil {
		t.Fatal(err)
	}
	it.Release()
}
// TestDowngrade verifies that opening a database written by a newer schema
// version fails with a databaseDowngradeError carrying the right minimum
// Syncthing version.
func TestDowngrade(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()
	// sets the min version etc
	if err := UpdateSchema(db); err != nil {
		t.Fatal(err)
	}

	// Bump the database version to something newer than we actually support
	miscDB := NewMiscDataNamespace(db)
	if err := miscDB.PutInt64("dbVersion", dbVersion+1); err != nil {
		t.Fatal(err)
	}

	// Pretend we just opened the DB and attempt to update it again.
	// (Removed a leftover debug l.Infoln(dbVersion) here.)
	err := UpdateSchema(db)
	if err, ok := err.(*databaseDowngradeError); !ok {
		t.Fatal("Expected error due to database downgrade, got", err)
	} else if err.minSyncthingVersion != dbMinSyncthingVersion {
		t.Fatalf("Error has %v as min Syncthing version, expected %v", err.minSyncthingVersion, dbMinSyncthingVersion)
	}
}
// TestCheckGlobals verifies that checkGlobals removes a global version entry
// whose underlying device file entry has disappeared.
func TestCheckGlobals(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	fs := newFileSet(t, "test", db)

	// Insert a single file so both a device entry and a global entry exist.
	name := "foo"
	fs.Update(protocol.LocalDeviceID, []protocol.FileInfo{
		{
			Name:    name,
			Type:    protocol.FileInfoTypeFile,
			Version: protocol.Vector{Counters: []protocol.Counter{{ID: 1, Value: 1001}}},
		},
	})

	// Drop every device file entry, orphaning the global entry.
	if err := db.dropPrefix([]byte{KeyTypeDevice}); err != nil {
		t.Fatal(err)
	}

	// checkGlobals should detect and remove exactly one orphan.
	repaired, err := db.checkGlobals(fs.folder)
	if err != nil {
		t.Fatal(err)
	}
	if repaired != 1 {
		t.Error("Expected 1 repaired global item, got", repaired)
	}

	// The global version key for the dropped file must now be gone.
	globalKey, err := db.keyer.GenerateGlobalVersionKey(nil, []byte(fs.folder), []byte(name))
	if err != nil {
		t.Fatal(err)
	}
	if _, err = db.Get(globalKey); !backend.IsNotFound(err) {
		t.Error("Expected key missing error, got", err)
	}
}
// TestDropDuplicates exercises normalizeFilenamesAndDropDuplicates: repeated
// names collapse to one entry carrying the highest (last) sequence number.
func TestDropDuplicates(t *testing.T) {
	names := []string{
		"foo",
		"bar",
		"dcxvoijnds",
		"3d/dsfase/4/ss2",
	}
	cases := []struct{ in, out []int }{
		{[]int{0}, []int{0}},
		{[]int{0, 1}, []int{0, 1}},
		{[]int{0, 1, 0, 1}, []int{0, 1}},
		{[]int{0, 1, 1, 1, 1}, []int{0, 1}},
		{[]int{0, 0, 0, 1}, []int{0, 1}},
		{[]int{0, 1, 2, 3}, []int{0, 1, 2, 3}},
		{[]int{3, 2, 1, 0, 0, 1, 2, 3}, []int{0, 1, 2, 3}},
		{[]int{0, 1, 1, 3, 0, 1, 0, 1, 2, 3}, []int{0, 1, 2, 3}},
	}
	for caseIdx, tc := range cases {
		// Build the input file list; wantSeq records the last (highest)
		// sequence assigned to each name.
		input := make([]protocol.FileInfo, len(tc.in))
		wantSeq := make(map[string]int)
		for pos, nameIdx := range tc.in {
			input[pos] = protocol.FileInfo{Name: names[nameIdx], Sequence: int64(pos)}
			wantSeq[names[nameIdx]] = pos
		}

		got := normalizeFilenamesAndDropDuplicates(input)
		if len(got) != len(tc.out) {
			t.Errorf("tc %v: Expected %v entries, got %v", caseIdx, len(tc.out), len(got))
			continue
		}
		for pos, f := range got {
			if want := names[tc.out[pos]]; want != f.Name {
				t.Errorf("tc %v: Got file %v at pos %v, expected %v", caseIdx, f.Name, pos, want)
			}
			if want := int64(wantSeq[got[pos].Name]); want != f.Sequence {
				t.Errorf("tc %v: Got sequence %v at pos %v, expected %v", caseIdx, f.Sequence, pos, want)
			}
		}
	}
}
// TestGCIndirect verifies that gcIndirect removes unreferenced block lists
// but keeps the ones still referenced by live file entries.
func TestGCIndirect(t *testing.T) {
	// Verify that the gcIndirect run actually removes block lists.

	db := newLowlevelMemory(t)
	defer db.Close()
	meta := newMetadataTracker(db.keyer, events.NoopLogger)

	// Add three files with different block lists

	files := []protocol.FileInfo{
		{Name: "a", Blocks: genBlocks(100)},
		{Name: "b", Blocks: genBlocks(200)},
		{Name: "c", Blocks: genBlocks(300)},
	}

	db.updateLocalFiles([]byte("folder"), files, meta)

	// Run a GC pass

	db.gcIndirect(context.Background())

	// Verify that we have three different block lists

	n, err := numBlockLists(db)
	if err != nil {
		t.Fatal(err)
	}
	if n != len(files) {
		t.Fatal("expected each file to have a block list")
	}

	// Change the block lists for each file

	for i := range files {
		files[i].Version = files[i].Version.Update(42)
		files[i].Blocks = genBlocks(len(files[i].Blocks) + 1)
	}

	db.updateLocalFiles([]byte("folder"), files, meta)

	// Verify that we now have *six* different block lists

	n, err = numBlockLists(db)
	if err != nil {
		t.Fatal(err)
	}
	if n != 2*len(files) {
		t.Fatal("expected both old and new block lists to exist")
	}

	// Run a GC pass

	db.gcIndirect(context.Background())

	// Verify that we now have just the three we need, again

	n, err = numBlockLists(db)
	if err != nil {
		t.Fatal(err)
	}
	if n != len(files) {
		t.Fatal("expected GC to collect all but the needed ones")
	}

	// Double check the correctness by loading the block lists and comparing with what we stored

	tr, err := db.newReadOnlyTransaction()
	if err != nil {
		// Fixed: was t.Fatal() with the error value dropped.
		t.Fatal(err)
	}
	defer tr.Release()
	for _, f := range files {
		fi, ok, err := tr.getFile([]byte("folder"), protocol.LocalDeviceID[:], []byte(f.Name))
		if err != nil {
			t.Fatal(err)
		}
		if !ok {
			t.Fatal("mysteriously missing")
		}
		if len(fi.Blocks) != len(f.Blocks) {
			t.Fatal("block list mismatch")
		}
		for i := range fi.Blocks {
			if !bytes.Equal(fi.Blocks[i].Hash, f.Blocks[i].Hash) {
				t.Fatal("hash mismatch")
			}
		}
	}
}
// TestUpdateTo14 verifies the schema-14 migration: a file entry with a
// non-nil BlocksHash but no blocks (written by a previous bug) gets marked
// MustRescan locally and invalid in the global list.
func TestUpdateTo14(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	folderStr := "default"
	folder := []byte(folderStr)
	name := []byte("foo")
	// Size stays below the indirection cutoff so blocks are stored inline.
	file := protocol.FileInfo{Name: string(name), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(blocksIndirectionCutoff - 1)}
	file.BlocksHash = protocol.BlocksHash(file.Blocks)
	// The corrupted variant: BlocksHash present, Blocks missing.
	fileWOBlocks := file
	fileWOBlocks.Blocks = nil
	meta, err := db.loadMetadataTracker(folderStr)
	if err != nil {
		t.Fatal(err)
	}

	// Initially add the correct file the usual way, all good here.
	if err := db.updateLocalFiles(folder, []protocol.FileInfo{file}, meta); err != nil {
		t.Fatal(err)
	}

	// Simulate the previous bug, where .putFile could write a file info without
	// blocks, even though the file has them (and thus a non-nil BlocksHash).
	trans, err := db.newReadWriteTransaction()
	if err != nil {
		t.Fatal(err)
	}
	defer trans.close()
	key, err := db.keyer.GenerateDeviceFileKey(nil, folder, protocol.LocalDeviceID[:], name)
	if err != nil {
		t.Fatal(err)
	}
	fiBs := mustMarshal(fileWOBlocks.ToWire(true))
	if err := trans.Put(key, fiBs); err != nil {
		t.Fatal(err)
	}
	if err := trans.Commit(); err != nil {
		t.Fatal(err)
	}
	trans.close()

	// Run migration, pretending we're still on schema 13.
	if err := (&schemaUpdater{db}).updateSchemaTo14(13); err != nil {
		t.Fatal(err)
	}

	// checks: the local entry must be flagged for rescan, the global entry
	// must be invalid so it can't be picked as the version to sync.
	ro, err := db.newReadOnlyTransaction()
	if err != nil {
		t.Fatal(err)
	}
	defer ro.close()
	if f, ok, err := ro.getFileByKey(key); err != nil {
		t.Fatal(err)
	} else if !ok {
		t.Error("file missing")
	} else if !f.MustRescan() {
		t.Error("file not marked as MustRescan")
	}

	if vl, err := ro.getGlobalVersions(nil, folder, name); err != nil {
		t.Fatal(err)
	} else if fv, ok := vlGetGlobal(vl); !ok {
		t.Error("missing global")
	} else if !fvIsInvalid(fv) {
		t.Error("global not marked as invalid")
	}
}
// TestFlushRecursion checks that a commit hook may itself write to the
// transaction without triggering a further flush (and hence recursion).
func TestFlushRecursion(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	// The hook writes one small record each time it fires.
	fired := 0
	hook := func(tx backend.WriteTransaction) error {
		key := []byte(fmt.Sprintf("hook-key-%d", fired))
		val := []byte(fmt.Sprintf("hook-value-%d", fired))
		if err := tx.Put(key, val); err != nil {
			t.Fatal(err)
		}
		fired++
		return nil
	}

	tx, err := db.NewWriteTransaction(hook)
	if err != nil {
		t.Fatal(err)
	}
	defer tx.Release()

	// Keep writing until an automatic flush fires the hook.
	for n := 0; fired == 0; n++ {
		key := []byte(fmt.Sprintf("key-%d", n))
		val := []byte(fmt.Sprintf("value-%d", n))
		if err := tx.Put(key, val); err != nil {
			t.Fatal(err)
		}
	}

	// The hook should have fired precisely once.
	if fired != 1 {
		t.Error("expect one hook fire, not", fired)
	}
}
// TestCheckLocalNeed verifies that checkLocalNeed repairs both a spurious
// need entry (for a file we already have) and a missing need entry.
func TestCheckLocalNeed(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	folderStr := "test"
	fs := newFileSet(t, folderStr, db)

	// Add files such that we are in sync for a and b, and need c and d.
	files := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}},
		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}},
		{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}},
		{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}},
	}
	fs.Update(protocol.LocalDeviceID, files)
	files[2].Version = files[2].Version.Update(remoteDevice0.Short())
	// Fixed copy-paste bug: this previously derived d's remote version from
	// files[2].Version instead of files[3].Version.
	files[3].Version = files[3].Version.Update(remoteDevice0.Short())
	fs.Update(remoteDevice0, files)

	// checkNeed asserts that exactly c and d are needed locally, both in
	// the metadata counts and by iterating the need entries.
	checkNeed := func() {
		snap := snapshot(t, fs)
		defer snap.Release()
		c := snap.NeedSize(protocol.LocalDeviceID)
		if c.Files != 2 {
			t.Errorf("Expected 2 needed files locally, got %v in meta", c.Files)
		}
		needed := make([]protocol.FileInfo, 0, 2)
		snap.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileInfo) bool {
			needed = append(needed, fi)
			return true
		})
		if l := len(needed); l != 2 {
			t.Errorf("Expected 2 needed files locally, got %v in db", l)
		} else if needed[0].Name != "c" || needed[1].Name != "d" {
			t.Errorf("Expected files c and d to be needed, got %v and %v", needed[0].Name, needed[1].Name)
		}
	}
	checkNeed()

	trans, err := db.newReadWriteTransaction()
	if err != nil {
		t.Fatal(err)
	}
	defer trans.close()

	// Corrupt the need bookkeeping: add "b" to needed and remove "d".
	folder := []byte(folderStr)
	key, err := trans.keyer.GenerateNeedFileKey(nil, folder, []byte(files[1].Name))
	if err != nil {
		t.Fatal(err)
	}
	if err = trans.Put(key, nil); err != nil {
		t.Fatal(err)
	}
	key, err = trans.keyer.GenerateNeedFileKey(nil, folder, []byte(files[3].Name))
	if err != nil {
		t.Fatal(err)
	}
	if err = trans.Delete(key); err != nil {
		t.Fatal(err)
	}

	if err := trans.Commit(); err != nil {
		t.Fatal(err)
	}

	// The repair pass must fix both corruptions.
	if repaired, err := db.checkLocalNeed(folder); err != nil {
		t.Fatal(err)
	} else if repaired != 2 {
		t.Error("Expected 2 repaired local need items, got", repaired)
	}

	checkNeed()
}
// TestDuplicateNeedCount verifies that a repair pass doesn't leave more than
// one need-count bucket for the local device in the metadata.
func TestDuplicateNeedCount(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	folder := "test"

	// One local file, then a newer remote version so the local device
	// needs it.
	fs := newFileSet(t, folder, db)
	files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID), Sequence: 1}}
	fs.Update(protocol.LocalDeviceID, files)
	files[0].Version = files[0].Version.Update(remoteDevice0.Short())
	fs.Update(remoteDevice0, files)

	// Run the repair pass and reload the set.
	db.checkRepair()
	fs = newFileSet(t, folder, db)

	seen := false
	for _, c := range fs.meta.counts.Counts {
		if protocol.LocalDeviceID == c.DeviceID && c.LocalFlags == needFlag {
			if seen {
				t.Fatal("second need count for local device encountered")
			}
			seen = true
		}
	}
	if !seen {
		t.Fatal("no need count for local device encountered")
	}
}
// TestNeedAfterDropGlobal verifies that need accounting for a third device
// stays correct after the device holding the global version is dropped.
func TestNeedAfterDropGlobal(t *testing.T) {
	db := newLowlevelMemory(t)
	defer db.Close()

	folder := "test"
	fs := newFileSet(t, folder, db)

	// Initial:
	// Three devices and a file "foo": local has version 1, remoteDevice0
	// has a newer version and remoteDevice1 doesn't have it.
	// All of them have "bar", just so the db knows about remoteDevice1.
	files := []protocol.FileInfo{
		{Name: "foo", Version: protocol.Vector{}.Update(myID), Sequence: 1},
		{Name: "bar", Version: protocol.Vector{}.Update(myID), Sequence: 2},
	}
	fs.Update(protocol.LocalDeviceID, files)
	files[0].Version = files[0].Version.Update(myID)
	fs.Update(remoteDevice0, files)
	fs.Update(remoteDevice1, files[1:])

	// remoteDevice1 needs one file: foo
	snap := snapshot(t, fs)
	c := snap.NeedSize(remoteDevice1)
	if c.Files != 1 {
		t.Errorf("Expected 1 needed files initially, got %v", c.Files)
	}
	snap.Release()

	// Drop remoteDevice0, i.e. remove all their files from db.
	// That changes the global file, which is now what local has.
	fs.Drop(remoteDevice0)

	// remoteDevice1 still needs foo.
	snap = snapshot(t, fs)
	c = snap.NeedSize(remoteDevice1)
	if c.Files != 1 {
		t.Errorf("Expected still 1 needed files, got %v", c.Files)
	}
	snap.Release()
}
// numBlockLists counts the block list entries currently present in the
// database backend.
func numBlockLists(db *Lowlevel) (int, error) {
	iter, err := db.Backend.NewPrefixIterator([]byte{KeyTypeBlockList})
	if err != nil {
		return 0, err
	}
	defer iter.Release()

	count := 0
	for iter.Next() {
		count++
	}
	if err := iter.Error(); err != nil {
		return 0, err
	}
	return count, nil
}

View File

@@ -1,80 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"bytes"
"testing"
)
// TestDeviceKey round-trips folder, device and name through a device file key.
func TestDeviceKey(t *testing.T) {
	folder := []byte("folder6789012345678901234567890123456789012345678901234567890123")
	device := []byte("device67890123456789012345678901")
	name := []byte("name")

	db := newLowlevelMemory(t)
	defer db.Close()

	key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, name)
	if err != nil {
		t.Fatal(err)
	}

	gotFolder, ok := db.keyer.FolderFromDeviceFileKey(key)
	if !ok {
		t.Fatal("unexpectedly not found")
	}
	if !bytes.Equal(gotFolder, folder) {
		t.Errorf("wrong folder %q != %q", gotFolder, folder)
	}
	gotDevice, ok := db.keyer.DeviceFromDeviceFileKey(key)
	if !ok {
		t.Fatal("unexpectedly not found")
	}
	if !bytes.Equal(gotDevice, device) {
		t.Errorf("wrong device %q != %q", gotDevice, device)
	}
	gotName := db.keyer.NameFromDeviceFileKey(key)
	if !bytes.Equal(gotName, name) {
		t.Errorf("wrong name %q != %q", gotName, name)
	}
}
// TestGlobalKey round-trips the file name through a global version key.
func TestGlobalKey(t *testing.T) {
	folder := []byte("folder6789012345678901234567890123456789012345678901234567890123")
	name := []byte("name")

	db := newLowlevelMemory(t)
	defer db.Close()

	key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, name)
	if err != nil {
		t.Fatal(err)
	}

	gotName := db.keyer.NameFromGlobalVersionKey(key)
	if !bytes.Equal(gotName, name) {
		t.Errorf("wrong name %q != %q", gotName, name)
	}
}
// TestSequenceKey round-trips the sequence number through a sequence key.
func TestSequenceKey(t *testing.T) {
	folder := []byte("folder6789012345678901234567890123456789012345678901234567890123")

	db := newLowlevelMemory(t)
	defer db.Close()

	const seq = 1234567890
	key, err := db.keyer.GenerateSequenceKey(nil, folder, seq)
	if err != nil {
		t.Fatal(err)
	}

	if got := db.keyer.SequenceFromSequenceKey(key); got != seq {
		t.Errorf("sequence number mangled, %d != %d", got, seq)
	}
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,472 +0,0 @@
// Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"errors"
"fmt"
"math/bits"
"time"
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
)
// errMetaInconsistent is returned when loaded metadata fails internal
// consistency checks and must be recalculated from scratch.
var errMetaInconsistent = errors.New("inconsistent counts detected")

// countsMap is the lock-free core of the metadata tracker: the counts
// themselves plus an index for lookup by device and flag bit.
type countsMap struct {
	counts CountsSet
	indexes map[metaKey]int // device ID + local flags -> index in counts
}

// metadataTracker keeps metadata on a per device, per local flag basis.
type metadataTracker struct {
	keyer keyer
	countsMap
	mut sync.RWMutex
	dirty bool // set when counts changed and need persisting via toDB
	evLogger events.Logger
}

// metaKey identifies a single counts bucket: a device plus one flag bit.
type metaKey struct {
	dev protocol.DeviceID
	flag uint32
}

const needFlag uint32 = 1 << 31 // Last bit, as early ones are local flags
// newMetadataTracker returns an empty tracker ready for use.
func newMetadataTracker(keyer keyer, evLogger events.Logger) *metadataTracker {
	t := &metadataTracker{
		keyer:    keyer,
		evLogger: evLogger,
		mut:      sync.NewRWMutex(),
	}
	t.countsMap.indexes = make(map[metaKey]int)
	return t
}
// Unmarshal loads a metadataTracker from the corresponding protobuf
// representation, rebuilding the device/flag index map afterwards.
func (m *metadataTracker) Unmarshal(bs []byte) error {
	var dbc dbproto.CountsSet
	if err := proto.Unmarshal(bs, &dbc); err != nil {
		return err
	}
	m.counts.Created = dbc.Created
	m.counts.Counts = make([]Counts, len(dbc.Counts))
	for i, c := range dbc.Counts {
		m.counts.Counts[i] = countsFromWire(c)
	}

	// Initialize the index map so countsPtr/Counts lookups work.
	m.indexes = make(map[metaKey]int)
	for i, c := range m.counts.Counts {
		m.indexes[metaKey{c.DeviceID, c.LocalFlags}] = i
	}

	return nil
}
// protoMarshal returns the protobuf representation of the metadataTracker.
// Must be called with the read lock held.
func (m *metadataTracker) protoMarshal() ([]byte, error) {
	wire := &dbproto.CountsSet{
		Created: m.counts.Created,
		Counts:  make([]*dbproto.Counts, len(m.counts.Counts)),
	}
	for i := range m.counts.Counts {
		wire.Counts[i] = m.counts.Counts[i].toWire()
	}
	return proto.Marshal(wire)
}
// CommitHook returns a commit hook that persists the tracker state for the
// given folder whenever a transaction flushes.
func (m *metadataTracker) CommitHook(folder []byte) backend.CommitHook {
	return func(tx backend.WriteTransaction) error {
		return m.toDB(tx, folder)
	}
}
// toDB saves the marshalled metadataTracker to the given db, under the key
// corresponding to the given folder. It is a no-op when the tracker is not
// dirty; on a successful write the dirty flag is cleared.
func (m *metadataTracker) toDB(t backend.WriteTransaction, folder []byte) error {
	key, err := m.keyer.GenerateFolderMetaKey(nil, folder)
	if err != nil {
		return err
	}

	// Take the write lock: we clear m.dirty below, and mutating it under
	// the read lock (as before) was a data race with concurrent callers.
	m.mut.Lock()
	defer m.mut.Unlock()

	if !m.dirty {
		return nil
	}

	bs, err := m.protoMarshal()
	if err != nil {
		return err
	}
	err = t.Put(key, bs)
	if err == nil {
		m.dirty = false
	}
	return err
}
// fromDB initializes the metadataTracker from the marshalled data found in
// the database under the key corresponding to the given folder.
func (m *metadataTracker) fromDB(db *Lowlevel, folder []byte) error {
	key, err := db.keyer.GenerateFolderMetaKey(nil, folder)
	if err != nil {
		return err
	}
	bs, err := db.Get(key)
	if err != nil {
		return err
	}
	if err := m.Unmarshal(bs); err != nil {
		return err
	}
	// A zero creation time marks the metadata as never initialized.
	if m.counts.Created == 0 {
		return errMetaInconsistent
	}
	return nil
}
// countsPtr returns a pointer to the corresponding Counts struct, if
// necessary allocating one in the process. When a device first appears,
// its need bucket is also initialized to "needs everything".
func (m *metadataTracker) countsPtr(dev protocol.DeviceID, flag uint32) *Counts {
	// must be called with the mutex held

	if bits.OnesCount32(flag) > 1 {
		panic("incorrect usage: set at most one bit in flag")
	}

	key := metaKey{dev, flag}
	idx, ok := m.indexes[key]
	if !ok {
		idx = len(m.counts.Counts)
		m.counts.Counts = append(m.counts.Counts, Counts{DeviceID: dev, LocalFlags: flag})
		m.indexes[key] = idx

		// Need bucket must be initialized when a device first occurs in
		// the metadatatracker, even if there's no change to the need
		// bucket itself.
		nkey := metaKey{dev, needFlag}
		if _, ok := m.indexes[nkey]; !ok {
			// Initially a new device needs everything, except deletes
			nidx := len(m.counts.Counts)
			m.counts.Counts = append(m.counts.Counts, m.allNeededCounts(dev))
			m.indexes[nkey] = nidx
		}
	}
	return &m.counts.Counts[idx]
}
// allNeededCounts returns a Counts representing a device that needs the
// entire global state, minus deletes (a device with nothing needs no deletes).
func (m *countsMap) allNeededCounts(dev protocol.DeviceID) Counts {
	var needed Counts
	if idx, ok := m.indexes[metaKey{protocol.GlobalDeviceID, 0}]; ok {
		needed = m.counts.Counts[idx]
		needed.Deleted = 0 // Don't need deletes if having nothing
	}
	needed.DeviceID = dev
	needed.LocalFlags = needFlag
	return needed
}
// addFile adds a file to the counts, adjusting the device's sequence number
// as appropriate.
func (m *metadataTracker) addFile(dev protocol.DeviceID, f protocol.FileInfo) {
	m.mut.Lock()
	m.updateSeqLocked(dev, f)
	m.updateFileLocked(dev, f, m.addFileLocked)
	m.mut.Unlock()
}
// updateFileLocked applies fn for each bucket the file belongs to: the
// zero-flags bucket for regular files, or one call per set local flag bit.
// Remote-invalid files and invalid global entries are not accounted at all.
func (m *metadataTracker) updateFileLocked(dev protocol.DeviceID, f protocol.FileInfo, fn func(protocol.DeviceID, uint32, protocol.FileInfo)) {
	m.dirty = true

	flags := f.FileLocalFlags()
	if f.IsInvalid() && (flags == 0 || dev == protocol.GlobalDeviceID) {
		// This is a remote invalid file or concerns the global state.
		// In either case invalid files are not accounted.
		return
	}

	if flags == 0 {
		// Account regular files in the zero-flags bucket.
		fn(dev, 0, f)
		return
	}
	// Account in flag specific buckets.
	eachFlagBit(flags, func(flag uint32) {
		fn(dev, flag, f)
	})
}
// emptyNeeded ensures that there is a need count for the given device and that it is empty.
func (m *metadataTracker) emptyNeeded(dev protocol.DeviceID) {
	m.mut.Lock()
	defer m.mut.Unlock()

	m.dirty = true

	empty := Counts{DeviceID: dev, LocalFlags: needFlag}
	key := metaKey{dev, needFlag}
	if idx, ok := m.indexes[key]; ok {
		// Reuse the existing bucket.
		m.counts.Counts[idx] = empty
		return
	}
	// No bucket yet; append one.
	m.indexes[key] = len(m.counts.Counts)
	m.counts.Counts = append(m.counts.Counts, empty)
}
// addNeeded accounts a file into the device's needed-files bucket.
func (m *metadataTracker) addNeeded(dev protocol.DeviceID, f protocol.FileInfo) {
	m.mut.Lock()
	m.dirty = true
	m.addFileLocked(dev, needFlag, f)
	m.mut.Unlock()
}
// Sequence returns the highest sequence number seen for the given device.
func (m *metadataTracker) Sequence(dev protocol.DeviceID) int64 {
	// Full lock, not RLock: countsPtr may allocate a new bucket.
	m.mut.Lock()
	defer m.mut.Unlock()
	return m.countsPtr(dev, 0).Sequence
}
// updateSeqLocked bumps the device's stored sequence number if f carries a
// higher one. The global pseudo-device carries no sequence and is skipped.
func (m *metadataTracker) updateSeqLocked(dev protocol.DeviceID, f protocol.FileInfo) {
	if dev == protocol.GlobalDeviceID {
		return
	}
	counts := m.countsPtr(dev, 0)
	if seq := f.SequenceNo(); seq > counts.Sequence {
		counts.Sequence = seq
	}
}
// addFileLocked increments the appropriate counter (deleted, directory,
// symlink or file) and the byte total in the (dev, flag) bucket.
func (m *metadataTracker) addFileLocked(dev protocol.DeviceID, flag uint32, f protocol.FileInfo) {
	counts := m.countsPtr(dev, flag)

	switch {
	case f.IsDeleted():
		counts.Deleted++
	case f.IsDirectory() && !f.IsSymlink():
		counts.Directories++
	case f.IsSymlink():
		counts.Symlinks++
	default:
		counts.Files++
	}
	counts.Bytes += f.FileSize()
}
// removeFile subtracts a file from the counts.
func (m *metadataTracker) removeFile(dev protocol.DeviceID, f protocol.FileInfo) {
	m.mut.Lock()
	m.updateFileLocked(dev, f, m.removeFileLocked)
	m.mut.Unlock()
}
// removeNeeded subtracts a file from the device's needed-files bucket.
func (m *metadataTracker) removeNeeded(dev protocol.DeviceID, f protocol.FileInfo) {
	m.mut.Lock()
	m.dirty = true
	m.removeFileLocked(dev, needFlag, f)
	m.mut.Unlock()
}
// removeFileLocked decrements the appropriate counter and byte total in the
// (dev, flag) bucket. If any counter underflows, it is clamped to zero and
// the created timestamp is reset so the metadata is recalculated on next
// startup.
func (m *metadataTracker) removeFileLocked(dev protocol.DeviceID, flag uint32, f protocol.FileInfo) {
	cp := m.countsPtr(dev, flag)

	switch {
	case f.IsDeleted():
		cp.Deleted--
	case f.IsDirectory() && !f.IsSymlink():
		cp.Directories--
	case f.IsSymlink():
		cp.Symlinks--
	default:
		cp.Files--
	}
	cp.Bytes -= f.FileSize()

	// If we've run into an impossible situation, correct it for now and set
	// the created timestamp to zero. Next time we start up the metadata
	// will be seen as infinitely old and recalculated from scratch.
	if cp.Deleted < 0 {
		m.evLogger.Log(events.Failure, fmt.Sprintf("meta deleted count for flag 0x%x dropped below zero", flag))
		cp.Deleted = 0
		m.counts.Created = 0
	}
	if cp.Files < 0 {
		m.evLogger.Log(events.Failure, fmt.Sprintf("meta files count for flag 0x%x dropped below zero", flag))
		cp.Files = 0
		m.counts.Created = 0
	}
	if cp.Directories < 0 {
		m.evLogger.Log(events.Failure, fmt.Sprintf("meta directories count for flag 0x%x dropped below zero", flag))
		cp.Directories = 0
		m.counts.Created = 0
	}
	if cp.Symlinks < 0 {
		// Fixed copy-paste bug: this message previously said "deleted".
		m.evLogger.Log(events.Failure, fmt.Sprintf("meta symlinks count for flag 0x%x dropped below zero", flag))
		cp.Symlinks = 0
		m.counts.Created = 0
	}
}
// resetAll resets all metadata buckets for the given device. Regular
// buckets become zero counts; the need bucket resets to "needs everything".
func (m *metadataTracker) resetAll(dev protocol.DeviceID) {
	m.mut.Lock()
	defer m.mut.Unlock()

	m.dirty = true
	for i := range m.counts.Counts {
		c := m.counts.Counts[i]
		if c.DeviceID != dev {
			continue
		}
		if c.LocalFlags == needFlag {
			m.counts.Counts[i] = m.allNeededCounts(dev)
		} else {
			m.counts.Counts[i] = Counts{
				DeviceID:   c.DeviceID,
				LocalFlags: c.LocalFlags,
			}
		}
	}
}
// resetCounts resets the file, dir, etc. counters for the given device,
// while retaining each bucket's sequence number.
func (m *metadataTracker) resetCounts(dev protocol.DeviceID) {
	m.mut.Lock()
	defer m.mut.Unlock()

	m.dirty = true
	for i := range m.counts.Counts {
		c := m.counts.Counts[i]
		if c.DeviceID != dev {
			continue
		}
		m.counts.Counts[i] = Counts{
			DeviceID:   c.DeviceID,
			Sequence:   c.Sequence,
			LocalFlags: c.LocalFlags,
		}
	}
}
// Counts returns the counts bucket for the given device and single flag bit.
// An unknown device's need bucket is synthesized as "needs everything".
func (m *countsMap) Counts(dev protocol.DeviceID, flag uint32) Counts {
	if bits.OnesCount32(flag) > 1 {
		panic("incorrect usage: set at most one bit in flag")
	}

	if idx, ok := m.indexes[metaKey{dev, flag}]; ok {
		return m.counts.Counts[idx]
	}
	if flag == needFlag {
		// If there's nothing about a device in the index yet,
		// it needs everything.
		return m.allNeededCounts(dev)
	}
	return Counts{}
}
// Snapshot returns an independent copy of the metadata for reading without
// holding the lock.
func (m *metadataTracker) Snapshot() *countsMap {
	m.mut.RLock()
	defer m.mut.RUnlock()

	snap := &countsMap{
		counts: CountsSet{
			Counts:  append([]Counts(nil), m.counts.Counts...),
			Created: m.counts.Created,
		},
		indexes: make(map[metaKey]int, len(m.indexes)),
	}
	for key, idx := range m.indexes {
		snap.indexes[key] = idx
	}
	return snap
}
// nextLocalSeq allocates and returns a new local sequence number.
func (m *metadataTracker) nextLocalSeq() int64 {
	m.mut.Lock()
	defer m.mut.Unlock()

	counts := m.countsPtr(protocol.LocalDeviceID, 0)
	counts.Sequence++
	return counts.Sequence
}
// devices returns the list of devices tracked, excluding the local device
// (which we don't know the ID of).
func (m *metadataTracker) devices() []protocol.DeviceID {
	m.mut.RLock()
	devs := m.countsMap.devices()
	m.mut.RUnlock()
	return devs
}
// devices collects the IDs of remote devices with a nonzero sequence,
// skipping the local and global pseudo-devices.
func (m *countsMap) devices() []protocol.DeviceID {
	devs := make([]protocol.DeviceID, 0, len(m.counts.Counts))
	for _, c := range m.counts.Counts {
		switch {
		case c.Sequence <= 0:
			// Never seen anything from this bucket's device.
		case c.DeviceID == protocol.GlobalDeviceID, c.DeviceID == protocol.LocalDeviceID:
			// Pseudo/local devices are excluded.
		default:
			devs = append(devs, c.DeviceID)
		}
	}
	return devs
}
// Created returns the time the metadata was created or last recalculated.
func (m *metadataTracker) Created() time.Time {
	m.mut.RLock()
	created := m.counts.Created
	m.mut.RUnlock()
	return time.Unix(0, created)
}
// SetCreated stamps the metadata with the current time and marks it dirty.
func (m *metadataTracker) SetCreated() {
	m.mut.Lock()
	defer m.mut.Unlock()
	m.counts.Created = time.Now().UnixNano()
	m.dirty = true
}
// eachFlagBit invokes fn once for every bit set in flags, passing a mask
// with exactly that single bit set. It stops as soon as all set bits have
// been visited.
func eachFlagBit(flags uint32, fn func(flag uint32)) {
	for bit := uint32(1); flags != 0; bit <<= 1 {
		if flags&bit != 0 {
			fn(bit)
			flags &^= bit
		}
	}
}

View File

@@ -1,182 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"math/bits"
"sort"
"testing"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
)
// TestEachFlagBit checks that eachFlagBit visits exactly the set bits, one
// bit per call.
func TestEachFlagBit(t *testing.T) {
	cases := []struct {
		flags      uint32
		iterations int
	}{
		{0, 0},
		{1<<0 | 1<<3, 2},
		{1 << 0, 1},
		{1 << 31, 1},
		{1<<10 | 1<<20 | 1<<30, 3},
	}

	for _, tc := range cases {
		var seen uint32
		calls := 0
		eachFlagBit(tc.flags, func(f uint32) {
			calls++
			seen |= f
			if bits.OnesCount32(f) != 1 {
				t.Error("expected exactly one bit to be set in every call")
			}
		})
		if seen != tc.flags {
			t.Errorf("expected 0x%x flags, got 0x%x", tc.flags, seen)
		}
		if calls != tc.iterations {
			t.Errorf("expected %d iterations, got %d", tc.iterations, calls)
		}
	}
}
// TestMetaDevices checks that devices() reports only remote devices, once
// each, regardless of how many flag buckets they occupy.
func TestMetaDevices(t *testing.T) {
	devA := protocol.DeviceID{1}
	devB := protocol.DeviceID{2}

	meta := newMetadataTracker(nil, events.NoopLogger)
	meta.addFile(devA, protocol.FileInfo{Sequence: 1})
	meta.addFile(devA, protocol.FileInfo{Sequence: 2, LocalFlags: 1})
	meta.addFile(devB, protocol.FileInfo{Sequence: 1})
	meta.addFile(devB, protocol.FileInfo{Sequence: 2, LocalFlags: 2})
	meta.addFile(protocol.LocalDeviceID, protocol.FileInfo{Sequence: 1})

	// There are five device/flags combos
	if l := len(meta.counts.Counts); l < 5 {
		t.Error("expected at least five buckets, not", l)
	}

	// There are only two non-local devices
	devs := meta.devices()
	if l := len(devs); l != 2 {
		t.Fatal("expected two devices, not", l)
	}

	// Check that we got the two devices we expect
	sort.Slice(devs, func(a, b int) bool {
		return devs[a].Compare(devs[b]) == -1
	})
	if devs[0] != devA {
		t.Error("first device should be d1")
	}
	if devs[1] != devB {
		t.Error("second device should be d2")
	}
}
// TestMetaSequences checks that the tracked sequence number per device is
// the highest one added, including invalid and flagged files.
func TestMetaSequences(t *testing.T) {
	d1 := protocol.DeviceID{1}

	meta := newMetadataTracker(nil, events.NoopLogger)
	meta.addFile(d1, protocol.FileInfo{Sequence: 1})
	meta.addFile(d1, protocol.FileInfo{Sequence: 2, RawInvalid: true})
	meta.addFile(d1, protocol.FileInfo{Sequence: 3})
	meta.addFile(d1, protocol.FileInfo{Sequence: 4, RawInvalid: true})
	meta.addFile(protocol.LocalDeviceID, protocol.FileInfo{Sequence: 1})
	meta.addFile(protocol.LocalDeviceID, protocol.FileInfo{Sequence: 2})
	meta.addFile(protocol.LocalDeviceID, protocol.FileInfo{Sequence: 3, LocalFlags: 1})
	meta.addFile(protocol.LocalDeviceID, protocol.FileInfo{Sequence: 4, LocalFlags: 2})

	if seq := meta.Sequence(d1); seq != 4 {
		t.Error("sequence of first device should be 4, not", seq)
	}
	// Fixed copy-paste bug: this message previously said "first device".
	if seq := meta.Sequence(protocol.LocalDeviceID); seq != 4 {
		t.Error("sequence of local device should be 4, not", seq)
	}
}
// TestRecalcMeta verifies that deliberately corrupted metadata (wrong byte
// counts, stale sequence) is detected and recalculated when a new fileset
// is opened on the same database.
func TestRecalcMeta(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	// Add some files
	s1 := newFileSet(t, "test", ldb)
	files := []protocol.FileInfo{
		{Name: "a", Size: 1000},
		{Name: "b", Size: 2000},
	}
	s1.Update(protocol.LocalDeviceID, files)

	// Verify local/global size
	snap := snapshot(t, s1)
	ls := snap.LocalSize()
	gs := snap.GlobalSize()
	snap.Release()
	if ls.Bytes != 3000 {
		t.Fatalf("Wrong initial local byte count, %d != 3000", ls.Bytes)
	}
	if gs.Bytes != 3000 {
		t.Fatalf("Wrong initial global byte count, %d != 3000", gs.Bytes)
	}

	// Reach into the database to make the metadata tracker intentionally
	// wrong and out of date
	curSeq := s1.meta.Sequence(protocol.LocalDeviceID)
	// NOTE(review): tran is only Commit()ed, never close()d — presumably
	// Commit releases it; confirm against the transaction implementation.
	tran, err := ldb.newReadWriteTransaction()
	if err != nil {
		t.Fatal(err)
	}
	s1.meta.mut.Lock()
	s1.meta.countsPtr(protocol.LocalDeviceID, 0).Sequence = curSeq - 1 // too low
	s1.meta.countsPtr(protocol.LocalDeviceID, 0).Bytes = 1234         // wrong
	s1.meta.countsPtr(protocol.GlobalDeviceID, 0).Bytes = 1234        // wrong
	s1.meta.dirty = true
	s1.meta.mut.Unlock()
	if err := s1.meta.toDB(tran, []byte("test")); err != nil {
		t.Fatal(err)
	}
	if err := tran.Commit(); err != nil {
		t.Fatal(err)
	}

	// Verify that our bad data "took"
	snap = snapshot(t, s1)
	ls = snap.LocalSize()
	gs = snap.GlobalSize()
	snap.Release()
	if ls.Bytes != 1234 {
		t.Fatalf("Wrong changed local byte count, %d != 1234", ls.Bytes)
	}
	if gs.Bytes != 1234 {
		t.Fatalf("Wrong changed global byte count, %d != 1234", gs.Bytes)
	}

	// Create a new fileset, which will realize the inconsistency and recalculate
	s2 := newFileSet(t, "test", ldb)

	// Verify local/global size
	snap = snapshot(t, s2)
	ls = snap.LocalSize()
	gs = snap.GlobalSize()
	snap.Release()
	if ls.Bytes != 3000 {
		t.Fatalf("Wrong fixed local byte count, %d != 3000", ls.Bytes)
	}
	if gs.Bytes != 3000 {
		t.Fatalf("Wrong fixed global byte count, %d != 3000", gs.Bytes)
	}
}
// TestMetaKeyCollisions ensures that the internal need flag does not
// overlap with any protocol-level local file flag bits.
func TestMetaKeyCollisions(t *testing.T) {
	overlap := protocol.LocalAllFlags & needFlag
	if overlap != 0 {
		t.Error("Collision between need flag and protocol local file flags")
	}
}

View File

@@ -1,156 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"encoding/binary"
"time"
"github.com/syncthing/syncthing/lib/db/backend"
)
// NamespacedKV is a simple key-value store using a specific namespace within
// a leveldb. All keys passed to its methods are transparently prefixed, so
// distinct NamespacedKV instances over the same backend do not collide.
type NamespacedKV struct {
	// db is the underlying store shared by all namespaces.
	db backend.Backend
	// prefix is prepended to every key to isolate this namespace.
	prefix string
}
// NewNamespacedKV returns a new NamespacedKV that lives in the namespace
// specified by the prefix.
func NewNamespacedKV(db backend.Backend, prefix string) *NamespacedKV {
	kv := &NamespacedKV{db: db, prefix: prefix}
	return kv
}
// PutInt64 stores a new int64. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutInt64(key string, val int64) error {
	// Big-endian, fixed eight bytes; see Int64 for the matching decode.
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(val))
	return n.db.Put(n.prefixedKey(key), buf)
}
// Int64 returns the stored value interpreted as an int64 and a boolean that
// is false if no value was stored at the key.
func (n *NamespacedKV) Int64(key string) (int64, bool, error) {
	valBs, err := n.db.Get(n.prefixedKey(key))
	if err != nil {
		return 0, false, filterNotFound(err)
	}
	// Guard against a short or corrupt stored value;
	// binary.BigEndian.Uint64 panics on fewer than eight bytes. Treat it
	// the same as a missing value.
	if len(valBs) < 8 {
		return 0, false, nil
	}
	val := binary.BigEndian.Uint64(valBs)
	return int64(val), true, nil
}
// PutTime stores a new time.Time. Any existing value (even if of another
// type) is overwritten.
func (n *NamespacedKV) PutTime(key string, val time.Time) error {
	encoded, _ := val.MarshalBinary() // never returns an error
	return n.db.Put(n.prefixedKey(key), encoded)
}

// Time returns the stored value interpreted as a time.Time and a boolean
// that is false if no value was stored at the key.
func (n NamespacedKV) Time(key string) (time.Time, bool, error) {
	raw, err := n.db.Get(n.prefixedKey(key))
	if err != nil {
		return time.Time{}, false, filterNotFound(err)
	}
	var t time.Time
	err = t.UnmarshalBinary(raw)
	return t, err == nil, err
}
// PutString stores a new string. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutString(key, val string) error {
	encoded := []byte(val)
	return n.db.Put(n.prefixedKey(key), encoded)
}

// String returns the stored value interpreted as a string and a boolean that
// is false if no value was stored at the key.
func (n NamespacedKV) String(key string) (string, bool, error) {
	raw, err := n.db.Get(n.prefixedKey(key))
	if err != nil {
		return "", false, filterNotFound(err)
	}
	return string(raw), true, nil
}
// PutBytes stores a new byte slice. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutBytes(key string, val []byte) error {
	prefixed := n.prefixedKey(key)
	return n.db.Put(prefixed, val)
}

// Bytes returns the stored value as a raw byte slice and a boolean that
// is false if no value was stored at the key.
func (n NamespacedKV) Bytes(key string) ([]byte, bool, error) {
	raw, err := n.db.Get(n.prefixedKey(key))
	if err != nil {
		return nil, false, filterNotFound(err)
	}
	return raw, true, nil
}
// PutBool stores a new boolean. Any existing value (even if of another type)
// is overwritten.
//
// NOTE: the on-disk encoding is intentionally 0x0 for true and 0x1 for
// false. It looks inverted, but it matches the decoder in Bool and must be
// preserved for compatibility with existing stored values.
func (n *NamespacedKV) PutBool(key string, val bool) error {
	if val {
		return n.db.Put(n.prefixedKey(key), []byte{0x0})
	}
	return n.db.Put(n.prefixedKey(key), []byte{0x1})
}
// Bool returns the stored value as a boolean and a boolean that
// is false if no value was stored at the key.
//
// The encoding matches PutBool: a stored 0x0 byte means true.
func (n NamespacedKV) Bool(key string) (bool, bool, error) {
	valBs, err := n.db.Get(n.prefixedKey(key))
	if err != nil {
		return false, false, filterNotFound(err)
	}
	// Guard against an empty stored value, which would otherwise panic on
	// the index expression below. Treat it as a missing value.
	if len(valBs) == 0 {
		return false, false, nil
	}
	return valBs[0] == 0x0, true, nil
}
// Delete deletes the specified key. It is allowed to delete a nonexistent
// key.
func (n NamespacedKV) Delete(key string) error {
	prefixed := n.prefixedKey(key)
	return n.db.Delete(prefixed)
}

// prefixedKey returns the raw database key for the given in-namespace key.
func (n NamespacedKV) prefixedKey(key string) []byte {
	buf := make([]byte, 0, len(n.prefix)+len(key))
	buf = append(buf, n.prefix...)
	buf = append(buf, key...)
	return buf
}
// Well known namespaces that can be instantiated without knowing the key
// details.

// NewDeviceStatisticsNamespace creates a KV namespace for device statistics
// for the given device.
func NewDeviceStatisticsNamespace(db backend.Backend, device string) *NamespacedKV {
	prefix := string(KeyTypeDeviceStatistic) + device
	return NewNamespacedKV(db, prefix)
}

// NewFolderStatisticsNamespace creates a KV namespace for folder statistics
// for the given folder.
func NewFolderStatisticsNamespace(db backend.Backend, folder string) *NamespacedKV {
	prefix := string(KeyTypeFolderStatistic) + folder
	return NewNamespacedKV(db, prefix)
}

// NewMiscDataNamespace creates a KV namespace for miscellaneous metadata.
func NewMiscDataNamespace(db backend.Backend) *NamespacedKV {
	prefix := string(KeyTypeMiscData)
	return NewNamespacedKV(db, prefix)
}
// filterNotFound maps backend not-found errors to nil, passing every other
// error through unchanged.
func filterNotFound(err error) error {
	if !backend.IsNotFound(err) {
		return err
	}
	return nil
}

View File

@@ -1,177 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"testing"
"time"
)
// TestNamespacedInt verifies the int64 round trip: a key is absent until
// put, visible only in its own namespace, and absent again after delete.
func TestNamespacedInt(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()
	n1 := NewNamespacedKV(ldb, "foo")
	n2 := NewNamespacedKV(ldb, "bar")
	// Key is missing to start with
	if v, ok, err := n1.Int64("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != 0 || ok {
		t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
	}
	if err := n1.PutInt64("test", 42); err != nil {
		t.Fatal(err)
	}
	// It should now exist in n1
	if v, ok, err := n1.Int64("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != 42 || !ok {
		t.Errorf("Incorrect return v %v != 42 || ok %v != true", v, ok)
	}
	// ... but not in n2, which is in a different namespace
	if v, ok, err := n2.Int64("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != 0 || ok {
		t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
	}
	if err := n1.Delete("test"); err != nil {
		t.Fatal(err)
	}
	// It should no longer exist
	if v, ok, err := n1.Int64("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != 0 || ok {
		t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
	}
}
// TestNamespacedTime verifies the time.Time round trip: zero value while
// absent, and equality (via time.Equal) after a put.
func TestNamespacedTime(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()
	n1 := NewNamespacedKV(ldb, "foo")
	// Absent key yields the zero time and ok == false.
	if v, ok, err := n1.Time("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if !v.IsZero() || ok {
		t.Errorf("Incorrect return v %v != %v || ok %v != false", v, time.Time{}, ok)
	}
	now := time.Now()
	if err := n1.PutTime("test", now); err != nil {
		t.Fatal(err)
	}
	// Compare with Equal rather than ==, since the monotonic clock part is
	// not preserved by the binary encoding.
	if v, ok, err := n1.Time("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if !v.Equal(now) || !ok {
		t.Errorf("Incorrect return v %v != %v || ok %v != true", v, now, ok)
	}
}
// TestNamespacedString verifies the string round trip: empty while absent,
// the stored value after a put.
func TestNamespacedString(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()
	n1 := NewNamespacedKV(ldb, "foo")
	if v, ok, err := n1.String("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "" || ok {
		t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
	}
	if err := n1.PutString("test", "yo"); err != nil {
		t.Fatal(err)
	}
	if v, ok, err := n1.String("test"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "yo" || !ok {
		t.Errorf("Incorrect return v %q != \"yo\" || ok %v != true", v, ok)
	}
}
// TestNamespacedReset stores several keys, confirms they are readable, then
// verifies that reset (below) removes every entry in the namespace.
func TestNamespacedReset(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()
	n1 := NewNamespacedKV(ldb, "foo")
	if err := n1.PutString("test1", "yo1"); err != nil {
		t.Fatal(err)
	}
	if err := n1.PutString("test2", "yo2"); err != nil {
		t.Fatal(err)
	}
	if err := n1.PutString("test3", "yo3"); err != nil {
		t.Fatal(err)
	}
	// All three keys are present before the reset.
	if v, ok, err := n1.String("test1"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "yo1" || !ok {
		t.Errorf("Incorrect return v %q != \"yo1\" || ok %v != true", v, ok)
	}
	if v, ok, err := n1.String("test2"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "yo2" || !ok {
		t.Errorf("Incorrect return v %q != \"yo2\" || ok %v != true", v, ok)
	}
	if v, ok, err := n1.String("test3"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "yo3" || !ok {
		t.Errorf("Incorrect return v %q != \"yo3\" || ok %v != true", v, ok)
	}
	reset(n1)
	// ... and all three are gone afterwards.
	if v, ok, err := n1.String("test1"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "" || ok {
		t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
	}
	if v, ok, err := n1.String("test2"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "" || ok {
		t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
	}
	if v, ok, err := n1.String("test3"); err != nil {
		t.Error("Unexpected error:", err)
	} else if v != "" || ok {
		t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
	}
}
// reset removes all entries in this namespace. It is best effort: any
// database error simply aborts the cleanup silently (test helper only).
func reset(n *NamespacedKV) {
	txn, err := n.db.NewWriteTransaction()
	if err != nil {
		return
	}
	defer txn.Release()
	iter, err := txn.NewPrefixIterator([]byte(n.prefix))
	if err != nil {
		return
	}
	for iter.Next() {
		_ = txn.Delete(iter.Key())
	}
	// Release the iterator before committing, preserving the original
	// ordering of operations.
	iter.Release()
	_ = txn.Commit()
}

View File

@@ -1,271 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"fmt"
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/gen/bep"
"github.com/syncthing/syncthing/lib/protocol"
)
// Current schema and migration versions, and the oldest Syncthing release
// able to read a database at this schema version.
//
// dbMigrationVersion is for migrations that do not change the schema and thus
// do not put restrictions on downgrades (e.g. for repairs after a bugfix).
const (
	dbVersion = 14
	dbMigrationVersion = 20
	dbMinSyncthingVersion = "v1.9.0"
)

// migration describes a single schema or repair migration step: the schema
// and migration versions it results in, the minimum Syncthing version able
// to read the result, and the function performing the step.
type migration struct {
	schemaVersion int64
	migrationVersion int64
	minSyncthingVersion string
	migration func(prevSchema int) error
}
// databaseDowngradeError indicates that the database was written by a newer
// Syncthing than the one currently running.
type databaseDowngradeError struct {
	minSyncthingVersion string
}

// Error names the minimum required Syncthing version when known, and a
// generic message otherwise.
func (e *databaseDowngradeError) Error() string {
	if e.minSyncthingVersion != "" {
		return fmt.Sprintf("Syncthing %s required", e.minSyncthingVersion)
	}
	return "newer Syncthing required"
}
// UpdateSchema updates a possibly outdated database to the current schema and
// also does repairs where necessary.
func UpdateSchema(db *Lowlevel) error {
	updater := &schemaUpdater{db}
	return updater.updateSchema()
}

// schemaUpdater wraps a Lowlevel with the schema migration methods below.
type schemaUpdater struct {
	*Lowlevel
}
// updateSchema compares the schema and migration versions stored in the
// database with the current ones, refuses databases that are too old or too
// new, runs any pending migrations in order, and records the new versions.
func (db *schemaUpdater) updateSchema() error {
	// Updating the schema can touch any and all parts of the database. Make
	// sure we do not run GC concurrently with schema migrations.
	db.gcMut.Lock()
	defer db.gcMut.Unlock()
	miscDB := NewMiscDataNamespace(db.Lowlevel)
	prevVersion, _, err := miscDB.Int64("dbVersion")
	if err != nil {
		return err
	}
	if prevVersion > 0 && prevVersion < 14 {
		// This is a database version that is too old to be upgraded directly.
		// The user will have to upgrade to an older version first.
		return fmt.Errorf("database version %d is too old to be upgraded directly; step via Syncthing v1.27.0 to upgrade", prevVersion)
	}
	if prevVersion > dbVersion {
		// The database is newer than this binary; report which Syncthing
		// version is required, if recorded.
		err := &databaseDowngradeError{}
		if minSyncthingVersion, ok, dbErr := miscDB.String("dbMinSyncthingVersion"); dbErr != nil {
			return dbErr
		} else if ok {
			err.minSyncthingVersion = minSyncthingVersion
		}
		return err
	}
	prevMigration, _, err := miscDB.Int64("dbMigrationVersion")
	if err != nil {
		return err
	}
	// Cover versions before adding `dbMigrationVersion` (== 0) and possible future weirdness.
	if prevMigration < prevVersion {
		prevMigration = prevVersion
	}
	if prevVersion == dbVersion && prevMigration >= dbMigrationVersion {
		// Already fully up to date; nothing to do.
		return nil
	}
	// Pending migrations run in order; each entry lists the resulting
	// schema version, migration version and minimum Syncthing version.
	migrations := []migration{
		{14, 14, "v1.9.0", db.updateSchemaTo14},
		{14, 16, "v1.9.0", db.checkRepairMigration},
		{14, 17, "v1.9.0", db.migration17},
		{14, 19, "v1.9.0", db.dropAllIndexIDsMigration},
		{14, 20, "v1.9.0", db.dropOutgoingIndexIDsMigration},
	}
	for _, m := range migrations {
		if prevMigration < m.migrationVersion {
			l.Infof("Running database migration %d...", m.migrationVersion)
			if err := m.migration(int(prevVersion)); err != nil {
				return fmt.Errorf("failed to do migration %v: %w", m.migrationVersion, err)
			}
			// Persist progress after each step so an interrupted upgrade
			// resumes where it left off.
			if err := db.writeVersions(m, miscDB); err != nil {
				return fmt.Errorf("failed to write versions after migration %v: %w", m.migrationVersion, err)
			}
		}
	}
	if err := db.writeVersions(migration{
		schemaVersion: dbVersion,
		migrationVersion: dbMigrationVersion,
		minSyncthingVersion: dbMinSyncthingVersion,
	}, miscDB); err != nil {
		return fmt.Errorf("failed to write versions after migrations: %w", err)
	}
	l.Infoln("Compacting database after migration...")
	return db.Compact()
}
// writeVersions persists the schema version, minimum Syncthing version and
// migration version from m into miscDB, stopping at the first error.
func (*schemaUpdater) writeVersions(m migration, miscDB *NamespacedKV) error {
	err := miscDB.PutInt64("dbVersion", m.schemaVersion)
	if err == nil {
		err = miscDB.PutString("dbMinSyncthingVersion", m.minSyncthingVersion)
	}
	if err == nil {
		err = miscDB.PutInt64("dbMigrationVersion", m.migrationVersion)
	}
	return err
}
// updateSchemaTo14 walks every local file entry in every folder and marks
// entries whose block list is missing from the database as requiring a
// rescan, updating the global version list accordingly.
func (db *schemaUpdater) updateSchemaTo14(_ int) error {
	// Checks for missing blocks and marks those entries as requiring a
	// rehash/being invalid. The db is checked/repaired afterwards, i.e.
	// no care is taken to get metadata and sequences right.
	// If the corresponding files changed on disk compared to the global
	// version, this will cause a conflict.
	var key, gk []byte
	for _, folderStr := range db.ListFolders() {
		folder := []byte(folderStr)
		meta := newMetadataTracker(db.keyer, db.evLogger)
		meta.counts.Created = 0 // Recalculate metadata afterwards
		t, err := db.newReadWriteTransaction(meta.CommitHook(folder))
		if err != nil {
			return err
		}
		// The deferred close/release are backstops for error returns; the
		// success path closes/releases explicitly at the end of each
		// iteration. The defers themselves only run at function exit.
		defer t.close()
		key, err = t.keyer.GenerateDeviceFileKey(key, folder, protocol.LocalDeviceID[:], nil)
		if err != nil {
			return err
		}
		it, err := t.NewPrefixIterator(key)
		if err != nil {
			return err
		}
		defer it.Release()
		for it.Next() {
			var bepf bep.FileInfo
			if err := proto.Unmarshal(it.Value(), &bepf); err != nil {
				return err
			}
			fi := protocol.FileInfoFromDB(&bepf)
			// Only entries that reference a block list without carrying
			// the blocks themselves can be affected.
			if len(fi.Blocks) > 0 || len(fi.BlocksHash) == 0 {
				continue
			}
			key = t.keyer.GenerateBlockListKey(key, fi.BlocksHash)
			_, err := t.Get(key)
			if err == nil {
				// Block list present; nothing to repair.
				continue
			}
			fi.SetMustRescan()
			if err = t.putFile(it.Key(), fi); err != nil {
				return err
			}
			gk, err = t.keyer.GenerateGlobalVersionKey(gk, folder, []byte(fi.Name))
			if err != nil {
				return err
			}
			key, err = t.updateGlobal(gk, key, folder, protocol.LocalDeviceID[:], fi, meta)
			if err != nil {
				return err
			}
		}
		it.Release()
		if err = t.Commit(); err != nil {
			return err
		}
		t.close()
	}
	return nil
}
// checkRepairMigration runs the metadata/GC consistency check for every
// folder, returning the first error encountered.
func (db *schemaUpdater) checkRepairMigration(_ int) error {
	for _, folder := range db.ListFolders() {
		if _, err := db.getMetaAndCheckGCLocked(folder); err != nil {
			return err
		}
	}
	return nil
}
// migration17 finds all files that were pulled as invalid from an invalid
// global and make sure they get scanned/pulled again.
func (db *schemaUpdater) migration17(prev int) error {
	if prev < 16 {
		// Issue was introduced in migration to 16
		return nil
	}
	t, err := db.newReadOnlyTransaction()
	if err != nil {
		return err
	}
	defer t.close()
	for _, folderStr := range db.ListFolders() {
		folder := []byte(folderStr)
		meta, err := db.loadMetadataTracker(folderStr)
		if err != nil {
			return err
		}
		// Batch the updates; the flush function writes accumulated files
		// back as local file updates.
		batch := NewFileInfoBatch(func(fs []protocol.FileInfo) error {
			return db.updateLocalFiles(folder, fs, meta)
		})
		// innerErr carries a flush failure out of the iteration callback,
		// which can only signal stop/continue via its bool return.
		var innerErr error
		err = t.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(fi protocol.FileInfo) bool {
			if fi.IsInvalid() && fi.FileLocalFlags() == 0 {
				// Invalid but without local flags: reset the version and
				// force a rescan so the file is handled afresh.
				fi.SetMustRescan()
				fi.Version = protocol.Vector{}
				batch.Append(fi)
				innerErr = batch.FlushIfFull()
				return innerErr == nil
			}
			return true
		})
		if innerErr != nil {
			return innerErr
		}
		if err != nil {
			return err
		}
		if err := batch.Flush(); err != nil {
			return err
		}
	}
	return nil
}
// dropAllIndexIDsMigration delegates to dropIndexIDs, removing stored index
// IDs so indexes are re-exchanged after the migration.
func (db *schemaUpdater) dropAllIndexIDsMigration(_ int) error {
	return db.dropIndexIDs()
}

// dropOutgoingIndexIDsMigration delegates to dropOtherDeviceIndexIDs —
// presumably dropping only other devices' index IDs; see that method.
func (db *schemaUpdater) dropOutgoingIndexIDsMigration(_ int) error {
	return db.dropOtherDeviceIndexIDs()
}

View File

@@ -1,553 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package db
import (
"bytes"
"fmt"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
)
// FileSet tracks the files of one folder, backed by the database and an
// in-memory metadata tracker of counts and sequences.
type FileSet struct {
	folder string
	db *Lowlevel
	meta *metadataTracker
	updateMutex sync.Mutex // protects database updates and the corresponding metadata changes
}

// The Iterator is called with either a protocol.FileInfo or a
// FileInfoTruncated (depending on the method) and returns true to
// continue iteration, false to stop.
// (NOTE(review): the FileInfoTruncated mention looks historical — the
// signature only takes protocol.FileInfo; confirm before relying on it.)
type Iterator func(f protocol.FileInfo) bool
// NewFileSet returns the FileSet for the given folder, loading its metadata
// tracker from the database and creating a local index ID if none exists.
func NewFileSet(folder string, db *Lowlevel) (*FileSet, error) {
	// Mark that at least one FileSet has been created on this Lowlevel by
	// closing the channel (idempotently); DropDeltaIndexIDs checks this to
	// detect being called too late.
	select {
	case <-db.oneFileSetCreated:
	default:
		close(db.oneFileSetCreated)
	}
	meta, err := db.loadMetadataTracker(folder)
	if err != nil {
		db.handleFailure(err)
		return nil, err
	}
	s := &FileSet{
		folder: folder,
		db: db,
		meta: meta,
		updateMutex: sync.NewMutex(),
	}
	if id := s.IndexID(protocol.LocalDeviceID); id == 0 {
		// No index ID set yet. We create one now.
		id = protocol.NewIndexID()
		err := s.db.setIndexID(protocol.LocalDeviceID[:], []byte(s.folder), id)
		if err != nil && !backend.IsClosed(err) {
			fatalError(err, fmt.Sprintf("%s Creating new IndexID", s.folder), s.db)
		}
	}
	return s, nil
}
// Drop removes all file entries for the given device from the set, resets
// the corresponding metadata counts, and persists the updated metadata.
func (s *FileSet) Drop(device protocol.DeviceID) {
	opStr := fmt.Sprintf("%s Drop(%v)", s.folder, device)
	l.Debugf(opStr)
	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()
	if err := s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta); backend.IsClosed(err) {
		return
	} else if err != nil {
		fatalError(err, opStr, s.db)
	}
	if device == protocol.LocalDeviceID {
		s.meta.resetCounts(device)
		// We deliberately do not reset the sequence number here. Dropping
		// all files for the local device ID only happens in testing - which
		// expects the sequence to be retained, like an old Replace() of all
		// files would do. However, if we ever did it "in production" we
		// would anyway want to retain the sequence for delta indexes to be
		// happy.
	} else {
		// Here, on the other hand, we want to make sure that any file
		// announced from the remote is newer than our current sequence
		// number.
		s.meta.resetAll(device)
	}
	// Persist the updated metadata in a fresh transaction.
	t, err := s.db.newReadWriteTransaction()
	if backend.IsClosed(err) {
		return
	} else if err != nil {
		fatalError(err, opStr, s.db)
	}
	defer t.close()
	if err := s.meta.toDB(t, []byte(s.folder)); backend.IsClosed(err) {
		return
	} else if err != nil {
		fatalError(err, opStr, s.db)
	}
	if err := t.Commit(); backend.IsClosed(err) {
		return
	} else if err != nil {
		fatalError(err, opStr, s.db)
	}
}
// Update inserts or replaces the given files for the given device. Names
// are normalized to wire format and duplicate entries collapsed before the
// database is touched.
func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
	opStr := fmt.Sprintf("%s Update(%v, [%d])", s.folder, device, len(fs))
	l.Debugf(opStr)
	// do not modify fs in place, it is still used in outer scope
	fs = append([]protocol.FileInfo(nil), fs...)
	// If one file info is present multiple times, only keep the last.
	// Updating the same file multiple times is problematic, because the
	// previous updates won't yet be represented in the db when we update it
	// again. Additionally even if that problem was taken care of, it would
	// be pointless because we remove the previously added file info again
	// right away.
	fs = normalizeFilenamesAndDropDuplicates(fs)
	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()
	if device == protocol.LocalDeviceID {
		// For the local device we have a bunch of metadata to track.
		if err := s.db.updateLocalFiles([]byte(s.folder), fs, s.meta); err != nil && !backend.IsClosed(err) {
			fatalError(err, opStr, s.db)
		}
		return
	}
	// Easy case, just update the files and we're done.
	if err := s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta); err != nil && !backend.IsClosed(err) {
		fatalError(err, opStr, s.db)
	}
}
// RemoveLocalItems removes the given items for the local device, after
// normalizing their names to wire format.
func (s *FileSet) RemoveLocalItems(items []string) {
	opStr := fmt.Sprintf("%s RemoveLocalItems([%d])", s.folder, len(items))
	l.Debugf(opStr)
	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()
	for i, item := range items {
		items[i] = osutil.NormalizedFilename(item)
	}
	err := s.db.removeLocalFiles([]byte(s.folder), items, s.meta)
	if err != nil && !backend.IsClosed(err) {
		fatalError(err, opStr, s.db)
	}
}
// Snapshot is a point-in-time, read-only view of a FileSet, backed by a
// read transaction and a snapshot of the metadata counts.
type Snapshot struct {
	folder string
	t readOnlyTransaction
	meta *countsMap
	// fatalError reports an unrecoverable database error; see fatalError
	// below, which panics rather than returning.
	fatalError func(error, string)
}
// Snapshot returns a consistent point-in-time view of the set. The caller
// must call Release on the returned Snapshot when done with it.
func (s *FileSet) Snapshot() (*Snapshot, error) {
	opStr := fmt.Sprintf("%s Snapshot()", s.folder)
	l.Debugf(opStr)
	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()
	t, err := s.db.newReadOnlyTransaction()
	if err != nil {
		s.db.handleFailure(err)
		return nil, err
	}
	snap := &Snapshot{
		folder: s.folder,
		t:      t,
		meta:   s.meta.Snapshot(),
	}
	snap.fatalError = func(err error, opStr string) {
		fatalError(err, opStr, s.db)
	}
	return snap, nil
}

// Release frees the underlying read transaction. The Snapshot must not be
// used afterwards.
func (s *Snapshot) Release() {
	s.t.close()
}
// WithNeed iterates over the files needed by the given device, converting
// names to native format.
func (s *Snapshot) WithNeed(device protocol.DeviceID, fn Iterator) {
	opStr := fmt.Sprintf("%s WithNeed(%v)", s.folder, device)
	l.Debugf(opStr)
	err := s.t.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// WithNeedTruncated is like WithNeed, requesting truncated file infos.
func (s *Snapshot) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
	opStr := fmt.Sprintf("%s WithNeedTruncated(%v)", s.folder, device)
	l.Debugf(opStr)
	err := s.t.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// WithHave iterates over the files the given device has announced.
func (s *Snapshot) WithHave(device protocol.DeviceID, fn Iterator) {
	opStr := fmt.Sprintf("%s WithHave(%v)", s.folder, device)
	l.Debugf(opStr)
	err := s.t.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// WithHaveTruncated is like WithHave, requesting truncated file infos.
func (s *Snapshot) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
	opStr := fmt.Sprintf("%s WithHaveTruncated(%v)", s.folder, device)
	l.Debugf(opStr)
	err := s.t.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// WithHaveSequence iterates over local files starting at the given
// sequence number.
func (s *Snapshot) WithHaveSequence(startSeq int64, fn Iterator) {
	opStr := fmt.Sprintf("%s WithHaveSequence(%v)", s.folder, startSeq)
	l.Debugf(opStr)
	err := s.t.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// Except for an item with a path equal to prefix, only children of prefix are iterated.
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
func (s *Snapshot) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix string, fn Iterator) {
	opStr := fmt.Sprintf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix)
	l.Debugf(opStr)
	err := s.t.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// WithGlobal iterates over the global (merged) view of the folder.
func (s *Snapshot) WithGlobal(fn Iterator) {
	opStr := fmt.Sprintf("%s WithGlobal()", s.folder)
	l.Debugf(opStr)
	err := s.t.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// WithGlobalTruncated is like WithGlobal, requesting truncated file infos.
func (s *Snapshot) WithGlobalTruncated(fn Iterator) {
	opStr := fmt.Sprintf("%s WithGlobalTruncated()", s.folder)
	l.Debugf(opStr)
	err := s.t.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// Except for an item with a path equal to prefix, only children of prefix are iterated.
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
func (s *Snapshot) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
	opStr := fmt.Sprintf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix)
	l.Debugf(opStr)
	err := s.t.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}
// Get returns the file entry announced by the given device, with the name
// in native format, and whether it exists.
func (s *Snapshot) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
	opStr := fmt.Sprintf("%s Get(%v)", s.folder, file)
	l.Debugf(opStr)
	name := []byte(osutil.NormalizedFilename(file))
	f, ok, err := s.t.getFile([]byte(s.folder), device[:], name)
	if backend.IsClosed(err) {
		return protocol.FileInfo{}, false
	}
	if err != nil {
		s.fatalError(err, opStr)
	}
	f.Name = osutil.NativeFilename(f.Name)
	return f, ok
}

// GetGlobal returns the global version of the given file, with the name in
// native format, and whether it exists.
func (s *Snapshot) GetGlobal(file string) (protocol.FileInfo, bool) {
	opStr := fmt.Sprintf("%s GetGlobal(%v)", s.folder, file)
	l.Debugf(opStr)
	name := []byte(osutil.NormalizedFilename(file))
	_, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), name, false)
	if backend.IsClosed(err) {
		return protocol.FileInfo{}, false
	}
	if err != nil {
		s.fatalError(err, opStr)
	}
	if !ok {
		return protocol.FileInfo{}, false
	}
	fi.Name = osutil.NativeFilename(fi.Name)
	return fi, true
}

// GetGlobalTruncated is like GetGlobal, requesting a truncated file info.
func (s *Snapshot) GetGlobalTruncated(file string) (protocol.FileInfo, bool) {
	opStr := fmt.Sprintf("%s GetGlobalTruncated(%v)", s.folder, file)
	l.Debugf(opStr)
	name := []byte(osutil.NormalizedFilename(file))
	_, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), name, true)
	if backend.IsClosed(err) {
		return protocol.FileInfo{}, false
	}
	if err != nil {
		s.fatalError(err, opStr)
	}
	if !ok {
		return protocol.FileInfo{}, false
	}
	fi.Name = osutil.NativeFilename(fi.Name)
	return fi, true
}
// Availability returns the device IDs from the underlying availability
// lookup for the given file, or nil when the database is closed.
func (s *Snapshot) Availability(file string) []protocol.DeviceID {
	opStr := fmt.Sprintf("%s Availability(%v)", s.folder, file)
	l.Debugf(opStr)
	name := []byte(osutil.NormalizedFilename(file))
	av, err := s.t.availability([]byte(s.folder), name)
	if backend.IsClosed(err) {
		return nil
	}
	if err != nil {
		s.fatalError(err, opStr)
	}
	return av
}

// DebugGlobalVersions returns the raw global version list for the given
// file, for API debugging. It returns nil when the database is closed or
// the entry does not exist.
func (s *Snapshot) DebugGlobalVersions(file string) *DebugVersionList {
	opStr := fmt.Sprintf("%s DebugGlobalVersions(%v)", s.folder, file)
	l.Debugf(opStr)
	name := []byte(osutil.NormalizedFilename(file))
	vl, err := s.t.getGlobalVersions(nil, []byte(s.folder), name)
	if backend.IsClosed(err) || backend.IsNotFound(err) {
		return nil
	}
	if err != nil {
		s.fatalError(err, opStr)
	}
	return &DebugVersionList{vl}
}
// Sequence returns the tracked sequence number for the given device in
// this snapshot.
func (s *Snapshot) Sequence(device protocol.DeviceID) int64 {
	return s.meta.Counts(device, 0).Sequence
}

// RemoteSequences returns a map of the sequence numbers seen for each
// remote device sharing this folder.
func (s *Snapshot) RemoteSequences() map[protocol.DeviceID]int64 {
	res := make(map[protocol.DeviceID]int64)
	for _, device := range s.meta.devices() {
		if device == protocol.EmptyDeviceID || device == protocol.LocalDeviceID || device == protocol.GlobalDeviceID {
			// Special pseudo-devices; not remotes.
			continue
		}
		seq := s.Sequence(device)
		if seq > 0 {
			res[device] = seq
		}
	}
	return res
}

// LocalSize returns the local counts, including receive-only changed files.
func (s *Snapshot) LocalSize() Counts {
	roChanged := s.ReceiveOnlyChangedSize()
	return s.meta.Counts(protocol.LocalDeviceID, 0).Add(roChanged)
}

// ReceiveOnlyChangedSize returns the counts of locally changed files in
// receive-only folders.
func (s *Snapshot) ReceiveOnlyChangedSize() Counts {
	return s.meta.Counts(protocol.LocalDeviceID, protocol.FlagLocalReceiveOnly)
}

// GlobalSize returns the counts for the global (merged) view.
func (s *Snapshot) GlobalSize() Counts {
	return s.meta.Counts(protocol.GlobalDeviceID, 0)
}

// NeedSize returns the counts of files needed by the given device.
func (s *Snapshot) NeedSize(device protocol.DeviceID) Counts {
	return s.meta.Counts(device, needFlag)
}
// WithBlocksHash iterates over local files matching the given block list
// hash, with names in native format.
func (s *Snapshot) WithBlocksHash(hash []byte, fn Iterator) {
	opStr := fmt.Sprintf(`%s WithBlocksHash("%x")`, s.folder, hash)
	l.Debugf(opStr)
	err := s.t.withBlocksHash([]byte(s.folder), hash, nativeFileIterator(fn))
	if err == nil || backend.IsClosed(err) {
		return
	}
	s.fatalError(err, opStr)
}

// Sequence returns the tracked sequence number for the given device.
func (s *FileSet) Sequence(device protocol.DeviceID) int64 {
	return s.meta.Sequence(device)
}
// IndexID returns the index ID stored for the given device and this
// folder, or zero when the database is closed.
func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
	opStr := fmt.Sprintf("%s IndexID(%v)", s.folder, device)
	l.Debugf(opStr)
	id, err := s.db.getIndexID(device[:], []byte(s.folder))
	if backend.IsClosed(err) {
		return 0
	}
	if err != nil {
		fatalError(err, opStr, s.db)
	}
	return id
}

// SetIndexID stores the index ID for a remote device. Setting the local
// device's ID explicitly is a programming error and panics.
func (s *FileSet) SetIndexID(device protocol.DeviceID, id protocol.IndexID) {
	if device == protocol.LocalDeviceID {
		panic("do not explicitly set index ID for local device")
	}
	opStr := fmt.Sprintf("%s SetIndexID(%v, %v)", s.folder, device, id)
	l.Debugf(opStr)
	err := s.db.setIndexID(device[:], []byte(s.folder), id)
	if err != nil && !backend.IsClosed(err) {
		fatalError(err, opStr, s.db)
	}
}
// MtimeOption returns a filesystem option backed by this folder's mtime
// namespace in the database, or nil when the database is closed.
func (s *FileSet) MtimeOption() fs.Option {
	opStr := fmt.Sprintf("%s MtimeOption()", s.folder)
	l.Debugf(opStr)
	prefix, err := s.db.keyer.GenerateMtimesKey(nil, []byte(s.folder))
	if backend.IsClosed(err) {
		return nil
	} else if err != nil {
		fatalError(err, opStr, s.db)
	}
	kv := NewNamespacedKV(s.db, string(prefix))
	return fs.NewMtimeOption(kv)
}

// ListDevices returns the devices for which this set tracks files.
func (s *FileSet) ListDevices() []protocol.DeviceID {
	return s.meta.devices()
}

// RepairSequence repairs the sequence index for this folder, returning the
// result of repairSequenceGCLocked.
func (s *FileSet) RepairSequence() (int, error) {
	s.updateAndGCMutexLock() // Ensures consistent locking order
	defer s.updateMutex.Unlock()
	defer s.db.gcMut.RUnlock()
	return s.db.repairSequenceGCLocked(s.folder, s.meta)
}

// updateAndGCMutexLock takes the update mutex before the GC read lock,
// establishing the lock ordering used above.
func (s *FileSet) updateAndGCMutexLock() {
	s.updateMutex.Lock()
	s.db.gcMut.RLock()
}
// DropFolder clears out all information related to the given folder from the
// database.
func DropFolder(db *Lowlevel, folder string) {
	opStr := fmt.Sprintf("DropFolder(%v)", folder)
	l.Debugf(opStr)
	// Each dropper removes one category of folder-related data; they run
	// in order and stop early only if the database is closed.
	droppers := []func([]byte) error{
		db.dropFolder,
		db.dropMtimes,
		db.dropFolderMeta,
		db.dropFolderIndexIDs,
		db.folderIdx.Delete,
	}
	for _, drop := range droppers {
		if err := drop([]byte(folder)); backend.IsClosed(err) {
			return
		} else if err != nil {
			fatalError(err, opStr, db)
		}
	}
}

// DropDeltaIndexIDs removes all delta index IDs from the database.
// This will cause a full index transmission on the next connection.
// Must be called before using FileSets, i.e. before NewFileSet is called for
// the first time.
func DropDeltaIndexIDs(db *Lowlevel) {
	// NewFileSet closes oneFileSetCreated on first use; a closed channel
	// here means we are being called too late.
	select {
	case <-db.oneFileSetCreated:
		panic("DropDeltaIndexIDs must not be called after NewFileSet for the same Lowlevel")
	default:
	}
	opStr := "DropDeltaIndexIDs"
	l.Debugf(opStr)
	err := db.dropIndexIDs()
	if backend.IsClosed(err) {
		return
	} else if err != nil {
		fatalError(err, opStr, db)
	}
}
// normalizeFilenamesAndDropDuplicates converts every file name to its
// normalized (database) form and, when two entries normalize to the same
// name, keeps only the last occurrence. The slice is modified in place and
// the (possibly shortened) slice is returned.
func normalizeFilenamesAndDropDuplicates(fs []protocol.FileInfo) []protocol.FileInfo {
	// Record the latest position of each normalized name; earlier
	// duplicates are zeroed out so their Name becomes empty.
	positions := make(map[string]int, len(fs))
	for i, f := range fs {
		norm := osutil.NormalizedFilename(f.Name)
		if pos, ok := positions[norm]; ok {
			fs[pos] = protocol.FileInfo{}
		}
		positions[norm] = i
		fs[i].Name = norm
	}
	// Compact in a single pass, reusing the backing array. The original
	// implementation deleted elements one at a time with append, which is
	// O(n²) in the number of duplicates.
	out := fs[:0]
	for i := range fs {
		if fs[i].Name != "" {
			out = append(out, fs[i])
		}
	}
	return out
}
// nativeFileIterator wraps an Iterator so that each file name is converted
// from the normalized database form to the native OS representation before
// the wrapped iterator sees it.
func nativeFileIterator(fn Iterator) Iterator {
	return func(fi protocol.FileInfo) bool {
		native := osutil.NativeFilename(fi.Name)
		fi.Name = native
		return fn(fi)
	}
}
// fatalError reports an unrecoverable database error: it lets the database
// inspect the error (checkErrorForRepair — presumably to schedule a repair
// on next start; see Lowlevel), logs a warning, and panics. It never
// returns.
func fatalError(err error, opStr string, db *Lowlevel) {
	db.checkErrorForRepair(err)
	l.Warnf("Fatal error: %v: %v", opStr, err)
	// Scrub the database path from the message so local paths don't leak
	// into panic output / bug reports.
	panic(ldbPathRe.ReplaceAllString(err.Error(), "$1 x: "))
}
// DebugVersionList is the database-internal representation of a file
// version list, with a nicer string representation, used only by API debug
// methods. It embeds the wire-format VersionList directly.
type DebugVersionList struct {
	*dbproto.VersionList
}
// String renders the version list as a bracketed, comma separated list of
// versions, each with its valid and invalid device IDs in short form.
func (vl DebugVersionList) String() string {
	var sb bytes.Buffer
	var id protocol.DeviceID
	// writeDevs prints a comma separated list of short device IDs.
	writeDevs := func(devs [][]byte) {
		for i, dev := range devs {
			if i > 0 {
				sb.WriteString(", ")
			}
			copy(id[:], dev)
			fmt.Fprint(&sb, id.Short())
		}
	}
	sb.WriteString("[")
	for i, v := range vl.Versions {
		if i > 0 {
			sb.WriteString(", ")
		}
		fmt.Fprintf(&sb, "{Version:%v, Deleted:%v, Devices:[", protocol.VectorFromWire(v.Version), v.Deleted)
		writeDevs(v.Devices)
		sb.WriteString("], Invalid:[")
		writeDevs(v.InvalidDevices)
		fmt.Fprint(&sb, "]}")
	}
	sb.WriteString("]")
	return sb.String()
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,62 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"testing"
)
// TestSmallIndex exercises smallIndex ID allocation: IDs are handed out
// sequentially, deleted keys do not free their IDs for reuse, and the
// allocation state survives reloading the index from the database.
func TestSmallIndex(t *testing.T) {
	db := newLowlevelMemory(t)
	idx := newSmallIndex(db, []byte{12, 34})

	// ID zero should be unallocated
	if val, ok := idx.Val(0); ok || val != nil {
		t.Fatal("Unexpected return for nonexistent ID 0")
	}

	// A new key should get ID zero
	if id, err := idx.ID([]byte("hello")); err != nil {
		t.Fatal(err)
	} else if id != 0 {
		t.Fatal("Expected 0, not", id)
	}

	// Looking up ID zero should work
	if val, ok := idx.Val(0); !ok || string(val) != "hello" {
		t.Fatalf(`Expected true, "hello", not %v, %q`, ok, val)
	}

	// Delete the key
	idx.Delete([]byte("hello"))

	// Next ID should be one
	if id, err := idx.ID([]byte("key2")); err != nil {
		t.Fatal(err)
	} else if id != 1 {
		t.Fatal("Expected 1, not", id)
	}

	// Now lets create a new index instance based on what's actually serialized to the database.
	idx = newSmallIndex(db, []byte{12, 34})

	// Status should be about the same as before.
	if val, ok := idx.Val(0); ok || val != nil {
		t.Fatal("Unexpected return for deleted ID 0")
	}
	if id, err := idx.ID([]byte("key2")); err != nil {
		t.Fatal(err)
	} else if id != 1 {
		t.Fatal("Expected 1, not", id)
	}

	// Setting "hello" again should get us ID 2, not 0 as it was originally.
	if id, err := idx.ID([]byte("hello")); err != nil {
		t.Fatal(err)
	} else if id != 2 {
		t.Fatal("Expected 2, not", id)
	}
}

View File

@@ -1,363 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"bytes"
"fmt"
"strings"
"google.golang.org/protobuf/proto"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/lib/protocol"
)
// CountsSet is a collection of per-bucket Counts together with the time
// the set was created.
type CountsSet struct {
	Counts  []Counts
	Created int64 // unix nanos
}

// Counts holds aggregated file statistics for one device and local-flag
// bucket: item counts by type, total byte size and the sequence number.
type Counts struct {
	Files       int
	Directories int
	Symlinks    int
	Deleted     int
	Bytes       int64
	Sequence    int64             // zero for the global state
	DeviceID    protocol.DeviceID // device ID for remote devices, or special values for local/global
	LocalFlags  uint32            // the local flag for this count bucket
}
// toWire converts the in-memory Counts to its protobuf wire representation.
func (c Counts) toWire() *dbproto.Counts {
	var w dbproto.Counts
	w.Files = int32(c.Files)
	w.Directories = int32(c.Directories)
	w.Symlinks = int32(c.Symlinks)
	w.Deleted = int32(c.Deleted)
	w.Bytes = c.Bytes
	w.Sequence = c.Sequence
	w.DeviceId = c.DeviceID[:]
	w.LocalFlags = c.LocalFlags
	return &w
}
// countsFromWire converts the protobuf wire representation back to the
// in-memory Counts type.
func countsFromWire(w *dbproto.Counts) Counts {
	var c Counts
	c.Files = int(w.Files)
	c.Directories = int(w.Directories)
	c.Symlinks = int(w.Symlinks)
	c.Deleted = int(w.Deleted)
	c.Bytes = w.Bytes
	c.Sequence = w.Sequence
	c.DeviceID = protocol.DeviceID(w.DeviceId)
	c.LocalFlags = w.LocalFlags
	return c
}
// Add returns the element-wise sum of the two Counts. Sequences are added,
// local flags are OR'ed together, and the device ID of the result is reset
// to the empty device.
func (c Counts) Add(other Counts) Counts {
	sum := c
	sum.Files += other.Files
	sum.Directories += other.Directories
	sum.Symlinks += other.Symlinks
	sum.Deleted += other.Deleted
	sum.Bytes += other.Bytes
	sum.Sequence += other.Sequence
	sum.DeviceID = protocol.EmptyDeviceID
	sum.LocalFlags |= other.LocalFlags
	return sum
}
// TotalItems returns the total number of items in the bucket: files,
// directories, symlinks and deleted entries combined.
func (c Counts) TotalItems() int {
	total := c.Files
	total += c.Directories
	total += c.Symlinks
	total += c.Deleted
	return total
}
// String renders the counts including a compact textual form of the local
// flags ("---" when no flags are set).
func (c Counts) String() string {
	var flags strings.Builder
	// Append a label for each flag bit that is set, in a fixed order.
	for _, fl := range []struct {
		bit  uint32
		name string
	}{
		{needFlag, "Need"},
		{protocol.FlagLocalIgnored, "Ignored"},
		{protocol.FlagLocalMustRescan, "Rescan"},
		{protocol.FlagLocalReceiveOnly, "Recvonly"},
		{protocol.FlagLocalUnsupported, "Unsupported"},
	} {
		if c.LocalFlags&fl.bit != 0 {
			flags.WriteString(fl.name)
		}
	}
	if c.LocalFlags != 0 {
		flags.WriteString(fmt.Sprintf("(%x)", c.LocalFlags))
	}
	if flags.Len() == 0 {
		flags.WriteString("---")
	}
	return fmt.Sprintf("{Device:%v, Files:%d, Dirs:%d, Symlinks:%d, Del:%d, Bytes:%d, Seq:%d, Flags:%s}", c.DeviceID, c.Files, c.Directories, c.Symlinks, c.Deleted, c.Bytes, c.Sequence, flags.String())
}
// Equal compares the numbers only, not sequence/dev/flags.
func (c Counts) Equal(o Counts) bool {
	switch {
	case c.Files != o.Files:
		return false
	case c.Directories != o.Directories:
		return false
	case c.Symlinks != o.Symlinks:
		return false
	case c.Deleted != o.Deleted:
		return false
	case c.Bytes != o.Bytes:
		return false
	}
	return true
}
// vlUpdate brings the VersionList up to date with file. It returns, in
// order: the new global FileVersion, the previous global FileVersion (nil
// if there was none), the FileVersion removed for this device (nil if the
// device was not present), whether there was an old global, whether a
// version was removed, whether the global version changed, and any error
// (only possible in db interaction).
func vlUpdate(vl *dbproto.VersionList, folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (*dbproto.FileVersion, *dbproto.FileVersion, *dbproto.FileVersion, bool, bool, bool, error) {
	// Fast path: empty list — the new version trivially becomes global.
	if len(vl.Versions) == 0 {
		nv := newFileVersion(device, file.FileVersion(), file.IsInvalid(), file.IsDeleted())
		vl.Versions = append(vl.Versions, nv)
		return nv, nil, nil, false, false, true, nil
	}

	// Get the current global (before updating). Copied, because the
	// underlying entry may be mutated by the pop/insert below.
	oldFV, haveOldGlobal := vlGetGlobal(vl)
	oldFV = fvCopy(oldFV)

	// Remove ourselves first
	removedFV, haveRemoved, _ := vlPop(vl, device)

	// Find position and insert the file
	err := vlInsert(vl, folder, device, file, t)
	if err != nil {
		return nil, nil, nil, false, false, false, err
	}

	newFV, _ := vlGetGlobal(vl) // We just inserted something above, can't be empty

	if !haveOldGlobal {
		return newFV, nil, removedFV, false, haveRemoved, true, nil
	}

	// The global is unchanged only if both validity and version vector of
	// the old and new global are equal.
	globalChanged := true
	if fvIsInvalid(oldFV) == fvIsInvalid(newFV) && protocol.VectorFromWire(oldFV.Version).Equal(protocol.VectorFromWire(newFV.Version)) {
		globalChanged = false
	}

	return newFV, oldFV, removedFV, true, haveRemoved, globalChanged, nil
}
// vlInsert places the device's version of file at the correct position in
// the version list, appending a new entry at the end if it sorts after all
// existing versions. Only db interaction (via vlCheckInsertAt) can fail.
func vlInsert(vl *dbproto.VersionList, folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) error {
	for i := range vl.Versions {
		added, err := vlCheckInsertAt(vl, i, folder, device, file, t)
		if err != nil {
			return err
		}
		if added {
			return nil
		}
	}
	// Sorts after everything already present; append at the end.
	vl.Versions = append(vl.Versions, newFileVersion(device, file.FileVersion(), file.IsInvalid(), file.IsDeleted()))
	return nil
}
// vlInsertAt inserts v into the version list at position i, shifting the
// tail one step to the right.
func vlInsertAt(vl *dbproto.VersionList, i int, v *dbproto.FileVersion) {
	// Grow by one, shift everything from i rightwards, then place v.
	vl.Versions = append(vl.Versions, nil)
	copy(vl.Versions[i+1:], vl.Versions[i:])
	vl.Versions[i] = v
}
// vlPop removes the given device from the VersionList and returns the
// FileVersion before removing the device, whether it was found/removed at
// all and whether the global changed in the process.
func vlPop(vl *dbproto.VersionList, device []byte) (*dbproto.FileVersion, bool, bool) {
	invDevice, i, j, ok := vlFindDevice(vl, device)
	if !ok {
		return nil, false, false
	}
	globalPos := vlFindGlobal(vl)

	fv := vl.Versions[i]
	if fvDeviceCount(fv) == 1 {
		// We were the only device on this version; the whole version entry
		// goes away. The global changed iff that entry was the global.
		vlPopVersionAt(vl, i)
		return fv, true, globalPos == i
	}

	// Other devices remain on this version; copy before mutating so the
	// caller gets the pre-removal state.
	oldFV := fvCopy(fv)
	if invDevice {
		// Removing an invalid device never affects the global.
		vl.Versions[i].InvalidDevices = popDeviceAt(vl.Versions[i].InvalidDevices, j)
		return oldFV, true, false
	}
	vl.Versions[i].Devices = popDeviceAt(vl.Versions[i].Devices, j)
	// If the last valid device of the previous global was removed above,
	// the global changed.
	return oldFV, true, len(vl.Versions[i].Devices) == 0 && globalPos == i
}
// vlGet returns the FileVersion that contains the given device, and whether
// one was found at all. A fresh empty FileVersion is returned when the
// device is not present.
func vlGet(vl *dbproto.VersionList, device []byte) (*dbproto.FileVersion, bool) {
	if _, i, _, ok := vlFindDevice(vl, device); ok {
		return vl.Versions[i], true
	}
	return &dbproto.FileVersion{}, false
}
// vlGetGlobal returns the current global FileVersion. The returned
// FileVersion may be invalid, if all FileVersions are invalid. Returns
// false only if the VersionList is empty.
func vlGetGlobal(vl *dbproto.VersionList) (*dbproto.FileVersion, bool) {
	if i := vlFindGlobal(vl); i >= 0 {
		return vl.Versions[i], true
	}
	return nil, false
}
// vlFindGlobal returns the index of the first version that isn't invalid,
// or if all versions are invalid just the first version (i.e. 0), or -1 if
// there are no versions at all.
func vlFindGlobal(vl *dbproto.VersionList) int {
	if len(vl.Versions) == 0 {
		return -1
	}
	for i, fv := range vl.Versions {
		if !fvIsInvalid(fv) {
			return i
		}
	}
	// Everything is invalid; the first entry stands in as global.
	return 0
}
// vlFindDevice locates the device in the version list. It returns whether
// the device was found among the invalid devices (true) or the valid ones
// (false), the index of the version entry, the index within that entry's
// device slice, and whether the device was found at all.
func vlFindDevice(vl *dbproto.VersionList, device []byte) (bool, int, int, bool) {
	for i, fv := range vl.Versions {
		if j := deviceIndex(fv.Devices, device); j >= 0 {
			return false, i, j, true
		}
		if j := deviceIndex(fv.InvalidDevices, device); j >= 0 {
			return true, i, j, true
		}
	}
	return false, -1, -1, false
}
// vlPopVersionAt removes the version entry at index i, shifting the tail
// left and shrinking the slice by one.
func vlPopVersionAt(vl *dbproto.VersionList, i int) {
	copy(vl.Versions[i:], vl.Versions[i+1:])
	vl.Versions = vl.Versions[:len(vl.Versions)-1]
}
// vlCheckInsertAt determines if the given device and associated file should
// be inserted into the FileVersion at position i or into a new FileVersion
// at position i. Returns whether the file was inserted, and any error from
// the db lookup needed to resolve concurrent versions.
func vlCheckInsertAt(vl *dbproto.VersionList, i int, folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (bool, error) {
	fv := vl.Versions[i]
	ordering := protocol.VectorFromWire(fv.Version).Compare(file.FileVersion())
	if ordering == protocol.Equal {
		// Same version: just add the device to the existing entry, in the
		// valid or invalid bucket as appropriate.
		if !file.IsInvalid() {
			fv.Devices = append(fv.Devices, device)
		} else {
			fv.InvalidDevices = append(fv.InvalidDevices, device)
		}
		return true, nil
	}
	existingDevice, _ := fvFirstDevice(fv)
	insert, err := shouldInsertBefore(ordering, folder, existingDevice, fvIsInvalid(fv), file, t)
	if err != nil {
		return false, err
	}
	if insert {
		vlInsertAt(vl, i, newFileVersion(device, file.FileVersion(), file.IsInvalid(), file.IsDeleted()))
		return true, nil
	}
	return false, nil
}
// shouldInsertBefore determines whether the file comes before an existing
// entry, given the version ordering (existing compared to new one), the
// existing device and whether the existing version is invalid. A db lookup
// may be needed to resolve concurrent versions, hence the error return.
func shouldInsertBefore(ordering protocol.Ordering, folder, existingDevice []byte, existingInvalid bool, file protocol.FileInfo, t readOnlyTransaction) (bool, error) {
	switch ordering {
	case protocol.Lesser:
		// The version at this point in the list is lesser
		// ("older") than us. We insert ourselves in front of it.
		return true, nil

	case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
		// The version in conflict with us.
		// Check if we can shortcut due to one being invalid.
		if existingInvalid != file.IsInvalid() {
			return existingInvalid, nil
		}
		// We must pull the actual file metadata to determine who wins.
		// If we win, we insert ourselves in front of the loser here.
		// (The "Lesser" and "Greater" in the condition above is just
		// based on the device IDs in the version vector, which is not
		// the only thing we use to determine the winner.)
		of, ok, err := t.getFile(folder, existingDevice, []byte(file.FileName()))
		if err != nil {
			return false, err
		}
		// A surprise missing file entry here is counted as a win for us.
		if !ok {
			return true, nil
		}
		if file.WinsConflict(of) {
			return true, nil
		}
	}
	// Greater (existing is newer) or we lost the conflict: don't insert here.
	return false, nil
}
// deviceIndex returns the position of device in devices, or -1 if it is
// not present.
func deviceIndex(devices [][]byte, device []byte) int {
	for i := range devices {
		if bytes.Equal(devices[i], device) {
			return i
		}
	}
	return -1
}
// popDeviceAt removes the element at index i, reusing the backing array,
// and returns the shortened slice.
func popDeviceAt(devices [][]byte, i int) [][]byte {
	copy(devices[i:], devices[i+1:])
	return devices[:len(devices)-1]
}
// newFileVersion builds a FileVersion for a single device, placing the
// device in the invalid or valid device list depending on the invalid flag.
func newFileVersion(device []byte, version protocol.Vector, invalid, deleted bool) *dbproto.FileVersion {
	devs := [][]byte{device}
	fv := &dbproto.FileVersion{
		Version: version.ToWire(),
		Deleted: deleted,
	}
	if invalid {
		fv.InvalidDevices = devs
	} else {
		fv.Devices = devs
	}
	return fv
}
// fvFirstDevice returns the first device of the version, preferring valid
// devices over invalid ones, and whether any device exists at all.
func fvFirstDevice(fv *dbproto.FileVersion) ([]byte, bool) {
	switch {
	case len(fv.Devices) > 0:
		return fv.Devices[0], true
	case len(fv.InvalidDevices) > 0:
		return fv.InvalidDevices[0], true
	}
	return nil, false
}
// fvIsInvalid reports whether the version is invalid: either nil or held
// by no valid devices.
func fvIsInvalid(fv *dbproto.FileVersion) bool {
	if fv == nil {
		return true
	}
	return len(fv.Devices) == 0
}
// fvDeviceCount returns the total number of devices, valid and invalid,
// on this version.
func fvDeviceCount(fv *dbproto.FileVersion) int {
	n := len(fv.Devices)
	n += len(fv.InvalidDevices)
	return n
}
// fvCopy returns a deep copy of the FileVersion via protobuf cloning.
func fvCopy(fv *dbproto.FileVersion) *dbproto.FileVersion {
	clone := proto.Clone(fv)
	return clone.(*dbproto.FileVersion)
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -1,232 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"encoding/json"
"errors"
"io"
"os"
"testing"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
)
// writeJSONS serializes the database to a JSON stream that can be checked
// in to the repo and used for tests. Each line is an object with "k"
// (key) and "v" (value) byte slices. Panics on any iterator or encoding
// error, as this is a test fixture generator only.
func writeJSONS(w io.Writer, db backend.Backend) {
	it, err := db.NewPrefixIterator(nil)
	if err != nil {
		panic(err)
	}
	defer it.Release()
	enc := json.NewEncoder(w)
	for it.Next() {
		row := map[string][]byte{
			"k": it.Key(),
			"v": it.Value(),
		}
		if err := enc.Encode(row); err != nil {
			panic(err)
		}
	}
}

// we know this function isn't generally used, nonetheless we want it in
// here and the linter to not complain.
var _ = writeJSONS
// openJSONS reads a JSON stream file (as produced by writeJSONS) into a
// fresh in-memory backend DB and returns it.
func openJSONS(file string) (backend.Backend, error) {
	fd, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	// The original implementation leaked the file descriptor.
	defer fd.Close()
	dec := json.NewDecoder(fd)
	db := backend.OpenMemory()
	for {
		var row map[string][]byte
		err := dec.Decode(&row)
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		if err := db.Put(row["k"], row["v"]); err != nil {
			return nil, err
		}
	}
	return db, nil
}
// newLowlevel wraps the given backend in a Lowlevel for tests, failing the
// test on error. The parameter is named "be" rather than "backend" so it
// does not shadow the backend package.
func newLowlevel(t testing.TB, be backend.Backend) *Lowlevel {
	t.Helper()
	ll, err := NewLowlevel(be, events.NoopLogger)
	if err != nil {
		t.Fatal(err)
	}
	return ll
}
// newLowlevelMemory creates a Lowlevel backed by an in-memory database,
// for tests.
func newLowlevelMemory(t testing.TB) *Lowlevel {
	return newLowlevel(t, backend.OpenMemory())
}
// newFileSet creates a FileSet for the given folder and database, failing
// the test on error.
func newFileSet(t testing.TB, folder string, db *Lowlevel) *FileSet {
	t.Helper()
	set, err := NewFileSet(folder, db)
	if err != nil {
		t.Fatal(err)
	}
	return set
}
// snapshot takes a Snapshot of the given FileSet, failing the test on
// error.
func snapshot(t testing.TB, fset *FileSet) *Snapshot {
	t.Helper()
	s, err := fset.Snapshot()
	if err != nil {
		t.Fatal(err)
	}
	return s
}
// The following commented tests were used to generate jsons files to stdout for
// future tests and are kept here for reference (reuse).
// TestGenerateIgnoredFilesDB generates a database with files with invalid flags,
// local and remote, in the format used in 0.14.48.
// func TestGenerateIgnoredFilesDB(t *testing.T) {
// db := OpenMemory()
// fs := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
// fs.Update(protocol.LocalDeviceID, []protocol.FileInfo{
// { // invalid (ignored) file
// Name: "foo",
// Type: protocol.FileInfoTypeFile,
// Invalid: true,
// Version: protocol.Vector{Counters: []protocol.Counter{{ID: 1, Value: 1000}}},
// },
// { // regular file
// Name: "bar",
// Type: protocol.FileInfoTypeFile,
// Version: protocol.Vector{Counters: []protocol.Counter{{ID: 1, Value: 1001}}},
// },
// })
// fs.Update(protocol.DeviceID{42}, []protocol.FileInfo{
// { // invalid file
// Name: "baz",
// Type: protocol.FileInfoTypeFile,
// Invalid: true,
// Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1000}}},
// },
// { // regular file
// Name: "quux",
// Type: protocol.FileInfoTypeFile,
// Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1002}}},
// },
// })
// writeJSONS(os.Stdout, db.DB)
// }
// TestGenerateUpdate0to3DB generates a database with files with invalid flags, prefixed
// by a slash and other files to test database migration from version 0 to 3, in the
// format used in 0.14.45.
// func TestGenerateUpdate0to3DB(t *testing.T) {
// db := OpenMemory()
// fs := newFileSet(t, update0to3Folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
// for devID, files := range haveUpdate0to3 {
// fs.Update(devID, files)
// }
// writeJSONS(os.Stdout, db.DB)
// }
// func TestGenerateUpdateTo10(t *testing.T) {
// db := newLowlevelMemory(t)
// defer db.Close()
// if err := UpdateSchema(db); err != nil {
// t.Fatal(err)
// }
// fs := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), db)
// files := []protocol.FileInfo{
// {Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Deleted: true, Sequence: 1},
// {Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2), Sequence: 2},
// {Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Deleted: true, Sequence: 3},
// }
// fs.Update(protocol.LocalDeviceID, files)
// files[1].Version = files[1].Version.Update(remoteDevice0.Short())
// files[1].Deleted = true
// files[2].Version = files[2].Version.Update(remoteDevice0.Short())
// files[2].Blocks = genBlocks(1)
// files[2].Deleted = false
// fs.Update(remoteDevice0, files)
// fd, err := os.Create("./testdata/v1.4.0-updateTo10.json")
// if err != nil {
// panic(err)
// }
// defer fd.Close()
// writeJSONS(fd, db)
// }
// TestFileInfoBatchError verifies the behaviour of the flush function
// returning an error: the error is surfaced by Flush, not retried until
// Reset, and Reset clears both the error and the pending file list.
func TestFileInfoBatchError(t *testing.T) {
	var errReturn error
	var called int
	b := NewFileInfoBatch(func([]protocol.FileInfo) error {
		called++
		return errReturn
	})

	// Flush should work when the flush function error is nil.
	b.Append(protocol.FileInfo{Name: "test"})
	if err := b.Flush(); err != nil {
		t.Fatalf("expected nil, got %v", err)
	}
	if called != 1 {
		t.Fatalf("expected 1, got %d", called)
	}

	// Flush should fail with an error return.
	errReturn = errors.New("problem")
	b.Append(protocol.FileInfo{Name: "test"})
	if err := b.Flush(); !errors.Is(err, errReturn) {
		t.Fatalf("expected %v, got %v", errReturn, err)
	}
	if called != 2 {
		t.Fatalf("expected 2, got %d", called)
	}

	// Flush function should not be called again when it's already errored,
	// same error should be returned by Flush().
	if err := b.Flush(); !errors.Is(err, errReturn) {
		t.Fatalf("expected %v, got %v", errReturn, err)
	}
	if called != 2 {
		t.Fatalf("expected 2, got %d", called)
	}

	// Reset should clear the error (and the file list).
	errReturn = nil
	b.Reset()
	b.Append(protocol.FileInfo{Name: "test"})
	if err := b.Flush(); err != nil {
		t.Fatalf("expected nil, got %v", err)
	}
	if called != 3 {
		t.Fatalf("expected 3, got %d", called)
	}
}

View File

@@ -189,7 +189,7 @@ func TestRepro9677MissingMtimeFS(t *testing.T) {
testTime := time.Unix(1723491493, 123456789)
// Create a file with an mtime FS entry
firstFS := NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{}, NewMtimeOption(mtimeDB))
firstFS := NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{}, NewMtimeOption(mtimeDB, ""))
// Create a file, set its mtime and check that we get the expected mtime when stat-ing.
file, err := firstFS.Create(name)
@@ -231,6 +231,6 @@ func TestRepro9677MissingMtimeFS(t *testing.T) {
// be without mtime, even if requested:
NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{})
newFS := NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{}, NewMtimeOption(mtimeDB))
newFS := NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{}, NewMtimeOption(mtimeDB, ""))
checkMtime(newFS)
}

View File

@@ -7,21 +7,21 @@
package fs
import (
"errors"
"time"
)
// The database is where we store the virtual mtimes
type database interface {
Bytes(key string) (data []byte, ok bool, err error)
PutBytes(key string, data []byte) error
Delete(key string) error
GetMtime(folder, name string) (ondisk, virtual time.Time)
PutMtime(folder, name string, ondisk, virtual time.Time) error
DeleteMtime(folder, name string) error
}
type mtimeFS struct {
Filesystem
chtimes func(string, time.Time, time.Time) error
db database
folderID string
caseInsensitive bool
}
@@ -34,16 +34,18 @@ func WithCaseInsensitivity(v bool) MtimeFSOption {
}
type optionMtime struct {
db database
options []MtimeFSOption
db database
folderID string
options []MtimeFSOption
}
// NewMtimeOption makes any filesystem provide nanosecond mtime precision,
// regardless of what shenanigans the underlying filesystem gets up to.
func NewMtimeOption(db database, options ...MtimeFSOption) Option {
func NewMtimeOption(db database, folderID string, options ...MtimeFSOption) Option {
return &optionMtime{
db: db,
options: options,
db: db,
folderID: folderID,
options: options,
}
}
@@ -52,6 +54,7 @@ func (o *optionMtime) apply(fs Filesystem) Filesystem {
Filesystem: fs,
chtimes: fs.Chtimes, // for mocking it out in the tests
db: o.db,
folderID: o.folderID,
}
for _, opt := range o.options {
opt(f)
@@ -84,14 +87,11 @@ func (f *mtimeFS) Stat(name string) (FileInfo, error) {
return nil, err
}
mtimeMapping, err := f.load(name)
if err != nil {
return nil, err
}
if mtimeMapping.Real.Equal(info.ModTime()) {
ondisk, virtual := f.load(name)
if ondisk.Equal(info.ModTime()) {
info = mtimeFileInfo{
FileInfo: info,
mtime: mtimeMapping.Virtual,
mtime: virtual,
}
}
@@ -104,14 +104,11 @@ func (f *mtimeFS) Lstat(name string) (FileInfo, error) {
return nil, err
}
mtimeMapping, err := f.load(name)
if err != nil {
return nil, err
}
if mtimeMapping.Real.Equal(info.ModTime()) {
ondisk, virtual := f.load(name)
if ondisk.Equal(info.ModTime()) {
info = mtimeFileInfo{
FileInfo: info,
mtime: mtimeMapping.Virtual,
mtime: virtual,
}
}
@@ -150,43 +147,27 @@ func (*mtimeFS) wrapperType() filesystemWrapperType {
return filesystemWrapperTypeMtime
}
func (f *mtimeFS) save(name string, real, virtual time.Time) {
func (f *mtimeFS) save(name string, ondisk, virtual time.Time) {
if f.caseInsensitive {
name = UnicodeLowercaseNormalized(name)
}
if real.Equal(virtual) {
if ondisk.Equal(virtual) {
// If the virtual time and the real on disk time are equal we don't
// need to store anything.
f.db.Delete(name)
_ = f.db.DeleteMtime(f.folderID, name)
return
}
mtime := MtimeMapping{
Real: real,
Virtual: virtual,
}
bs, _ := mtime.Marshal() // Can't fail
f.db.PutBytes(name, bs)
_ = f.db.PutMtime(f.folderID, name, ondisk, virtual)
}
func (f *mtimeFS) load(name string) (MtimeMapping, error) {
func (f *mtimeFS) load(name string) (ondisk, virtual time.Time) {
if f.caseInsensitive {
name = UnicodeLowercaseNormalized(name)
}
data, exists, err := f.db.Bytes(name)
if err != nil {
return MtimeMapping{}, err
} else if !exists {
return MtimeMapping{}, nil
}
var mtime MtimeMapping
if err := mtime.Unmarshal(data); err != nil {
return MtimeMapping{}, err
}
return mtime, nil
return f.db.GetMtime(f.folderID, name)
}
// The mtimeFileInfo is an os.FileInfo that lies about the ModTime().
@@ -211,14 +192,11 @@ func (f mtimeFile) Stat() (FileInfo, error) {
return nil, err
}
mtimeMapping, err := f.fs.load(f.Name())
if err != nil {
return nil, err
}
if mtimeMapping.Real.Equal(info.ModTime()) {
ondisk, virtual := f.fs.load(f.Name())
if ondisk.Equal(info.ModTime()) {
info = mtimeFileInfo{
FileInfo: info,
mtime: mtimeMapping.Virtual,
mtime: virtual,
}
}
@@ -230,38 +208,14 @@ func (f mtimeFile) unwrap() File {
return f.File
}
// MtimeMapping represents the mapping as stored in the database
type MtimeMapping struct {
// "Real" is the on disk timestamp
Real time.Time `json:"real"`
// "Virtual" is what want the timestamp to be
Virtual time.Time `json:"virtual"`
}
func (t *MtimeMapping) Marshal() ([]byte, error) {
bs0, _ := t.Real.MarshalBinary()
bs1, _ := t.Virtual.MarshalBinary()
return append(bs0, bs1...), nil
}
func (t *MtimeMapping) Unmarshal(bs []byte) error {
if err := t.Real.UnmarshalBinary(bs[:len(bs)/2]); err != nil {
return err
}
if err := t.Virtual.UnmarshalBinary(bs[len(bs)/2:]); err != nil {
return err
}
return nil
}
func GetMtimeMapping(fs Filesystem, file string) (MtimeMapping, error) {
func GetMtimeMapping(fs Filesystem, file string) (ondisk, virtual time.Time) {
fs, ok := unwrapFilesystem(fs, filesystemWrapperTypeMtime)
if !ok {
return MtimeMapping{}, errors.New("failed to unwrap")
return time.Time{}, time.Time{}
}
mtimeFs, ok := fs.(*mtimeFS)
if !ok {
return MtimeMapping{}, errors.New("unwrapping failed")
return time.Time{}, time.Time{}
}
return mtimeFs.load(file)
}

Some files were not shown because too many files have changed in this diff Show More