Mirror of https://github.com/syncthing/syncthing.git, synced 2026-01-07 21:39:18 -05:00

Compare commits: v2.0.0-bet ... v2.0.0-bet (31 commits)
| SHA1 |
|---|
| 95b39a791d |
| fa0d933e49 |
| e0c1abc5fe |
| 8372c0288f |
| 5f5d672a7d |
| d23cd197e1 |
| d7ca483df1 |
| e48be98cd5 |
| cbded11c43 |
| d5aa991b73 |
| 05210d0325 |
| 55da878452 |
| e9a2ff3aa6 |
| c9650fc7d5 |
| cf1cf85ce6 |
| 2301f72c5b |
| 7d51b1b620 |
| f7c8efd93c |
| 3e7ccf7c48 |
| fa3b9acca3 |
| bae976905c |
| 6bc2784e9a |
| 1dbdd6b720 |
| f15d50c2e8 |
| 8a2d8ebf81 |
| b88aea34b6 |
| 82a0dd8eaa |
| 4096a35b86 |
| 86cbc2486f |
| 0bcc31d058 |
| f9007ed106 |
.github/workflows/build-syncthing.yaml (vendored), 64 lines changed

@@ -734,15 +734,12 @@ jobs:
       - name: Push artifacts
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: s3
-          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
-          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
-          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
-          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
-          RCLONE_CONFIG_OBJSTORE_ACL: public-read
+          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
+          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
+          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
+          RCLONE_AZUREBLOB_ACCESS_TIER: hot
         with:
-          args: sync packages objstore:${{ secrets.S3_BUCKET }}/nightly
+          args: sync -v packages objstore:nightly

       #
       # Push release artifacts to Spaces
@@ -788,28 +785,22 @@ jobs:
       - name: Push to object store (${{ env.VERSION }})
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: s3
-          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
-          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
-          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
-          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
-          RCLONE_CONFIG_OBJSTORE_ACL: public-read
+          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
+          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
+          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
+          RCLONE_AZUREBLOB_ACCESS_TIER: cool
         with:
-          args: sync packages objstore:${{ secrets.S3_BUCKET }}/release/${{ env.VERSION }}
+          args: sync -v packages objstore:release/${{ env.VERSION }}

       - name: Push to object store (latest)
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: s3
-          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
-          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
-          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
-          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
-          RCLONE_CONFIG_OBJSTORE_ACL: public-read
+          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
+          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
+          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
+          RCLONE_AZUREBLOB_ACCESS_TIER: hot
         with:
-          args: sync objstore:${{ secrets.S3_BUCKET }}/release/${{ env.VERSION }} objstore:${{ secrets.S3_BUCKET }}/release/latest
+          args: sync -v objstore:release/${{ env.VERSION }} objstore:release/latest

       #
       # Push Debian/APT archive
@@ -858,15 +849,11 @@ jobs:
       - name: Pull archive
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: s3
-          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
-          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
-          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
-          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
-          RCLONE_CONFIG_OBJSTORE_ACL: public-read
+          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
+          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
+          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
         with:
-          args: sync objstore:syncthing-apt/dists dists
+          args: sync objstore:apt/dists dists

       - name: Update archive
         uses: docker://ghcr.io/kastelo/ezapt:latest
@@ -881,15 +868,12 @@ jobs:
       - name: Push archive
         uses: docker://docker.io/rclone/rclone:latest
         env:
-          RCLONE_CONFIG_OBJSTORE_TYPE: s3
-          RCLONE_CONFIG_OBJSTORE_PROVIDER: ${{ secrets.S3_PROVIDER }}
-          RCLONE_CONFIG_OBJSTORE_ACCESS_KEY_ID: ${{ secrets.S3_ACCESS_KEY_ID }}
-          RCLONE_CONFIG_OBJSTORE_SECRET_ACCESS_KEY: ${{ secrets.S3_SECRET_ACCESS_KEY }}
-          RCLONE_CONFIG_OBJSTORE_ENDPOINT: ${{ secrets.S3_ENDPOINT }}
-          RCLONE_CONFIG_OBJSTORE_REGION: ${{ secrets.S3_REGION }}
-          RCLONE_CONFIG_OBJSTORE_ACL: public-read
+          RCLONE_CONFIG_OBJSTORE_TYPE: ${{ secrets.AZUREBLOB_TYPE }}
+          RCLONE_CONFIG_OBJSTORE_ACCOUNT: ${{ secrets.AZUREBLOB_ACCOUNT }}
+          RCLONE_CONFIG_OBJSTORE_KEY: ${{ secrets.AZUREBLOB_KEY }}
+          RCLONE_AZUREBLOB_ACCESS_TIER: hot
         with:
-          args: sync dists -v objstore:syncthing-apt/dists
+          args: sync -v dists objstore:apt/dists

       #
       # Build and push to Docker Hub
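The pattern in all four hunks is the same: the inline S3 remote definition becomes an Azure Blob one, and destination paths become container-relative (`objstore:nightly` instead of `objstore:$S3_BUCKET/nightly`). rclone supports defining a remote entirely through `RCLONE_CONFIG_<REMOTE>_<OPTION>` environment variables, and backend-wide options through `RCLONE_<BACKEND>_<OPTION>` variables such as `RCLONE_AZUREBLOB_ACCESS_TIER`. Below is a minimal Go sketch of the equivalent invocation outside of Actions; the `azureblob` type and the credential values are placeholders standing in for the `AZUREBLOB_*` repository secrets, and rclone is assumed to be on PATH:

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	// Same sync the "Push artifacts" step performs, with the remote defined
	// purely via environment variables instead of an rclone.conf file.
	cmd := exec.Command("rclone", "sync", "-v", "packages", "objstore:nightly")
	cmd.Env = append(os.Environ(),
		"RCLONE_CONFIG_OBJSTORE_TYPE=azureblob",         // assumed value of secrets.AZUREBLOB_TYPE
		"RCLONE_CONFIG_OBJSTORE_ACCOUNT=storageaccount", // placeholder for secrets.AZUREBLOB_ACCOUNT
		"RCLONE_CONFIG_OBJSTORE_KEY=base64key==",        // placeholder for secrets.AZUREBLOB_KEY
		"RCLONE_AZUREBLOB_ACCESS_TIER=hot",              // backend-wide option, as in the workflow
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
```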
AUTHORS, 1 line changed

@@ -324,6 +324,7 @@ Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
 Sven Bachmann <dev@mcbachmann.de>
 Syncthing Automation <automation@syncthing.net>
+Syncthing Release Automation <release@syncthing.net>
 Sébastien WENSKE <sebastien@wenske.fr>
 Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
 Terrance <git@terrance.allofti.me>
 Thomas <9749173+uhthomas@users.noreply.github.com>
build.go, 3 lines changed

@@ -628,6 +628,9 @@ func buildDeb(target target) {
 		// than just 0.14.26. This rectifies that.
 		debver = strings.Replace(debver, "-", "~", -1)
 	}
+	if strings.Contains(debver, "_") {
+		debver = strings.Replace(debver, "_", "~", -1)
+	}
 	args := []string{
 		"-t", "deb",
 		"-s", "dir",
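The new hunk extends the existing `-` handling to `_`, for the reason the surrounding comment gives: dpkg's version comparison sorts `~` before anything else, including the end of the string, so rewriting pre-release separators to `~` makes `0.14.26~rc.1` order before `0.14.26`. A hypothetical standalone distillation of the mangling (`strings.ReplaceAll` is equivalent to the `strings.Replace(..., -1)` the hunk uses):

```go
package main

import (
	"fmt"
	"strings"
)

// debianizeVersion rewrites pre-release separators to "~", which dpkg
// sorts before everything, so pre-releases order below the final release.
func debianizeVersion(v string) string {
	if strings.Contains(v, "-") {
		v = strings.ReplaceAll(v, "-", "~")
	}
	if strings.Contains(v, "_") {
		v = strings.ReplaceAll(v, "_", "~")
	}
	return v
}

func main() {
	fmt.Println(debianizeVersion("0.14.26-rc.1")) // 0.14.26~rc.1
	fmt.Println(debianizeVersion("2.0.0_beta"))   // 2.0.0~beta
}
```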
@@ -10,7 +10,7 @@ to NAT or firewall issues.

 There is very little reason why you'd want to run this yourself, as
 `relaypoolsrv` is just used for announcement and lookup of public relay
-servers. If you are looking to setup a private or a public relay, please
+servers. If you are looking to set up a private or a public relay, please
 check the documentation for
 [relaysrv](https://github.com/syncthing/relaysrv), which also explains how
 to join the default public pool.
@@ -14,13 +14,10 @@ import (
 	"github.com/alecthomas/kong"
 	"github.com/kballard/go-shellquote"

-	"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
 	"github.com/syncthing/syncthing/lib/config"
 )

 type CLI struct {
-	cmdutil.DirOptions
-
 	GUIAddress string `name:"gui-address" env:"STGUIADDRESS"`
 	GUIAPIKey  string `name:"gui-apikey" env:"STGUIAPIKEY"`
@@ -37,11 +34,6 @@ type Context struct {
 }

 func (cli CLI) AfterApply(kongCtx *kong.Context) error {
-	err := cmdutil.SetConfigDataLocationsFromFlags(cli.HomeDir, cli.ConfDir, cli.DataDir)
-	if err != nil {
-		return fmt.Errorf("command line options: %w", err)
-	}
-
 	clientFactory := &apiClientFactory{
 		cfg: config.GUIConfiguration{
 			RawAddress: cli.GUIAddress,
@@ -1,14 +0,0 @@
-// Copyright (C) 2021 The Syncthing Authors.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at https://mozilla.org/MPL/2.0/.
-
-package cmdutil
-
-// DirOptions are reused among several subcommands
-type DirOptions struct {
-	ConfDir string `name:"config" short:"C" placeholder:"PATH" env:"STCONFDIR" help:"Set configuration directory (config and keys)"`
-	DataDir string `name:"data" short:"D" placeholder:"PATH" env:"STDATADIR" help:"Set data directory (database and logs)"`
-	HomeDir string `name:"home" short:"H" placeholder:"PATH" env:"STHOMEDIR" help:"Set configuration and data directory"`
-}
@@ -1,35 +0,0 @@
-// Copyright (C) 2014 The Syncthing Authors.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at https://mozilla.org/MPL/2.0/.
-
-package cmdutil
-
-import (
-	"errors"
-
-	"github.com/syncthing/syncthing/lib/locations"
-)
-
-func SetConfigDataLocationsFromFlags(homeDir, confDir, dataDir string) error {
-	homeSet := homeDir != ""
-	confSet := confDir != ""
-	dataSet := dataDir != ""
-	switch {
-	case dataSet != confSet:
-		return errors.New("either both or none of --config and --data must be given, use --home to set both at once")
-	case homeSet && dataSet:
-		return errors.New("--home must not be used together with --config and --data")
-	case homeSet:
-		confDir = homeDir
-		dataDir = homeDir
-		fallthrough
-	case dataSet:
-		if err := locations.SetBaseDir(locations.ConfigBaseDir, confDir); err != nil {
-			return err
-		}
-		return locations.SetBaseDir(locations.DataBaseDir, dataDir)
-	}
-	return nil
-}
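This helper does not disappear for good: the same switch reappears, unexported, in cmd/syncthing/main.go further down. Its contract is easy to miss in the fallthrough, so here is a self-contained sketch of just the validation rules, with the `locations.SetBaseDir` side effects left out:

```go
package main

import (
	"errors"
	"fmt"
)

// validateDirFlags mirrors the switch above: --config and --data must be
// given together, and --home is mutually exclusive with the pair.
func validateDirFlags(homeDir, confDir, dataDir string) error {
	homeSet := homeDir != ""
	confSet := confDir != ""
	dataSet := dataDir != ""
	switch {
	case dataSet != confSet:
		return errors.New("either both or none of --config and --data must be given, use --home to set both at once")
	case homeSet && dataSet:
		return errors.New("--home must not be used together with --config and --data")
	}
	return nil
}

func main() {
	fmt.Println(validateDirFlags("/h", "", ""))     // <nil>: --home alone sets both dirs
	fmt.Println(validateDirFlags("", "/c", "/d"))   // <nil>: the pair is allowed
	fmt.Println(validateDirFlags("", "/c", ""))     // error: --config without --data
	fmt.Println(validateDirFlags("/h", "/c", "/d")) // error: --home plus the pair
}
```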
@@ -11,11 +11,9 @@ import (
 	"bufio"
 	"context"
 	"crypto/tls"
-	"errors"
 	"fmt"
 	"os"

-	"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
@@ -26,7 +24,6 @@ import (
 )

 type CLI struct {
-	cmdutil.DirOptions
 	GUIUser         string `placeholder:"STRING" help:"Specify new GUI authentication user name"`
 	GUIPassword     string `placeholder:"STRING" help:"Specify new GUI authentication password (use - to read from standard input)"`
 	NoDefaultFolder bool   `help:"Don't create the \"default\" folder on first startup" env:"STNODEFAULTFOLDER"`
@@ -34,16 +31,6 @@ type CLI struct {
 }

 func (c *CLI) Run(l logger.Logger) error {
-	if c.HomeDir != "" {
-		if c.ConfDir != "" {
-			return errors.New("--home must not be used together with --config")
-		}
-		c.ConfDir = c.HomeDir
-	}
-	if c.ConfDir == "" {
-		c.ConfDir = locations.GetBaseDir(locations.ConfigBaseDir)
-	}
-
 	// Support reading the password from a pipe or similar
 	if c.GUIPassword == "-" {
 		reader := bufio.NewReader(os.Stdin)
@@ -54,7 +41,7 @@ func (c *CLI) Run(l logger.Logger) error {
 		c.GUIPassword = string(password)
 	}

-	if err := Generate(l, c.ConfDir, c.GUIUser, c.GUIPassword, c.NoDefaultFolder, c.NoPortProbing); err != nil {
+	if err := Generate(l, locations.GetBaseDir(locations.ConfigBaseDir), c.GUIUser, c.GUIPassword, c.NoDefaultFolder, c.NoPortProbing); err != nil {
 		return fmt.Errorf("failed to generate config and keys: %w", err)
 	}
 	return nil
@@ -35,7 +35,6 @@ import (
 	"github.com/willabides/kongplete"

 	"github.com/syncthing/syncthing/cmd/syncthing/cli"
-	"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
 	"github.com/syncthing/syncthing/cmd/syncthing/decrypt"
 	"github.com/syncthing/syncthing/cmd/syncthing/generate"
 	"github.com/syncthing/syncthing/internal/db"
@@ -128,9 +127,17 @@ var (
 // The entrypoint struct is the main entry point for the command line parser. The
 // commands and options here are top level commands to syncthing.
-var entrypoint struct {
-	Serve serveOptions `cmd:"" help:"Run Syncthing (default)" default:"withargs"`
-	CLI   cli.CLI      `cmd:"" help:"Command line interface for Syncthing"`
+// Cli is just a placeholder for the help text (see main).
+type CLI struct {
+	// The directory options are defined at top level and available for all
+	// subcommands. Their settings take effect on the `locations` package by
+	// way of the command line parser, so anything using `locations.Get` etc
+	// will be doing the right thing.
+	ConfDir string `name:"config" short:"C" placeholder:"PATH" env:"STCONFDIR" help:"Set configuration directory (config and keys)"`
+	DataDir string `name:"data" short:"D" placeholder:"PATH" env:"STDATADIR" help:"Set data directory (database and logs)"`
+	HomeDir string `name:"home" short:"H" placeholder:"PATH" env:"STHOMEDIR" help:"Set configuration and data directory"`
+
+	Serve serveCmd `cmd:"" help:"Run Syncthing (default)" default:"withargs"`
+	CLI   cli.CLI  `cmd:"" help:"Command line interface for Syncthing"`

 	Browser browserCmd  `cmd:"" help:"Open GUI in browser, then exit"`
 	Decrypt decrypt.CLI `cmd:"" help:"Decrypt or verify an encrypted folder"`
@@ -144,29 +151,35 @@ var entrypoint struct {
 	InstallCompletions kongplete.InstallCompletions `cmd:"" help:"Print commands to install shell completions"`
 }

-// serveOptions are the options for the `syncthing serve` command.
-type serveOptions struct {
-	cmdutil.DirOptions
+func (c *CLI) AfterApply() error {
+	// Executed after parsing command line options but before running actual
+	// subcommands
+	return setConfigDataLocationsFromFlags(c.HomeDir, c.ConfDir, c.DataDir)
+}
+
+// serveCmd are the options for the `syncthing serve` command.
+type serveCmd struct {
 	buildSpecificOptions

-	AllowNewerConfig      bool          `help:"Allow loading newer than current config version" env:"STALLOWNEWERCONFIG"`
-	Audit                 bool          `help:"Write events to audit file" env:"STAUDIT"`
-	AuditFile             string        `name:"auditfile" help:"Specify audit file (use \"-\" for stdout, \"--\" for stderr)" placeholder:"PATH" env:"STAUDITFILE"`
-	DBMaintenanceInterval time.Duration `help:"Database maintenance interval" default:"8h" env:"STDBMAINTINTERVAL"`
-	GUIAddress            string        `name:"gui-address" help:"Override GUI address (e.g. \"http://192.0.2.42:8443\")" placeholder:"URL" env:"STGUIADDRESS"`
-	GUIAPIKey             string        `name:"gui-apikey" help:"Override GUI API key" placeholder:"API-KEY" env:"STGUIAPIKEY"`
-	LogFile               string        `name:"logfile" help:"Log file name (see below)" default:"${logFile}" placeholder:"PATH" env:"STLOGFILE"`
-	LogFlags              int           `name:"logflags" help:"Select information in log line prefix (see below)" default:"${logFlags}" placeholder:"BITS" env:"STLOGFLAGS"`
-	LogMaxFiles           int           `name:"log-max-old-files" help:"Number of old files to keep (zero to keep only current)" default:"${logMaxFiles}" placeholder:"N" env:"STNUMLOGFILES"`
-	LogMaxSize            int           `help:"Maximum size of any file (zero to disable log rotation)" default:"${logMaxSize}" placeholder:"BYTES" env:"STLOGMAXSIZE"`
-	NoBrowser             bool          `help:"Do not start browser" env:"STNOBROWSER"`
-	NoDefaultFolder       bool          `help:"Don't create the \"default\" folder on first startup" env:"STNODEFAULTFOLDER"`
-	NoPortProbing         bool          `help:"Don't try to find free ports for GUI and listen addresses on first startup" env:"STNOPORTPROBING"`
-	NoRestart             bool          `help:"Do not restart Syncthing when exiting due to API/GUI command, upgrade, or crash" env:"STNORESTART"`
-	NoUpgrade             bool          `help:"Disable automatic upgrades" env:"STNOUPGRADE"`
-	Paused                bool          `help:"Start with all devices and folders paused" env:"STPAUSED"`
-	Unpaused              bool          `help:"Start with all devices and folders unpaused" env:"STUNPAUSED"`
-	Verbose               bool          `help:"Print verbose log output" env:"STVERBOSE"`
+	AllowNewerConfig          bool          `help:"Allow loading newer than current config version" env:"STALLOWNEWERCONFIG"`
+	Audit                     bool          `help:"Write events to audit file" env:"STAUDIT"`
+	AuditFile                 string        `name:"auditfile" help:"Specify audit file (use \"-\" for stdout, \"--\" for stderr)" placeholder:"PATH" env:"STAUDITFILE"`
+	DBMaintenanceInterval     time.Duration `help:"Database maintenance interval" default:"8h" env:"STDBMAINTENANCEINTERVAL"`
+	DBDeleteRetentionInterval time.Duration `help:"Database deleted item retention interval" default:"4320h" env:"STDBDELETERETENTIONINTERVAL"`
+	GUIAddress                string        `name:"gui-address" help:"Override GUI address (e.g. \"http://192.0.2.42:8443\")" placeholder:"URL" env:"STGUIADDRESS"`
+	GUIAPIKey                 string        `name:"gui-apikey" help:"Override GUI API key" placeholder:"API-KEY" env:"STGUIAPIKEY"`
+	LogFile                   string        `name:"logfile" help:"Log file name (see below)" default:"${logFile}" placeholder:"PATH" env:"STLOGFILE"`
+	LogFlags                  int           `name:"logflags" help:"Select information in log line prefix (see below)" default:"${logFlags}" placeholder:"BITS" env:"STLOGFLAGS"`
+	LogMaxFiles               int           `name:"log-max-old-files" help:"Number of old files to keep (zero to keep only current)" default:"${logMaxFiles}" placeholder:"N" env:"STLOGMAXOLDFILES"`
+	LogMaxSize                int           `help:"Maximum size of any file (zero to disable log rotation)" default:"${logMaxSize}" placeholder:"BYTES" env:"STLOGMAXSIZE"`
+	NoBrowser                 bool          `help:"Do not start browser" env:"STNOBROWSER"`
+	NoDefaultFolder           bool          `help:"Don't create the \"default\" folder on first startup" env:"STNODEFAULTFOLDER"`
+	NoPortProbing             bool          `help:"Don't try to find free ports for GUI and listen addresses on first startup" env:"STNOPORTPROBING"`
+	NoRestart                 bool          `help:"Do not restart Syncthing when exiting due to API/GUI command, upgrade, or crash" env:"STNORESTART"`
+	NoUpgrade                 bool          `help:"Disable automatic upgrades" env:"STNOUPGRADE"`
+	Paused                    bool          `help:"Start with all devices and folders paused" env:"STPAUSED"`
+	Unpaused                  bool          `help:"Start with all devices and folders unpaused" env:"STUNPAUSED"`
+	Verbose                   bool          `help:"Print verbose log output" env:"STVERBOSE"`

 	// Debug options below
 	DebugGUIAssetsDir string `help:"Directory to load GUI assets from" placeholder:"PATH" env:"STGUIASSETS"`
@@ -209,6 +222,7 @@ func defaultVars() kong.Vars {
 func main() {
 	// Create a parser with an overridden help function to print our extra
 	// help info.
+	var entrypoint CLI
 	parser, err := kong.New(
 		&entrypoint,
 		kong.ConfigureHelp(kong.HelpOptions{
@@ -242,46 +256,39 @@ func helpHandler(options kong.HelpOptions, ctx *kong.Context) error {
 	return nil
 }

-// serveOptions.Run() is the entrypoint for `syncthing serve`
-func (options serveOptions) Run() error {
-	l.SetFlags(options.LogFlags)
+// serveCmd.Run() is the entrypoint for `syncthing serve`
+func (c *serveCmd) Run() error {
+	l.SetFlags(c.LogFlags)

-	if options.GUIAddress != "" {
+	if c.GUIAddress != "" {
 		// The config picks this up from the environment.
-		os.Setenv("STGUIADDRESS", options.GUIAddress)
+		os.Setenv("STGUIADDRESS", c.GUIAddress)
 	}
-	if options.GUIAPIKey != "" {
+	if c.GUIAPIKey != "" {
 		// The config picks this up from the environment.
-		os.Setenv("STGUIAPIKEY", options.GUIAPIKey)
+		os.Setenv("STGUIAPIKEY", c.GUIAPIKey)
 	}

-	if options.HideConsole {
+	if c.HideConsole {
 		osutil.HideConsole()
 	}

-	// Not set as default above because the strings can be really long.
-	err := cmdutil.SetConfigDataLocationsFromFlags(options.HomeDir, options.ConfDir, options.DataDir)
-	if err != nil {
-		l.Warnln("Command line options:", err)
-		os.Exit(svcutil.ExitError.AsInt())
-	}
-
 	// Treat an explicitly empty log file name as no log file
-	if options.LogFile == "" {
-		options.LogFile = "-"
+	if c.LogFile == "" {
+		c.LogFile = "-"
 	}
-	if options.LogFile != "default" {
+	if c.LogFile != "default" {
 		// We must set this *after* expandLocations above.
-		if err := locations.Set(locations.LogFile, options.LogFile); err != nil {
+		if err := locations.Set(locations.LogFile, c.LogFile); err != nil {
 			l.Warnln("Setting log file path:", err)
 			os.Exit(svcutil.ExitError.AsInt())
 		}
 	}

-	if options.DebugGUIAssetsDir != "" {
+	if c.DebugGUIAssetsDir != "" {
 		// The asset dir is blank if STGUIASSETS wasn't set, in which case we
 		// should look for extra assets in the default place.
-		if err := locations.Set(locations.GUIAssets, options.DebugGUIAssetsDir); err != nil {
+		if err := locations.Set(locations.GUIAssets, c.DebugGUIAssetsDir); err != nil {
 			l.Warnln("Setting GUI assets path:", err)
 			os.Exit(svcutil.ExitError.AsInt())
 		}
@@ -293,10 +300,10 @@ func (options serveOptions) Run() error {
 		os.Exit(svcutil.ExitError.AsInt())
 	}

-	if options.InternalInnerProcess {
-		syncthingMain(options)
+	if c.InternalInnerProcess {
+		c.syncthingMain()
 	} else {
-		monitorMain(options)
+		c.monitorMain()
 	}
 	return nil
 }
@@ -405,14 +412,14 @@ func upgradeViaRest() error {
 	return err
 }

-func syncthingMain(options serveOptions) {
-	if options.DebugProfileBlock {
+func (c *serveCmd) syncthingMain() {
+	if c.DebugProfileBlock {
 		startBlockProfiler()
 	}
-	if options.DebugProfileHeap {
+	if c.DebugProfileHeap {
 		startHeapProfiler()
 	}
-	if options.DebugPerfStats {
+	if c.DebugPerfStats {
 		startPerfStats()
 	}
@@ -457,7 +464,7 @@ func syncthingMain(options serveOptions) {
 	evLogger := events.NewLogger()
 	earlyService.Add(evLogger)

-	cfgWrapper, err := syncthing.LoadConfigAtStartup(locations.Get(locations.ConfigFile), cert, evLogger, options.AllowNewerConfig, options.NoDefaultFolder, options.NoPortProbing)
+	cfgWrapper, err := syncthing.LoadConfigAtStartup(locations.Get(locations.ConfigFile), cert, evLogger, c.AllowNewerConfig, c.NoDefaultFolder, c.NoPortProbing)
 	if err != nil {
 		l.Warnln("Failed to initialize config:", err)
 		os.Exit(svcutil.ExitError.AsInt())
@@ -468,7 +475,7 @@ func syncthingMain(options serveOptions) {
 	// unless we are in a build where it's disabled or the STNOUPGRADE
 	// environment variable is set.

-	if build.IsCandidate && !upgrade.DisabledByCompilation && !options.NoUpgrade {
+	if build.IsCandidate && !upgrade.DisabledByCompilation && !c.NoUpgrade {
 		cfgWrapper.Modify(func(cfg *config.Configuration) {
 			l.Infoln("Automatic upgrade is always enabled for candidate releases.")
 			if cfg.Options.AutoUpgradeIntervalH == 0 || cfg.Options.AutoUpgradeIntervalH > 24 {
@@ -481,12 +488,12 @@ func syncthingMain(options serveOptions) {
 		})
 	}

-	if err := syncthing.TryMigrateDatabase(); err != nil {
+	if err := syncthing.TryMigrateDatabase(c.DBDeleteRetentionInterval); err != nil {
 		l.Warnln("Failed to migrate old-style database:", err)
 		os.Exit(1)
 	}

-	sdb, err := syncthing.OpenDatabase(locations.Get(locations.Database))
+	sdb, err := syncthing.OpenDatabase(locations.Get(locations.Database), c.DBDeleteRetentionInterval)
 	if err != nil {
 		l.Warnln("Error opening database:", err)
 		os.Exit(1)
@@ -495,7 +502,7 @@ func syncthingMain(options serveOptions) {
 	// Check if auto-upgrades is possible, and if yes, and it's enabled do an initial
 	// upgrade immediately. The auto-upgrade routine can only be started
 	// later after App is initialised.
-	autoUpgradePossible := autoUpgradePossible(options)
+	autoUpgradePossible := c.autoUpgradePossible()
 	if autoUpgradePossible && cfgWrapper.Options().AutoUpgradeEnabled() {
 		// try to do upgrade directly and log the error if relevant.
 		miscDB := db.NewMiscDB(sdb)
@@ -515,21 +522,21 @@ func syncthingMain(options serveOptions) {
 		}
 	}

-	if options.Unpaused {
+	if c.Unpaused {
 		setPauseState(cfgWrapper, false)
-	} else if options.Paused {
+	} else if c.Paused {
 		setPauseState(cfgWrapper, true)
 	}

 	appOpts := syncthing.Options{
-		NoUpgrade:             options.NoUpgrade,
-		ProfilerAddr:          options.DebugProfilerListen,
-		ResetDeltaIdxs:        options.DebugResetDeltaIdxs,
-		Verbose:               options.Verbose,
-		DBMaintenanceInterval: options.DBMaintenanceInterval,
+		NoUpgrade:             c.NoUpgrade,
+		ProfilerAddr:          c.DebugProfilerListen,
+		ResetDeltaIdxs:        c.DebugResetDeltaIdxs,
+		Verbose:               c.Verbose,
+		DBMaintenanceInterval: c.DBMaintenanceInterval,
 	}
-	if options.Audit {
-		appOpts.AuditWriter = auditWriter(options.AuditFile)
+	if c.Audit {
+		appOpts.AuditWriter = auditWriter(c.AuditFile)
 	}

 	app, err := syncthing.New(cfgWrapper, sdb, evLogger, cert, appOpts)
@@ -544,7 +551,7 @@ func syncthingMain(options serveOptions) {

 	setupSignalHandling(app)

-	if options.DebugProfileCPU {
+	if c.DebugProfileCPU {
 		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
 		if err != nil {
 			l.Warnln("Creating profile:", err)
@@ -562,7 +569,7 @@ func syncthingMain(options serveOptions) {

 	cleanConfigDirectory()

-	if cfgWrapper.Options().StartBrowser && !options.NoBrowser && !options.InternalRestarting {
+	if cfgWrapper.Options().StartBrowser && !c.NoBrowser && !c.InternalRestarting {
 		// Can potentially block if the utility we are invoking doesn't
 		// fork, and just execs, hence keep it in its own routine.
 		go func() { _ = openURL(cfgWrapper.GUI().URL()) }()
@@ -574,7 +581,7 @@ func syncthingMain(options serveOptions) {
 		l.Warnln("Syncthing stopped with error:", app.Error())
 	}

-	if options.DebugProfileCPU {
+	if c.DebugProfileCPU {
 		pprof.StopCPUProfile()
 	}
@@ -648,15 +655,11 @@ func auditWriter(auditFile string) io.Writer {
 	return fd
 }

-func resetDB() error {
-	return os.RemoveAll(locations.Get(locations.Database))
-}
-
-func autoUpgradePossible(options serveOptions) bool {
+func (c *serveCmd) autoUpgradePossible() bool {
 	if upgrade.DisabledByCompilation {
 		return false
 	}
-	if options.NoUpgrade {
+	if c.NoUpgrade {
 		l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
 		return false
 	}
@@ -921,10 +924,33 @@ type debugCmd struct {
 type resetDatabaseCmd struct{}

 func (resetDatabaseCmd) Run() error {
-	if err := resetDB(); err != nil {
+	l.Infoln("Removing database in", locations.Get(locations.Database))
+	if err := os.RemoveAll(locations.Get(locations.Database)); err != nil {
 		l.Warnln("Resetting database:", err)
 		os.Exit(svcutil.ExitError.AsInt())
 	}
 	l.Infoln("Successfully reset database - it will be rebuilt after next start.")
 	return nil
 }
+
+func setConfigDataLocationsFromFlags(homeDir, confDir, dataDir string) error {
+	homeSet := homeDir != ""
+	confSet := confDir != ""
+	dataSet := dataDir != ""
+	switch {
+	case dataSet != confSet:
+		return errors.New("either both or none of --config and --data must be given, use --home to set both at once")
+	case homeSet && dataSet:
+		return errors.New("--home must not be used together with --config and --data")
+	case homeSet:
+		confDir = homeDir
+		dataDir = homeDir
+		fallthrough
+	case dataSet:
+		if err := locations.SetBaseDir(locations.ConfigBaseDir, confDir); err != nil {
+			return err
+		}
+		return locations.SetBaseDir(locations.DataBaseDir, dataDir)
+	}
+	return nil
+}
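The net effect of the main.go changes: the directory flags move from a shared `cmdutil.DirOptions` embed on each subcommand to the root `CLI` struct, and are applied exactly once in a Kong `AfterApply` hook rather than at the top of every `Run`. A minimal, self-contained sketch of that hook ordering, with illustrative names rather than the actual Syncthing types:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

type rootCLI struct {
	// Root-level flag, visible to all subcommands.
	HomeDir string `name:"home" short:"H" help:"Set configuration and data directory"`

	Serve serveCmd `cmd:"" default:"1" help:"Run the service (default)"`
}

// AfterApply runs after flag parsing and before the subcommand's Run, so
// this is the single place to translate flags into global state (in the
// real code: setConfigDataLocationsFromFlags feeding the locations package).
func (c *rootCLI) AfterApply() error {
	fmt.Println("configuring locations, home =", c.HomeDir)
	return nil
}

type serveCmd struct{}

func (serveCmd) Run() error {
	fmt.Println("serve starts with locations already configured")
	return nil
}

func main() {
	ctx := kong.Parse(&rootCLI{})
	ctx.FatalIfErrorf(ctx.Run())
}
```

Running the sketch with `--home /tmp/x` prints the AfterApply line first, which is why the refactored subcommands can assume `locations.Get` already returns the right paths.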
@@ -43,7 +43,7 @@ const (
 	panicUploadNoticeWait = 10 * time.Second
 )

-func monitorMain(options serveOptions) {
+func (c *serveCmd) monitorMain() {
 	l.SetPrefix("[monitor] ")

 	var dst io.Writer = os.Stdout
@@ -58,13 +58,13 @@ func monitorMain(options serveOptions) {
 		open := func(name string) (io.WriteCloser, error) {
 			return newAutoclosedFile(name, logFileAutoCloseDelay, logFileMaxOpenTime)
 		}
-		if options.LogMaxSize > 0 {
-			fileDst, err = newRotatedFile(logFile, open, int64(options.LogMaxSize), options.LogMaxFiles)
+		if c.LogMaxSize > 0 {
+			fileDst, err = newRotatedFile(logFile, open, int64(c.LogMaxSize), c.LogMaxFiles)
 		} else {
 			fileDst, err = open(logFile)
 		}
 		if err != nil {
-			l.Warnln("Failed to setup logging to file, proceeding with logging to stdout only:", err)
+			l.Warnln("Failed to set up logging to file, proceeding with logging to stdout only:", err)
 		} else {
 			if build.IsWindows {
 				// Translate line breaks to Windows standard
@@ -178,7 +178,7 @@ func monitorMain(options serveOptions) {

 	if exiterr, ok := err.(*exec.ExitError); ok {
 		exitCode := exiterr.ExitCode()
-		if stopped || options.NoRestart {
+		if stopped || c.NoRestart {
 			os.Exit(exitCode)
 		}
 		if exitCode == svcutil.ExitUpgrade.AsInt() {
@@ -192,7 +192,7 @@ func monitorMain(options serveOptions) {
 		}
 	}

-	if options.NoRestart {
+	if c.NoRestart {
 		os.Exit(svcutil.ExitError.AsInt())
 	}
@@ -16,7 +16,9 @@ import (
 	"syscall"
 	"time"

+	"github.com/syncthing/syncthing/lib/locations"
 	"github.com/syncthing/syncthing/lib/protocol"
+	"golang.org/x/exp/constraints"
 )

 func startPerfStats() {
@@ -29,37 +31,68 @@ func savePerfStats(file string) {
 		panic(err)
 	}

-	var prevUsage int64
-	var prevTime int64
-	var rusage syscall.Rusage
-	var memstats runtime.MemStats
+	var prevTime time.Time
+	var curRus, prevRus syscall.Rusage
+	var curMem, prevMem runtime.MemStats
 	var prevIn, prevOut int64

 	t0 := time.Now()
+	syscall.Getrusage(syscall.RUSAGE_SELF, &prevRus)
+	runtime.ReadMemStats(&prevMem)
+
+	fmt.Fprintf(fd, "TIME_S\tCPU_S\tHEAP_KIB\tRSS_KIB\tNETIN_KBPS\tNETOUT_KBPS\tDBSIZE_KIB\n")
+
 	for t := range time.NewTicker(250 * time.Millisecond).C {
-		if err := syscall.Getrusage(syscall.RUSAGE_SELF, &rusage); err != nil {
-			continue
-		}
-
-		curTime := time.Now().UnixNano()
-		timeDiff := curTime - prevTime
-		curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
-		usageDiff := curUsage - prevUsage
-		cpuUsagePercent := 100 * float64(usageDiff) / float64(timeDiff)
-		prevTime = curTime
-		prevUsage = curUsage
+		syscall.Getrusage(syscall.RUSAGE_SELF, &curRus)
+		runtime.ReadMemStats(&curMem)
 		in, out := protocol.TotalInOut()
-		var inRate, outRate float64
-		if timeDiff > 0 {
-			inRate = float64(in-prevIn) / (float64(timeDiff) / 1e9)   // bytes per second
-			outRate = float64(out-prevOut) / (float64(timeDiff) / 1e9) // bytes per second
-		}
+		timeDiff := t.Sub(prevTime)

-		prevIn, prevOut = in, out
-
-		runtime.ReadMemStats(&memstats)
-
-		startms := int(t.Sub(t0).Seconds() * 1000)
-
-		fmt.Fprintf(fd, "%d\t%f\t%d\t%d\t%.0f\t%.0f\n", startms, cpuUsagePercent, memstats.Alloc, memstats.Sys-memstats.HeapReleased, inRate, outRate)
+		fmt.Fprintf(fd, "%.03f\t%f\t%d\t%d\t%.0f\t%.0f\t%d\n",
+			t.Sub(t0).Seconds(),
+			rate(cpusec(&prevRus), cpusec(&curRus), timeDiff, 1),
+			(curMem.Sys-curMem.HeapReleased)/1024,
+			curRus.Maxrss/1024,
+			rate(prevIn, in, timeDiff, 1e3),
+			rate(prevOut, out, timeDiff, 1e3),
+			dirsize(locations.Get(locations.Database))/1024,
+		)
+
+		prevTime = t
+		prevRus = curRus
+		prevMem = curMem
+		prevIn, prevOut = in, out
 	}
 }
+
+func cpusec(r *syscall.Rusage) float64 {
+	return float64(r.Utime.Nano()+r.Stime.Nano()) / float64(time.Second)
+}
+
+type number interface {
+	constraints.Float | constraints.Integer
+}
+
+func rate[T number](prev, cur T, d time.Duration, div float64) float64 {
+	diff := cur - prev
+	rate := float64(diff) / d.Seconds() / div
+	return rate
+}
+
+func dirsize(location string) int64 {
+	entries, err := os.ReadDir(location)
+	if err != nil {
+		return 0
+	}
+
+	var size int64
+	for _, entry := range entries {
+		fi, err := entry.Info()
+		if err != nil {
+			continue
+		}
+		size += fi.Size()
+	}
+
+	return size
+}
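The rewritten sampler delegates the arithmetic to two helpers: `cpusec` collapses a `syscall.Rusage` into CPU-seconds, and the generic `rate` turns two samples plus an elapsed interval into a per-second figure scaled by `div`. A worked example of the math, standalone, with `rate` copied verbatim from the diff:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/exp/constraints"
)

type number interface {
	constraints.Float | constraints.Integer
}

// rate is the helper from the diff above: (cur-prev) per second, scaled by div.
func rate[T number](prev, cur T, d time.Duration, div float64) float64 {
	return float64(cur-prev) / d.Seconds() / div
}

func main() {
	d := 250 * time.Millisecond // the sampler's tick interval

	// 2.5 MB received in one tick -> 10000 KB/s (the NETIN_KBPS column).
	fmt.Println(rate(int64(0), int64(2_500_000), d, 1e3))

	// 0.25 CPU-seconds consumed in 0.25 s of wall time -> 1.0, i.e. one
	// fully busy core (the CPU_S column, fed by cpusec samples).
	fmt.Println(rate(1.25, 1.50, d, 1))
}
```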
go.mod, 26 lines changed

@@ -4,7 +4,7 @@ go 1.23.0

 require (
 	github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f
-	github.com/alecthomas/kong v1.9.0
+	github.com/alecthomas/kong v1.10.0
 	github.com/aws/aws-sdk-go v1.55.6
 	github.com/calmh/incontainer v1.0.0
 	github.com/calmh/xdr v1.2.0
@@ -20,8 +20,8 @@ require (
 	github.com/jmoiron/sqlx v1.4.0
 	github.com/julienschmidt/httprouter v1.3.0
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
-	github.com/maruel/panicparse/v2 v2.4.0
-	github.com/mattn/go-sqlite3 v1.14.24
+	github.com/maruel/panicparse/v2 v2.5.0
+	github.com/mattn/go-sqlite3 v1.14.27
 	github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2
 	github.com/maxmind/geoipupdate/v6 v6.1.0
 	github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75
@@ -29,10 +29,10 @@ require (
 	github.com/pierrec/lz4/v4 v4.1.22
 	github.com/prometheus/client_golang v1.21.1
 	github.com/puzpuzpuz/xsync/v3 v3.5.1
-	github.com/quic-go/quic-go v0.50.0
+	github.com/quic-go/quic-go v0.50.1
 	github.com/rabbitmq/amqp091-go v1.10.0
-	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
-	github.com/shirou/gopsutil/v4 v4.25.2
+	github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
+	github.com/shirou/gopsutil/v4 v4.25.3
 	github.com/syncthing/notify v0.0.0-20250207082249-f0fa8f99c2bc
 	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
 	github.com/thejerf/suture/v4 v4.0.6
@@ -41,13 +41,13 @@ require (
 	github.com/willabides/kongplete v0.4.0
 	go.uber.org/automaxprocs v1.6.0
 	golang.org/x/crypto v0.36.0
-	golang.org/x/net v0.37.0
+	golang.org/x/net v0.38.0
 	golang.org/x/sys v0.31.0
 	golang.org/x/text v0.23.0
 	golang.org/x/time v0.11.0
 	golang.org/x/tools v0.31.0
-	google.golang.org/protobuf v1.36.5
-	modernc.org/sqlite v1.36.0
+	google.golang.org/protobuf v1.36.6
+	modernc.org/sqlite v1.37.0
 	sigs.k8s.io/yaml v1.4.0
 )

@@ -66,7 +66,7 @@ require (
 	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect
+	github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -95,13 +95,13 @@ require (
 	github.com/tklauser/numcpus v0.9.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	go.uber.org/mock v0.5.0 // indirect
-	golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
+	golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
 	golang.org/x/mod v0.24.0 // indirect
 	golang.org/x/sync v0.12.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	modernc.org/libc v1.61.13 // indirect
+	modernc.org/libc v1.62.1 // indirect
 	modernc.org/mathutil v1.7.1 // indirect
-	modernc.org/memory v1.8.2 // indirect
+	modernc.org/memory v1.9.1 // indirect
 )

 // https://github.com/gobwas/glob/pull/55
go.sum, 64 lines changed

@@ -7,8 +7,8 @@ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzS
 github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
 github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
-github.com/alecthomas/kong v1.9.0 h1:Wgg0ll5Ys7xDnpgYBuBn/wPeLGAuK0NvYmEcisJgrIs=
-github.com/alecthomas/kong v1.9.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
+github.com/alecthomas/kong v1.10.0 h1:8K4rGDpT7Iu+jEXCIJUeKqvpwZHbsFRoebLbnzlmrpw=
+github.com/alecthomas/kong v1.10.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
 github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
 github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
@@ -89,8 +89,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20241009165004-a3522334989c h1:NDovD0SMpBYXlE1zJmS1q55vWB/fUQBcPAqAboZSccA=
-github.com/google/pprof v0.0.0-20241009165004-a3522334989c/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
+github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
@@ -148,13 +148,13 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
 github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
-github.com/maruel/panicparse/v2 v2.4.0 h1:yQKMIbQ0DKfinzVkTkcUzQyQ60UCiNnYfR7PWwTs2VI=
-github.com/maruel/panicparse/v2 v2.4.0/go.mod h1:nOY2OKe8csO3F3SA5+hsxot05JLgukrF54B9x88fVp4=
+github.com/maruel/panicparse/v2 v2.5.0 h1:yCtuS0FWjfd0RTYMXGpDvWcb0kINm8xJGu18/xMUh00=
+github.com/maruel/panicparse/v2 v2.5.0/go.mod h1:DA2fDiBk63bKfBf4CVZP9gb4fuvzdPbLDsSI873hweQ=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
-github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU=
+github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ=
 github.com/maxmind/geoipupdate/v6 v6.1.0 h1:sdtTHzzQNJlXF5+fd/EoPTucRHyMonYt/Cok8xzzfqA=
@@ -210,12 +210,12 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
 github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
-github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo=
-github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
+github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
+github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
 github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
 github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
-github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
-github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
+github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab h1:ZjX6I48eZSFetPb41dHudEyVr5v953N15TsNZXlkcWY=
@@ -226,8 +226,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
 github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
-github.com/shirou/gopsutil/v4 v4.25.2 h1:NMscG3l2CqtWFS86kj3vP7soOczqrQYIEhO/pMvvQkk=
-github.com/shirou/gopsutil/v4 v4.25.2/go.mod h1:34gBYJzyqCDT11b6bMHP0XCvWeU3J61XRT7a2EmCRTA=
+github.com/shirou/gopsutil/v4 v4.25.3 h1:SeA68lsu8gLggyMbmCn8cmp97V1TI9ld9sVzAUcKcKE=
+github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -281,8 +281,8 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v
 golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
 golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
-golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
-golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
+golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -309,8 +309,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
-golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
-golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -398,8 +398,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -415,26 +415,26 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0=
-modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
-modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo=
-modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo=
+modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
+modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
+modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
+modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
 modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
 modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
-modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw=
-modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
-modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8=
-modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E=
+modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
+modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
+modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
+modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
 modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
 modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
-modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI=
-modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU=
+modernc.org/memory v1.9.1 h1:V/Z1solwAVmMW1yttq3nDdZPJqV1rM05Ccq6KMSZ34g=
+modernc.org/memory v1.9.1/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
 modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
 modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
 modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
 modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
-modernc.org/sqlite v1.36.0 h1:EQXNRn4nIS+gfsKeUTymHIz1waxuv5BzU7558dHSfH8=
-modernc.org/sqlite v1.36.0/go.mod h1:7MPwH7Z6bREicF9ZVUR78P1IKuxfZ8mRIDHD0iD+8TU=
+modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
+modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
 modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
 modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
 modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
@@ -154,7 +154,7 @@
|
||||
"Failed Items": "العناصر الفاشلة",
|
||||
"Failed to load file versions.": "لم يُتَوَصَّل لنسخة الملف.",
|
||||
"Failed to load ignore patterns.": "فشل التَّوَصُّل إلى مُرَشِّحات التجاهل.",
|
||||
"Failed to setup, retrying": "فشل الإعداد، تجري المحاولة مرة أخرى",
|
||||
"Failed to set up, retrying": "فشل الإعداد، تجري المحاولة مرة أخرى",
|
||||
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "يُتوقع فشل الاتصال بخوادم IPv6، إذا لم يكن IPv6 متاحا.",
|
||||
"File Pull Order": "ترتيب استيراد الملفات",
|
||||
"File Versioning": "إصدارات الملف",
|
||||
|
||||
@@ -156,7 +156,7 @@
|
||||
"Failed Items": "Елементи с грешка",
|
||||
"Failed to load file versions.": "Грешка при зареждане на версии.",
|
||||
"Failed to load ignore patterns.": "Грешка при зареждане на шаблони за пренебрегване.",
|
||||
"Failed to setup, retrying": "Грешка при настройване, извършва се повторен опит",
|
||||
"Failed to set up, retrying": "Грешка при настройване, извършва се повторен опит",
|
||||
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Неуспешна връзка към сървъри по IPv6 може да се очаква ако няма свързаност по IPv6.",
|
||||
"File Pull Order": "Ред на изтегляне",
|
||||
"File Versioning": "Версии на файловете",
|
||||
|
||||
@@ -154,7 +154,7 @@
|
||||
"Failed Items": "Elements fallats",
|
||||
"Failed to load file versions.": "No s'han pogut carregar les versions dels fitxers.",
|
||||
"Failed to load ignore patterns.": "No s'han pogut carregar els patrons ignorats.",
|
||||
"Failed to setup, retrying": "No s'ha pogut configurar, s'està tornant a provar",
|
||||
"Failed to set up, retrying": "No s'ha pogut configurar, s'està tornant a provar",
|
||||
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "S'espera que no es pugui connectar als servidors IPv6 si no hi ha connectivitat IPv6.",
|
||||
"File Pull Order": "Ordre d'agafar fitxers",
|
||||
"File Versioning": "Versionat de Fitxers",
|
||||
|
||||
@@ -150,7 +150,7 @@
|
||||
"Failed Items": "Objectes fallits",
|
||||
"Failed to load file versions.": "No s'han pogut carregar les versions dels fitxers.",
|
||||
"Failed to load ignore patterns.": "No s'han pogut carregar els patrons ignorats.",
"Failed to setup, retrying": "Errada en la configuració, reintentant",
"Failed to set up, retrying": "Errada en la configuració, reintentant",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "És possible que es produïsca una fallada al connectar als servidors IPv6 si no hi ha connectivitat IPv6.",
"File Pull Order": "Ordre de fitxers del pull",
"File Versioning": "Versionat de fitxer",

@@ -154,7 +154,7 @@
"Failed Items": "Nezdařené položky",
"Failed to load file versions.": "Nepodařilo se nahrát verze souboru.",
"Failed to load ignore patterns.": "Načtení vzorů ignorovaného se nezdařilo.",
"Failed to setup, retrying": "Nastavování se nezdařilo, zkouší se znovu",
"Failed to set up, retrying": "Nastavování se nezdařilo, zkouší se znovu",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Je v pořádku, když se připojení k IPv6 serverům nezdaří, pokud není k dispozici IPv6 konektivita.",
"File Pull Order": "Pořadí stahování souborů",
"File Versioning": "Správa verzí souborů",

@@ -154,7 +154,7 @@
"Failed Items": "Mislykkede filer",
"Failed to load file versions.": "Fil versioner kunne ikke indlæses.",
"Failed to load ignore patterns.": "Ignorerings-mønstre kunne ikke indlæses.",
"Failed to setup, retrying": "Opsætning mislykkedes; prøver igen",
"Failed to set up, retrying": "Opsætning mislykkedes; prøver igen",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Fejl i forbindelse med opkobling til IPv6-servere skal forventes, hvis der ikke er IPv6-forbindelse.",
"File Pull Order": "Hentningsrækkefølge for filer",
"File Versioning": "Filversionering",

@@ -156,7 +156,7 @@
"Failed Items": "Fehlgeschlagene Elemente",
"Failed to load file versions.": "Fehler beim Laden der Dateiversionen.",
"Failed to load ignore patterns.": "Fehler beim Laden der Ignoriermuster.",
"Failed to setup, retrying": "Fehler beim Einrichten, erneuter Versuch",
"Failed to set up, retrying": "Fehler beim Einrichten, erneuter Versuch",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Ein Verbindungsfehler zu IPv6-Servern ist zu erwarten, wenn es keine IPv6-Konnektivität gibt.",
"File Pull Order": "Dateiübertragungsreihenfolge",
"File Versioning": "Dateiversionierung",

@@ -154,7 +154,7 @@
"Failed Items": "Αρχεία που απέτυχαν",
"Failed to load file versions.": "Η φόρτωση των εκδόσεων αρχείων απέτυχε.",
"Failed to load ignore patterns.": "Αποτυχία φόρτωσης μοτίβων παράβλεψης.",
"Failed to setup, retrying": "Αποτυχία ενεργοποίησης, γίνεται νέα προσπάθεια",
"Failed to set up, retrying": "Αποτυχία ενεργοποίησης, γίνεται νέα προσπάθεια",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Είναι φυσιολογική η αποτυχία σύνδεσης σε εξυπηρετητές IPv6 όταν δεν υπάρχει συνδεσιμότητα IPv6.",
"File Pull Order": "Σειρά με την οποία θα κατεβαίνουν τα αρχεία",
"File Versioning": "Τήρηση εκδόσεων αρχείων",

@@ -150,7 +150,7 @@
"Failed Items": "Failed Items",
"Failed to load file versions.": "Failed to load file versions.",
"Failed to load ignore patterns.": "Failed to load ignore patterns.",
"Failed to setup, retrying": "Failed to setup, retrying",
"Failed to set up, retrying": "Failed to set up, retrying",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.",
"File Pull Order": "File Pull Order",
"File Versioning": "File Versioning",

@@ -154,7 +154,7 @@
"Failed Items": "Failed Items",
"Failed to load file versions.": "Failed to load file versions.",
"Failed to load ignore patterns.": "Failed to load ignore patterns.",
"Failed to setup, retrying": "Failed to setup, retrying",
"Failed to set up, retrying": "Failed to set up, retrying",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.",
"File Pull Order": "File Pull Order",
"File Versioning": "File Versioning",

@@ -156,7 +156,7 @@
"Failed Items": "Failed Items",
"Failed to load file versions.": "Failed to load file versions.",
"Failed to load ignore patterns.": "Failed to load ignore patterns.",
"Failed to setup, retrying": "Failed to setup, retrying",
"Failed to set up, retrying": "Failed to set up, retrying",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.",
"File Pull Order": "File Pull Order",
"File Versioning": "File Versioning",

@@ -106,7 +106,7 @@
"Error": "Eraro",
"External File Versioning": "Ekstera Versionado de Dosiero",
"Failed Items": "Malsukcesaj Eroj",
"Failed to setup, retrying": "Malsukcesis agordi, provante denove",
"Failed to set up, retrying": "Malsukcesis agordi, provante denove",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Malsukceso por konekti al IPv6 serviloj atendante se ekzistas neniu IPv6 konektebleco.",
"File Pull Order": "Ordo por Tiri Dosieron",
"File Versioning": "Versionado de Dosieroj",

@@ -154,7 +154,7 @@
"Failed Items": "Elementos fallidos",
"Failed to load file versions.": "Error al cargar las versiones de los archivos.",
"Failed to load ignore patterns.": "No se pudieron cargar los patrones de ignorar.",
"Failed to setup, retrying": "Fallo en la configuración, reintentando",
"Failed to set up, retrying": "Fallo en la configuración, reintentando",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Se espera un fallo al conectar a los servidores IPv6 si no hay conectividad IPv6.",
"File Pull Order": "Orden de Obtención de los Archivos",
"File Versioning": "Versionado de ficheros",

@@ -130,7 +130,7 @@
"External File Versioning": "Fitxategi bertsioen kanpoko kudeaketa",
"Failed Items": "Huts egin duten fitxategiak",
"Failed to load ignore patterns.": "Huts egin du baztertze ereduak kargatzean.",
"Failed to setup, retrying": "Konfigurazioan huts egitea, berriro saiatuz",
"Failed to set up, retrying": "Konfigurazioan huts egitea, berriro saiatuz",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "IPv6 zerbitzariei buruzko konexioak huts eginen du, IPv6 konektibitaterik ez bada",
"File Pull Order": "Fitxategiak berreskuratzeko ordena",
"File Versioning": "Fitxategiak zaintzeko metodoa",

@@ -122,7 +122,7 @@
"External": "Ulkoinen",
"External File Versioning": "Ulkoinen tiedostoversionti",
"Failed Items": "Epäonnistuneet kohteet",
"Failed to setup, retrying": "Käyttöönotto epäonnistui, Yritetään uudelleen",
"Failed to set up, retrying": "Käyttöönotto epäonnistui, Yritetään uudelleen",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Yhteys IPv6-palvelimiin todennäköisesti epäonnistuu, koska IPv6-yhteyksiä ei ole.",
"File Pull Order": "Tiedostojen noutojärjestys",
"File Versioning": "Tiedostoversiointi",

@@ -154,7 +154,7 @@
"Failed Items": "Mga Nabigong Item",
"Failed to load file versions.": "Nabigong i-load ang mga bersyon ng file.",
"Failed to load ignore patterns.": "Nabigong i-load ang mga ignore pattern.",
"Failed to setup, retrying": "Nabigong i-set up, sinusubukan muli",
"Failed to set up, retrying": "Nabigong i-set up, sinusubukan muli",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Inaasahan ang pagbigo sa pagkonekta sa mga IPv6 na server kapag walang konektibidad sa IPv6.",
"File Pull Order": "Order ng Pagkuha ng File",
"File Versioning": "File Versioning",

@@ -156,7 +156,7 @@
"Failed Items": "Éléments en échec",
"Failed to load file versions.": "Échec de chargement des versions de fichiers.",
"Failed to load ignore patterns.": "Échec du chargement des masques d'exclusions.",
"Failed to setup, retrying": "Échec, nouvel essai",
"Failed to set up, retrying": "Échec, nouvel essai",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "La connexion aux serveurs en IPv6 va échouer s'il n'y a pas de connectivité IPv6.",
"File Pull Order": "Ordre de récupération des fichiers",
"File Versioning": "Préservation des fichiers",

@@ -146,7 +146,7 @@
"Error": "Flater",
"External File Versioning": "Ekstern ferzjebehear foar triemen",
"Failed Items": "Mislearre items",
"Failed to setup, retrying": "Ynskeakeljen mislearre, wurd no opnij besocht",
"Failed to set up, retrying": "Ynskeakeljen mislearre, wurd no opnij besocht",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Mislearjen fan it ferbinen mei IPv6-tsjinners wurd ferwachte as der gjin stipe foar IPv6-ferbinings is.",
"File Pull Order": "Triemlûkfolchoarder",
"File Versioning": "Triemferzjebehear",

@@ -27,6 +27,7 @@
"Allowed Networks": "Líonraí Ceadaithe",
"Alphabetic": "Aibítreach",
"Altered by ignoring deletes.": "Athraithe trí neamhaird a dhéanamh ar scriosadh.",
"Always turned on when the folder type is \"{%foldertype%}\".": "Cuirtear ar siúl i gcónaí é nuair is é \"{{foldertype}}\" an cineál fillteáin.",
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Láimhseálann ordú seachtrach an leagan. Caithfidh sé an comhad a bhaint den fhillteán comhroinnte. Má tá spásanna sa chosán chuig an bhfeidhmchlár, ba chóir é a lua.",
"Anonymous Usage Reporting": "Tuairisciú Úsáide Gan Ainm",
"Anonymous usage report format has changed. Would you like to move to the new format?": "Tá athrú tagtha ar fhormáid na tuarascála úsáide gan ainm. Ar mhaith leat bogadh go dtí an fhormáid nua?",

@@ -52,6 +53,7 @@
"Body:": "Comhlacht:",
"Bugs": "Fabhtanna",
"Cancel": "Cuir ar ceal",
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "Ní féidir é a chumasú nuair is é \"{{foldertype}}\" an cineál fillteáin.",
"Changelog": "ChangelogName",
"Clean out after": "Glan amach tar éis",
"Cleaning Versions": "Leaganacha Glantacháin",

@@ -154,7 +156,7 @@
"Failed Items": "Míreanna Teipthe",
"Failed to load file versions.": "Theip ar luchtú leaganacha comhaid.",
"Failed to load ignore patterns.": "Theip ar phatrúin neamhairde a luchtú.",
"Failed to setup, retrying": "Theip ar thus, ag triail arís",
"Failed to set up, retrying": "Theip ar thus, ag triail arís",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Táthar ag súil le mainneachtain ceangal le freastalaithe IPv6 mura bhfuil nascacht IPv6 ann.",
"File Pull Order": "Ordú Tarraingthe Comhad",
"File Versioning": "Leagan Comhaid",

@@ -153,7 +153,7 @@
"Failed Items": "Elmentos fallados",
"Failed to load file versions.": "Fallou a carga das versións dos ficheiros.",
"Failed to load ignore patterns.": "Fallou a carga de patróns ignorados.",
"Failed to setup, retrying": "Fallou a configuración, reintentando",
"Failed to set up, retrying": "Fallou a configuración, reintentando",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "É de agardar o fallo ao conectar con servidores IPv6 se non hai conexión por IPv6.",
"File Pull Order": "Orde de Obtención de Arquivos",
"File Versioning": "Versionado de Ficheiros",

@@ -154,7 +154,7 @@
"Failed Items": "פריטים שנכשלו",
"Failed to load file versions.": "טעינת גרסאות קבצים נכשלה.",
"Failed to load ignore patterns.": "טעינת דפוסי התעלמות נכשלה.",
"Failed to setup, retrying": "ההגדרה נכשלה, מנסה שוב",
"Failed to set up, retrying": "ההגדרה נכשלה, מנסה שוב",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "צפוי כשל בהתחברות לשרתי IPv6 אם אין קישוריות IPv6.",
"File Pull Order": "סדר משיכת קבצים",
"File Versioning": "ניהול גרסאות קבצים",

@@ -154,7 +154,7 @@
"Failed Items": "विफल वस्तुएं",
"Failed to load file versions.": "फाइल संस्करण लोड करने में विफल।",
"Failed to load ignore patterns.": "नजरअंदाज प्रतिमान लोड करने में विफल।",
"Failed to setup, retrying": "स्थापना करने में विफल, पुनः प्रयास किया जा रहा है",
"Failed to set up, retrying": "स्थापना करने में विफल, पुनः प्रयास किया जा रहा है",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "यदि IPv6 संयोजकता नहीं है तो IPv6 सर्वर से जुड़ने में विफलता अपेक्षित है।",
"File Pull Order": "फाइल खींचने का क्रम",
"File Versioning": "फाइल संस्करणीकरण",

@@ -146,7 +146,7 @@
"Failed Items": "Hibás elemek",
"Failed to load file versions.": "Nem sikerült betölteni a fájlverziókat.",
"Failed to load ignore patterns.": "Nem sikerült betölteni a mellőzési mintákat.",
"Failed to setup, retrying": "Telepítés nem sikerült, újrapróbálkozás",
"Failed to set up, retrying": "Telepítés nem sikerült, újrapróbálkozás",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Mivel nincs IPv6 kapcsolat, ezért várhatóan nem fog sikerülni IPv6-os szerverekhez csatlakozni.",
"File Pull Order": "Fájlküldési sorrend",
"File Versioning": "Fájlverzió-követés",

@@ -146,7 +146,7 @@
"Failed Items": "Berkas yang gagal",
"Failed to load file versions.": "Gagal memuat versi berkas.",
"Failed to load ignore patterns.": "Gagal memuat pola pengabaian.",
"Failed to setup, retrying": "Gagal menyiapkan, mengulang",
"Failed to set up, retrying": "Gagal menyiapkan, mengulang",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Gagal untuk menyambung ke server IPv6 itu disangka apabila tidak ada konektivitas IPv6.",
"File Pull Order": "Urutan Penarikan Berkas",
"File Versioning": "Pemversian Berkas",

@@ -156,7 +156,7 @@
"Failed Items": "Elementi Errati",
"Failed to load file versions.": "Impossibile caricare le versioni dei file.",
"Failed to load ignore patterns.": "Impossibile caricare gli schemi di esclusione.",
"Failed to setup, retrying": "Configurazione fallita, riprovo",
"Failed to set up, retrying": "Configurazione fallita, riprovo",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "La connessione a server IPv6 fallisce se non c'è connettività IPv6.",
"File Pull Order": "Ordine Prelievo File",
"File Versioning": "Controllo Versione File",

@@ -27,6 +27,7 @@
"Allowed Networks": "허가된 망",
"Alphabetic": "가나다순",
"Altered by ignoring deletes.": "삭제 항목 무시로 변경됨",
"Always turned on when the folder type is \"{%foldertype%}\".": "{{foldertype}} 폴더 유형일 때는 항상 활성화되어 있습니다.",
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "외부 명령이 파일 버전을 관리합니다. 공유 폴더에서 파일을 삭제해야 합니다. 응용 프로그램의 경로에 공백이 있으면 따옴표로 묶어야 합니다.",
"Anonymous Usage Reporting": "익명 사용 보고",
"Anonymous usage report format has changed. Would you like to move to the new format?": "익명 사용 보고의 형식이 변경되었습니다. 새 형식으로 설정을 변경하시겠습니까?",

@@ -52,6 +53,7 @@
"Body:": "내용:",
"Bugs": "버그",
"Cancel": "취소",
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "{{foldertype}} 폴더 유형일 때는 활성화할 수 없습니다.",
"Changelog": "변경 기록",
"Clean out after": "보관 기간",
"Cleaning Versions": "버전 정리",

@@ -154,7 +156,7 @@
"Failed Items": "실패 항목",
"Failed to load file versions.": "파일 버전을 불러오기에 실패했습니다.",
"Failed to load ignore patterns.": "무시 양식을 불러오기에 실패했습니다.",
"Failed to setup, retrying": "설정 적용 실패; 재시도 중",
"Failed to set up, retrying": "설정 적용 실패; 재시도 중",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "IPv6에 연결되어 있지 않을 때는 IPv6 서버에 접속하지 못하는 것이 정상입니다.",
"File Pull Order": "파일 수신 순서",
"File Versioning": "파일 버전 관리",

@@ -144,7 +144,7 @@
"Failed Items": "Nepavykę siuntimai",
"Failed to load file versions.": "Nepavyko įkelti failo versijų.",
"Failed to load ignore patterns.": "Nepavyko įkelti nepaisymo šablonų.",
"Failed to setup, retrying": "Nepavyko nustatyti, bandoma iš naujo",
"Failed to set up, retrying": "Nepavyko nustatyti, bandoma iš naujo",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Nesėkmė prisijungti prie IPv6 serverių yra tikėtina, jei nėra IPv6 ryšio.",
"File Pull Order": "Failų siuntimo tvarka",
"File Versioning": "Versijų valdymas",

@@ -154,7 +154,7 @@
"Failed Items": "Elementsynkronisering som har mislyktes",
"Failed to load file versions.": "Lasting av fil-versjoner feilet.",
"Failed to load ignore patterns.": "Lasting av ignorer mønstre feilet.",
"Failed to setup, retrying": "Klarte ikke å utføre oppsett, prøver igjen",
"Failed to set up, retrying": "Klarte ikke å utføre oppsett, prøver igjen",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Å ikke klare å koble til IPv6-tjenere er forventet hvis det ikke er noen IPv6-tilknytning.",
"File Pull Order": "Filenes henterekkefølge",
"File Versioning": "Versjonskontroll",

@@ -154,7 +154,7 @@
"Failed Items": "Mislukte items",
"Failed to load file versions.": "Laden van bestandsversies mislukt.",
"Failed to load ignore patterns.": "Laden van negeerpatronen mislukt.",
"Failed to setup, retrying": "Instellen mislukt, opnieuw proberen",
"Failed to set up, retrying": "Instellen mislukt, opnieuw proberen",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Als er geen IPv6-connectiviteit is worden problemen bij verbinden met IPv6-servers verwacht.",
"File Pull Order": "Volgorde voor binnenhalen van bestanden",
"File Versioning": "Versiebeheer",

@@ -156,7 +156,7 @@
"Failed Items": "Elementy zakończone niepowodzeniem",
"Failed to load file versions.": "Nie udało się załadować wersji plików.",
"Failed to load ignore patterns.": "Nie udało się załadować wzorców ignorowania.",
"Failed to setup, retrying": "Nie udało się ustawić; ponawiam próbę",
"Failed to set up, retrying": "Nie udało się ustawić; ponawiam próbę",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Błąd połączenia do serwerów IPv6 może wystąpić, gdy w ogóle nie ma połączenia po IPv6.",
"File Pull Order": "Kolejność pobierania plików",
"File Versioning": "Wersjonowanie plików",

@@ -156,7 +156,7 @@
"Failed Items": "Itens com falha",
"Failed to load file versions.": "Falha ao carregar versões do arquivo.",
"Failed to load ignore patterns.": "Falha ao carregar os padrões para ignorar.",
"Failed to setup, retrying": "Não foi possível configurar, tentando novamente",
"Failed to set up, retrying": "Não foi possível configurar, tentando novamente",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Falhas na conexão a servidores IPv6 são esperadas caso não haja conectividade IPv6.",
"File Pull Order": "Ordem de retirada do arquivo",
"File Versioning": "Versionamento de arquivos",

@@ -156,7 +156,7 @@
"Failed Items": "Itens que falharam",
"Failed to load file versions.": "Falha ao carregar as versões do ficheiro.",
"Failed to load ignore patterns.": "Falha ao carregar os padrões de exclusão.",
"Failed to setup, retrying": "A preparação falhou, a tentar novamente",
"Failed to set up, retrying": "A preparação falhou, a tentar novamente",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "São esperadas falhas na ligação a servidores IPv6 se não existir conectividade IPv6.",
"File Pull Order": "Ordem de obtenção de ficheiros",
"File Versioning": "Gestão de versões de ficheiros",

@@ -143,7 +143,6 @@
"Error": "Eroare",
"External File Versioning": "Administrare externă a versiunilor documentului",
"Failed Items": "Failed Items",
"Failed to setup, retrying": "Failed to setup, retrying",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.",
"File Pull Order": "File Pull Order",
"File Versioning": "Versiune Fișier",

@@ -154,7 +154,7 @@
"Failed Items": "Сбойные объекты",
"Failed to load file versions.": "Не удалось загрузить версии файлов.",
"Failed to load ignore patterns.": "Не удалось загрузить шаблоны игнорирования.",
"Failed to setup, retrying": "Не удалось настроить, пробуем ещё",
"Failed to set up, retrying": "Не удалось настроить, пробуем ещё",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Если нет IPv6-соединений, при подключении к IPv6-серверам произойдёт ошибка.",
"File Pull Order": "Порядок получения файлов",
"File Versioning": "Управление версиями",

@@ -143,7 +143,7 @@
"Failed Items": "අසාර්ථක අයිතම",
"Failed to load file versions.": "ගොනු අනුවාද පූරණය කිරීමට අසමත් විය.",
"Failed to load ignore patterns.": "නොසලකා හැරීමේ රටා පූරණය කිරීමට අසමත් විය.",
"Failed to setup, retrying": "පිහිටුවීමට අසමත් විය, උත්සාහ කරමින්",
"Failed to set up, retrying": "පිහිටුවීමට අසමත් විය, උත්සාහ කරමින්",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "IPv6 සම්බන්ධතාවක් නොමැති නම් IPv6 සේවාදායක වෙත සම්බන්ධ වීමට අසමත් වීම අපේක්ෂා කෙරේ.",
"File Pull Order": "ගොනු ඇදීමේ නියෝගය",
"File Versioning": "ගොනු අනුවාදය",

@@ -153,7 +153,7 @@
"Failed Items": "Zlyhané položky",
"Failed to load file versions.": "Nepodarilo sa načítať verzie súborov.",
"Failed to load ignore patterns.": "Nepodarilo sa načítať ignorované vzory.",
"Failed to setup, retrying": "Nepodarilo sa nastaviť, opakujem",
"Failed to set up, retrying": "Nepodarilo sa nastaviť, opakujem",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Zlyhanie pripojenia k IPv6 serverom je očakávané ak neexistujú žiadne IPv6 pripojenia.",
"File Pull Order": "Poradie sťahovania súborov",
"File Versioning": "Verzie súborov",

@@ -131,7 +131,7 @@
"External File Versioning": "Zunanje beleženje različic datotek",
"Failed Items": "Neuspeli predmeti",
"Failed to load ignore patterns.": "Prezrih vzorcev ni bilo mogoče naložiti.",
"Failed to setup, retrying": "Nastavitev ni uspela, ponovni poskus",
"Failed to set up, retrying": "Nastavitev ni uspela, ponovni poskus",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Neuspeh povezav z IPv6 strežniki je pričakovan, če ni IPv6 povezljivost.",
"File Pull Order": "Vrstni red prenosa datotek",
"File Versioning": "Beleženje različic datotek",

@@ -154,7 +154,7 @@
"Failed Items": "Misslyckade objekt",
"Failed to load file versions.": "Det gick inte att läsa in filversioner.",
"Failed to load ignore patterns.": "Det gick inte att läsa in ignoreringsmönster.",
"Failed to setup, retrying": "Det gick inte att ställa in, försöker igen",
"Failed to set up, retrying": "Det gick inte att ställa in, försöker igen",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "Det går inte att ansluta till IPv6-servrar om det inte finns någon IPv6-anslutning.",
"File Pull Order": "Filhämtningsprioritering",
"File Versioning": "Filversionshantering",

@@ -156,7 +156,7 @@
"Failed Items": "Başarısız Olan Öğeler",
"Failed to load file versions.": "Dosya sürümlerini yükleme başarısız.",
"Failed to load ignore patterns.": "Yoksayma şekillerini yükleme başarısız.",
"Failed to setup, retrying": "Ayarlama başarısız, yeniden deneniyor",
"Failed to set up, retrying": "Ayarlama başarısız, yeniden deneniyor",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "IPv6 bağlanabilirliği yoksa IPv6 sunucularına bağlanma hatası beklenmekte.",
"File Pull Order": "Dosya Çekme Sırası",
"File Versioning": "Dosya Sürümlendirme",

@@ -27,6 +27,7 @@
"Allowed Networks": "Дозволені мережі",
"Alphabetic": "За абеткою",
"Altered by ignoring deletes.": "Змінено, ігноруючи видалення.",
"Always turned on when the folder type is \"{%foldertype%}\".": "Завжди вмикається, якщо тип теки «{{foldertype}}».",
"An external command handles the versioning. It has to remove the file from the shared folder. If the path to the application contains spaces, it should be quoted.": "Зовнішня команда керує версіями. Вона повинна видалити файл із спільної теки. Якщо шлях до застосунку містить пробіли, його слід взяти в лапки.",
"Anonymous Usage Reporting": "Анонімне звітування про використання",
"Anonymous usage report format has changed. Would you like to move to the new format?": "Формат анонімного звітування про використання змінився. Бажаєте перейти на новий формат?",

@@ -52,6 +53,7 @@
"Body:": "Повідомлення:",
"Bugs": "Помилки",
"Cancel": "Скасувати",
"Cannot be enabled when the folder type is \"{%foldertype%}\".": "Неможливо ввімкнути, якщо тип теки «{{foldertype}}».",
"Changelog": "Журнал змін",
"Clean out after": "Очистити після",
"Cleaning Versions": "Очищення версій",

@@ -154,7 +156,7 @@
"Failed Items": "Невдалі",
"Failed to load file versions.": "Не вдалося завантажити версії файлів.",
"Failed to load ignore patterns.": "Не вдалося завантажити шаблони ігнорування.",
"Failed to setup, retrying": "Не вдалося налаштувати, повторна спроба",
"Failed to set up, retrying": "Не вдалося налаштувати, повторна спроба",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "За відсутності з'єднання IPv6 очікується неможливість під'єднання до серверів IPv6.",
"File Pull Order": "Порядок витягнення файлів",
"File Versioning": "Версіонування файлів",

@@ -550,6 +552,6 @@
},
"unknown device": "невідомий пристрій",
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} хоче поділитися папкою \"{{folder}}\".",
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} хоче поділитися папкою \"{{folderLabel}}\" ({{folder}}).",
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} хоче поділитися текою \"{{folderlabel}}\" ({{folder}}).",
"{%reintroducer%} might reintroduce this device.": "{{reintroducer}} може повторно порекомендувати цей пристрій."
}

@@ -156,7 +156,7 @@
"Failed Items": "失败的项目",
"Failed to load file versions.": "加载文件版本失败。",
"Failed to load ignore patterns.": "加载忽略模式失败。",
"Failed to setup, retrying": "设置失败,正在重试",
"Failed to set up, retrying": "设置失败,正在重试",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "如果本机没有配置 IPv6,则无法连接 IPv6 服务器是正常的。",
"File Pull Order": "文件拉取顺序",
"File Versioning": "文件版本控制",

@@ -145,7 +145,7 @@
"Failed Items": "失敗的項目",
"Failed to load file versions.": "無法加載文件版本。",
"Failed to load ignore patterns.": "無法加載忽略模式。",
"Failed to setup, retrying": "設置失敗,正在重試。",
"Failed to set up, retrying": "設置失敗,正在重試。",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "如果本機沒有配置IPv6,則無法連接IPv6服務器是正常的。",
"File Pull Order": "文件拉取順序",
"File Versioning": "版本控制",

@@ -154,7 +154,7 @@
"Failed Items": "失敗的項目",
"Failed to load file versions.": "無法載入檔案版本。",
"Failed to load ignore patterns.": "無法載入忽略模式。",
"Failed to setup, retrying": "無法設定,正在重試",
"Failed to set up, retrying": "無法設定,正在重試",
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "若沒有 IPv6 連線能力,則無法連接 IPv6 伺服器為正常現象。",
"File Pull Order": "提取檔案的順序",
"File Versioning": "檔案版本控制",

@@ -521,7 +521,7 @@
</span>
<span ng-if="folder.fsWatcherEnabled && !folder.paused && folderStatus(folder) !== 'stopped' && model[folder.id].watchError" tooltip data-original-title="{{'Periodic scanning at given interval and failed setting up watching for changes, retrying every 1m:' | translate}}<br/>{{model[folder.id].watchError}}">
<span class="far fa-clock"></span> {{folder.rescanIntervalS | duration}} 
<span class="fas fa-eye-slash"></span> <span translate>Failed to setup, retrying</span>
<span class="fas fa-eye-slash"></span> <span translate>Failed to set up, retrying</span>
</span>
</div>
<div ng-if="folder.rescanIntervalS <= 0">

@@ -535,7 +535,7 @@
</span>
<span ng-if="folder.fsWatcherEnabled && !folder.paused && folderStatus(folder) !== 'stopped' && model[folder.id].watchError" tooltip data-original-title="{{'Disabled periodic scanning and failed setting up watching for changes, retrying every 1m:' | translate}}<br/>{{model[folder.id].watchError}}">
<span class="far fa-clock"></span> <span translate>Disabled</span> 
<span class="fas fa-eye-slash"></span> <span translate>Failed to setup, retrying</span>
<span class="fas fa-eye-slash"></span> <span translate>Failed to set up, retrying</span>
</span>
</div>
</td>

@@ -30,7 +30,7 @@
<h4 class="text-center" translate>The Syncthing Authors</h4>
<div class="row">
<div class="col-md-12" id="contributor-list">
Jakob Borg, Audrius Butkevicius, Jesse Lucas, Simon Frei, Tomasz Wilczyński, Alexander Graf, Alexandre Viau, Anderson Mesquita, André Colomb, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Emil Lundberg, Eric P, Evgeny Kuznetsov, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ross Smith II, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Wulf Weich, bt90, greatroar, Aaron Bieber, Adam Piggott, Adel Qalieh, Alan Pope, Alberto Donato, Aleksey Vasenev, Alessandro G., Alex Ionescu, Alex Lindeman, Alex Xu, Alexander Seiler, Alexandre Alves, Aman Gupta, Anatoli Babenia, Andreas Sommer, Andrew Dunham, Andrew Meyer, Andrew Rabert, Andrey D, Anjan Momi, Anthony Goeckner, Antoine Lamielle, Anur, Aranjedeath, Arkadiusz Tymiński, Aroun, Arthur Axel fREW Schmidt, Artur Zubilewicz, Aurélien Rainone, BAHADIR YILMAZ, Bart De Vries, Beat Reichenbach, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benedikt Heine, Benedikt Morbach, Benjamin Nater, Benno Fünfstück, Benny Ng, Boqin Qin, Boris Rybalkin, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Catfriend1, Cathryne Linenweaver, Cedric Staniewski, Chih-Hsuan Yen, Choongkyu, Chris Howie, Chris Joel, Chris Tonkinson, Christian Kujau, Christian Prescott, Colin Kennedy, Cromefire_, Cyprien Devillez, Dale Visser, Dan, Daniel Barczyk, Daniel Bergmann, Daniel Martí, Daniel Padrta, Darshil Chanpura, David Rimmer, DeflateAwning, Denis A., Dennis Wilson, DerRockWolf, Devon G. Redekopp, Dimitri Papadopoulos Orfanos, Dmitry Saveliev, Domenic Horner, Dominik Heidler, Elias Jarlebring, Elliot Huffman, Emil Hessman, Eng Zer Jun, Eric Lesiuta, Erik Meitner, Evan Spensley, Federico Castagnini, Felix, Felix Ableitner, Felix Lampe, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gahl Saraf, Gilli Sigurdsson, Gleb Sinyavskiy, Graham Miln, Greg, Gusted, Han Boetes, HansK-p, Harrison Jones, Heiko Zuerker, Hireworks, Hugo Locurcio, Iain Barnett, Ian Johnson, Ikko Ashimine, Ilya Brin, Iskander Sharipov, Jaakko Hannikainen, Jacek Szafarkiewicz, Jack Croft, Jacob, Jake Peterson, James O'Beirne, James Patterson, Jaroslav Lichtblau, Jaroslav Malec, Jaspitta, Jauder Ho, Jaya Chithra, Jaya Kumar, Jeffery To, Jens Diemer, Jerry Jacobs, Jochen Voss, Johan Andersson, Johan Vromans, John Rinehart, Jonas Thelemann, Jonathan, Jonathan Cross, Jonta, Jose Manuel Delicado, Julian Lehrhuber, Jörg Thalheim, Jędrzej Kula, K.B.Dharun Krishna, Kalle Laine, Kapil Sareen, Karol Różycki, Kebin Liu, Keith Harrison, Keith Turner, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin Bushiri, Kevin White, Jr., Kurt Fitzner, LSmithx2, Lars Lehtonen, Laurent Arnoud, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Lukas Lihotzki, Luke Hamburg, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Marcus B Spencer, Marcus Legendre, Mario Majila, Mark Pulford, Martchus, Martin Polehla, Mateusz Naściszewski, Mateusz Ż, Matic Potočnik, Matt Burke, Matt Robenolt, Matteo Ruina, Maurizio Tomasi, Max, Max Schulze, MaximAL, Maxime Thirouin, Maximilian, MichaIng, Michael Jephcote, Michael Rienstra, Michael Tilli, Migelo, Mike Boone, MikeLund, MikolajTwarog, Mingxuan Lin, Naveen, Nicholas Rishel, Nick Busey, Nico Stapelbroek, Nicolas Braud-Santoni, Nicolas Perraut, Niels Peter Roest, Nils Jakobi, NinoM4ster, Nitroretro, NoLooseEnds, Oliver Freyermuth, Otiel, Oyebanji Jacob Mayowa, Pablo, Pascal Jungblut, Paul Brit, Pawel Palenica, Paweł Rozlach, Peter Badida, Peter Dave Hello, Peter Hoeg, Peter Marquardt, Phani Rithvij, Phil Davis, Phill 
Luby, Pier Paolo Ramon, Piotr Bejda, Pramodh KP, Quentin Hibon, Rahmi Pruitt, Richard Hartmann, Robert Carosi, Roberto Santalla, Robin Schoonover, Roman Zaynetdinov, Ruslan Yevdokymov, Ryan Qian, Sacheendra Talluri, Scott Klupfel, Sertonix, Severin von Wnuck-Lipinski, Shaarad Dalvi, Simon Mwepu, Simon Pickup, Sly_tom_cat, Sonu Kumar Saw, Stefan Kuntz, Steven Eckhoff, Suhas Gundimeda, Sven Bachmann, Taylor Khan, Terrance, Thomas, Thomas Hipp, Tim Abell, Tim Howes, Tim Nordenfur, Tobias Frölich, Tobias Klauser, Tobias Nygren, Tobias Tom, Tom Jakubowski, Tommy Thorn, Tommy van der Vorst, Tully Robinson, Tyler Brazier, Tyler Kropp, Unrud, Veeti Paananen, Victor Buinsky, Vik, Vil Brekin, Vladimir Rusinov, WangXi, Will Rouesnel, William A. Kennington III, Xavier O., Yannic A., andresvia, andyleap, boomsquared, chenrui, chucic, cjc7373, cui fliter, d-volution, dashangcun, derekriemer, desbma, diemade, digital, entity0xfe, georgespatton, ghjklw, guangwu, gudvinr, ignacy123, janost, jaseg, jelle van der Waa, jtagcat, klemens, kylosus, luchenhan, luzpaz, marco-m, mathias4833, maxice8, mclang, mv1005, nf, orangekame3, otbutz, overkill, perewa, polyfloyd, red_led, rubenbe, sec65, vapatel2, villekalliomaki, wangguoliang, wouter bolsterlee, xarx00, xjtdy888, 佛跳墙, 落心
Jakob Borg, Audrius Butkevicius, Jesse Lucas, Simon Frei, Tomasz Wilczyński, Alexander Graf, Alexandre Viau, Anderson Mesquita, André Colomb, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Emil Lundberg, Eric P, Evgeny Kuznetsov, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ross Smith II, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Wulf Weich, bt90, greatroar, Aaron Bieber, Adam Piggott, Adel Qalieh, Alan Pope, Alberto Donato, Aleksey Vasenev, Alessandro G., Alex Ionescu, Alex Lindeman, Alex Xu, Alexander Seiler, Alexandre Alves, Aman Gupta, Anatoli Babenia, Andreas Sommer, Andrew Dunham, Andrew Meyer, Andrew Rabert, Andrey D, Anjan Momi, Anthony Goeckner, Antoine Lamielle, Anur, Aranjedeath, Arkadiusz Tymiński, Aroun, Arthur Axel fREW Schmidt, Artur Zubilewicz, Aurélien Rainone, BAHADIR YILMAZ, Bart De Vries, Beat Reichenbach, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benedikt Heine, Benedikt Morbach, Benjamin Nater, Benno Fünfstück, Benny Ng, Boqin Qin, Boris Rybalkin, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Catfriend1, Cathryne Linenweaver, Cedric Staniewski, Chih-Hsuan Yen, Choongkyu, Chris Howie, Chris Joel, Chris Tonkinson, Christian Kujau, Christian Prescott, Colin Kennedy, Cromefire_, Cyprien Devillez, Dale Visser, Dan, Daniel Barczyk, Daniel Bergmann, Daniel Martí, Daniel Padrta, Darshil Chanpura, David Rimmer, DeflateAwning, Denis A., Dennis Wilson, DerRockWolf, Devon G. Redekopp, Dimitri Papadopoulos Orfanos, Dmitry Saveliev, Domenic Horner, Dominik Heidler, Elias Jarlebring, Elliot Huffman, Emil Hessman, Eng Zer Jun, Eric Lesiuta, Erik Meitner, Evan Spensley, Federico Castagnini, Felix, Felix Ableitner, Felix Lampe, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gahl Saraf, Gilli Sigurdsson, Gleb Sinyavskiy, Graham Miln, Greg, Gusted, Han Boetes, HansK-p, Harrison Jones, Heiko Zuerker, Hireworks, Hugo Locurcio, Iain Barnett, Ian Johnson, Ikko Ashimine, Ilya Brin, Iskander Sharipov, Jaakko Hannikainen, Jacek Szafarkiewicz, Jack Croft, Jacob, Jake Peterson, James O'Beirne, James Patterson, Jaroslav Lichtblau, Jaroslav Malec, Jaspitta, Jauder Ho, Jaya Chithra, Jaya Kumar, Jeffery To, Jens Diemer, Jerry Jacobs, Jochen Voss, Johan Andersson, Johan Vromans, John Rinehart, Jonas Thelemann, Jonathan, Jonathan Cross, Jonta, Jose Manuel Delicado, Julian Lehrhuber, Jörg Thalheim, Jędrzej Kula, K.B.Dharun Krishna, Kalle Laine, Kapil Sareen, Karol Różycki, Kebin Liu, Keith Harrison, Keith Turner, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin Bushiri, Kevin White, Jr., Kurt Fitzner, LSmithx2, Lars Lehtonen, Laurent Arnoud, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Lukas Lihotzki, Luke Hamburg, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Marcus B Spencer, Marcus Legendre, Mario Majila, Mark Pulford, Martchus, Martin Polehla, Mateusz Naściszewski, Mateusz Ż, Matic Potočnik, Matt Burke, Matt Robenolt, Matteo Ruina, Maurizio Tomasi, Max, Max Schulze, MaximAL, Maxime Thirouin, Maximilian, MichaIng, Michael Jephcote, Michael Rienstra, Michael Tilli, Migelo, Mike Boone, MikeLund, MikolajTwarog, Mingxuan Lin, Naveen, Nicholas Rishel, Nick Busey, Nico Stapelbroek, Nicolas Braud-Santoni, Nicolas Perraut, Niels Peter Roest, Nils Jakobi, NinoM4ster, Nitroretro, NoLooseEnds, Oliver Freyermuth, Otiel, Oyebanji Jacob Mayowa, Pablo, Pascal Jungblut, Paul Brit, Pawel Palenica, Paweł Rozlach, Peter Badida, Peter Dave Hello, Peter Hoeg, Peter Marquardt, Phani Rithvij, Phil Davis, Phill 
Luby, Pier Paolo Ramon, Piotr Bejda, Pramodh KP, Quentin Hibon, Rahmi Pruitt, Richard Hartmann, Robert Carosi, Roberto Santalla, Robin Schoonover, Roman Zaynetdinov, Ruslan Yevdokymov, Ryan Qian, Sacheendra Talluri, Scott Klupfel, Sertonix, Severin von Wnuck-Lipinski, Shaarad Dalvi, Simon Mwepu, Simon Pickup, Sly_tom_cat, Sonu Kumar Saw, Stefan Kuntz, Steven Eckhoff, Suhas Gundimeda, Sven Bachmann, Sébastien WENSKE, Taylor Khan, Terrance, Thomas, Thomas Hipp, Tim Abell, Tim Howes, Tim Nordenfur, Tobias Frölich, Tobias Klauser, Tobias Nygren, Tobias Tom, Tom Jakubowski, Tommy Thorn, Tommy van der Vorst, Tully Robinson, Tyler Brazier, Tyler Kropp, Unrud, Veeti Paananen, Victor Buinsky, Vik, Vil Brekin, Vladimir Rusinov, WangXi, Will Rouesnel, William A. Kennington III, Xavier O., Yannic A., andresvia, andyleap, boomsquared, chenrui, chucic, cjc7373, cui fliter, d-volution, dashangcun, derekriemer, desbma, diemade, digital, entity0xfe, georgespatton, ghjklw, guangwu, gudvinr, ignacy123, janost, jaseg, jelle van der Waa, jtagcat, klemens, kylosus, luchenhan, luzpaz, marco-m, mathias4833, maxice8, mclang, mv1005, nf, orangekame3, otbutz, overkill, perewa, polyfloyd, red_led, rubenbe, sec65, vapatel2, villekalliomaki, wangguoliang, wouter bolsterlee, xarx00, xjtdy888, 佛跳墙, 落心
</div>
</div>
</div>
@@ -1165,7 +1165,7 @@ angular.module('syncthing.core')
}

// Disconnected
if (!unused && $scope.deviceStats[deviceCfg.deviceID] && $scope.deviceStats[deviceCfg.deviceID].lastSeenDays && $scope.deviceStats[deviceCfg.deviceID].lastSeenDays >= 7) {
if (!unused && $scope.deviceStats[deviceCfg.deviceID] && (!$scope.deviceStats[deviceCfg.deviceID].lastSeenDays || $scope.deviceStats[deviceCfg.deviceID].lastSeenDays >= 7)) {
    return status + 'disconnected-inactive';
} else {
    return status + 'disconnected';

@@ -36,13 +36,13 @@ type DB interface {
    // required.
    AllGlobalFiles(folder string) (iter.Seq[FileMetadata], func() error)
    AllGlobalFilesPrefix(folder string, prefix string) (iter.Seq[FileMetadata], func() error)
    AllLocalBlocksWithHash(hash []byte) (iter.Seq[BlockMapEntry], func() error)
    AllLocalFiles(folder string, device protocol.DeviceID) (iter.Seq[protocol.FileInfo], func() error)
    AllLocalFilesBySequence(folder string, device protocol.DeviceID, startSeq int64, limit int) (iter.Seq[protocol.FileInfo], func() error)
    AllLocalFilesWithPrefix(folder string, device protocol.DeviceID, prefix string) (iter.Seq[protocol.FileInfo], func() error)
    AllLocalFilesWithBlocksHash(folder string, h []byte) (iter.Seq[FileMetadata], func() error)
    AllLocalFilesWithBlocksHashAnyFolder(h []byte) (iter.Seq2[string, FileMetadata], func() error)
    AllNeededGlobalFiles(folder string, device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error)
    AllLocalBlocksWithHash(hash []byte) ([]BlockMapEntry, error)
    AllLocalFilesWithBlocksHashAnyFolder(hash []byte) (map[string][]FileMetadata, error)

    // Cleanup
    DropAllFiles(folder string, device protocol.DeviceID) error

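A note on the hunk above: AllLocalBlocksWithHash and AllLocalFilesWithBlocksHashAnyFolder move from the (iter.Seq, func() error) shape to plain slice and map returns, so callers collect once instead of ranging. The following standalone sketch shows what such a collection step looks like; the generic collect helper here is illustrative only (the diff uses Syncthing's itererr.Collect, whose definition is not shown in this section).

package main

import (
    "fmt"
    "iter"
)

// collect drains an iterator and then checks the deferred error, the same
// shape as the (iter.Seq[T], func() error) pairs in the DB interface above.
func collect[T any](seq iter.Seq[T], errFn func() error) ([]T, error) {
    var out []T
    for v := range seq {
        out = append(out, v)
    }
    return out, errFn()
}

func main() {
    nums := func(yield func(int) bool) {
        for i := 1; i <= 3; i++ {
            if !yield(i) {
                return
            }
        }
    }
    vals, err := collect(nums, func() error { return nil })
    fmt.Println(vals, err) // [1 2 3] <nil>
}
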
@@ -67,9 +67,9 @@ func (m metricsDB) AllLocalFilesWithBlocksHash(folder string, h []byte) (iter.Se
    return m.DB.AllLocalFilesWithBlocksHash(folder, h)
}

func (m metricsDB) AllLocalFilesWithBlocksHashAnyFolder(h []byte) (iter.Seq2[string, FileMetadata], func() error) {
func (m metricsDB) AllLocalFilesWithBlocksHashAnyFolder(hash []byte) (map[string][]FileMetadata, error) {
    defer m.account("-", "AllLocalFilesWithBlocksHashAnyFolder")()
    return m.DB.AllLocalFilesWithBlocksHashAnyFolder(h)
    return m.DB.AllLocalFilesWithBlocksHashAnyFolder(hash)
}

func (m metricsDB) AllGlobalFiles(folder string) (iter.Seq[FileMetadata], func() error) {

@@ -107,7 +107,7 @@ func (m metricsDB) GetGlobalAvailability(folder, file string) ([]protocol.Device
    return m.DB.GetGlobalAvailability(folder, file)
}

func (m metricsDB) AllLocalBlocksWithHash(hash []byte) (iter.Seq[BlockMapEntry], func() error) {
func (m metricsDB) AllLocalBlocksWithHash(hash []byte) ([]BlockMapEntry, error) {
    defer m.account("-", "AllLocalBlocksWithHash")()
    return m.DB.AllLocalBlocksWithHash(hash)
}

internal/db/sqlite/basedb.go (new file, 249 lines)
@@ -0,0 +1,249 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
    "database/sql"
    "embed"
    "io/fs"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    "text/template"
    "time"

    "github.com/jmoiron/sqlx"
    "github.com/syncthing/syncthing/lib/build"
    "github.com/syncthing/syncthing/lib/protocol"
)

const currentSchemaVersion = 1

//go:embed sql/**
var embedded embed.FS

type baseDB struct {
    path     string
    baseName string
    sql      *sqlx.DB

    updateLock       sync.Mutex
    updatePoints     int
    checkpointsCount int

    statementsMut sync.RWMutex
    statements    map[string]*sqlx.Stmt
    tplInput      map[string]any
}

func openBase(path string, maxConns int, pragmas, schemaScripts, migrationScripts []string) (*baseDB, error) {
    // Open the database with options to enable foreign keys and recursive
    // triggers (needed for the delete+insert triggers on row replace).
    sqlDB, err := sqlx.Open(dbDriver, "file:"+path+"?"+commonOptions)
    if err != nil {
        return nil, wrap(err)
    }

    sqlDB.SetMaxOpenConns(maxConns)

    for _, pragma := range pragmas {
        if _, err := sqlDB.Exec("PRAGMA " + pragma); err != nil {
            return nil, wrap(err, "PRAGMA "+pragma)
        }
    }

    db := &baseDB{
        path:       path,
        baseName:   filepath.Base(path),
        sql:        sqlDB,
        statements: make(map[string]*sqlx.Stmt),
    }

    for _, script := range schemaScripts {
        if err := db.runScripts(script); err != nil {
            return nil, wrap(err)
        }
    }

    ver, _ := db.getAppliedSchemaVersion()
    if ver.SchemaVersion > 0 {
        filter := func(scr string) bool {
            scr = filepath.Base(scr)
            nstr, _, ok := strings.Cut(scr, "-")
            if !ok {
                return false
            }
            n, err := strconv.ParseInt(nstr, 10, 32)
            if err != nil {
                return false
            }
            return int(n) > ver.SchemaVersion
        }
        for _, script := range migrationScripts {
            if err := db.runScripts(script, filter); err != nil {
                return nil, wrap(err)
            }
        }
    }

    // Set the current schema version, if not already set
    if err := db.setAppliedSchemaVersion(currentSchemaVersion); err != nil {
        return nil, wrap(err)
    }

    db.tplInput = map[string]any{
        "FlagLocalUnsupported": protocol.FlagLocalUnsupported,
        "FlagLocalIgnored":     protocol.FlagLocalIgnored,
        "FlagLocalMustRescan":  protocol.FlagLocalMustRescan,
        "FlagLocalReceiveOnly": protocol.FlagLocalReceiveOnly,
        "FlagLocalGlobal":      protocol.FlagLocalGlobal,
        "FlagLocalNeeded":      protocol.FlagLocalNeeded,
        "SyncthingVersion":     build.LongVersion,
    }

    return db, nil
}

func (s *baseDB) Close() error {
    s.updateLock.Lock()
    s.statementsMut.Lock()
    defer s.updateLock.Unlock()
    defer s.statementsMut.Unlock()
    for _, stmt := range s.statements {
        stmt.Close()
    }
    return wrap(s.sql.Close())
}

var tplFuncs = template.FuncMap{
    "or": func(vs ...int) int {
        v := vs[0]
        for _, ov := range vs[1:] {
            v |= ov
        }
        return v
    },
}

// stmt returns a prepared statement for the given SQL string, after
// applying local template expansions. The statement is cached.
func (s *baseDB) stmt(tpl string) stmt {
    tpl = strings.TrimSpace(tpl)

    // Fast concurrent lookup of cached statement
    s.statementsMut.RLock()
    stmt, ok := s.statements[tpl]
    s.statementsMut.RUnlock()
    if ok {
        return stmt
    }

    // On miss, take the full lock, check again
    s.statementsMut.Lock()
    defer s.statementsMut.Unlock()
    stmt, ok = s.statements[tpl]
    if ok {
        return stmt
    }

    // Apply template expansions
    var sb strings.Builder
    compTpl := template.Must(template.New("tpl").Funcs(tplFuncs).Parse(tpl))
    if err := compTpl.Execute(&sb, s.tplInput); err != nil {
        panic("bug: bad template: " + err.Error())
    }

    // Prepare and cache
    stmt, err := s.sql.Preparex(sb.String())
    if err != nil {
        return failedStmt{err}
    }
    s.statements[tpl] = stmt
    return stmt
}

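The stmt method above is a double-checked cache: a read-locked fast path, then a write lock with a second lookup before preparing the statement. A minimal standalone sketch of the same pattern, using a made-up cache type rather than baseDB:

package main

import (
    "fmt"
    "sync"
)

// cache shows the read-lock fast path / write-lock slow path used by
// baseDB.stmt; this type is purely illustrative.
type cache struct {
    mut sync.RWMutex
    m   map[string]string
}

func (c *cache) get(key string, build func(string) string) string {
    c.mut.RLock()
    v, ok := c.m[key]
    c.mut.RUnlock()
    if ok {
        return v // fast path: already cached
    }
    c.mut.Lock()
    defer c.mut.Unlock()
    if v, ok := c.m[key]; ok { // re-check: another goroutine may have won
        return v
    }
    v = build(key)
    c.m[key] = v
    return v
}

func main() {
    c := &cache{m: make(map[string]string)}
    fmt.Println(c.get("a", func(k string) string { return k + "!" })) // a!
}
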
type stmt interface {
    Exec(args ...any) (sql.Result, error)
    Get(dest any, args ...any) error
    Queryx(args ...any) (*sqlx.Rows, error)
    Select(dest any, args ...any) error
}

type failedStmt struct {
    err error
}

func (f failedStmt) Exec(_ ...any) (sql.Result, error)    { return nil, f.err }
func (f failedStmt) Get(_ any, _ ...any) error            { return f.err }
func (f failedStmt) Queryx(_ ...any) (*sqlx.Rows, error)  { return nil, f.err }
func (f failedStmt) Select(_ any, _ ...any) error         { return f.err }

func (s *baseDB) runScripts(glob string, filter ...func(s string) bool) error {
    scripts, err := fs.Glob(embedded, glob)
    if err != nil {
        return wrap(err)
    }

    tx, err := s.sql.Begin()
    if err != nil {
        return wrap(err)
    }
    defer tx.Rollback() //nolint:errcheck

nextScript:
    for _, scr := range scripts {
        for _, fn := range filter {
            if !fn(scr) {
                continue nextScript
            }
        }
        bs, err := fs.ReadFile(embedded, scr)
        if err != nil {
            return wrap(err, scr)
        }
        // SQLite requires one statement per exec, so we split the init
        // files on lines containing only a semicolon and execute them
        // separately. We require it on a separate line because there are
        // also statement-internal semicolons in the triggers.
        for _, stmt := range strings.Split(string(bs), "\n;") {
            if _, err := tx.Exec(stmt); err != nil {
                return wrap(err, stmt)
            }
        }
    }

    return wrap(tx.Commit())
}

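The split rule in runScripts is easy to misread: statements are separated on a semicolon that begins a new line, so semicolons inside trigger bodies are left alone. A small sketch with an invented script string:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // The "; END" inside the trigger does not follow a newline, so it
    // stays within one statement; only the standalone ";" lines split.
    script := "CREATE TABLE t (id INTEGER)\n;\nCREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END\n;"
    for i, stmt := range strings.Split(script, "\n;") {
        if strings.TrimSpace(stmt) == "" {
            continue // trailing separator
        }
        fmt.Printf("statement %d: %q\n", i, stmt)
    }
}
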
type schemaVersion struct {
    SchemaVersion    int
    AppliedAt        int64
    SyncthingVersion string
}

func (s *schemaVersion) AppliedTime() time.Time {
    return time.Unix(0, s.AppliedAt)
}

func (s *baseDB) setAppliedSchemaVersion(ver int) error {
    _, err := s.stmt(`
        INSERT OR IGNORE INTO schemamigrations (schema_version, applied_at, syncthing_version)
        VALUES (?, ?, ?)
    `).Exec(ver, time.Now().UnixNano(), build.LongVersion)
    return wrap(err)
}

func (s *baseDB) getAppliedSchemaVersion() (schemaVersion, error) {
    var v schemaVersion
    err := s.stmt(`
        SELECT schema_version as schemaversion, applied_at as appliedat, syncthing_version as syncthingversion FROM schemamigrations
        ORDER BY schema_version DESC
        LIMIT 1
    `).Get(&v)
    return v, wrap(err)
}

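For clarity, the migration filter defined in openBase above parses the digits before the first "-" in each script's base name and applies only scripts numbered above the recorded schema version; the "NN-description.sql" naming is inferred from that parsing code, not stated elsewhere in the diff. An isolated sketch:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// shouldApply mirrors the filter closure in openBase: take the digits
// before the first "-" in the script name and apply the script only when
// that number exceeds the already-applied schema version.
func shouldApply(name string, applied int) bool {
    nstr, _, ok := strings.Cut(name, "-")
    if !ok {
        return false
    }
    n, err := strconv.ParseInt(nstr, 10, 32)
    if err != nil {
        return false
    }
    return int(n) > applied
}

func main() {
    fmt.Println(shouldApply("02-add-index.sql", 1)) // true
    fmt.Println(shouldApply("01-initial.sql", 1))   // false
}
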
@@ -1,77 +0,0 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
    "sync"
    "time"

    "github.com/jmoiron/sqlx"
    "github.com/syncthing/syncthing/internal/db"
    "github.com/syncthing/syncthing/lib/protocol"
    "github.com/thejerf/suture/v4"
)

type DB struct {
    sql            *sqlx.DB
    localDeviceIdx int64
    updateLock     sync.Mutex

    statementsMut sync.RWMutex
    statements    map[string]*sqlx.Stmt
    tplInput      map[string]any
}

var _ db.DB = (*DB)(nil)

func (s *DB) Close() error {
    s.updateLock.Lock()
    s.statementsMut.Lock()
    defer s.updateLock.Unlock()
    defer s.statementsMut.Unlock()
    for _, stmt := range s.statements {
        stmt.Close()
    }
    return wrap(s.sql.Close())
}

func (s *DB) Service(maintenanceInterval time.Duration) suture.Service {
    return newService(s, maintenanceInterval)
}

func (s *DB) ListFolders() ([]string, error) {
    var res []string
    err := s.stmt(`
        SELECT folder_id FROM folders
        ORDER BY folder_id
    `).Select(&res)
    return res, wrap(err)
}

func (s *DB) ListDevicesForFolder(folder string) ([]protocol.DeviceID, error) {
    var res []string
    err := s.stmt(`
        SELECT d.device_id FROM counts s
        INNER JOIN folders o ON o.idx = s.folder_idx
        INNER JOIN devices d ON d.idx = s.device_idx
        WHERE o.folder_id = ? AND s.count > 0 AND s.device_idx != {{.LocalDeviceIdx}}
        GROUP BY d.device_id
        ORDER BY d.device_id
    `).Select(&res, folder)
    if err != nil {
        return nil, wrap(err)
    }

    devs := make([]protocol.DeviceID, len(res))
    for i, s := range res {
        devs[i], err = protocol.DeviceIDFromString(s)
        if err != nil {
            return nil, wrap(err)
        }
    }
    return devs, nil
}

internal/db/sqlite/db_folderdb.go (new file, 402 lines)
@@ -0,0 +1,402 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
    "database/sql"
    "errors"
    "fmt"
    "iter"
    "path/filepath"
    "strings"
    "time"

    "github.com/syncthing/syncthing/internal/db"
    "github.com/syncthing/syncthing/internal/itererr"
    "github.com/syncthing/syncthing/lib/config"
    "github.com/syncthing/syncthing/lib/protocol"
    "github.com/syncthing/syncthing/lib/rand"
)

var errNoSuchFolder = errors.New("no such folder")

func (s *DB) getFolderDB(folder string, create bool) (*folderDB, error) {
    // Check for an already open database
    s.folderDBsMut.RLock()
    fdb, ok := s.folderDBs[folder]
    s.folderDBsMut.RUnlock()
    if ok {
        return fdb, nil
    }

    // Check for an existing database. If we're not supposed to create the
    // folder, we don't move on if it doesn't already have a database name.
    var dbName string
    if err := s.stmt(`
        SELECT database_name FROM folders
        WHERE folder_id = ?
    `).Get(&dbName, folder); err != nil && !errors.Is(err, sql.ErrNoRows) {
        return nil, wrap(err)
    }
    if dbName == "" && !create {
        return nil, errNoSuchFolder
    }

    // Create a folder ID and database if it does not already exist
    s.folderDBsMut.Lock()
    defer s.folderDBsMut.Unlock()
    if fdb, ok := s.folderDBs[folder]; ok {
        return fdb, nil
    }

    if dbName == "" {
        // First time we want to access this folder, need to create a new
        // folder ID
        idx, err := s.folderIdxLocked(folder)
        if err != nil {
            return nil, wrap(err)
        }

        // The database name is the folder index ID and a random slug.
        slug := strings.ToLower(rand.String(8))
        dbName = fmt.Sprintf("folder.%04x-%s.db", idx, slug)
        if _, err := s.stmt(`UPDATE folders SET database_name = ? WHERE idx = ?`).Exec(dbName, idx); err != nil {
            return nil, wrap(err, "set name")
        }
    }

    l.Debugf("Folder %s in database %s", folder, dbName)
    path := dbName
    if !filepath.IsAbs(path) {
        path = filepath.Join(s.pathBase, dbName)
    }
    fdb, err := s.folderDBOpener(folder, path, s.deleteRetention)
    if err != nil {
        return nil, wrap(err)
    }
    s.folderDBs[folder] = fdb
    return fdb, nil
}

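The file name computed in getFolderDB is the folder's numeric index rendered as four hex digits plus a random lowercase slug. A tiny sketch with hardcoded values (a fixed slug stands in for Syncthing's rand.String(8) helper):

package main

import "fmt"

func main() {
    idx := int64(42)   // folder index from the folders table
    slug := "a1b2c3d4" // stands in for strings.ToLower(rand.String(8))
    fmt.Printf("folder.%04x-%s.db\n", idx, slug) // folder.002a-a1b2c3d4.db
}
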
func (s *DB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error {
    fdb, err := s.getFolderDB(folder, true)
    if err != nil {
        return err
    }
    return fdb.Update(device, fs)
}

func (s *DB) GetDeviceFile(folder string, device protocol.DeviceID, file string) (protocol.FileInfo, bool, error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return protocol.FileInfo{}, false, nil
    }
    if err != nil {
        return protocol.FileInfo{}, false, err
    }
    return fdb.GetDeviceFile(device, file)
}

func (s *DB) GetGlobalAvailability(folder, file string) ([]protocol.DeviceID, error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    return fdb.GetGlobalAvailability(file)
}

func (s *DB) GetGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return protocol.FileInfo{}, false, nil
    }
    if err != nil {
        return protocol.FileInfo{}, false, err
    }
    return fdb.GetGlobalFile(file)
}

func (s *DB) AllGlobalFiles(folder string) (iter.Seq[db.FileMetadata], func() error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return func(yield func(db.FileMetadata) bool) {}, func() error { return nil }
    }
    if err != nil {
        return func(yield func(db.FileMetadata) bool) {}, func() error { return err }
    }
    return fdb.AllGlobalFiles()
}

func (s *DB) AllGlobalFilesPrefix(folder string, prefix string) (iter.Seq[db.FileMetadata], func() error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return func(yield func(db.FileMetadata) bool) {}, func() error { return nil }
    }
    if err != nil {
        return func(yield func(db.FileMetadata) bool) {}, func() error { return err }
    }
    return fdb.AllGlobalFilesPrefix(prefix)
}

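Each "no such folder" branch above returns an empty iterator together with an error-reporting closure. The sketch below expresses that shape once as a generic helper; no such helper appears in the diff, this is purely illustrative:

package main

import (
    "fmt"
    "iter"
)

// emptySeq returns an iterator that yields nothing and an error function
// reporting err (nil for the benign "no such folder" case).
func emptySeq[T any](err error) (iter.Seq[T], func() error) {
    return func(yield func(T) bool) {}, func() error { return err }
}

func main() {
    seq, errFn := emptySeq[int](nil)
    for v := range seq {
        fmt.Println(v) // never reached: the sequence is empty
    }
    fmt.Println(errFn()) // <nil>
}
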
func (s *DB) AllLocalBlocksWithHash(hash []byte) ([]db.BlockMapEntry, error) {
    var entries []db.BlockMapEntry
    err := s.forEachFolder(func(fdb *folderDB) error {
        es, err := itererr.Collect(fdb.AllLocalBlocksWithHash(hash))
        entries = append(entries, es...)
        return err
    })
    return entries, err
}

func (s *DB) AllLocalFilesWithBlocksHashAnyFolder(hash []byte) (map[string][]db.FileMetadata, error) {
    res := make(map[string][]db.FileMetadata)
    err := s.forEachFolder(func(fdb *folderDB) error {
        files, err := itererr.Collect(fdb.AllLocalFilesWithBlocksHash(hash))
        res[fdb.folderID] = files
        return err
    })
    return res, err
}

func (s *DB) AllLocalFiles(folder string, device protocol.DeviceID) (iter.Seq[protocol.FileInfo], func() error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return nil }
    }
    if err != nil {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return err }
    }
    return fdb.AllLocalFiles(device)
}

func (s *DB) AllLocalFilesBySequence(folder string, device protocol.DeviceID, startSeq int64, limit int) (iter.Seq[protocol.FileInfo], func() error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return nil }
    }
    if err != nil {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return err }
    }
    return fdb.AllLocalFilesBySequence(device, startSeq, limit)
}

func (s *DB) AllLocalFilesWithPrefix(folder string, device protocol.DeviceID, prefix string) (iter.Seq[protocol.FileInfo], func() error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return nil }
    }
    if err != nil {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return err }
    }
    return fdb.AllLocalFilesWithPrefix(device, prefix)
}

func (s *DB) AllLocalFilesWithBlocksHash(folder string, h []byte) (iter.Seq[db.FileMetadata], func() error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return func(yield func(db.FileMetadata) bool) {}, func() error { return nil }
    }
    if err != nil {
        return func(yield func(db.FileMetadata) bool) {}, func() error { return err }
    }
    return fdb.AllLocalFilesWithBlocksHash(h)
}

func (s *DB) AllNeededGlobalFiles(folder string, device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return nil }
    }
    if err != nil {
        return func(yield func(protocol.FileInfo) bool) {}, func() error { return err }
    }
    return fdb.AllNeededGlobalFiles(device, order, limit, offset)
}

func (s *DB) DropAllFiles(folder string, device protocol.DeviceID) error {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return nil
    }
    if err != nil {
        return err
    }
    return fdb.DropAllFiles(device)
}

func (s *DB) DropFilesNamed(folder string, device protocol.DeviceID, names []string) error {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return nil
    }
    if err != nil {
        return err
    }
    return fdb.DropFilesNamed(device, names)
}

func (s *DB) ListDevicesForFolder(folder string) ([]protocol.DeviceID, error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    return fdb.ListDevicesForFolder()
}

func (s *DB) RemoteSequences(folder string) (map[protocol.DeviceID]int64, error) {
    fdb, err := s.getFolderDB(folder, false)
    if errors.Is(err, errNoSuchFolder) {
        return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fdb.RemoteSequences()
|
||||
}
|
||||
|
||||
func (s *DB) CountGlobal(folder string) (db.Counts, error) {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if errors.Is(err, errNoSuchFolder) {
|
||||
return db.Counts{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return db.Counts{}, err
|
||||
}
|
||||
return fdb.CountGlobal()
|
||||
}
|
||||
|
||||
func (s *DB) CountLocal(folder string, device protocol.DeviceID) (db.Counts, error) {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if errors.Is(err, errNoSuchFolder) {
|
||||
return db.Counts{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return db.Counts{}, err
|
||||
}
|
||||
return fdb.CountLocal(device)
|
||||
}
|
||||
|
||||
func (s *DB) CountNeed(folder string, device protocol.DeviceID) (db.Counts, error) {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if errors.Is(err, errNoSuchFolder) {
|
||||
return db.Counts{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return db.Counts{}, err
|
||||
}
|
||||
return fdb.CountNeed(device)
|
||||
}
|
||||
|
||||
func (s *DB) CountReceiveOnlyChanged(folder string) (db.Counts, error) {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if errors.Is(err, errNoSuchFolder) {
|
||||
return db.Counts{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return db.Counts{}, err
|
||||
}
|
||||
return fdb.CountReceiveOnlyChanged()
|
||||
}
|
||||
|
||||
func (s *DB) DropAllIndexIDs() error {
|
||||
return s.forEachFolder(func(fdb *folderDB) error {
|
||||
return fdb.DropAllIndexIDs()
|
||||
})
|
||||
}
|
||||
|
||||
func (s *DB) GetIndexID(folder string, device protocol.DeviceID) (protocol.IndexID, error) {
|
||||
fdb, err := s.getFolderDB(folder, true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return fdb.GetIndexID(device)
|
||||
}
|
||||
|
||||
func (s *DB) SetIndexID(folder string, device protocol.DeviceID, id protocol.IndexID) error {
|
||||
fdb, err := s.getFolderDB(folder, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fdb.SetIndexID(device, id)
|
||||
}
|
||||
|
||||
func (s *DB) GetDeviceSequence(folder string, device protocol.DeviceID) (int64, error) {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if errors.Is(err, errNoSuchFolder) {
|
||||
return 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return fdb.GetDeviceSequence(device)
|
||||
}
|
||||
|
||||
func (s *DB) DeleteMtime(folder, name string) error {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if errors.Is(err, errNoSuchFolder) {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fdb.DeleteMtime(name)
|
||||
}
|
||||
|
||||
func (s *DB) GetMtime(folder, name string) (ondisk, virtual time.Time) {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if errors.Is(err, errNoSuchFolder) {
|
||||
return time.Time{}, time.Time{}
|
||||
}
|
||||
if err != nil {
|
||||
return time.Time{}, time.Time{}
|
||||
}
|
||||
return fdb.GetMtime(name)
|
||||
}
|
||||
|
||||
func (s *DB) PutMtime(folder, name string, ondisk, virtual time.Time) error {
|
||||
fdb, err := s.getFolderDB(folder, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fdb.PutMtime(name, ondisk, virtual)
|
||||
}
|
||||
|
||||
func (s *DB) DropDevice(device protocol.DeviceID) error {
|
||||
return s.forEachFolder(func(fdb *folderDB) error {
|
||||
return fdb.DropDevice(device)
|
||||
})
|
||||
}
|
||||
|
||||
// forEachFolder runs the function for each currently open folderDB,
|
||||
// returning the first error that was encountered.
|
||||
func (s *DB) forEachFolder(fn func(fdb *folderDB) error) error {
|
||||
folders, err := s.ListFolders()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var firstError error
|
||||
for _, folder := range folders {
|
||||
fdb, err := s.getFolderDB(folder, false)
|
||||
if err != nil {
|
||||
if firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := fn(fdb); err != nil && firstError == nil {
|
||||
firstError = err
|
||||
}
|
||||
}
|
||||
return firstError
|
||||
}
|
||||
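Every wrapper above follows one of two shapes: folder-addressed calls resolve a single folderDB (treating errNoSuchFolder as an empty result), while cross-folder queries fan out through forEachFolder. As a hedged illustration of the fan-out shape only — countSomething is a hypothetical helper, not part of this change — an aggregate over all folder databases would look like:

func (s *DB) totalSomething() (int, error) {
	// Sum a per-folder statistic across all folder databases; the first
	// error encountered is the one reported, per forEachFolder's contract.
	var total int
	err := s.forEachFolder(func(fdb *folderDB) error {
		n, err := fdb.countSomething() // hypothetical folderDB method
		total += n
		return err
	})
	return total, err
}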
@@ -13,7 +13,7 @@ import (
	"github.com/syncthing/syncthing/internal/db"
)

func (s *DB) GetKV(key string) ([]byte, error) {
func (s *baseDB) GetKV(key string) ([]byte, error) {
	var val []byte
	if err := s.stmt(`
		SELECT value FROM kv
@@ -24,7 +24,7 @@ func (s *DB) GetKV(key string) ([]byte, error) {
	return val, nil
}

func (s *DB) PutKV(key string, val []byte) error {
func (s *baseDB) PutKV(key string, val []byte) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	_, err := s.stmt(`
@@ -34,7 +34,7 @@ func (s *DB) PutKV(key string, val []byte) error {
	return wrap(err)
}

func (s *DB) DeleteKV(key string) error {
func (s *baseDB) DeleteKV(key string) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	_, err := s.stmt(`
@@ -43,7 +43,7 @@ func (s *DB) DeleteKV(key string) error {
	return wrap(err)
}

func (s *DB) PrefixKV(prefix string) (iter.Seq[db.KeyValue], func() error) {
func (s *baseDB) PrefixKV(prefix string) (iter.Seq[db.KeyValue], func() error) {
	var rows *sqlx.Rows
	var err error
	if prefix == "" {

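These KV helpers move onto baseDB, so the main database and each folder database expose the same small string-keyed blob store. A minimal usage sketch (the key name and the already-opened handle sdb are assumptions, not code from this change):

// Round-trip a value through the kv table.
if err := sdb.PutKV("miscdata.someKey", []byte("value")); err != nil {
	return err
}
val, err := sdb.GetKV("miscdata.someKey")
if err != nil {
	return err
}
fmt.Println(string(val)) // prints "value"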
@@ -9,8 +9,6 @@ package sqlite
import (
	"testing"

	"github.com/syncthing/syncthing/internal/db"
	"github.com/syncthing/syncthing/internal/itererr"
	"github.com/syncthing/syncthing/lib/protocol"
)

@@ -52,7 +50,7 @@ func TestBlocks(t *testing.T) {

	// Search for blocks

	vals, err := itererr.Collect(db.AllLocalBlocksWithHash([]byte{1, 2, 3}))
	vals, err := db.AllLocalBlocksWithHash([]byte{1, 2, 3})
	if err != nil {
		t.Fatal(err)
	}
@@ -66,27 +64,23 @@ func TestBlocks(t *testing.T) {

	// Get FileInfos for those blocks

	found := 0
	it, errFn := db.AllLocalFilesWithBlocksHashAnyFolder(vals[0].BlocklistHash)
	for folder, fileInfo := range it {
		if folder != folderID {
			t.Fatal("should be same folder")
		}
		if fileInfo.Name != "file1" {
			t.Fatal("should be file1")
		}
		found++
	}
	if err := errFn(); err != nil {
	res, err := db.AllLocalFilesWithBlocksHashAnyFolder(vals[0].BlocklistHash)
	if err != nil {
		t.Fatal(err)
	}
	if found != 1 {
	if len(res) != 1 {
		t.Fatal("should return one folder")
	}
	if len(res[folderID]) != 1 {
		t.Fatal("should find one file")
	}
	if res[folderID][0].Name != "file1" {
		t.Fatal("should be file1")
	}

	// Get the other blocks

	vals, err = itererr.Collect(db.AllLocalBlocksWithHash([]byte{3, 4, 5}))
	vals, err = db.AllLocalBlocksWithHash([]byte{3, 4, 5})
	if err != nil {
		t.Fatal(err)
	}
@@ -125,7 +119,10 @@ func TestBlocksDeleted(t *testing.T) {

	// We should find one entry for the block hash
	search := file.Blocks[0].Hash
	es := mustCollect[db.BlockMapEntry](t)(sdb.AllLocalBlocksWithHash(search))
	es, err := sdb.AllLocalBlocksWithHash(search)
	if err != nil {
		t.Fatal(err)
	}
	if len(es) != 1 {
		t.Fatal("expected one hit")
	}
@@ -137,13 +134,17 @@ func TestBlocksDeleted(t *testing.T) {
	}

	// Searching for the old hash should yield no hits
	if hits := mustCollect[db.BlockMapEntry](t)(sdb.AllLocalBlocksWithHash(search)); len(hits) != 0 {
	if hits, err := sdb.AllLocalBlocksWithHash(search); err != nil {
		t.Fatal(err)
	} else if len(hits) != 0 {
		t.Log(hits)
		t.Error("expected no hits")
	}

	// Searching for the new hash should yield one hit
	if hits := mustCollect[db.BlockMapEntry](t)(sdb.AllLocalBlocksWithHash(file.Blocks[0].Hash)); len(hits) != 1 {
	if hits, err := sdb.AllLocalBlocksWithHash(file.Blocks[0].Hash); err != nil {
		t.Fatal(err)
	} else if len(hits) != 1 {
		t.Log(hits)
		t.Error("expected one hit")
	}

@@ -1,54 +0,0 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
	"time"
)

func (s *DB) GetMtime(folder, name string) (ondisk, virtual time.Time) {
	var res struct {
		Ondisk  int64
		Virtual int64
	}
	if err := s.stmt(`
		SELECT m.ondisk, m.virtual FROM mtimes m
		INNER JOIN folders o ON o.idx = m.folder_idx
		WHERE o.folder_id = ? AND m.name = ?
	`).Get(&res, folder, name); err != nil {
		return time.Time{}, time.Time{}
	}
	return time.Unix(0, res.Ondisk), time.Unix(0, res.Virtual)
}

func (s *DB) PutMtime(folder, name string, ondisk, virtual time.Time) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	_, err = s.stmt(`
		INSERT OR REPLACE INTO mtimes (folder_idx, name, ondisk, virtual)
		VALUES (?, ?, ?, ?)
	`).Exec(folderIdx, name, ondisk.UnixNano(), virtual.UnixNano())
	return wrap(err)
}

func (s *DB) DeleteMtime(folder, name string) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	_, err = s.stmt(`
		DELETE FROM mtimes
		WHERE folder_idx = ? AND name = ?
	`).Exec(folderIdx, name)
	return wrap(err)
}
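The file above is deleted because the mtime store moves into the per-folder database; the *DB methods earlier in this diff keep the caller-facing shape. A sketch of a round trip through that API (the folder ID, file name, and sdb handle are assumed):

// Persist the real on-disk mtime alongside the "virtual" mtime that the
// rest of the system should see, then read the pair back.
ondisk := time.Now().Round(time.Second) // e.g. a filesystem-truncated time
virtual := time.Now()
if err := sdb.PutMtime("default", "some/file", ondisk, virtual); err != nil {
	return err
}
gotOndisk, gotVirtual := sdb.GetMtime("default", "some/file")
_, _ = gotOndisk, gotVirtual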
@@ -7,61 +7,108 @@
package sqlite

import (
	"database/sql"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"text/template"
	"sync"
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/syncthing/syncthing/lib/build"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/internal/db"
)

const maxDBConns = 128
const maxDBConns = 16

func Open(path string) (*DB, error) {
	// Open the database with options to enable foreign keys and recursive
	// triggers (needed for the delete+insert triggers on row replace).
	sqlDB, err := sqlx.Open(dbDriver, "file:"+path+"?"+commonOptions)
type DB struct {
	pathBase        string
	deleteRetention time.Duration

	*baseDB

	folderDBsMut   sync.RWMutex
	folderDBs      map[string]*folderDB
	folderDBOpener func(folder, path string, deleteRetention time.Duration) (*folderDB, error)
}

var _ db.DB = (*DB)(nil)

type Option func(*DB)

func WithDeleteRetention(d time.Duration) Option {
	return func(s *DB) {
		s.deleteRetention = d
	}
}

func Open(path string, opts ...Option) (*DB, error) {
	pragmas := []string{
		"journal_mode = WAL",
		"optimize = 0x10002",
		"auto_vacuum = INCREMENTAL",
		"default_temp_store = MEMORY",
		"temp_store = MEMORY",
	}
	schemas := []string{
		"sql/schema/common/*",
		"sql/schema/main/*",
	}

	os.MkdirAll(path, 0o700)
	mainPath := filepath.Join(path, "main.db")
	mainBase, err := openBase(mainPath, maxDBConns, pragmas, schemas, nil)
	if err != nil {
		return nil, wrap(err)
		return nil, err
	}
	sqlDB.SetMaxOpenConns(maxDBConns)
	if _, err := sqlDB.Exec(`PRAGMA journal_mode = WAL`); err != nil {
		return nil, wrap(err, "PRAGMA journal_mode")

	db := &DB{
		pathBase:       path,
		baseDB:         mainBase,
		folderDBs:      make(map[string]*folderDB),
		folderDBOpener: openFolderDB,
	}
	if _, err := sqlDB.Exec(`PRAGMA optimize = 0x10002`); err != nil {
		// https://www.sqlite.org/pragma.html#pragma_optimize
		return nil, wrap(err, "PRAGMA optimize")

	for _, opt := range opts {
		opt(db)
	}
	if _, err := sqlDB.Exec(`PRAGMA journal_size_limit = 6144000`); err != nil {
		// https://www.powersync.com/blog/sqlite-optimizations-for-ultra-high-performance
		return nil, wrap(err, "PRAGMA journal_size_limit")
	}
	return openCommon(sqlDB)

	return db, nil
}

// Open the database with options suitable for the migration inserts. This
// is not a safe mode of operation for normal processing, use only for bulk
// inserts with a close afterwards.
func OpenForMigration(path string) (*DB, error) {
	sqlDB, err := sqlx.Open(dbDriver, "file:"+path+"?"+commonOptions)
	pragmas := []string{
		"journal_mode = OFF",
		"default_temp_store = MEMORY",
		"temp_store = MEMORY",
		"foreign_keys = 0",
		"synchronous = 0",
		"locking_mode = EXCLUSIVE",
	}
	schemas := []string{
		"sql/schema/common/*",
		"sql/schema/main/*",
	}

	os.MkdirAll(path, 0o700)
	mainPath := filepath.Join(path, "main.db")
	mainBase, err := openBase(mainPath, 1, pragmas, schemas, nil)
	if err != nil {
		return nil, wrap(err, "open")
		return nil, err
	}
	sqlDB.SetMaxOpenConns(1)
	if _, err := sqlDB.Exec(`PRAGMA foreign_keys = 0`); err != nil {
		return nil, wrap(err, "PRAGMA foreign_keys")

	db := &DB{
		pathBase:       path,
		baseDB:         mainBase,
		folderDBs:      make(map[string]*folderDB),
		folderDBOpener: openFolderDBForMigration,
	}
	if _, err := sqlDB.Exec(`PRAGMA journal_mode = OFF`); err != nil {
		return nil, wrap(err, "PRAGMA journal_mode")
	}
	if _, err := sqlDB.Exec(`PRAGMA synchronous = 0`); err != nil {
		return nil, wrap(err, "PRAGMA synchronous")
	}
	return openCommon(sqlDB)

	// // Touch device IDs that should always exist, have low index
	// // numbers, and will never change
	// db.localDeviceIdx, _ = db.deviceIdxLocked(protocol.LocalDeviceID)
	// db.tplInput["LocalDeviceIdx"] = db.localDeviceIdx

	return db, nil
}

func OpenTemp() (*DB, error) {
@@ -77,127 +124,12 @@ func OpenTemp() (*DB, error) {
	return Open(path)
}

func openCommon(sqlDB *sqlx.DB) (*DB, error) {
	if _, err := sqlDB.Exec(`PRAGMA auto_vacuum = INCREMENTAL`); err != nil {
		return nil, wrap(err, "PRAGMA auto_vacuum")
func (s *DB) Close() error {
	s.folderDBsMut.Lock()
	defer s.folderDBsMut.Unlock()
	for folder, fdb := range s.folderDBs {
		fdb.Close()
		delete(s.folderDBs, folder)
	}
	if _, err := sqlDB.Exec(`PRAGMA default_temp_store = MEMORY`); err != nil {
		return nil, wrap(err, "PRAGMA default_temp_store")
	}
	if _, err := sqlDB.Exec(`PRAGMA temp_store = MEMORY`); err != nil {
		return nil, wrap(err, "PRAGMA temp_store")
	}

	db := &DB{
		sql:        sqlDB,
		statements: make(map[string]*sqlx.Stmt),
	}

	if err := db.runScripts("sql/schema/*"); err != nil {
		return nil, wrap(err)
	}

	ver, _ := db.getAppliedSchemaVersion()
	if ver.SchemaVersion > 0 {
		filter := func(scr string) bool {
			scr = filepath.Base(scr)
			nstr, _, ok := strings.Cut(scr, "-")
			if !ok {
				return false
			}
			n, err := strconv.ParseInt(nstr, 10, 32)
			if err != nil {
				return false
			}
			return int(n) > ver.SchemaVersion
		}
		if err := db.runScripts("sql/migrations/*", filter); err != nil {
			return nil, wrap(err)
		}
	}

	// Touch device IDs that should always exist, have low index
	// numbers, and will never change
	db.localDeviceIdx, _ = db.deviceIdxLocked(protocol.LocalDeviceID)

	// Set the current schema version, if not already set
	if err := db.setAppliedSchemaVersion(currentSchemaVersion); err != nil {
		return nil, wrap(err)
	}

	db.tplInput = map[string]any{
		"FlagLocalUnsupported": protocol.FlagLocalUnsupported,
		"FlagLocalIgnored":     protocol.FlagLocalIgnored,
		"FlagLocalMustRescan":  protocol.FlagLocalMustRescan,
		"FlagLocalReceiveOnly": protocol.FlagLocalReceiveOnly,
		"FlagLocalGlobal":      protocol.FlagLocalGlobal,
		"FlagLocalNeeded":      protocol.FlagLocalNeeded,
		"LocalDeviceIdx":       db.localDeviceIdx,
		"SyncthingVersion":     build.LongVersion,
	}

	return db, nil
	return wrap(s.baseDB.Close())
}

var tplFuncs = template.FuncMap{
	"or": func(vs ...int) int {
		v := vs[0]
		for _, ov := range vs[1:] {
			v |= ov
		}
		return v
	},
}

// stmt returns a prepared statement for the given SQL string, after
// applying local template expansions. The statement is cached.
func (s *DB) stmt(tpl string) stmt {
	tpl = strings.TrimSpace(tpl)

	// Fast concurrent lookup of cached statement
	s.statementsMut.RLock()
	stmt, ok := s.statements[tpl]
	s.statementsMut.RUnlock()
	if ok {
		return stmt
	}

	// On miss, take the full lock, check again
	s.statementsMut.Lock()
	defer s.statementsMut.Unlock()
	stmt, ok = s.statements[tpl]
	if ok {
		return stmt
	}

	// Apply template expansions
	var sb strings.Builder
	compTpl := template.Must(template.New("tpl").Funcs(tplFuncs).Parse(tpl))
	if err := compTpl.Execute(&sb, s.tplInput); err != nil {
		panic("bug: bad template: " + err.Error())
	}

	// Prepare and cache
	stmt, err := s.sql.Preparex(sb.String())
	if err != nil {
		return failedStmt{err}
	}
	s.statements[tpl] = stmt
	return stmt
}

type stmt interface {
	Exec(args ...any) (sql.Result, error)
	Get(dest any, args ...any) error
	Queryx(args ...any) (*sqlx.Rows, error)
	Select(dest any, args ...any) error
}

type failedStmt struct {
	err error
}

func (f failedStmt) Exec(_ ...any) (sql.Result, error)   { return nil, f.err }
func (f failedStmt) Get(_ any, _ ...any) error           { return f.err }
func (f failedStmt) Queryx(_ ...any) (*sqlx.Rows, error) { return nil, f.err }
func (f failedStmt) Select(_ any, _ ...any) error        { return f.err }

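Open is now a functional-options constructor: pragmas and schema scripts are handled by openBase, and per-call behavior such as delete retention is opted into explicitly. A usage sketch (the path and retention period are arbitrary examples):

// Delete retention of 30 days; omitting the option keeps the zero value,
// which the GC code below treats as infinite retention.
sdb, err := Open("/var/syncthing/index-v2", WithDeleteRetention(30*24*time.Hour))
if err != nil {
	return err
}
defer sdb.Close()

The stmt helper above uses the classic double-checked cache: a read-locked fast path, then a write-locked re-check before preparing. The same pattern in isolation, as a sketch with assumed types rather than code from this change:

type stmtCache struct {
	db    *sqlx.DB
	mut   sync.RWMutex
	stmts map[string]*sqlx.Stmt
}

func (c *stmtCache) get(query string) (*sqlx.Stmt, error) {
	c.mut.RLock()
	st, ok := c.stmts[query]
	c.mut.RUnlock()
	if ok {
		return st, nil // fast path: shared lock only
	}
	c.mut.Lock()
	defer c.mut.Unlock()
	if st, ok := c.stmts[query]; ok {
		return st, nil // another goroutine prepared it meanwhile
	}
	st, err := c.db.Preparex(query)
	if err != nil {
		return nil, err
	}
	c.stmts[query] = st
	return st, nil
}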
@@ -1,88 +0,0 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
	"embed"
	"io/fs"
	"strings"
	"time"

	"github.com/syncthing/syncthing/lib/build"
)

const currentSchemaVersion = 1

//go:embed sql/**
var embedded embed.FS

func (s *DB) runScripts(glob string, filter ...func(s string) bool) error {
	scripts, err := fs.Glob(embedded, glob)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.Begin()
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck

nextScript:
	for _, scr := range scripts {
		for _, fn := range filter {
			if !fn(scr) {
				l.Debugln("Skipping script", scr)
				continue nextScript
			}
		}
		l.Debugln("Executing script", scr)
		bs, err := fs.ReadFile(embedded, scr)
		if err != nil {
			return wrap(err, scr)
		}
		// SQLite requires one statement per exec, so we split the init
		// files on lines containing only a semicolon and execute them
		// separately. We require it on a separate line because there are
		// also statement-internal semicolons in the triggers.
		for _, stmt := range strings.Split(string(bs), "\n;") {
			if _, err := tx.Exec(stmt); err != nil {
				return wrap(err, stmt)
			}
		}
	}

	return wrap(tx.Commit())
}

type schemaVersion struct {
	SchemaVersion    int
	AppliedAt        int64
	SyncthingVersion string
}

func (s *schemaVersion) AppliedTime() time.Time {
	return time.Unix(0, s.AppliedAt)
}

func (s *DB) setAppliedSchemaVersion(ver int) error {
	_, err := s.stmt(`
		INSERT OR IGNORE INTO schemamigrations (schema_version, applied_at, syncthing_version)
		VALUES (?, ?, ?)
	`).Exec(ver, time.Now().UnixNano(), build.LongVersion)
	return wrap(err)
}

func (s *DB) getAppliedSchemaVersion() (schemaVersion, error) {
	var v schemaVersion
	err := s.stmt(`
		SELECT schema_version as schemaversion, applied_at as appliedat, syncthing_version as syncthingversion FROM schemamigrations
		ORDER BY schema_version DESC
		LIMIT 1
	`).Get(&v)
	return v, wrap(err)
}
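The statement-splitting rule described in the deleted runScripts presumably moves with the rest of the schema handling into openBase. In isolation the rule is simply: statements are separated by a semicolon alone on its line, so semicolons inside trigger bodies survive. A sketch, with script and tx assumed:

for _, stmt := range strings.Split(script, "\n;") {
	// Each chunk is one SQL statement; SQLite's Exec takes one at a time.
	if _, err := tx.Exec(stmt); err != nil {
		return err
	}
}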
@@ -8,22 +8,35 @@ package sqlite

import (
	"context"
	"fmt"
	"time"

	"github.com/jmoiron/sqlx"
	"github.com/syncthing/syncthing/internal/db"
	"github.com/thejerf/suture/v4"
)

const (
	internalMetaPrefix = "dbsvc"
	lastMaintKey = "lastMaint"
	internalMetaPrefix     = "dbsvc"
	lastMaintKey           = "lastMaint"
	defaultDeleteRetention = 180 * 24 * time.Hour
	minDeleteRetention     = 24 * time.Hour
)

func (s *DB) Service(maintenanceInterval time.Duration) suture.Service {
	return newService(s, maintenanceInterval)
}

type Service struct {
	sdb                 *DB
	maintenanceInterval time.Duration
	internalMeta        *db.Typed
}

func (s *Service) String() string {
	return fmt.Sprintf("sqlite.service@%p", s)
}

func newService(sdb *DB, maintenanceInterval time.Duration) *Service {
	return &Service{
		sdb: sdb,
@@ -73,19 +86,60 @@ func (s *Service) periodic(ctx context.Context) error {
	t1 := time.Now()
	defer func() { l.Debugln("Periodic done in", time.Since(t1), "+", t1.Sub(t0)) }()

	if err := s.garbageCollectBlocklistsAndBlocksLocked(ctx); err != nil {
	tidy(ctx, s.sdb.sql)

	return wrap(s.sdb.forEachFolder(func(fdb *folderDB) error {
		fdb.updateLock.Lock()
		defer fdb.updateLock.Unlock()

		if err := garbageCollectOldDeletedLocked(fdb); err != nil {
			return wrap(err)
		}
		if err := garbageCollectBlocklistsAndBlocksLocked(ctx, fdb); err != nil {
			return wrap(err)
		}
		tidy(ctx, fdb.sql)
		return nil
	}))
}

func tidy(ctx context.Context, db *sqlx.DB) error {
	conn, err := db.Conn(ctx)
	if err != nil {
		return wrap(err)
	}

	_, _ = s.sdb.sql.ExecContext(ctx, `ANALYZE`)
	_, _ = s.sdb.sql.ExecContext(ctx, `PRAGMA optimize`)
	_, _ = s.sdb.sql.ExecContext(ctx, `PRAGMA incremental_vacuum`)
	_, _ = s.sdb.sql.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`)

	defer conn.Close()
	_, _ = conn.ExecContext(ctx, `ANALYZE`)
	_, _ = conn.ExecContext(ctx, `PRAGMA optimize`)
	_, _ = conn.ExecContext(ctx, `PRAGMA incremental_vacuum`)
	_, _ = conn.ExecContext(ctx, `PRAGMA journal_size_limit = 8388608`)
	_, _ = conn.ExecContext(ctx, `PRAGMA wal_checkpoint(TRUNCATE)`)
	return nil
}

func (s *Service) garbageCollectBlocklistsAndBlocksLocked(ctx context.Context) error {
func garbageCollectOldDeletedLocked(fdb *folderDB) error {
	if fdb.deleteRetention <= 0 {
		l.Debugln(fdb.baseName, "delete retention is infinite, skipping cleanup")
		return nil
	}

	// Remove deleted files that are marked as not needed (we have processed
	// them) and were deleted more than the delete retention period ago.
	l.Debugln(fdb.baseName, "forgetting deleted files older than", fdb.deleteRetention)
	res, err := fdb.stmt(`
		DELETE FROM files
		WHERE deleted AND modified < ? AND local_flags & {{.FlagLocalNeeded}} == 0
	`).Exec(time.Now().Add(-fdb.deleteRetention).UnixNano())
	if err != nil {
		return wrap(err)
	}
	if aff, err := res.RowsAffected(); err == nil {
		l.Debugln(fdb.baseName, "removed old deleted file records:", aff)
	}
	return nil
}

func garbageCollectBlocklistsAndBlocksLocked(ctx context.Context, fdb *folderDB) error {
	// Remove all blocklists not referred to by any files and, by extension,
	// any blocks not referred to by a blocklist. This is an expensive
	// operation when run normally, especially if there are a lot of blocks
@@ -96,7 +150,7 @@ func (s *Service) garbageCollectBlocklistsAndBlocksLocked(ctx context.Context) e
	// an explicit connection and disabling foreign keys before starting the
	// transaction. We make sure to clean up on the way out.

	conn, err := s.sdb.sql.Connx(ctx)
	conn, err := fdb.sql.Connx(ctx)
	if err != nil {
		return wrap(err)
	}
@@ -123,7 +177,7 @@ func (s *Service) garbageCollectBlocklistsAndBlocksLocked(ctx context.Context) e
		return wrap(err, "delete blocklists")
	} else if shouldDebug() {
		rows, err := res.RowsAffected()
		l.Debugln("Blocklist GC:", rows, err)
		l.Debugln(fdb.baseName, "blocklist GC:", rows, err)
	}

	if res, err := tx.ExecContext(ctx, `
@@ -134,7 +188,7 @@ func (s *Service) garbageCollectBlocklistsAndBlocksLocked(ctx context.Context) e
		return wrap(err, "delete blocks")
	} else if shouldDebug() {
		rows, err := res.RowsAffected()
		l.Debugln("Blocks GC:", rows, err)
		l.Debugln(fdb.baseName, "blocks GC:", rows, err)
	}

	return wrap(tx.Commit())

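DB.Service returns an ordinary suture service, so the periodic maintenance loop is wired into an application supervisor in one line. A sketch (supervisor name and interval are arbitrary):

sup := suture.NewSimple("main")
sup.Add(sdb.Service(8 * time.Hour)) // runs tidy + per-folder GC periodically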
@@ -298,6 +298,7 @@ func TestBasics(t *testing.T) {
			t.Fatal(err)
		}
		if len(folders) != 1 || folders[0] != folderID {
			t.Log(folders)
			t.Error("expected one folder")
		}
	})
@@ -1009,15 +1010,20 @@ func TestBlocklistGarbageCollection(t *testing.T) {

	// There should exist three blocklists and six blocks

	fdb, err := sdb.getFolderDB(folderID, false)
	if err != nil {
		t.Fatal(err)
	}

	var count int
	if err := sdb.sql.Get(&count, `SELECT count(*) FROM blocklists`); err != nil {
	if err := fdb.sql.Get(&count, `SELECT count(*) FROM blocklists`); err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Log(count)
		t.Fatal("expected 3 blocklists")
	}
	if err := sdb.sql.Get(&count, `SELECT count(*) FROM blocks`); err != nil {
	if err := fdb.sql.Get(&count, `SELECT count(*) FROM blocks`); err != nil {
		t.Fatal(err)
	}
	if count != 6 {
@@ -1039,14 +1045,14 @@ func TestBlocklistGarbageCollection(t *testing.T) {

	// There should exist two blocklists and four blocks

	if err := sdb.sql.Get(&count, `SELECT count(*) FROM blocklists`); err != nil {
	if err := fdb.sql.Get(&count, `SELECT count(*) FROM blocklists`); err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Log(count)
		t.Error("expected 2 blocklists")
	}
	if err := sdb.sql.Get(&count, `SELECT count(*) FROM blocks`); err != nil {
	if err := fdb.sql.Get(&count, `SELECT count(*) FROM blocks`); err != nil {
		t.Fatal(err)
	}
	if count != 3 {
@@ -1055,6 +1061,39 @@ func TestBlocklistGarbageCollection(t *testing.T) {
	}
}

func TestInsertLargeFile(t *testing.T) {
	t.Parallel()

	sdb, err := OpenTemp()
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		if err := sdb.Close(); err != nil {
			t.Fatal(err)
		}
	})

	// Add a large file (many blocks)

	files := []protocol.FileInfo{genFile("test1", 16000, 1)}
	if err := sdb.Update(folderID, protocol.LocalDeviceID, files); err != nil {
		t.Fatal(err)
	}

	// Verify all the blocks are here

	for i, block := range files[0].Blocks {
		bs, err := sdb.AllLocalBlocksWithHash(block.Hash)
		if err != nil {
			t.Fatal(err)
		}
		if len(bs) == 0 {
			t.Error("missing blocks for", i)
		}
	}
}

func TestErrorWrap(t *testing.T) {
	if wrap(nil, "foo") != nil {
		t.Fatal("nil should wrap to nil")

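The Update implementation in the next hunk prepares its insert statements once per transaction and reuses them for the whole batch, which is what the txPreparedStmts wrapper exists for. Reduced to its essence (example table and data; not code from this change):

tx, err := sqlDB.Beginx()
if err != nil {
	return err
}
defer tx.Rollback() //nolint:errcheck
ins, err := tx.Preparex(`INSERT INTO example (name) VALUES (?)`)
if err != nil {
	return err
}
for _, name := range names {
	// Prepared once, executed many times: SQL parsing cost is
	// amortized across the batch.
	if _, err := ins.Exec(name); err != nil {
		return err
	}
}
return tx.Commit()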
@@ -7,458 +7,38 @@
package sqlite

import (
	"cmp"
	"context"
	"fmt"
	"os"
	"runtime"
	"slices"
	"strings"

	"github.com/jmoiron/sqlx"
	"github.com/syncthing/syncthing/internal/gen/dbproto"
	"github.com/syncthing/syncthing/internal/itererr"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sliceutil"
	"google.golang.org/protobuf/proto"
)

func (s *DB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	//nolint:sqlclosecheck
	insertFileStmt, err := txp.Preparex(`
		INSERT OR REPLACE INTO files (folder_idx, device_idx, remote_sequence, name, type, modified, size, version, deleted, invalid, local_flags, blocklist_hash)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
		RETURNING sequence
	`)
	if err != nil {
		return wrap(err, "prepare insert file")
	}

	//nolint:sqlclosecheck
	insertFileInfoStmt, err := txp.Preparex(`
		INSERT INTO fileinfos (sequence, fiprotobuf)
		VALUES (?, ?)
	`)
	if err != nil {
		return wrap(err, "prepare insert fileinfo")
	}

	//nolint:sqlclosecheck
	insertBlockListStmt, err := txp.Preparex(`
		INSERT OR IGNORE INTO blocklists (blocklist_hash, blprotobuf)
		VALUES (?, ?)
	`)
	if err != nil {
		return wrap(err, "prepare insert blocklist")
	}

	var prevRemoteSeq int64
	for i, f := range fs {
		f.Name = osutil.NormalizedFilename(f.Name)

		var blockshash *[]byte
		if len(f.Blocks) > 0 {
			f.BlocksHash = protocol.BlocksHash(f.Blocks)
			blockshash = &f.BlocksHash
		} else {
			f.BlocksHash = nil
		}

		if f.Type == protocol.FileInfoTypeDirectory {
			f.Size = 128 // synthetic directory size
		}

		// Insert the file.
		//
		// If it is a remote file, set remote_sequence, otherwise leave it
		// at null. Returns the new local sequence.
		var remoteSeq *int64
		if device != protocol.LocalDeviceID {
			if i > 0 && f.Sequence == prevRemoteSeq {
				return fmt.Errorf("duplicate remote sequence number %d", prevRemoteSeq)
			}
			prevRemoteSeq = f.Sequence
			remoteSeq = &f.Sequence
		}
		var localSeq int64
		if err := insertFileStmt.Get(&localSeq, folderIdx, deviceIdx, remoteSeq, f.Name, f.Type, f.ModTime().UnixNano(), f.Size, f.Version.String(), f.IsDeleted(), f.IsInvalid(), f.LocalFlags, blockshash); err != nil {
			return wrap(err, "insert file")
		}

		if len(f.Blocks) > 0 {
			// Indirect the block list
			blocks := sliceutil.Map(f.Blocks, protocol.BlockInfo.ToWire)
			bs, err := proto.Marshal(&dbproto.BlockList{Blocks: blocks})
			if err != nil {
				return wrap(err, "marshal blocklist")
			}
			if _, err := insertBlockListStmt.Exec(f.BlocksHash, bs); err != nil {
				return wrap(err, "insert blocklist")
			}

			if device == protocol.LocalDeviceID {
				// Insert all blocks
				if err := s.insertBlocksLocked(txp, f.BlocksHash, f.Blocks); err != nil {
					return wrap(err, "insert blocks")
				}
			}

			f.Blocks = nil
		}

		// Insert the fileinfo
		if device == protocol.LocalDeviceID {
			f.Sequence = localSeq
		}
		bs, err := proto.Marshal(f.ToWire(true))
		if err != nil {
			return wrap(err, "marshal fileinfo")
		}
		if _, err := insertFileInfoStmt.Exec(localSeq, bs); err != nil {
			return wrap(err, "insert fileinfo")
		}

		// Update global and need
		if err := s.recalcGlobalForFile(txp, folderIdx, f.Name); err != nil {
			return wrap(err)
		}
	}

	return wrap(tx.Commit())
}

func (s *DB) DropFolder(folder string) error {
	s.folderDBsMut.Lock()
	defer s.folderDBsMut.Unlock()
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	_, err := s.stmt(`
		DELETE FROM folders
		WHERE folder_id = ?
	`).Exec(folder)
	if fdb, ok := s.folderDBs[folder]; ok {
		fdb.Close()
		_ = os.Remove(fdb.path)
		_ = os.Remove(fdb.path + "-wal")
		_ = os.Remove(fdb.path + "-shm")
		delete(s.folderDBs, folder)
	}
	return wrap(err)
}

func (s *DB) DropDevice(device protocol.DeviceID) error {
	if device == protocol.LocalDeviceID {
		panic("bug: cannot drop local device")
	}

	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	// Find all folders where the device is involved
	var folderIdxs []int64
	if err := tx.Select(&folderIdxs, `
		SELECT folder_idx
		FROM counts
		WHERE device_idx = ? AND count > 0
		GROUP BY folder_idx
	`, deviceIdx); err != nil {
		return wrap(err)
	}

	// Drop the device, which cascades to delete all files etc for it
	if _, err := tx.Exec(`DELETE FROM devices WHERE device_id = ?`, device.String()); err != nil {
		return wrap(err)
	}

	// Recalc the globals for all affected folders
	for _, idx := range folderIdxs {
		if err := s.recalcGlobalForFolder(txp, idx); err != nil {
			return wrap(err)
		}
	}

	return wrap(tx.Commit())
}

func (s *DB) DropAllFiles(folder string, device protocol.DeviceID) error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	// This is a two part operation, first dropping all the files and then
	// recalculating the global state for the entire folder.

	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	// Drop all the file entries

	result, err := tx.Exec(`
		DELETE FROM files
		WHERE folder_idx = ? AND device_idx = ?
	`, folderIdx, deviceIdx)
	if err != nil {
		return wrap(err)
	}
	if n, err := result.RowsAffected(); err == nil && n == 0 {
		// The delete affected no rows, so we don't need to redo the entire
		// global/need calculation.
		return wrap(tx.Commit())
	}

	// Recalc global for the entire folder

	if err := s.recalcGlobalForFolder(txp, folderIdx); err != nil {
		return wrap(err)
	}
	return wrap(tx.Commit())
}

func (s *DB) DropFilesNamed(folder string, device protocol.DeviceID, names []string) error {
	for i := range names {
		names[i] = osutil.NormalizedFilename(names[i])
	}

	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	folderIdx, err := s.folderIdxLocked(folder)
	if err != nil {
		return wrap(err)
	}
	deviceIdx, err := s.deviceIdxLocked(device)
	if err != nil {
		return wrap(err)
	}

	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	defer tx.Rollback() //nolint:errcheck
	txp := &txPreparedStmts{Tx: tx}

	// Drop the named files

	query, args, err := sqlx.In(`
		DELETE FROM files
		WHERE folder_idx = ? AND device_idx = ? AND name IN (?)
	`, folderIdx, deviceIdx, names)
	if err != nil {
		return wrap(err)
	}
	if _, err := tx.Exec(query, args...); err != nil {
		return wrap(err)
	}

	// Recalc globals for the named files

	for _, name := range names {
		if err := s.recalcGlobalForFile(txp, folderIdx, name); err != nil {
			return wrap(err)
		}
	}

	return wrap(tx.Commit())
}

func (*DB) insertBlocksLocked(tx *txPreparedStmts, blocklistHash []byte, blocks []protocol.BlockInfo) error {
	if len(blocks) == 0 {
		return nil
	}
	bs := make([]map[string]any, len(blocks))
	for i, b := range blocks {
		bs[i] = map[string]any{
			"hash":           b.Hash,
			"blocklist_hash": blocklistHash,
			"idx":            i,
			"offset":         b.Offset,
			"size":           b.Size,
		}
	}
	_, err := tx.NamedExec(`
		INSERT OR IGNORE INTO blocks (hash, blocklist_hash, idx, offset, size)
		VALUES (:hash, :blocklist_hash, :idx, :offset, :size)
	`, bs)
	return wrap(err)
}

func (s *DB) recalcGlobalForFolder(txp *txPreparedStmts, folderIdx int64) error {
	// Select files where there is no global, those are the ones we need to
	// recalculate.
	//nolint:sqlclosecheck
	namesStmt, err := txp.Preparex(`
		SELECT f.name FROM files f
		WHERE f.folder_idx = ? AND NOT EXISTS (
			SELECT 1 FROM files g
			WHERE g.folder_idx = ? AND g.name = f.name AND g.local_flags & ? != 0
		)
		GROUP BY name
	`)
	if err != nil {
		return wrap(err)
	}
	rows, err := namesStmt.Queryx(folderIdx, folderIdx, protocol.FlagLocalGlobal)
	if err != nil {
		return wrap(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return wrap(err)
		}
		if err := s.recalcGlobalForFile(txp, folderIdx, name); err != nil {
			return wrap(err)
		}
	}
	return wrap(rows.Err())
}

func (s *DB) recalcGlobalForFile(txp *txPreparedStmts, folderIdx int64, file string) error {
	//nolint:sqlclosecheck
	selStmt, err := txp.Preparex(`
		SELECT name, folder_idx, device_idx, sequence, modified, version, deleted, invalid, local_flags FROM files
		WHERE folder_idx = ? AND name = ?
	`)
	if err != nil {
		return wrap(err)
	}
	es, err := itererr.Collect(iterStructs[fileRow](selStmt.Queryx(folderIdx, file)))
	if err != nil {
		return wrap(err)
	}
	if len(es) == 0 {
		// shouldn't happen
		return nil
	}

	// Sort the entries; the global entry is at the head of the list
	slices.SortFunc(es, fileRow.Compare)

	// The global version is the first one in the list that is not invalid,
	// or just the first one in the list if all are invalid.
	var global fileRow
	globIdx := slices.IndexFunc(es, func(e fileRow) bool { return !e.Invalid })
	if globIdx < 0 {
		globIdx = 0
	}
	global = es[globIdx]

	// We "have" the file if the position in the list of versions is at the
	// global version or better, or if the version is the same as the global
	// file (we might be further down the list due to invalid flags), or if
	// the global is deleted and we don't have it at all...
	localIdx := slices.IndexFunc(es, func(e fileRow) bool { return e.DeviceIdx == s.localDeviceIdx })
	hasLocal := localIdx >= 0 && localIdx <= globIdx || // have a better or equal version
		localIdx >= 0 && es[localIdx].Version.Equal(global.Version.Vector) || // have an equal version but invalid/ignored
		localIdx < 0 && global.Deleted // missing it, but the global is also deleted

	// Set the global flag on the global entry. Set the need flag if the
	// local device needs this file, unless it's invalid.
	global.LocalFlags |= protocol.FlagLocalGlobal
	if hasLocal || global.Invalid {
		global.LocalFlags &= ^protocol.FlagLocalNeeded
	} else {
		global.LocalFlags |= protocol.FlagLocalNeeded
	}
	//nolint:sqlclosecheck
	upStmt, err := txp.Prepare(`
		UPDATE files SET local_flags = ?
		WHERE folder_idx = ? AND device_idx = ? AND sequence = ?
	`)
	if err != nil {
		return wrap(err)
	}
	if _, err := upStmt.Exec(global.LocalFlags, global.FolderIdx, global.DeviceIdx, global.Sequence); err != nil {
		return wrap(err)
	}

	// Clear the need and global flags on all other entries
	//nolint:sqlclosecheck
	upStmt, err = txp.Prepare(`
		UPDATE files SET local_flags = local_flags & ?
		WHERE folder_idx = ? AND name = ? AND sequence != ? AND local_flags & ? != 0
	`)
	if err != nil {
		return wrap(err)
	}
	if _, err := upStmt.Exec(^(protocol.FlagLocalNeeded | protocol.FlagLocalGlobal), folderIdx, global.Name, global.Sequence, protocol.FlagLocalNeeded|protocol.FlagLocalGlobal); err != nil {
		return wrap(err)
	}

	return nil
}

func (s *DB) folderIdxLocked(folderID string) (int64, error) {
	if _, err := s.stmt(`
		INSERT OR IGNORE INTO folders(folder_id)
		VALUES (?)
	`).Exec(folderID); err != nil {
		return 0, wrap(err)
	}
	var idx int64
	if err := s.stmt(`
		SELECT idx FROM folders
		WHERE folder_id = ?
	`).Get(&idx, folderID); err != nil {
		return 0, wrap(err)
	}

	return idx, nil
}

func (s *DB) deviceIdxLocked(deviceID protocol.DeviceID) (int64, error) {
	devStr := deviceID.String()
	if _, err := s.stmt(`
		INSERT OR IGNORE INTO devices(device_id)
		VALUES (?)
	`).Exec(devStr); err != nil {
		return 0, wrap(err)
	}
	var idx int64
	if err := s.stmt(`
		SELECT idx FROM devices
		WHERE device_id = ?
	`).Get(&idx, devStr); err != nil {
		return 0, wrap(err)
	}

	return idx, nil
}

func (s *DB) ListFolders() ([]string, error) {
	var res []string
	err := s.stmt(`
		SELECT folder_id FROM folders
		ORDER BY folder_id
	`).Select(&res)
	return res, wrap(err)
}

// wrap returns the error wrapped with the calling function name and
@@ -488,62 +68,3 @@ func wrap(err error, context ...string) error {

	return fmt.Errorf("%s: %w", prefix, err)
}

type fileRow struct {
	Name       string
	Version    dbVector
	FolderIdx  int64 `db:"folder_idx"`
	DeviceIdx  int64 `db:"device_idx"`
	Sequence   int64
	Modified   int64
	Size       int64
	LocalFlags int64 `db:"local_flags"`
	Deleted    bool
	Invalid    bool
}

func (e fileRow) Compare(other fileRow) int {
	// From FileInfo.WinsConflict
	vc := e.Version.Vector.Compare(other.Version.Vector)
	switch vc {
	case protocol.Equal:
		if e.Invalid != other.Invalid {
			if e.Invalid {
				return 1
			}
			return -1
		}

		// Compare the device ID index, lower is better. This is only
		// deterministic to the extent that LocalDeviceID will always be the
		// lowest one, order between remote devices is random (and
		// irrelevant).
		return cmp.Compare(e.DeviceIdx, other.DeviceIdx)
	case protocol.Greater: // we are newer
		return -1
	case protocol.Lesser: // we are older
		return 1
	case protocol.ConcurrentGreater, protocol.ConcurrentLesser: // there is a conflict
		if e.Invalid != other.Invalid {
			if e.Invalid { // we are invalid, we lose
				return 1
			}
			return -1 // they are invalid, we win
		}
		if e.Deleted != other.Deleted {
			if e.Deleted { // we are deleted, we lose
				return 1
			}
			return -1 // they are deleted, we win
		}
		if d := cmp.Compare(e.Modified, other.Modified); d != 0 {
			return -d // positive d means we were newer, so we win (negative return)
		}
		if vc == protocol.ConcurrentGreater {
			return -1 // we have a better device ID, we win
		}
		return 1 // they win
	default:
		return 0
	}
}
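Compare is shaped for slices.SortFunc so that, after sorting, the winning candidate sits at index 0 — recalcGlobalForFile above depends on exactly that. A sketch (rows is an assumed []fileRow):

slices.SortFunc(rows, fileRow.Compare)
winner := rows[0] // valid beats invalid, newer beats older, deletions lose conflicts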
@@ -19,95 +19,89 @@ type countsRow struct {
	LocalFlags int64 `db:"local_flags"`
}

func (s *DB) CountLocal(folder string, device protocol.DeviceID) (db.Counts, error) {
func (s *folderDB) CountLocal(device protocol.DeviceID) (db.Counts, error) {
	var res []countsRow
	if err := s.stmt(`
		SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
		INNER JOIN folders o ON o.idx = s.folder_idx
		INNER JOIN devices d ON d.idx = s.device_idx
		WHERE o.folder_id = ? AND d.device_id = ? AND s.local_flags & {{.FlagLocalIgnored}} = 0
	`).Select(&res, folder, device.String()); err != nil {
		WHERE d.device_id = ? AND s.local_flags & {{.FlagLocalIgnored}} = 0
	`).Select(&res, device.String()); err != nil {
		return db.Counts{}, wrap(err)
	}
	return summarizeCounts(res), nil
}

func (s *DB) CountNeed(folder string, device protocol.DeviceID) (db.Counts, error) {
func (s *folderDB) CountNeed(device protocol.DeviceID) (db.Counts, error) {
	if device == protocol.LocalDeviceID {
		return s.needSizeLocal(folder)
		return s.needSizeLocal()
	}
	return s.needSizeRemote(folder, device)
	return s.needSizeRemote(device)
}

func (s *DB) CountGlobal(folder string) (db.Counts, error) {
func (s *folderDB) CountGlobal() (db.Counts, error) {
	// Exclude ignored and receive-only changed files from the global count
	// (legacy expectation? it's a bit weird since those files can in fact
	// be global and you can get them with GetGlobal etc.)
	var res []countsRow
	err := s.stmt(`
		SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
		INNER JOIN folders o ON o.idx = s.folder_idx
		WHERE o.folder_id = ? AND s.local_flags & {{.FlagLocalGlobal}} != 0 AND s.local_flags & {{or .FlagLocalReceiveOnly .FlagLocalIgnored}} = 0
	`).Select(&res, folder)
		WHERE s.local_flags & {{.FlagLocalGlobal}} != 0 AND s.local_flags & {{or .FlagLocalReceiveOnly .FlagLocalIgnored}} = 0
	`).Select(&res)
	if err != nil {
		return db.Counts{}, wrap(err)
	}
	return summarizeCounts(res), nil
}

func (s *DB) CountReceiveOnlyChanged(folder string) (db.Counts, error) {
func (s *folderDB) CountReceiveOnlyChanged() (db.Counts, error) {
	var res []countsRow
	err := s.stmt(`
		SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
		INNER JOIN folders o ON o.idx = s.folder_idx
		WHERE o.folder_id = ? AND local_flags & {{.FlagLocalReceiveOnly}} != 0
	`).Select(&res, folder)
		WHERE local_flags & {{.FlagLocalReceiveOnly}} != 0
	`).Select(&res)
	if err != nil {
		return db.Counts{}, wrap(err)
	}
	return summarizeCounts(res), nil
}

func (s *DB) needSizeLocal(folder string) (db.Counts, error) {
func (s *folderDB) needSizeLocal() (db.Counts, error) {
	// The need size for the local device is the sum of entries with the
	// need bit set.
	var res []countsRow
	err := s.stmt(`
		SELECT s.type, s.count, s.size, s.local_flags, s.deleted FROM counts s
		INNER JOIN folders o ON o.idx = s.folder_idx
		WHERE o.folder_id = ? AND s.local_flags & {{.FlagLocalNeeded}} != 0
	`).Select(&res, folder)
		WHERE s.local_flags & {{.FlagLocalNeeded}} != 0
	`).Select(&res)
	if err != nil {
		return db.Counts{}, wrap(err)
	}
	return summarizeCounts(res), nil
}

func (s *DB) needSizeRemote(folder string, device protocol.DeviceID) (db.Counts, error) {
func (s *folderDB) needSizeRemote(device protocol.DeviceID) (db.Counts, error) {
	var res []countsRow
	// See neededGlobalFilesRemote for commentary as that is the same query without summing
	if err := s.stmt(`
		SELECT g.type, count(*) as count, sum(g.size) as size, g.local_flags, g.deleted FROM files g
		INNER JOIN folders o ON o.idx = g.folder_idx
		WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND NOT g.deleted AND NOT g.invalid AND NOT EXISTS (
		WHERE g.local_flags & {{.FlagLocalGlobal}} != 0 AND NOT g.deleted AND NOT g.invalid AND NOT EXISTS (
			SELECT 1 FROM FILES f
			INNER JOIN devices d ON d.idx = f.device_idx
			WHERE f.name = g.name AND f.version = g.version AND f.folder_idx = g.folder_idx AND d.device_id = ?
			WHERE f.name = g.name AND f.version = g.version AND d.device_id = ?
		)
		GROUP BY g.type, g.local_flags, g.deleted

		UNION ALL

		SELECT g.type, count(*) as count, sum(g.size) as size, g.local_flags, g.deleted FROM files g
		INNER JOIN folders o ON o.idx = g.folder_idx
		WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND g.deleted AND NOT g.invalid AND EXISTS (
		WHERE g.local_flags & {{.FlagLocalGlobal}} != 0 AND g.deleted AND NOT g.invalid AND EXISTS (
			SELECT 1 FROM FILES f
			INNER JOIN devices d ON d.idx = f.device_idx
			WHERE f.name = g.name AND f.folder_idx = g.folder_idx AND d.device_id = ? AND NOT f.deleted
			WHERE f.name = g.name AND d.device_id = ? AND NOT f.deleted
		)
		GROUP BY g.type, g.local_flags, g.deleted
	`).Select(&res, folder, device.String(),
		folder, device.String()); err != nil {
	`).Select(&res, device.String(),
		device.String()); err != nil {
		return db.Counts{}, wrap(err)
	}

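Callers keep addressing counts through the *DB wrapper, which resolves the folder database and forwards to the folderDB methods above, as in this sketch (folder ID assumed):

need, err := sdb.CountNeed("default", protocol.LocalDeviceID)
if err != nil {
	return err
}
l.Debugln("need:", need) // summarizeCounts has folded the rows into one db.Counts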
@@ -19,7 +19,7 @@ import (
    "github.com/syncthing/syncthing/lib/protocol"
)

func (s *DB) GetGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) {
func (s *folderDB) GetGlobalFile(file string) (protocol.FileInfo, bool, error) {
    file = osutil.NormalizedFilename(file)

    var ind indirectFI
@@ -27,9 +27,8 @@ func (s *DB) GetGlobalFile(folder string, file string) (protocol.FileInfo, bool,
        SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
        INNER JOIN files f on fi.sequence = f.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE o.folder_id = ? AND f.name = ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
    `).Get(&ind, folder, file)
        WHERE f.name = ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
    `).Get(&ind, file)
    if errors.Is(err, sql.ErrNoRows) {
        return protocol.FileInfo{}, false, nil
    }
@@ -43,18 +42,17 @@ func (s *DB) GetGlobalFile(folder string, file string) (protocol.FileInfo, bool,
    return fi, true, nil
}

func (s *DB) GetGlobalAvailability(folder, file string) ([]protocol.DeviceID, error) {
func (s *folderDB) GetGlobalAvailability(file string) ([]protocol.DeviceID, error) {
    file = osutil.NormalizedFilename(file)

    var devStrs []string
    err := s.stmt(`
        SELECT d.device_id FROM files f
        INNER JOIN devices d ON d.idx = f.device_idx
        INNER JOIN folders o ON o.idx = f.folder_idx
        INNER JOIN files g ON f.folder_idx = g.folder_idx AND g.version = f.version AND g.name = f.name
        WHERE o.folder_id = ? AND g.name = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND f.device_idx != {{.LocalDeviceIdx}}
        INNER JOIN files g ON g.version = f.version AND g.name = f.name
        WHERE g.name = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND f.device_idx != {{.LocalDeviceIdx}}
        ORDER BY d.device_id
    `).Select(&devStrs, folder, file)
    `).Select(&devStrs, file)
    if errors.Is(err, sql.ErrNoRows) {
        return nil, nil
    }
@@ -74,22 +72,21 @@ func (s *DB) GetGlobalAvailability(folder, file string) ([]protocol.DeviceID, er
    return devs, nil
}

func (s *DB) AllGlobalFiles(folder string) (iter.Seq[db.FileMetadata], func() error) {
func (s *folderDB) AllGlobalFiles() (iter.Seq[db.FileMetadata], func() error) {
    it, errFn := iterStructs[db.FileMetadata](s.stmt(`
        SELECT f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE o.folder_id = ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
        WHERE f.local_flags & {{.FlagLocalGlobal}} != 0
        ORDER BY f.name
    `).Queryx(folder))
    `).Queryx())
    return itererr.Map(it, errFn, func(m db.FileMetadata) (db.FileMetadata, error) {
        m.Name = osutil.NativeFilename(m.Name)
        return m, nil
    })
}

func (s *DB) AllGlobalFilesPrefix(folder string, prefix string) (iter.Seq[db.FileMetadata], func() error) {
func (s *folderDB) AllGlobalFilesPrefix(prefix string) (iter.Seq[db.FileMetadata], func() error) {
    if prefix == "" {
        return s.AllGlobalFiles(folder)
        return s.AllGlobalFiles()
    }

    prefix = osutil.NormalizedFilename(prefix)
@@ -97,17 +94,16 @@ func (s *DB) AllGlobalFilesPrefix(folder string, prefix string) (iter.Seq[db.Fil

    it, errFn := iterStructs[db.FileMetadata](s.stmt(`
        SELECT f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE o.folder_id = ? AND f.name >= ? AND f.name < ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
        WHERE f.name >= ? AND f.name < ? AND f.local_flags & {{.FlagLocalGlobal}} != 0
        ORDER BY f.name
    `).Queryx(folder, prefix, end))
    `).Queryx(prefix, end))
    return itererr.Map(it, errFn, func(m db.FileMetadata) (db.FileMetadata, error) {
        m.Name = osutil.NativeFilename(m.Name)
        return m, nil
    })
}

func (s *DB) AllNeededGlobalFiles(folder string, device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error) {
func (s *folderDB) AllNeededGlobalFiles(device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error) {
    var selectOpts string
    switch order {
    case config.PullOrderRandom:
@@ -132,25 +128,24 @@ func (s *DB) AllNeededGlobalFiles(folder string, device protocol.DeviceID, order
    }

    if device == protocol.LocalDeviceID {
        return s.neededGlobalFilesLocal(folder, selectOpts)
        return s.neededGlobalFilesLocal(selectOpts)
    }

    return s.neededGlobalFilesRemote(folder, device, selectOpts)
    return s.neededGlobalFilesRemote(device, selectOpts)
}

func (s *DB) neededGlobalFilesLocal(folder, selectOpts string) (iter.Seq[protocol.FileInfo], func() error) {
func (s *folderDB) neededGlobalFilesLocal(selectOpts string) (iter.Seq[protocol.FileInfo], func() error) {
    // Select all the non-ignored files with the need bit set.
    it, errFn := iterStructs[indirectFI](s.stmt(`
        SELECT fi.fiprotobuf, bl.blprotobuf, g.name, g.size, g.modified FROM fileinfos fi
        INNER JOIN files g on fi.sequence = g.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = g.blocklist_hash
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalIgnored}} = 0 AND g.local_flags & {{.FlagLocalNeeded}} != 0
    ` + selectOpts).Queryx(folder))
        WHERE g.local_flags & {{.FlagLocalIgnored}} = 0 AND g.local_flags & {{.FlagLocalNeeded}} != 0
    ` + selectOpts).Queryx())
    return itererr.Map(it, errFn, indirectFI.FileInfo)
}

func (s *DB) neededGlobalFilesRemote(folder string, device protocol.DeviceID, selectOpts string) (iter.Seq[protocol.FileInfo], func() error) {
func (s *folderDB) neededGlobalFilesRemote(device protocol.DeviceID, selectOpts string) (iter.Seq[protocol.FileInfo], func() error) {
    // Select:
    //
    // - all the valid, non-deleted global files that don't have a corresponding
@@ -163,11 +158,10 @@ func (s *DB) neededGlobalFilesRemote(folder string, device protocol.DeviceID, se
        SELECT fi.fiprotobuf, bl.blprotobuf, g.name, g.size, g.modified FROM fileinfos fi
        INNER JOIN files g on fi.sequence = g.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = g.blocklist_hash
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND NOT g.deleted AND NOT g.invalid AND NOT EXISTS (
        WHERE g.local_flags & {{.FlagLocalGlobal}} != 0 AND NOT g.deleted AND NOT g.invalid AND NOT EXISTS (
            SELECT 1 FROM FILES f
            INNER JOIN devices d ON d.idx = f.device_idx
            WHERE f.name = g.name AND f.version = g.version AND f.folder_idx = g.folder_idx AND d.device_id = ?
            WHERE f.name = g.name AND f.version = g.version AND d.device_id = ?
        )

        UNION ALL
@@ -175,15 +169,14 @@ func (s *DB) neededGlobalFilesRemote(folder string, device protocol.DeviceID, se
        SELECT fi.fiprotobuf, bl.blprotobuf, g.name, g.size, g.modified FROM fileinfos fi
        INNER JOIN files g on fi.sequence = g.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = g.blocklist_hash
        INNER JOIN folders o ON o.idx = g.folder_idx
        WHERE o.folder_id = ? AND g.local_flags & {{.FlagLocalGlobal}} != 0 AND g.deleted AND NOT g.invalid AND EXISTS (
        WHERE g.local_flags & {{.FlagLocalGlobal}} != 0 AND g.deleted AND NOT g.invalid AND EXISTS (
            SELECT 1 FROM FILES f
            INNER JOIN devices d ON d.idx = f.device_idx
            WHERE f.name = g.name AND f.folder_idx = g.folder_idx AND d.device_id = ? AND NOT f.deleted
            WHERE f.name = g.name AND d.device_id = ? AND NOT f.deleted
        )
    `+selectOpts).Queryx(
        folder, device.String(),
        folder, device.String(),
        device.String(),
        device.String(),
    ))
    return itererr.Map(it, errFn, indirectFI.FileInfo)
}
@@ -16,16 +16,15 @@ import (
    "github.com/syncthing/syncthing/lib/protocol"
)

func (s *DB) GetIndexID(folder string, device protocol.DeviceID) (protocol.IndexID, error) {
func (s *folderDB) GetIndexID(device protocol.DeviceID) (protocol.IndexID, error) {
    // Try a fast read-only query to begin with. If it does not find the ID
    // we'll do the full thing under a lock.
    var indexID string
    if err := s.stmt(`
        SELECT i.index_id FROM indexids i
        INNER JOIN folders o ON o.idx = i.folder_idx
        INNER JOIN devices d ON d.idx = i.device_idx
        WHERE o.folder_id = ? AND d.device_id = ?
    `).Get(&indexID, folder, device.String()); err == nil && indexID != "" {
        WHERE d.device_id = ?
    `).Get(&indexID, device.String()); err == nil && indexID != "" {
        idx, err := indexIDFromHex(indexID)
        return idx, wrap(err, "select")
    }
@@ -40,14 +39,9 @@ func (s *DB) GetIndexID(folder string, device protocol.DeviceID) (protocol.Index

    // We are now operating only for the local device ID

    folderIdx, err := s.folderIdxLocked(folder)
    if err != nil {
        return 0, wrap(err)
    }

    if err := s.stmt(`
        SELECT index_id FROM indexids WHERE folder_idx = ? AND device_idx = {{.LocalDeviceIdx}}
    `).Get(&indexID, folderIdx); err != nil && !errors.Is(err, sql.ErrNoRows) {
        SELECT index_id FROM indexids WHERE device_idx = {{.LocalDeviceIdx}}
    `).Get(&indexID); err != nil && !errors.Is(err, sql.ErrNoRows) {
        return 0, wrap(err, "select local")
    }

@@ -57,11 +51,11 @@ func (s *DB) GetIndexID(folder string, device protocol.DeviceID) (protocol.Index
    // any.
    id := protocol.NewIndexID()
    if _, err := s.stmt(`
        INSERT INTO indexids (folder_idx, device_idx, index_id, sequence)
        SELECT ?, {{.LocalDeviceIdx}}, ?, COALESCE(MAX(sequence), 0) FROM files
        WHERE folder_idx = ? AND device_idx = {{.LocalDeviceIdx}}
        INSERT INTO indexids (device_idx, index_id, sequence)
        SELECT {{.LocalDeviceIdx}}, ?, COALESCE(MAX(sequence), 0) FROM files
        WHERE device_idx = {{.LocalDeviceIdx}}
        ON CONFLICT DO UPDATE SET index_id = ?
    `).Exec(folderIdx, indexIDToHex(id), folderIdx, indexIDToHex(id)); err != nil {
    `).Exec(indexIDToHex(id), indexIDToHex(id)); err != nil {
        return 0, wrap(err, "insert")
    }
    return id, nil
@@ -70,42 +64,37 @@ func (s *DB) GetIndexID(folder string, device protocol.DeviceID) (protocol.Index
    return indexIDFromHex(indexID)
}

func (s *DB) SetIndexID(folder string, device protocol.DeviceID, id protocol.IndexID) error {
func (s *folderDB) SetIndexID(device protocol.DeviceID, id protocol.IndexID) error {
    s.updateLock.Lock()
    defer s.updateLock.Unlock()

    folderIdx, err := s.folderIdxLocked(folder)
    if err != nil {
        return wrap(err, "folder idx")
    }
    deviceIdx, err := s.deviceIdxLocked(device)
    if err != nil {
        return wrap(err, "device idx")
    }

    if _, err := s.stmt(`
        INSERT OR REPLACE INTO indexids (folder_idx, device_idx, index_id, sequence) values (?, ?, ?, 0)
    `).Exec(folderIdx, deviceIdx, indexIDToHex(id)); err != nil {
        INSERT OR REPLACE INTO indexids (device_idx, index_id, sequence) values (?, ?, 0)
    `).Exec(deviceIdx, indexIDToHex(id)); err != nil {
        return wrap(err, "insert")
    }
    return nil
}

func (s *DB) DropAllIndexIDs() error {
func (s *folderDB) DropAllIndexIDs() error {
    s.updateLock.Lock()
    defer s.updateLock.Unlock()
    _, err := s.stmt(`DELETE FROM indexids`).Exec()
    return wrap(err)
}

func (s *DB) GetDeviceSequence(folder string, device protocol.DeviceID) (int64, error) {
func (s *folderDB) GetDeviceSequence(device protocol.DeviceID) (int64, error) {
    var res sql.NullInt64
    err := s.stmt(`
        SELECT sequence FROM indexids i
        INNER JOIN folders o ON o.idx = i.folder_idx
        INNER JOIN devices d ON d.idx = i.device_idx
        WHERE o.folder_id = ? AND d.device_id = ?
    `).Get(&res, folder, device.String())
        WHERE d.device_id = ?
    `).Get(&res, device.String())
    if errors.Is(err, sql.ErrNoRows) {
        return 0, nil
    }
@@ -118,7 +107,7 @@ func (s *DB) GetDeviceSequence(folder string, device protocol.DeviceID) (int64,
    return res.Int64, nil
}

func (s *DB) RemoteSequences(folder string) (map[protocol.DeviceID]int64, error) {
func (s *folderDB) RemoteSequences() (map[protocol.DeviceID]int64, error) {
    type row struct {
        Device string
        Seq    int64
@@ -126,10 +115,9 @@ func (s *DB) RemoteSequences(folder string) (map[protocol.DeviceID]int64, error)

    it, errFn := iterStructs[row](s.stmt(`
        SELECT d.device_id AS device, i.sequence AS seq FROM indexids i
        INNER JOIN folders o ON o.idx = i.folder_idx
        INNER JOIN devices d ON d.idx = i.device_idx
        WHERE o.folder_id = ? AND i.device_idx != {{.LocalDeviceIdx}}
    `).Queryx(folder))
        WHERE i.device_idx != {{.LocalDeviceIdx}}
    `).Queryx())

    res := make(map[protocol.DeviceID]int64)
    for row, err := range itererr.Zip(it, errFn) {
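GetIndexID above uses an optimistic fast path: a cheap read-only query first, then the write lock and a re-check before creating a new ID. A standalone sketch of the same shape, with a map standing in for the indexids table (names are hypothetical; note that the real fast path is a database query, which is safe under concurrency where this bare map read would only be safe single-goroutine):

package main

import (
    "fmt"
    "sync"
)

type idStore struct {
    mut sync.Mutex
    ids map[string]int64
}

// get returns the stored ID, creating one under the lock only when the
// fast path misses; the re-check avoids double creation.
func (s *idStore) get(key string, newID func() int64) int64 {
    if id, ok := s.ids[key]; ok && id != 0 { // fast path
        return id
    }
    s.mut.Lock()
    defer s.mut.Unlock()
    if id, ok := s.ids[key]; ok && id != 0 { // re-check under the lock
        return id
    }
    id := newID()
    s.ids[key] = id
    return id
}

func main() {
    s := &idStore{ids: map[string]int64{}}
    fmt.Println(s.get("local", func() int64 { return 42 })) // 42, created
    fmt.Println(s.get("local", func() int64 { return 7 }))  // still 42
}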
@@ -18,7 +18,7 @@ import (
    "github.com/syncthing/syncthing/lib/protocol"
)

func (s *DB) GetDeviceFile(folder string, device protocol.DeviceID, file string) (protocol.FileInfo, bool, error) {
func (s *folderDB) GetDeviceFile(device protocol.DeviceID, file string) (protocol.FileInfo, bool, error) {
    file = osutil.NormalizedFilename(file)

    var ind indirectFI
@@ -27,9 +27,8 @@ func (s *DB) GetDeviceFile(folder string, device protocol.DeviceID, file string)
        INNER JOIN files f on fi.sequence = f.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
        INNER JOIN devices d ON f.device_idx = d.idx
        INNER JOIN folders o ON f.folder_idx = o.idx
        WHERE o.folder_id = ? AND d.device_id = ? AND f.name = ?
    `).Get(&ind, folder, device.String(), file)
        WHERE d.device_id = ? AND f.name = ?
    `).Get(&ind, device.String(), file)
    if errors.Is(err, sql.ErrNoRows) {
        return protocol.FileInfo{}, false, nil
    }
@@ -43,19 +42,18 @@ func (s *DB) GetDeviceFile(folder string, device protocol.DeviceID, file string)
    return fi, true, nil
}

func (s *DB) AllLocalFiles(folder string, device protocol.DeviceID) (iter.Seq[protocol.FileInfo], func() error) {
func (s *folderDB) AllLocalFiles(device protocol.DeviceID) (iter.Seq[protocol.FileInfo], func() error) {
    it, errFn := iterStructs[indirectFI](s.stmt(`
        SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
        INNER JOIN files f on fi.sequence = f.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
        INNER JOIN folders o ON o.idx = f.folder_idx
        INNER JOIN devices d ON d.idx = f.device_idx
        WHERE o.folder_id = ? AND d.device_id = ?
    `).Queryx(folder, device.String()))
        WHERE d.device_id = ?
    `).Queryx(device.String()))
    return itererr.Map(it, errFn, indirectFI.FileInfo)
}

func (s *DB) AllLocalFilesBySequence(folder string, device protocol.DeviceID, startSeq int64, limit int) (iter.Seq[protocol.FileInfo], func() error) {
func (s *folderDB) AllLocalFilesBySequence(device protocol.DeviceID, startSeq int64, limit int) (iter.Seq[protocol.FileInfo], func() error) {
    var limitStr string
    if limit > 0 {
        limitStr = fmt.Sprintf(" LIMIT %d", limit)
@@ -64,17 +62,16 @@ func (s *DB) AllLocalFilesBySequence(folder string, device protocol.DeviceID, st
        SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
        INNER JOIN files f on fi.sequence = f.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
        INNER JOIN folders o ON o.idx = f.folder_idx
        INNER JOIN devices d ON d.idx = f.device_idx
        WHERE o.folder_id = ? AND d.device_id = ? AND f.sequence >= ?
        WHERE d.device_id = ? AND f.sequence >= ?
        ORDER BY f.sequence`+limitStr).Queryx(
        folder, device.String(), startSeq))
        device.String(), startSeq))
    return itererr.Map(it, errFn, indirectFI.FileInfo)
}

func (s *DB) AllLocalFilesWithPrefix(folder string, device protocol.DeviceID, prefix string) (iter.Seq[protocol.FileInfo], func() error) {
func (s *folderDB) AllLocalFilesWithPrefix(device protocol.DeviceID, prefix string) (iter.Seq[protocol.FileInfo], func() error) {
    if prefix == "" {
        return s.AllLocalFiles(folder, device)
        return s.AllLocalFiles(device)
    }

    prefix = osutil.NormalizedFilename(prefix)
@@ -84,37 +81,20 @@ func (s *DB) AllLocalFilesWithPrefix(folder string, device protocol.DeviceID, pr
        SELECT fi.fiprotobuf, bl.blprotobuf FROM fileinfos fi
        INNER JOIN files f on fi.sequence = f.sequence
        LEFT JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
        INNER JOIN folders o ON o.idx = f.folder_idx
        INNER JOIN devices d ON d.idx = f.device_idx
        WHERE o.folder_id = ? AND d.device_id = ? AND f.name >= ? AND f.name < ?
    `, folder, device.String(), prefix, end))
        WHERE d.device_id = ? AND f.name >= ? AND f.name < ?
    `, device.String(), prefix, end))
    return itererr.Map(it, errFn, indirectFI.FileInfo)
}

func (s *DB) AllLocalFilesWithBlocksHash(folder string, h []byte) (iter.Seq[db.FileMetadata], func() error) {
func (s *folderDB) AllLocalFilesWithBlocksHash(h []byte) (iter.Seq[db.FileMetadata], func() error) {
    return iterStructs[db.FileMetadata](s.stmt(`
        SELECT f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE o.folder_id = ? AND f.device_idx = {{.LocalDeviceIdx}} AND f.blocklist_hash = ?
    `).Queryx(folder, h))
}

func (s *DB) AllLocalFilesWithBlocksHashAnyFolder(h []byte) (iter.Seq2[string, db.FileMetadata], func() error) {
    type row struct {
        FolderID string `db:"folder_id"`
        db.FileMetadata
    }
    it, errFn := iterStructs[row](s.stmt(`
        SELECT o.folder_id, f.sequence, f.name, f.type, f.modified as modnanos, f.size, f.deleted, f.invalid, f.local_flags as localflags FROM files f
        INNER JOIN folders o ON o.idx = f.folder_idx
        WHERE f.device_idx = {{.LocalDeviceIdx}} AND f.blocklist_hash = ?
    `).Queryx(h))
    return itererr.Map2(it, errFn, func(r row) (string, db.FileMetadata, error) {
        return r.FolderID, r.FileMetadata, nil
    })
}

func (s *DB) AllLocalBlocksWithHash(hash []byte) (iter.Seq[db.BlockMapEntry], func() error) {
func (s *folderDB) AllLocalBlocksWithHash(hash []byte) (iter.Seq[db.BlockMapEntry], func() error) {
    // We involve the files table in this select because deletion of blocks
    // & blocklists is deferred (garbage collected) while the files list is
    // not. This filters out blocks that are in fact deleted.
@@ -124,3 +104,25 @@ func (s *DB) AllLocalBlocksWithHash(hash []byte) (iter.Seq[db.BlockMapEntry], fu
        WHERE f.device_idx = {{.LocalDeviceIdx}} AND b.hash = ?
    `).Queryx(hash))
}

func (s *folderDB) ListDevicesForFolder() ([]protocol.DeviceID, error) {
    var res []string
    err := s.stmt(`
        SELECT DISTINCT d.device_id FROM counts s
        INNER JOIN devices d ON d.idx = s.device_idx
        WHERE s.count > 0 AND s.device_idx != {{.LocalDeviceIdx}}
        ORDER BY d.device_id
    `).Select(&res)
    if err != nil {
        return nil, wrap(err)
    }

    devs := make([]protocol.DeviceID, len(res))
    for i, s := range res {
        devs[i], err = protocol.DeviceIDFromString(s)
        if err != nil {
            return nil, wrap(err)
        }
    }
    return devs, nil
}
45 internal/db/sqlite/folderdb_mtimes.go Normal file
@@ -0,0 +1,45 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
    "time"
)

func (s *folderDB) GetMtime(name string) (ondisk, virtual time.Time) {
    var res struct {
        Ondisk  int64
        Virtual int64
    }
    if err := s.stmt(`
        SELECT m.ondisk, m.virtual FROM mtimes m
        WHERE m.name = ?
    `).Get(&res, name); err != nil {
        return time.Time{}, time.Time{}
    }
    return time.Unix(0, res.Ondisk), time.Unix(0, res.Virtual)
}

func (s *folderDB) PutMtime(name string, ondisk, virtual time.Time) error {
    s.updateLock.Lock()
    defer s.updateLock.Unlock()
    _, err := s.stmt(`
        INSERT OR REPLACE INTO mtimes (name, ondisk, virtual)
        VALUES (?, ?, ?)
    `).Exec(name, ondisk.UnixNano(), virtual.UnixNano())
    return wrap(err)
}

func (s *folderDB) DeleteMtime(name string) error {
    s.updateLock.Lock()
    defer s.updateLock.Unlock()
    _, err := s.stmt(`
        DELETE FROM mtimes
        WHERE name = ?
    `).Exec(name)
    return wrap(err)
}
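A hedged usage sketch for the mtimes API above, as it might be called from an mtime-virtualizing filesystem layer: record the on-disk/virtual pair when the filesystem cannot store the wanted timestamp exactly, then read both back. The function and file name are illustrative, not part of the change:

package sqlite

import (
    "fmt"
    "time"
)

// exampleMtimes stores and retrieves an on-disk/virtual mtime pair for a
// file on a filesystem with coarse timestamps (illustrative only).
func exampleMtimes(fdb *folderDB) error {
    virtual := time.Now()
    ondisk := virtual.Truncate(2 * time.Second) // e.g. FAT-style resolution
    if err := fdb.PutMtime("docs/report.txt", ondisk, virtual); err != nil {
        return err
    }
    gotOndisk, gotVirtual := fdb.GetMtime("docs/report.txt")
    fmt.Println(gotOndisk.Equal(ondisk), gotVirtual.Equal(virtual)) // true true
    return nil
}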
110 internal/db/sqlite/folderdb_open.go Normal file
@@ -0,0 +1,110 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
    "time"

    "github.com/syncthing/syncthing/lib/protocol"
)

type folderDB struct {
    folderID string
    *baseDB

    localDeviceIdx  int64
    deleteRetention time.Duration
}

func openFolderDB(folder, path string, deleteRetention time.Duration) (*folderDB, error) {
    pragmas := []string{
        "journal_mode = WAL",
        "optimize = 0x10002",
        "auto_vacuum = INCREMENTAL",
        "default_temp_store = MEMORY",
        "temp_store = MEMORY",
    }
    schemas := []string{
        "sql/schema/common/*",
        "sql/schema/folder/*",
    }

    base, err := openBase(path, maxDBConns, pragmas, schemas, nil)
    if err != nil {
        return nil, err
    }

    fdb := &folderDB{
        folderID:        folder,
        baseDB:          base,
        deleteRetention: deleteRetention,
    }

    _ = fdb.PutKV("folderID", []byte(folder))

    // Touch device IDs that should always exist, have low index
    // numbers, and will never change
    fdb.localDeviceIdx, _ = fdb.deviceIdxLocked(protocol.LocalDeviceID)
    fdb.tplInput["LocalDeviceIdx"] = fdb.localDeviceIdx

    return fdb, nil
}

// Open the database with options suitable for the migration inserts. This
// is not a safe mode of operation for normal processing, use only for bulk
// inserts with a close afterwards.
func openFolderDBForMigration(folder, path string, deleteRetention time.Duration) (*folderDB, error) {
    pragmas := []string{
        "journal_mode = OFF",
        "default_temp_store = MEMORY",
        "temp_store = MEMORY",
        "foreign_keys = 0",
        "synchronous = 0",
        "locking_mode = EXCLUSIVE",
    }
    schemas := []string{
        "sql/schema/common/*",
        "sql/schema/folder/*",
    }

    base, err := openBase(path, 1, pragmas, schemas, nil)
    if err != nil {
        return nil, err
    }

    fdb := &folderDB{
        folderID:        folder,
        baseDB:          base,
        deleteRetention: deleteRetention,
    }

    // Touch device IDs that should always exist, have low index
    // numbers, and will never change
    fdb.localDeviceIdx, _ = fdb.deviceIdxLocked(protocol.LocalDeviceID)
    fdb.tplInput["LocalDeviceIdx"] = fdb.localDeviceIdx

    return fdb, nil
}

func (s *folderDB) deviceIdxLocked(deviceID protocol.DeviceID) (int64, error) {
    devStr := deviceID.String()
    if _, err := s.stmt(`
        INSERT OR IGNORE INTO devices(device_id)
        VALUES (?)
    `).Exec(devStr); err != nil {
        return 0, wrap(err)
    }
    var idx int64
    if err := s.stmt(`
        SELECT idx FROM devices
        WHERE device_id = ?
    `).Get(&idx, devStr); err != nil {
        return 0, wrap(err)
    }

    return idx, nil
}
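A sketch of how a per-folder database might be opened with the constructor above; the path layout and retention value are assumptions for illustration, not taken from this change:

package sqlite

import "time"

// exampleOpen opens one SQLite file for a single folder, keeping records of
// deleted files for roughly six months (values are illustrative).
func exampleOpen() (*folderDB, error) {
    fdb, err := openFolderDB("default", "index-v2/folder.01.db", 180*24*time.Hour)
    if err != nil {
        return nil, err
    }
    // The local device index is now resolved and available to templated
    // queries as {{.LocalDeviceIdx}}.
    return fdb, nil
}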
531 internal/db/sqlite/folderdb_update.go Normal file
@@ -0,0 +1,531 @@
// Copyright (C) 2025 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package sqlite

import (
    "cmp"
    "context"
    "fmt"
    "slices"

    "github.com/jmoiron/sqlx"
    "github.com/syncthing/syncthing/internal/gen/dbproto"
    "github.com/syncthing/syncthing/internal/itererr"
    "github.com/syncthing/syncthing/lib/osutil"
    "github.com/syncthing/syncthing/lib/protocol"
    "github.com/syncthing/syncthing/lib/sliceutil"
    "google.golang.org/protobuf/proto"
)

const (
    // Arbitrarily chosen values for checkpoint frequency...
    updatePointsPerFile   = 100
    updatePointsPerBlock  = 1
    updatePointsThreshold = 250_000
)

func (s *folderDB) Update(device protocol.DeviceID, fs []protocol.FileInfo) error {
    s.updateLock.Lock()
    defer s.updateLock.Unlock()

    deviceIdx, err := s.deviceIdxLocked(device)
    if err != nil {
        return wrap(err)
    }

    tx, err := s.sql.BeginTxx(context.Background(), nil)
    if err != nil {
        return wrap(err)
    }
    defer tx.Rollback() //nolint:errcheck
    txp := &txPreparedStmts{Tx: tx}

    //nolint:sqlclosecheck
    insertFileStmt, err := txp.Preparex(`
        INSERT OR REPLACE INTO files (device_idx, remote_sequence, name, type, modified, size, version, deleted, invalid, local_flags, blocklist_hash)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        RETURNING sequence
    `)
    if err != nil {
        return wrap(err, "prepare insert file")
    }

    //nolint:sqlclosecheck
    insertFileInfoStmt, err := txp.Preparex(`
        INSERT INTO fileinfos (sequence, fiprotobuf)
        VALUES (?, ?)
    `)
    if err != nil {
        return wrap(err, "prepare insert fileinfo")
    }

    //nolint:sqlclosecheck
    insertBlockListStmt, err := txp.Preparex(`
        INSERT OR IGNORE INTO blocklists (blocklist_hash, blprotobuf)
        VALUES (?, ?)
    `)
    if err != nil {
        return wrap(err, "prepare insert blocklist")
    }

    var prevRemoteSeq int64
    for i, f := range fs {
        f.Name = osutil.NormalizedFilename(f.Name)

        var blockshash *[]byte
        if len(f.Blocks) > 0 {
            f.BlocksHash = protocol.BlocksHash(f.Blocks)
            blockshash = &f.BlocksHash
        } else {
            f.BlocksHash = nil
        }

        if f.Type == protocol.FileInfoTypeDirectory {
            f.Size = 128 // synthetic directory size
        }

        // Insert the file.
        //
        // If it is a remote file, set remote_sequence, otherwise leave it at
        // null. Returns the new local sequence.
        var remoteSeq *int64
        if device != protocol.LocalDeviceID {
            if i > 0 && f.Sequence == prevRemoteSeq {
                return fmt.Errorf("duplicate remote sequence number %d", prevRemoteSeq)
            }
            prevRemoteSeq = f.Sequence
            remoteSeq = &f.Sequence
        }
        var localSeq int64
        if err := insertFileStmt.Get(&localSeq, deviceIdx, remoteSeq, f.Name, f.Type, f.ModTime().UnixNano(), f.Size, f.Version.String(), f.IsDeleted(), f.IsInvalid(), f.LocalFlags, blockshash); err != nil {
            return wrap(err, "insert file")
        }

        if len(f.Blocks) > 0 {
            // Indirect the block list
            blocks := sliceutil.Map(f.Blocks, protocol.BlockInfo.ToWire)
            bs, err := proto.Marshal(&dbproto.BlockList{Blocks: blocks})
            if err != nil {
                return wrap(err, "marshal blocklist")
            }
            if _, err := insertBlockListStmt.Exec(f.BlocksHash, bs); err != nil {
                return wrap(err, "insert blocklist")
            }

            if device == protocol.LocalDeviceID {
                // Insert all blocks
                if err := s.insertBlocksLocked(txp, f.BlocksHash, f.Blocks); err != nil {
                    return wrap(err, "insert blocks")
                }
            }

            f.Blocks = nil
        }

        // Insert the fileinfo
        if device == protocol.LocalDeviceID {
            f.Sequence = localSeq
        }
        bs, err := proto.Marshal(f.ToWire(true))
        if err != nil {
            return wrap(err, "marshal fileinfo")
        }
        if _, err := insertFileInfoStmt.Exec(localSeq, bs); err != nil {
            return wrap(err, "insert fileinfo")
        }

        // Update global and need
        if err := s.recalcGlobalForFile(txp, f.Name); err != nil {
            return wrap(err)
        }
    }

    if err := tx.Commit(); err != nil {
        return wrap(err)
    }

    s.periodicCheckpointLocked(fs)
    return nil
}

func (s *folderDB) DropDevice(device protocol.DeviceID) error {
    if device == protocol.LocalDeviceID {
        panic("bug: cannot drop local device")
    }

    s.updateLock.Lock()
    defer s.updateLock.Unlock()

    tx, err := s.sql.BeginTxx(context.Background(), nil)
    if err != nil {
        return wrap(err)
    }
    defer tx.Rollback() //nolint:errcheck
    txp := &txPreparedStmts{Tx: tx}

    // Drop the device, which cascades to delete all files etc for it
    if _, err := tx.Exec(`DELETE FROM devices WHERE device_id = ?`, device.String()); err != nil {
        return wrap(err)
    }

    // Recalc the globals for all affected folders
    if err := s.recalcGlobalForFolder(txp); err != nil {
        return wrap(err)
    }

    return wrap(tx.Commit())
}

func (s *folderDB) DropAllFiles(device protocol.DeviceID) error {
    s.updateLock.Lock()
    defer s.updateLock.Unlock()

    // This is a two part operation, first dropping all the files and then
    // recalculating the global state for the entire folder.

    deviceIdx, err := s.deviceIdxLocked(device)
    if err != nil {
        return wrap(err)
    }

    tx, err := s.sql.BeginTxx(context.Background(), nil)
    if err != nil {
        return wrap(err)
    }
    defer tx.Rollback() //nolint:errcheck
    txp := &txPreparedStmts{Tx: tx}

    // Drop all the file entries

    result, err := tx.Exec(`
        DELETE FROM files
        WHERE device_idx = ?
    `, deviceIdx)
    if err != nil {
        return wrap(err)
    }
    if n, err := result.RowsAffected(); err == nil && n == 0 {
        // The delete affected no rows, so we don't need to redo the entire
        // global/need calculation.
        return wrap(tx.Commit())
    }

    // Recalc global for the entire folder

    if err := s.recalcGlobalForFolder(txp); err != nil {
        return wrap(err)
    }
    return wrap(tx.Commit())
}

func (s *folderDB) DropFilesNamed(device protocol.DeviceID, names []string) error {
    for i := range names {
        names[i] = osutil.NormalizedFilename(names[i])
    }

    s.updateLock.Lock()
    defer s.updateLock.Unlock()

    deviceIdx, err := s.deviceIdxLocked(device)
    if err != nil {
        return wrap(err)
    }

    tx, err := s.sql.BeginTxx(context.Background(), nil)
    if err != nil {
        return wrap(err)
    }
    defer tx.Rollback() //nolint:errcheck
    txp := &txPreparedStmts{Tx: tx}

    // Drop the named files

    query, args, err := sqlx.In(`
        DELETE FROM files
        WHERE device_idx = ? AND name IN (?)
    `, deviceIdx, names)
    if err != nil {
        return wrap(err)
    }
    if _, err := tx.Exec(query, args...); err != nil {
        return wrap(err)
    }

    // Recalc globals for the named files

    for _, name := range names {
        if err := s.recalcGlobalForFile(txp, name); err != nil {
            return wrap(err)
        }
    }

    return wrap(tx.Commit())
}

func (*folderDB) insertBlocksLocked(tx *txPreparedStmts, blocklistHash []byte, blocks []protocol.BlockInfo) error {
    if len(blocks) == 0 {
        return nil
    }
    bs := make([]map[string]any, len(blocks))
    for i, b := range blocks {
        bs[i] = map[string]any{
            "hash":           b.Hash,
            "blocklist_hash": blocklistHash,
            "idx":            i,
            "offset":         b.Offset,
            "size":           b.Size,
        }
    }

    // Very large block lists (>8000 blocks) result in a "too many variables"
    // error. Chunk it to a reasonable size.
    for chunk := range slices.Chunk(bs, 1000) {
        if _, err := tx.NamedExec(`
            INSERT OR IGNORE INTO blocks (hash, blocklist_hash, idx, offset, size)
            VALUES (:hash, :blocklist_hash, :idx, :offset, :size)
        `, chunk); err != nil {
            return wrap(err)
        }
    }
    return nil
}
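The chunk size of 1000 above follows from bind-variable arithmetic; a worked sketch, assuming SQLite's default limit of 32766 variables per statement (999 in builds older than 3.32):

package sqlite

// Each blocks row binds five variables (hash, blocklist_hash, idx, offset,
// size), so 1000-row chunks use 5000 variables, comfortably under the
// limit, while an unchunked 8000-block list would need 40000 and fail.
const (
    varsPerBlockRow = 5
    blockChunkRows  = 1000
    varsPerChunk    = varsPerBlockRow * blockChunkRows // 5000 < 32766
)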

func (s *folderDB) recalcGlobalForFolder(txp *txPreparedStmts) error {
    // Select files where there is no global; those are the ones we need to
    // recalculate.
    //nolint:sqlclosecheck
    namesStmt, err := txp.Preparex(`
        SELECT f.name FROM files f
        WHERE NOT EXISTS (
            SELECT 1 FROM files g
            WHERE g.name = f.name AND g.local_flags & ? != 0
        )
        GROUP BY name
    `)
    if err != nil {
        return wrap(err)
    }
    rows, err := namesStmt.Queryx(protocol.FlagLocalGlobal)
    if err != nil {
        return wrap(err)
    }
    defer rows.Close()
    for rows.Next() {
        var name string
        if err := rows.Scan(&name); err != nil {
            return wrap(err)
        }
        if err := s.recalcGlobalForFile(txp, name); err != nil {
            return wrap(err)
        }
    }
    return wrap(rows.Err())
}

func (s *folderDB) recalcGlobalForFile(txp *txPreparedStmts, file string) error {
    //nolint:sqlclosecheck
    selStmt, err := txp.Preparex(`
        SELECT name, device_idx, sequence, modified, version, deleted, invalid, local_flags FROM files
        WHERE name = ?
    `)
    if err != nil {
        return wrap(err)
    }
    es, err := itererr.Collect(iterStructs[fileRow](selStmt.Queryx(file)))
    if err != nil {
        return wrap(err)
    }
    if len(es) == 0 {
        // shouldn't happen
        return nil
    }

    // Sort the entries; the global entry is at the head of the list
    slices.SortFunc(es, fileRow.Compare)

    // The global version is the first one in the list that is not invalid,
    // or just the first one in the list if all are invalid.
    var global fileRow
    globIdx := slices.IndexFunc(es, func(e fileRow) bool { return !e.Invalid })
    if globIdx < 0 {
        globIdx = 0
    }
    global = es[globIdx]

    // We "have" the file if the position in the list of versions is at the
    // global version or better, or if the version is the same as the global
    // file (we might be further down the list due to invalid flags), or if
    // the global is deleted and we don't have it at all...
    localIdx := slices.IndexFunc(es, func(e fileRow) bool { return e.DeviceIdx == s.localDeviceIdx })
    hasLocal := localIdx >= 0 && localIdx <= globIdx || // have a better or equal version
        localIdx >= 0 && es[localIdx].Version.Equal(global.Version.Vector) || // have an equal version but invalid/ignored
        localIdx < 0 && global.Deleted // missing it, but the global is also deleted

    // Set the global flag on the global entry. Set the need flag if the
    // local device needs this file, unless it's invalid.
    global.LocalFlags |= protocol.FlagLocalGlobal
    if hasLocal || global.Invalid {
        global.LocalFlags &= ^protocol.FlagLocalNeeded
    } else {
        global.LocalFlags |= protocol.FlagLocalNeeded
    }
    //nolint:sqlclosecheck
    upStmt, err := txp.Preparex(`
        UPDATE files SET local_flags = ?
        WHERE device_idx = ? AND sequence = ?
    `)
    if err != nil {
        return wrap(err)
    }
    if _, err := upStmt.Exec(global.LocalFlags, global.DeviceIdx, global.Sequence); err != nil {
        return wrap(err)
    }

    // Clear the need and global flags on all other entries
    //nolint:sqlclosecheck
    upStmt, err = txp.Preparex(`
        UPDATE files SET local_flags = local_flags & ?
        WHERE name = ? AND sequence != ? AND local_flags & ? != 0
    `)
    if err != nil {
        return wrap(err)
    }
    if _, err := upStmt.Exec(^(protocol.FlagLocalNeeded | protocol.FlagLocalGlobal), global.Name, global.Sequence, protocol.FlagLocalNeeded|protocol.FlagLocalGlobal); err != nil {
        return wrap(err)
    }

    return nil
}

func (s *DB) folderIdxLocked(folderID string) (int64, error) {
    if _, err := s.stmt(`
        INSERT OR IGNORE INTO folders(folder_id)
        VALUES (?)
    `).Exec(folderID); err != nil {
        return 0, wrap(err)
    }
    var idx int64
    if err := s.stmt(`
        SELECT idx FROM folders
        WHERE folder_id = ?
    `).Get(&idx, folderID); err != nil {
        return 0, wrap(err)
    }

    return idx, nil
}

type fileRow struct {
    Name       string
    Version    dbVector
    DeviceIdx  int64 `db:"device_idx"`
    Sequence   int64
    Modified   int64
    Size       int64
    LocalFlags int64 `db:"local_flags"`
    Deleted    bool
    Invalid    bool
}

func (e fileRow) Compare(other fileRow) int {
    // From FileInfo.WinsConflict
    vc := e.Version.Vector.Compare(other.Version.Vector)
    switch vc {
    case protocol.Equal:
        if e.Invalid != other.Invalid {
            if e.Invalid {
                return 1
            }
            return -1
        }

        // Compare the device ID index, lower is better. This is only
        // deterministic to the extent that LocalDeviceID will always be
        // the lowest one; the order between remote devices is random (and
        // irrelevant).
        return cmp.Compare(e.DeviceIdx, other.DeviceIdx)
    case protocol.Greater: // we are newer
        return -1
    case protocol.Lesser: // we are older
        return 1
    case protocol.ConcurrentGreater, protocol.ConcurrentLesser: // there is a conflict
        if e.Invalid != other.Invalid {
            if e.Invalid { // we are invalid, we lose
                return 1
            }
            return -1 // they are invalid, we win
        }
        if e.Deleted != other.Deleted {
            if e.Deleted { // we are deleted, we lose
                return 1
            }
            return -1 // they are deleted, we win
        }
        if d := cmp.Compare(e.Modified, other.Modified); d != 0 {
            return -d // positive d means we were newer, so we win (negative return)
        }
        if vc == protocol.ConcurrentGreater {
            return -1 // we have a better device ID, we win
        }
        return 1 // they win
    default:
        return 0
    }
}
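The sort-then-pick-head technique in recalcGlobalForFile can be illustrated standalone. The toy comparator below keeps only two of the real rules (valid beats invalid, then newer modification time wins) to show how slices.SortFunc puts the winning entry first; the actual Compare additionally weighs version vectors and device indexes:

package main

import (
    "fmt"
    "slices"
)

// row is a simplified stand-in for fileRow; names are illustrative.
type row struct {
    device   string
    modified int64
    invalid  bool
}

// compare orders valid entries before invalid ones, then newer first.
func compare(a, b row) int {
    if a.invalid != b.invalid {
        if a.invalid {
            return 1 // invalid entries sort last
        }
        return -1
    }
    switch {
    case a.modified > b.modified:
        return -1 // newer first
    case a.modified < b.modified:
        return 1
    }
    return 0
}

func main() {
    es := []row{
        {"remote1", 200, true},
        {"local", 100, false},
        {"remote2", 300, false},
    }
    slices.SortFunc(es, compare)
    fmt.Println("global:", es[0].device) // remote2: valid and newest
}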

func (s *folderDB) periodicCheckpointLocked(fs []protocol.FileInfo) {
    // Induce periodic checkpoints. We add points for each file and block,
    // and checkpoint when we've written more than a threshold of points.
    // This ensures we do not go too long without a checkpoint, while also
    // not doing it incessantly for every update.
    s.updatePoints += updatePointsPerFile * len(fs)
    for _, f := range fs {
        s.updatePoints += len(f.Blocks) * updatePointsPerBlock
    }
    if s.updatePoints > updatePointsThreshold {
        conn, err := s.sql.Conn(context.Background())
        if err != nil {
            l.Debugln(s.baseName, "conn:", err)
            return
        }
        defer conn.Close()
        if _, err := conn.ExecContext(context.Background(), `PRAGMA journal_size_limit = 8388608`); err != nil {
            l.Debugln(s.baseName, "PRAGMA journal_size_limit:", err)
        }

        // Every 50th checkpoint becomes a truncate, in an effort to bring
        // down the size now and then.
        checkpointType := "RESTART"
        if s.checkpointsCount > 50 {
            checkpointType = "TRUNCATE"
        }
        cmd := fmt.Sprintf(`PRAGMA wal_checkpoint(%s)`, checkpointType)
        row := conn.QueryRowContext(context.Background(), cmd)

        var res, modified, moved int
        if row.Err() != nil {
            l.Debugln(s.baseName, cmd+":", row.Err())
        } else if err := row.Scan(&res, &modified, &moved); err != nil {
            l.Debugln(s.baseName, cmd+" (scan):", err)
        } else {
            l.Debugln(s.baseName, cmd, s.checkpointsCount, "at", s.updatePoints, "returned", res, modified, moved)
        }

        // Reset the truncate counter when a truncate succeeded. If it
        // failed, we'll keep trying it until we succeed. Increase it faster
        // when we fail to checkpoint, as it's more likely the WAL is
        // growing and will need truncation when we get out of this state.
        if res == 1 {
            s.checkpointsCount += 10
        } else if res == 0 && checkpointType == "TRUNCATE" {
            s.checkpointsCount = 0
        } else {
            s.checkpointsCount++
        }
        s.updatePoints = 0
    }
}
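A worked example of the point budget above, under the constants defined in this file: a batch of 500 files averaging 40 blocks each accrues 500*100 + 500*40*1 = 70,000 points, so roughly every fourth such batch crosses the 250,000-point threshold and triggers a checkpoint. A small helper mirroring the accounting (illustrative only):

package sqlite

import "github.com/syncthing/syncthing/lib/protocol"

// examplePoints computes the checkpoint points a batch would accrue,
// mirroring the accounting in periodicCheckpointLocked.
func examplePoints(fs []protocol.FileInfo) int {
    points := updatePointsPerFile * len(fs)
    for _, f := range fs {
        points += len(f.Blocks) * updatePointsPerBlock
    }
    return points
}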
@@ -4,16 +4,9 @@
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.

-- folders map folder IDs as used by Syncthing to database folder indexes
CREATE TABLE IF NOT EXISTS folders (
    idx INTEGER NOT NULL PRIMARY KEY,
    folder_id TEXT NOT NULL UNIQUE COLLATE BINARY
) STRICT
;

-- devices map device IDs as used by Syncthing to database device indexes
CREATE TABLE IF NOT EXISTS devices (
    idx INTEGER NOT NULL PRIMARY KEY,
    idx INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    device_id TEXT NOT NULL UNIQUE COLLATE BINARY
) STRICT
;
@@ -22,7 +22,6 @@
-- Need bit. This allows for very efficient lookup of files needing handling
-- on this device, which is a common query.
CREATE TABLE IF NOT EXISTS files (
    folder_idx INTEGER NOT NULL,
    device_idx INTEGER NOT NULL, -- actual device ID or LocalDeviceID
    sequence INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, -- our local database sequence, for each and every entry
    remote_sequence INTEGER, -- remote device's sequence number, null for local or synthetic entries
@@ -35,8 +34,7 @@ CREATE TABLE IF NOT EXISTS files (
    invalid INTEGER NOT NULL, -- boolean
    local_flags INTEGER NOT NULL,
    blocklist_hash BLOB, -- null when there are no blocks
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE,
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE
) STRICT
;
-- FileInfos store the actual protobuf object. We do this separately to keep
@@ -48,15 +46,15 @@ CREATE TABLE IF NOT EXISTS fileinfos (
) STRICT
;
-- There can be only one file per folder, device, and remote sequence number
CREATE UNIQUE INDEX IF NOT EXISTS files_remote_sequence ON files (folder_idx, device_idx, remote_sequence)
CREATE UNIQUE INDEX IF NOT EXISTS files_remote_sequence ON files (device_idx, remote_sequence)
    WHERE remote_sequence IS NOT NULL
;
-- There can be only one file per folder, device, and name
CREATE UNIQUE INDEX IF NOT EXISTS files_device_name ON files (folder_idx, device_idx, name)
CREATE UNIQUE INDEX IF NOT EXISTS files_device_name ON files (device_idx, name)
;
-- We want to be able to look up & iterate files based on just folder and name
CREATE INDEX IF NOT EXISTS files_name_only ON files (folder_idx, name)
CREATE INDEX IF NOT EXISTS files_name_only ON files (name)
;
-- We want to be able to look up & iterate files based on blocks hash
CREATE INDEX IF NOT EXISTS files_blocklist_hash_only ON files (blocklist_hash, device_idx, folder_idx) WHERE blocklist_hash IS NOT NULL
CREATE INDEX IF NOT EXISTS files_blocklist_hash_only ON files (blocklist_hash, device_idx) WHERE blocklist_hash IS NOT NULL
;
@@ -7,18 +7,16 @@
-- indexids holds the index ID and maximum sequence for a given device and folder
CREATE TABLE IF NOT EXISTS indexids (
    device_idx INTEGER NOT NULL,
    folder_idx INTEGER NOT NULL,
    index_id TEXT NOT NULL COLLATE BINARY,
    sequence INTEGER NOT NULL DEFAULT 0,
    PRIMARY KEY(device_idx, folder_idx),
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE,
    PRIMARY KEY(device_idx),
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE
) STRICT, WITHOUT ROWID
;
CREATE TRIGGER IF NOT EXISTS indexids_seq AFTER INSERT ON files
BEGIN
    INSERT INTO indexids (folder_idx, device_idx, index_id, sequence)
    VALUES (NEW.folder_idx, NEW.device_idx, "", COALESCE(NEW.remote_sequence, NEW.sequence))
    INSERT INTO indexids (device_idx, index_id, sequence)
    VALUES (NEW.device_idx, "", COALESCE(NEW.remote_sequence, NEW.sequence))
    ON CONFLICT DO UPDATE SET sequence = COALESCE(NEW.remote_sequence, NEW.sequence);
END
;
@@ -9,16 +9,14 @@
-- Counts and sizes are maintained for each device, folder, type, flag bits
-- combination.
CREATE TABLE IF NOT EXISTS counts (
    folder_idx INTEGER NOT NULL,
    device_idx INTEGER NOT NULL,
    type INTEGER NOT NULL,
    local_flags INTEGER NOT NULL,
    count INTEGER NOT NULL,
    size INTEGER NOT NULL,
    deleted INTEGER NOT NULL, -- boolean
    PRIMARY KEY(folder_idx, device_idx, type, local_flags, deleted),
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE,
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE
    PRIMARY KEY(device_idx, type, local_flags, deleted),
    FOREIGN KEY(device_idx) REFERENCES devices(idx) ON DELETE CASCADE
) STRICT, WITHOUT ROWID
;

@@ -26,28 +24,24 @@ CREATE TABLE IF NOT EXISTS counts (

CREATE TRIGGER IF NOT EXISTS counts_insert AFTER INSERT ON files
BEGIN
    INSERT INTO counts (folder_idx, device_idx, type, local_flags, count, size, deleted)
    VALUES (NEW.folder_idx, NEW.device_idx, NEW.type, NEW.local_flags, 1, NEW.size, NEW.deleted)
    INSERT INTO counts (device_idx, type, local_flags, count, size, deleted)
    VALUES (NEW.device_idx, NEW.type, NEW.local_flags, 1, NEW.size, NEW.deleted)
    ON CONFLICT DO UPDATE SET count = count + 1, size = size + NEW.size;
END
;
CREATE TRIGGER IF NOT EXISTS counts_delete AFTER DELETE ON files
BEGIN
    UPDATE counts SET count = count - 1, size = size - OLD.size
    WHERE folder_idx = OLD.folder_idx AND device_idx = OLD.device_idx AND type = OLD.type AND local_flags = OLD.local_flags AND deleted = OLD.deleted;
    WHERE device_idx = OLD.device_idx AND type = OLD.type AND local_flags = OLD.local_flags AND deleted = OLD.deleted;
END
;
CREATE TRIGGER IF NOT EXISTS counts_update AFTER UPDATE OF local_flags ON files
WHEN NEW.local_flags != OLD.local_flags
BEGIN
    INSERT INTO counts (folder_idx, device_idx, type, local_flags, count, size, deleted)
    VALUES (NEW.folder_idx, NEW.device_idx, NEW.type, NEW.local_flags, 1, NEW.size, NEW.deleted)
    INSERT INTO counts (device_idx, type, local_flags, count, size, deleted)
    VALUES (NEW.device_idx, NEW.type, NEW.local_flags, 1, NEW.size, NEW.deleted)
    ON CONFLICT DO UPDATE SET count = count + 1, size = size + NEW.size;
    UPDATE counts SET count = count - 1, size = size - OLD.size
    WHERE folder_idx = OLD.folder_idx AND device_idx = OLD.device_idx AND type = OLD.type AND local_flags = OLD.local_flags AND deleted = OLD.deleted;
    WHERE device_idx = OLD.device_idx AND type = OLD.type AND local_flags = OLD.local_flags AND deleted = OLD.deleted;
END
;
DROP TRIGGER IF EXISTS counts_update_add -- tmp migration
;
DROP TRIGGER IF EXISTS counts_update_del -- tmp migration
;
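The point of these triggers is that size accounting becomes a cheap aggregate over a handful of counts rows rather than a scan of files. A hedged sketch of the kind of read they enable (the query text is illustrative; column names follow the schema above):

package sqlite

// exampleLocalSizeQuery sums live file count and bytes for one device from
// the trigger-maintained counts table (illustrative only).
const exampleLocalSizeQuery = `
    SELECT COALESCE(SUM(count), 0) AS count, COALESCE(SUM(size), 0) AS size
    FROM counts
    WHERE device_idx = ? AND NOT deleted
`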
@@ -6,11 +6,9 @@

--- Backing for the MtimeFS
CREATE TABLE IF NOT EXISTS mtimes (
    folder_idx INTEGER NOT NULL,
    name TEXT NOT NULL,
    ondisk INTEGER NOT NULL, -- unix nanos
    virtual INTEGER NOT NULL, -- unix nanos
    PRIMARY KEY(folder_idx, name),
    FOREIGN KEY(folder_idx) REFERENCES folders(idx) ON DELETE CASCADE
    PRIMARY KEY(name)
) STRICT, WITHOUT ROWID
;
16 internal/db/sqlite/sql/schema/main/00-folders.sql Normal file
@@ -0,0 +1,16 @@
-- Copyright (C) 2025 The Syncthing Authors.
--
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this file,
-- You can obtain one at https://mozilla.org/MPL/2.0/.

-- folders map folder IDs as used by Syncthing to database folder indexes
CREATE TABLE IF NOT EXISTS folders (
    idx INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    folder_id TEXT NOT NULL UNIQUE COLLATE BINARY,
    database_name TEXT COLLATE BINARY -- initially null
) STRICT
;
-- The database_name is unique, when set
CREATE INDEX IF NOT EXISTS folders_database_name ON folders (database_name) WHERE database_name IS NOT NULL
;
@@ -68,13 +68,9 @@ var (
    DefaultTheme = "default"
    // Default stun servers should be substituted when the configuration
    // contains <stunServer>default</stunServer>.

    // DefaultPrimaryStunServers are servers provided by us (to avoid causing the public servers burden)
    DefaultPrimaryStunServers = []string{
        // Discontinued because of misuse. See https://forum.syncthing.net/t/stun-server-misuse/23319
        //"stun.syncthing.net:3478",
    }
    DefaultSecondaryStunServers = []string{
    // The primary stun servers are provided by us and are resolved via an SRV record
    // The fallback stun servers are used if the primary ones can't be resolved or are down.
    DefaultFallbackStunServers = []string{
        "stun.counterpath.com:3478",
        "stun.counterpath.net:3478",
        "stun.ekiga.net:3478",

@@ -101,16 +101,18 @@ func TestDefaultValues(t *testing.T) {
        Defaults: Defaults{
            Folder: FolderConfiguration{
                FilesystemType: FilesystemTypeBasic,
                Path: "~",
                Path: "",
                Type: FolderTypeSendReceive,
                Devices: []FolderDeviceConfiguration{{DeviceID: device1}},
                RescanIntervalS: 3600,
                FSWatcherEnabled: true,
                FSWatcherDelayS: 10,
                IgnorePerms: false,
                PullerDelayS: 1,
                AutoNormalize: true,
                MinDiskFree: size,
                Versioning: VersioningConfiguration{
                    FSType: FilesystemTypeBasic,
                    CleanupIntervalS: 3600,
                    Params: map[string]string{},
                },
@@ -179,21 +181,26 @@ func TestDeviceConfig(t *testing.T) {
            Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
            Type: FolderTypeSendOnly,
            RescanIntervalS: 600,
            FSWatcherEnabled: false,
            FSWatcherEnabled: true,
            FSWatcherDelayS: 10,
            Copiers: 0,
            Hashers: 0,
            PullerDelayS: 1,
            AutoNormalize: true,
            MinDiskFree: Size{1, "%"},
            MaxConflicts: -1,
            Versioning: VersioningConfiguration{
                Params: map[string]string{},
                CleanupIntervalS: 3600,
                FSType: FilesystemTypeBasic,
                Params: map[string]string{},
            },
            MarkerName: DefaultMarkerName,
            JunctionsAsDirs: true,
            MaxConcurrentWrites: maxConcurrentWritesDefault,
            XattrFilter: XattrFilter{
                Entries: []XattrFilterEntry{},
                MaxSingleEntrySize: 1024,
                MaxTotalSize: 4096,
                Entries: []XattrFilterEntry{},
            },
        },
    }
@@ -518,7 +525,8 @@ func TestIssue1750(t *testing.T) {

func TestFolderPath(t *testing.T) {
    folder := FolderConfiguration{
        Path: "~/tmp",
        FilesystemType: FilesystemTypeBasic,
        Path: "~/tmp",
    }

    realPath := folder.Filesystem().URI()

@@ -8,33 +8,27 @@ package config

import "github.com/syncthing/syncthing/lib/fs"

type FilesystemType int32
type FilesystemType string

const (
    FilesystemTypeBasic FilesystemType = 0
    FilesystemTypeFake FilesystemType = 1
    FilesystemTypeBasic FilesystemType = "basic"
    FilesystemTypeFake FilesystemType = "fake"
)

func (t FilesystemType) String() string {
    switch t {
    case FilesystemTypeBasic:
        return "basic"
    case FilesystemTypeFake:
        return "fake"
    default:
        return "unknown"
func (t FilesystemType) ToFS() fs.FilesystemType {
    if t == "" {
        // legacy compat, zero value means basic
        return fs.FilesystemTypeBasic
    }
    return fs.FilesystemType(string(t))
}

func (t FilesystemType) ToFS() fs.FilesystemType {
    switch t {
    case FilesystemTypeBasic:
        return fs.FilesystemTypeBasic
    case FilesystemTypeFake:
        return fs.FilesystemTypeFake
    default:
        return fs.FilesystemTypeBasic
func (t FilesystemType) String() string {
    if t == "" {
        // legacy compat, zero value means basic
        return string(FilesystemTypeBasic)
    }
    return string(t)
}

func (t FilesystemType) MarshalText() ([]byte, error) {
@@ -42,13 +36,15 @@ func (t FilesystemType) MarshalText() ([]byte, error) {
}

func (t *FilesystemType) UnmarshalText(bs []byte) error {
    switch string(bs) {
    case "basic":
        *t = FilesystemTypeBasic
    case "fake":
        *t = FilesystemTypeFake
    default:
    if len(bs) == 0 {
        // legacy compat, zero value means basic
        *t = FilesystemTypeBasic
        return nil
    }
    *t = FilesystemType(string(bs))
    return nil
}

func (t *FilesystemType) ParseDefault(str string) error {
    return t.UnmarshalText([]byte(str))
}

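A small demo of the zero-value compatibility rule above: an empty filesystemType still decodes to basic, while unknown strings now pass through as-is for the fs layer to handle. Illustrative only:

package config

import "fmt"

// exampleFilesystemType shows the legacy-compat decoding behavior.
func exampleFilesystemType() {
    var t FilesystemType
    _ = t.UnmarshalText(nil) // legacy empty value
    fmt.Println(t == FilesystemTypeBasic) // true
    _ = t.UnmarshalText([]byte("fake"))
    fmt.Println(t.ToFS()) // fake, passed through to the fs layer
}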
@@ -9,6 +9,8 @@ package config

 import (
 	"bytes"
+	"crypto/sha256"
+	"encoding/json"
 	"encoding/xml"
 	"errors"
 	"fmt"
 	"path"
@@ -22,6 +24,7 @@ import (
 	"github.com/syncthing/syncthing/lib/build"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
+	"github.com/syncthing/syncthing/lib/structutil"
 )

 var (
@@ -46,8 +49,8 @@ type FolderDeviceConfiguration struct {
 type FolderConfiguration struct {
 	ID    string `json:"id" xml:"id,attr" nodefault:"true"`
 	Label string `json:"label" xml:"label,attr" restart:"false"`
-	FilesystemType FilesystemType `json:"filesystemType" xml:"filesystemType"`
-	Path           string         `json:"path" xml:"path,attr" default:"~"`
+	FilesystemType FilesystemType `json:"filesystemType" xml:"filesystemType" default:"basic"`
+	Path           string         `json:"path" xml:"path,attr"`
 	Type            FolderType                  `json:"type" xml:"type,attr"`
 	Devices         []FolderDeviceConfiguration `json:"devices" xml:"device"`
 	RescanIntervalS int                         `json:"rescanIntervalS" xml:"rescanIntervalS,attr" default:"3600"`
@@ -65,6 +68,7 @@ type FolderConfiguration struct {
 	IgnoreDelete          bool    `json:"ignoreDelete" xml:"ignoreDelete"`
 	ScanProgressIntervalS int     `json:"scanProgressIntervalS" xml:"scanProgressIntervalS"`
 	PullerPauseS          int     `json:"pullerPauseS" xml:"pullerPauseS"`
+	PullerDelayS          float64 `json:"pullerDelayS" xml:"pullerDelayS" default:"1"`
 	MaxConflicts          int     `json:"maxConflicts" xml:"maxConflicts" default:"10"`
 	DisableSparseFiles    bool    `json:"disableSparseFiles" xml:"disableSparseFiles"`
 	DisableTempIndexes    bool    `json:"disableTempIndexes" xml:"disableTempIndexes"`
@@ -391,3 +395,23 @@ func (f XattrFilter) GetMaxSingleEntrySize() int {
 func (f XattrFilter) GetMaxTotalSize() int {
 	return f.MaxTotalSize
 }
+
+func (f *FolderConfiguration) UnmarshalJSON(data []byte) error {
+	structutil.SetDefaults(f)
+
+	// avoid recursing into this method
+	type noCustomUnmarshal FolderConfiguration
+	ptr := (*noCustomUnmarshal)(f)
+
+	return json.Unmarshal(data, ptr)
+}
+
+func (f *FolderConfiguration) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	structutil.SetDefaults(f)
+
+	// avoid recursing into this method
+	type noCustomUnmarshal FolderConfiguration
+	ptr := (*noCustomUnmarshal)(f)
+
+	return d.DecodeElement(ptr, &start)
+}

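Note: the UnmarshalJSON/UnmarshalXML methods above apply defaults before decoding, using a local alias type so the decoder does not re-enter the custom method. A minimal runnable sketch of that pattern, with a hypothetical Settings type standing in for FolderConfiguration and a hard-coded default standing in for structutil.SetDefaults:

package main

import (
	"encoding/json"
	"fmt"
)

type Settings struct {
	RescanIntervalS int `json:"rescanIntervalS"`
}

func (s *Settings) UnmarshalJSON(data []byte) error {
	s.RescanIntervalS = 3600 // stand-in for structutil.SetDefaults(s)

	// The alias type has no methods, so json.Unmarshal won't recurse here.
	type noCustomUnmarshal Settings
	return json.Unmarshal(data, (*noCustomUnmarshal)(s))
}

func main() {
	var s Settings
	_ = json.Unmarshal([]byte(`{}`), &s)
	fmt.Println(s.RescanIntervalS) // 3600: the default survives an absent field
}
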
@@ -8,8 +8,10 @@ package config

 import (
 	"fmt"
+	"net"
 	"runtime"
+	"slices"
 	"strings"

 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/rand"
@@ -183,15 +185,22 @@ func (opts OptionsConfiguration) StunServers() []string {
 	for _, addr := range opts.RawStunServers {
 		switch addr {
 		case "default":
-			defaultPrimaryAddresses := make([]string, len(DefaultPrimaryStunServers))
-			copy(defaultPrimaryAddresses, DefaultPrimaryStunServers)
-			rand.Shuffle(defaultPrimaryAddresses)
-			addresses = append(addresses, defaultPrimaryAddresses...)
+			_, records, err := net.LookupSRV("stun", "udp", "syncthing.net")
+			if err != nil {
+				l.Warnln("Unable to resolve primary STUN servers via DNS:", err)
+			}

-			defaultSecondaryAddresses := make([]string, len(DefaultSecondaryStunServers))
-			copy(defaultSecondaryAddresses, DefaultSecondaryStunServers)
-			rand.Shuffle(defaultSecondaryAddresses)
-			addresses = append(addresses, defaultSecondaryAddresses...)
+			for _, record := range records {
+				priority := record.Priority
+				target := strings.TrimSuffix(record.Target, ".")
+				address := fmt.Sprintf("%s:%d", target, record.Port)
+				l.Debugf("Resolved primary STUN server %s with priority %d", address, priority)
+				addresses = append(addresses, address)
+			}
+
+			fallbackAddresses := slices.Clone(DefaultFallbackStunServers)
+			rand.Shuffle(fallbackAddresses)
+			addresses = append(addresses, fallbackAddresses...)
 		default:
 			addresses = append(addresses, addr)
 		}

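Note: the hunk above swaps the hard-coded primary/secondary STUN lists for a DNS SRV lookup, keeping only the fallback list static. A standalone sketch of the same lookup (service "stun", proto "udp", domain "syncthing.net", as in the diff):

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// LookupSRV queries _stun._udp.syncthing.net and returns records
	// sorted by priority and randomized by weight.
	_, records, err := net.LookupSRV("stun", "udp", "syncthing.net")
	if err != nil {
		fmt.Println("unable to resolve STUN servers:", err)
		return
	}
	for _, r := range records {
		// SRV targets are fully qualified ("stun.example.net."); trim the root dot.
		address := fmt.Sprintf("%s:%d", strings.TrimSuffix(r.Target, "."), r.Port)
		fmt.Printf("%s (priority %d)\n", address, r.Priority)
	}
}
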
@@ -20,7 +20,7 @@ type VersioningConfiguration struct {
 	Params           map[string]string `json:"params" xml:"parameter" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	CleanupIntervalS int               `json:"cleanupIntervalS" xml:"cleanupIntervalS" default:"3600"`
 	FSPath           string            `json:"fsPath" xml:"fsPath"`
-	FSType           FilesystemType    `json:"fsType" xml:"fsType"`
+	FSType           FilesystemType    `json:"fsType" xml:"fsType" default:"basic"`
 }

 func (c *VersioningConfiguration) Reset() {
@@ -33,7 +33,7 @@ type internalVersioningConfiguration struct {
 	Params           []internalParam `xml:"param"`
 	CleanupIntervalS int             `xml:"cleanupIntervalS" default:"3600"`
 	FSPath           string          `xml:"fsPath"`
-	FSType           FilesystemType  `xml:"fsType"`
+	FSType           FilesystemType  `xml:"fsType" default:"basic"`
 }

 type internalParam struct {

@@ -28,7 +28,7 @@ func init() {
 		TLSHandshakeTimeout: 10 * time.Second,
 	}

-	// Defer this, so that logging gets setup.
+	// Defer this, so that logging gets set up.
 	go func() {
 		time.Sleep(500 * time.Millisecond)
 		l.Infoln("Proxy settings detected")

@@ -19,6 +19,8 @@ import (
 	"github.com/syncthing/syncthing/lib/build"
 )

+const FilesystemTypeBasic FilesystemType = "basic"
+
 var (
 	errInvalidFilenameEmpty              = errors.New("name is invalid, must not be empty")
 	errInvalidFilenameWindowsSpacePeriod = errors.New("name is invalid, must not end in space or period on Windows")
@@ -56,6 +58,12 @@ type (
 	groupCache = valueCache[string, *user.Group]
 )

+func init() {
+	RegisterFilesystemType(FilesystemTypeBasic, func(root string, opts ...Option) (Filesystem, error) {
+		return newBasicFilesystem(root, opts...), nil
+	})
+}
+
 func newBasicFilesystem(root string, opts ...Option) *BasicFilesystem {
 	if root == "" {
 		root = "." // Otherwise "" becomes "/" below

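Note: with the const and init above, the basic filesystem registers its own constructor instead of being special-cased in a switch. A self-contained sketch of this registry pattern (simplified signatures, illustrative names; not syncthing's exact API):

package main

import "fmt"

type Filesystem interface{ URI() string }

type basicFS struct{ root string }

func (f *basicFS) URI() string { return f.root }

// registry maps a filesystem type name to its constructor.
var registry = map[string]func(root string) (Filesystem, error){}

func RegisterFilesystemType(name string, factory func(root string) (Filesystem, error)) {
	registry[name] = factory
}

func init() {
	RegisterFilesystemType("basic", func(root string) (Filesystem, error) {
		return &basicFS{root: root}, nil
	})
}

func main() {
	fsys, err := registry["basic"]("/tmp")
	if err != nil {
		panic(err)
	}
	fmt.Println(fsys.URI()) // /tmp
}
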
@@ -328,10 +336,6 @@ func (*BasicFilesystem) underlying() (Filesystem, bool) {
 	return nil, false
 }

-func (*BasicFilesystem) wrapperType() filesystemWrapperType {
-	return filesystemWrapperTypeNone
-}
-
 // basicFile implements the fs.File interface on top of an os.File
 type basicFile struct {
 	*os.File

@@ -54,7 +54,7 @@ func (f *BasicFilesystem) Watch(name string, ignore Matcher, ctx context.Context
 	if err != nil {
 		notify.Stop(backendChan)
 		if reachedMaxUserWatches(err) {
-			err = errors.New("failed to setup inotify handler. Please increase inotify limits, see https://docs.syncthing.net/users/faq.html#inotify-limits")
+			err = errors.New("failed to set up inotify handler. Please increase inotify limits, see https://docs.syncthing.net/users/faq.html#inotify-limits")
 		}
 		return nil, nil, err
 	}

@@ -357,10 +357,6 @@ func (f *caseFilesystem) underlying() (Filesystem, bool) {
 	return f.Filesystem, true
 }

-func (*caseFilesystem) wrapperType() filesystemWrapperType {
-	return filesystemWrapperTypeCase
-}
-
 func (f *caseFilesystem) checkCase(name string) error {
 	var err error
 	if name, err = Canonicalize(name); err != nil {

@@ -161,10 +161,11 @@ func BenchmarkWalkCaseFakeFS100k(b *testing.B) {
 		b.Fatal(err)
 	}
 	b.Run("rawfs", func(b *testing.B) {
-		var fakefs *fakeFS
-		if ffs, ok := unwrapFilesystem(fsys, filesystemWrapperTypeNone); ok {
-			fakefs = ffs.(*fakeFS)
+		fakefs, ok := unwrapFilesystem[*fakeFS](fsys)
+		if !ok {
+			panic("expected unwrap to fakefs")
 		}
+
 		fakefs.resetCounters()
 		benchmarkWalkFakeFS(b, fsys, paths, 0, "")
 		fakefs.reportMetricsPerOp(b)
@@ -180,9 +181,10 @@ func BenchmarkWalkCaseFakeFS100k(b *testing.B) {
 			cache: newCaseCache(),
 		},
 	}
-	var fakefs *fakeFS
-	if ffs, ok := unwrapFilesystem(fsys, filesystemWrapperTypeNone); ok {
-		fakefs = ffs.(*fakeFS)
+
+	fakefs, ok := unwrapFilesystem[*fakeFS](fsys)
+	if !ok {
+		panic("expected unwrap to fakefs")
 	}
 	fakefs.resetCounters()
 	benchmarkWalkFakeFS(b, casefs, paths, 0, "")
@@ -209,10 +211,12 @@ func BenchmarkWalkCaseFakeFS100k(b *testing.B) {
 			cache: newCaseCache(),
 		},
 	}
-	var fakefs *fakeFS
-	if ffs, ok := unwrapFilesystem(fsys, filesystemWrapperTypeNone); ok {
-		fakefs = ffs.(*fakeFS)
+
+	fakefs, ok := unwrapFilesystem[*fakeFS](fsys)
+	if !ok {
+		panic("expected unwrap to fakefs")
 	}
+
 	fakefs.resetCounters()
 	benchmarkWalkFakeFS(b, casefs, paths, otherOpEvery, otherOpPath)
 	fakefs.reportMetricsPerOp(b)

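Note: the benchmark hunks above move from unwrapFilesystem(fsys, wrapperType) plus a type assertion to a single generic unwrapFilesystem[*fakeFS](fsys) call. The real implementation is not shown in this diff; the following is a self-contained sketch of how such a generic unwrap can walk a wrapper chain (interface and names are illustrative assumptions):

package main

import "fmt"

type Filesystem interface {
	underlying() (Filesystem, bool)
}

type baseFS struct{}

func (baseFS) underlying() (Filesystem, bool) { return nil, false }

type wrapperFS struct{ inner Filesystem }

func (w wrapperFS) underlying() (Filesystem, bool) { return w.inner, true }

// unwrapFilesystem peels wrappers until the requested concrete type appears.
func unwrapFilesystem[T Filesystem](fsys Filesystem) (T, bool) {
	for {
		if concrete, ok := fsys.(T); ok {
			return concrete, true
		}
		next, ok := fsys.underlying()
		if !ok {
			var zero T
			return zero, false
		}
		fsys = next
	}
}

func main() {
	var fsys Filesystem = wrapperFS{inner: baseFS{}}
	base, ok := unwrapFilesystem[baseFS](fsys)
	fmt.Println(base, ok) // {} true
}
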
@@ -69,7 +69,3 @@ func (fs *errorFilesystem) PlatformData(_ string, _, _ bool, _ XattrFilter) (pro
 func (*errorFilesystem) underlying() (Filesystem, bool) {
 	return nil, false
 }
-
-func (*errorFilesystem) wrapperType() filesystemWrapperType {
-	return filesystemWrapperTypeError
-}

Some files were not shown because too many files have changed in this diff.