Compare commits


36 Commits

Author SHA1 Message Date
Jakob Borg
d0c3697152 cmd/syncthing: Print version information early (fixes #5891) (#5893) 2019-07-27 20:36:33 +02:00
Simon Frei
35f40e9a58 lib/model: Create new file-set after stopping folder (fixes #5882) (#5883) 2019-07-23 20:39:25 +02:00
Simon Frei
5de9b677c2 lib/fs: Fix kqueue event list (fixes #5308) (#5885) 2019-07-23 14:11:15 +02:00
Simon Frei
6f08162376 lib/model: Remove incorrect/useless panics (#5881) 2019-07-23 10:51:16 +02:00
Simon Frei
7b3d9a8dca lib/syncthing: Refactor to use util.AsService (#5858) 2019-07-23 10:50:37 +02:00
Simon Frei
942659fb06 lib/model, lib/nat: More service termination speedup (#5884) 2019-07-23 10:49:22 +02:00
dependabot-preview[bot]
15c262184b build(deps): bump github.com/maruel/panicparse from 1.2.1 to 1.3.0 (#5879)
Bumps [github.com/maruel/panicparse](https://github.com/maruel/panicparse) from 1.2.1 to 1.3.0.
- [Release notes](https://github.com/maruel/panicparse/releases)
- [Commits](https://github.com/maruel/panicparse/compare/v1.2.1...v1.3.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-22 19:46:52 +01:00
dependabot-preview[bot]
484fa0592e build(deps): bump github.com/lib/pq from 1.1.1 to 1.2.0 (#5878)
Bumps [github.com/lib/pq](https://github.com/lib/pq) from 1.1.1 to 1.2.0.
- [Release notes](https://github.com/lib/pq/releases)
- [Commits](https://github.com/lib/pq/compare/v1.1.1...v1.2.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-22 08:07:21 +01:00
Simon Frei
b5b54ff057 lib/model: No watch-error on missing folder (fixes #5833) (#5876) 2019-07-19 19:41:16 +02:00
Simon Frei
4d3432af3e lib: Ensure timely service termination (fixes #5860) (#5863) 2019-07-19 19:40:40 +02:00
Simon Frei
1cb55904bc lib/model: Prevent panic in NeedFolderFiles (fixes #5872) (#5875) 2019-07-19 19:39:52 +02:00
Simon Frei
2b622d0774 lib/model: Close conn on dev pause (fixes #5873) (#5874) 2019-07-19 19:37:29 +02:00
Simon Frei
1894123d3c lib/syncthing: Modify exit status before stopping (fixes #5869) (#5870) 2019-07-18 20:49:00 +02:00
Jakob Borg
e7e177a6fa lib/relay: Prevent spurious relay error message (fixes #5861) (#5864) 2019-07-17 10:55:28 +02:00
Simon Frei
eed1edcca0 cmd/syncthing: Ensure myID is set by making it local (fixes #5859) (#5862) 2019-07-17 07:19:14 +02:00
Simon Frei
0025e9ccfb all: Refactor cmd/syncthing creating lib/syncthing (ref #4085) (#5805)
* add skeleton for lib/syncthing

* copy syncthingMain to lib/syncthing (verbatim)

* Remove code to deduplicate copies of syncthingMain

* fix simple build errors

* move stuff from main to syncthing with minimal mod

* merge runtime options

* actually use syncthing.App

* pass io.writer to lib/syncthing for auditing

* get rid of env stuff in lib/syncthing

* add .Error() and comments

* review: Remove fs interactions from lib

* and go 1.13 happened

* utility functions
2019-07-14 11:43:13 +01:00
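The upshot, visible in the large cmd/syncthing diff further down, is that main() now drives an opaque syncthing.App instead of assembling the service tree itself. A minimal sketch of the resulting call sequence, with the lines taken from that diff and the surrounding setup elided:

    appOpts := runtimeOptions.Options
    app := syncthing.New(cfg, ldb, cert, appOpts)
    setupSignalHandling(app)
    app.Start()
    status := app.Wait()
    os.Exit(int(status))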
Simon Frei
82b70b9fae lib/model, lib/protocol: Track closing connections (fixes #5828) (#5829) 2019-07-14 11:03:55 +02:00
Simon Frei
def4b8cee5 lib/config: Error on empty folder path (fixes #5853) (#5854) 2019-07-14 11:03:14 +02:00
Aurélien Rainone
f1a7dd766e all: Add comment to ensure correct atomics alignment (fixes #5813)
Per the sync/atomic bug note:

> On ARM, x86-32, and 32-bit MIPS, it is the caller's
> responsibility to arrange for 64-bit alignment of 64-bit words
> accessed atomically. The first word in a variable or in an
> allocated struct, array, or slice can be relied upon to be
> 64-bit aligned.

All atomic accesses of 64-bit variables in the syncthing code base are
currently OK (i.e. they are all 64-bit aligned).

Generally, the bug is triggered by incorrect alignment of struct
fields. Free variables (declared in a function) are guaranteed to be
64-bit aligned by the Go compiler.

To ensure the code remains correct upon further addition/removal
of fields, which would change the currently correct alignment, I
added the following comment where required:

     // atomic, must remain 64-bit aligned

See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
2019-07-13 14:05:39 +01:00
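For reference, a minimal standalone illustration of the rule quoted above (illustrative names, not code from this changeset): on 32-bit platforms the atomically accessed 64-bit field is kept first in the struct, where alignment is guaranteed.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type stats struct {
        hits int64 // atomic, must remain 64-bit aligned (first field)
        name string
    }

    func main() {
        var s stats
        atomic.AddInt64(&s.hits, 1)
        fmt.Println(atomic.LoadInt64(&s.hits))
    }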
Simon Frei
20c8dbd9ed lib/model: Fix integer conversion (fixes #5837) (#5851) 2019-07-12 16:37:12 +02:00
xduugu
4b3f9b1af9 lib/versioner: Replace multiple placeholders in a single token in external command (fixes #5849)
* lib/versioner: Add placeholder to provide the absolute file path to external commands

This commit adds support for a new placeholder, %FILE_PATH_FULL%, to the
command of the external versioner. The placeholder will be replaced by
the absolute path of the file that should be deleted.

* Revert "lib/versioner: Add placeholder to provide the absolute file path to external commands"

This reverts commit fb48962b94.

* lib/versioner: Replace all placeholders in external command (fixes #5849)

Before this commit, only placeholders that spanned a whole word were
replaced, for example "%FOLDER_PATH%". Tokens consisting of more than
one placeholder, or of a placeholder plus additional characters, for
example "%FOLDER_PATH%/%FILE_PATH%", were left untouched.

* fixup! lib/versioner: Replace all placeholders in external command (fixes #5849)
2019-07-12 08:45:39 +01:00
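A hedged sketch of the behaviour change, with illustrative names (not syncthing's actual versioner code): every placeholder occurrence inside each command token is expanded, rather than only tokens that are exactly one placeholder.

    package main

    import (
        "fmt"
        "strings"
    )

    // expandTokens replaces every placeholder occurrence in each token.
    func expandTokens(tokens []string, vars map[string]string) []string {
        out := make([]string, len(tokens))
        for i, tok := range tokens {
            for key, val := range vars {
                tok = strings.ReplaceAll(tok, key, val)
            }
            out[i] = tok
        }
        return out
    }

    func main() {
        cmd := []string{"archive", "%FOLDER_PATH%/%FILE_PATH%"}
        vars := map[string]string{"%FOLDER_PATH%": "/data", "%FILE_PATH%": "docs/a.txt"}
        fmt.Println(expandTokens(cmd, vars)) // [archive /data/docs/a.txt]
    }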
Simon Frei
3446d50201 lib/model: Remove pointless error that watch hasn't started (fixes #5833) (#5834) 2019-07-10 11:00:06 +02:00
Simon Frei
9fef1552fc lib/db, lib/model: Remove folder info from panics (ref #5839) (#5840) 2019-07-10 10:57:49 +02:00
Simon Frei
85318f3b82 gui: On update setting don't show RC msg when disabled (fixes #5803) (#5842) 2019-07-09 22:30:22 +01:00
Simon Frei
485acda63b lib/relay: Call the proper Error method (ref #5806) (#5841) 2019-07-09 22:29:19 +01:00
Simon Frei
05e9e0bfa9 build: Update notify dependency (#5838) 2019-07-09 21:33:22 +01:00
Simon Frei
ba056578ec lib: Add util.Service as suture.Service template (fixes #5801) (#5806) 2019-07-09 11:40:30 +02:00
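The helper wraps a plain serve function into a suture-compatible service; the beacon and api diffs below use it as util.AsService(s.serve), with serve taking a stop channel. A simplified sketch of that shape (assumed, not copied from lib/util):

    type serviceFunc func(stop chan struct{})

    type wrappedService struct {
        serve serviceFunc
        stop  chan struct{}
    }

    // AsService turns serve(stop) into something with the
    // suture.Service Serve/Stop methods.
    func AsService(fn serviceFunc) *wrappedService {
        return &wrappedService{serve: fn, stop: make(chan struct{})}
    }

    func (s *wrappedService) Serve() { s.serve(s.stop) }
    func (s *wrappedService) Stop()  { close(s.stop) }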
Jakob Borg
d0ab65a178 cmd/stcrashreceiver: Don't leak clients
Use a global raven.Client, because each one allocates its own
http.Client with a separate CA bundle and infinite connection idle
time. Infinite connection idle time means that if the client is never
used again it will keep the connection around indefinitely, never
checking whether it has been closed server side. This leaks about a
megabyte of memory for each client ever created.

client.Close() doesn't help with this because the http.Client is still
around, retained by its own goroutines.

The thing with the map is just to retain the API on sendReport, even
though there will in practice only ever be one DSN per process
instance...
2019-07-09 11:11:06 +02:00
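The fix, shown in the first file diff below, is to create at most one raven.Client per DSN and reuse it. The cache boils down to this (the helper name is illustrative; the diff inlines the logic in sendReport):

    var (
        clients    = make(map[string]*raven.Client)
        clientsMut sync.Mutex
    )

    // clientFor returns the cached client for dsn, creating it at most once.
    func clientFor(dsn string) (*raven.Client, error) {
        clientsMut.Lock()
        defer clientsMut.Unlock()
        if cli, ok := clients[dsn]; ok {
            return cli, nil
        }
        cli, err := raven.New(dsn)
        if err != nil {
            return nil, err
        }
        clients[dsn] = cli
        return cli, nil
    }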
Simon Frei
4cba433852 build: Add go major version to go.mod (#5822) 2019-06-30 13:18:34 +02:00
Simon Frei
863fe23347 gui, lib/model: Fix download progress accounting (fixes #5811) (#5815) 2019-06-30 09:23:47 +02:00
Jakob Borg
43b6ac9501 cmd/stcrashreceiver: Add source code loader (#5779) 2019-06-29 08:50:09 +02:00
Simon Frei
1cf352a722 lib/model: NewFileSet outside fmut (#5818) 2019-06-29 08:49:30 +02:00
Simon Frei
b58f6ca886 lib/model: Correct/unify check if item changed (#5819) 2019-06-29 07:45:41 +02:00
Jakob Borg
5cbc9089fd Merge branch 'release'
* release:
  go.mod: Update AudriusButkevicius/pfilter (fixes #5820)
2019-06-28 08:21:00 +02:00
Jakob Borg
2b4df6b874 go.mod: Update AudriusButkevicius/pfilter (fixes #5820) 2019-06-28 07:38:52 +02:00
Simon Frei
3c7e7e971d lib/model: Make jobQueue.Jobs paginated (fixes #5754) (#5804)
* lib/model: Make jobQueue.Jobs paginated (fixes #5754)

* fix, no test yet

* add test
2019-06-27 19:25:38 +01:00
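A minimal sketch of the pagination pattern this commit applies, under assumed semantics (1-based page, perPage > 0); not the actual jobQueue code:

    // paginate returns the page-th slice of perPage items, or nil when
    // the page is out of range.
    func paginate(items []string, page, perPage int) []string {
        start := (page - 1) * perPage
        if start < 0 || start >= len(items) {
            return nil
        }
        end := start + perPage
        if end > len(items) {
            end = len(items)
        }
        return items[start:end]
    }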
66 changed files with 1904 additions and 1369 deletions

View File

@@ -12,6 +12,7 @@ import (
"io/ioutil"
"regexp"
"strings"
"sync"
raven "github.com/getsentry/raven-go"
"github.com/maruel/panicparse/stack"
@@ -19,15 +20,33 @@ import (
const reportServer = "https://crash.syncthing.net/report/"
var loader = newGithubSourceCodeLoader()
func init() {
raven.SetSourceCodeLoader(loader)
}
var (
clients = make(map[string]*raven.Client)
clientsMut sync.Mutex
)
func sendReport(dsn, path string, report []byte) error {
pkt, err := parseReport(path, report)
if err != nil {
return err
}
cli, err := raven.New(dsn)
if err != nil {
return err
clientsMut.Lock()
defer clientsMut.Unlock()
cli, ok := clients[dsn]
if !ok {
cli, err = raven.New(dsn)
if err != nil {
return err
}
clients[dsn] = cli
}
// The client sets release and such on the packet before sending, in the
@@ -36,6 +55,7 @@ func sendReport(dsn, path string, report []byte) error {
cli.SetRelease(pkt.Release)
cli.SetEnvironment(pkt.Environment)
defer cli.Wait()
_, errC := cli.Capture(pkt, nil)
return <-errC
}
@@ -80,30 +100,38 @@ func parseReport(path string, report []byte) (*raven.Packet, error) {
return nil, err
}
// Lock the source code loader to the version we are processing here.
if version.commit != "" {
// We have a commit hash, so we know exactly which source to use
loader.LockWithVersion(version.commit)
} else if strings.HasPrefix(version.tag, "v") {
// Let's hope the tag is close enough
loader.LockWithVersion(version.tag)
} else {
// Last resort
loader.LockWithVersion("master")
}
defer loader.Unlock()
var trace raven.Stacktrace
for _, gr := range ctx.Goroutines {
if gr.First {
trace.Frames = make([]*raven.StacktraceFrame, len(gr.Stack.Calls))
for i, sc := range gr.Stack.Calls {
trace.Frames[len(trace.Frames)-1-i] = &raven.StacktraceFrame{
Function: sc.Func.Name(),
Module: sc.Func.PkgName(),
Filename: sc.SrcPath,
Lineno: sc.Line,
}
trace.Frames[len(trace.Frames)-1-i] = raven.NewStacktraceFrame(0, sc.Func.Name(), sc.SrcPath, sc.Line, 3, nil)
}
break
}
}
pkt := &raven.Packet{
Message: string(subjectLine),
Platform: "go",
Release: version.tag,
Environment: version.environment(),
Tags: raven.Tags{
raven.Tag{Key: "version", Value: version.version},
raven.Tag{Key: "tag", Value: version.tag},
raven.Tag{Key: "commit", Value: version.commit},
raven.Tag{Key: "codename", Value: version.codename},
raven.Tag{Key: "runtime", Value: version.runtime},
raven.Tag{Key: "goos", Value: version.goos},
@@ -115,6 +143,9 @@ func parseReport(path string, report []byte) (*raven.Packet, error) {
},
Interfaces: []raven.Interface{&trace},
}
if version.commit != "" {
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
}
return pkt, nil
}
@@ -133,6 +164,19 @@ type version struct {
builder string // "jb@kvin.kastelo.net"
}
func (v version) environment() string {
if v.commit != "" {
return "Development"
}
if strings.Contains(v.tag, "-rc.") {
return "Candidate"
}
if strings.Contains(v.tag, "-") {
return "Beta"
}
return "Stable"
}
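// Worked examples for environment() above (assumed inputs): a build with a
// commit hash set is "Development"; tag "v1.2.0-rc.1" gives "Candidate";
// "v1.2.0-beta.1" gives "Beta"; a plain "v1.2.0" gives "Stable".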
func parseVersion(line string) (version, error) {
m := longVersionRE.FindStringSubmatch(line)
if len(m) == 0 {

View File

@@ -0,0 +1,114 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"strings"
"sync"
"time"
)
const (
urlPrefix = "https://raw.githubusercontent.com/syncthing/syncthing/"
httpTimeout = 10 * time.Second
)
type githubSourceCodeLoader struct {
mut sync.Mutex
version string
cache map[string]map[string][][]byte // version -> file -> lines
client *http.Client
}
func newGithubSourceCodeLoader() *githubSourceCodeLoader {
return &githubSourceCodeLoader{
cache: make(map[string]map[string][][]byte),
client: &http.Client{Timeout: httpTimeout},
}
}
func (l *githubSourceCodeLoader) LockWithVersion(version string) {
l.mut.Lock()
l.version = version
if _, ok := l.cache[version]; !ok {
l.cache[version] = make(map[string][][]byte)
}
}
func (l *githubSourceCodeLoader) Unlock() {
l.mut.Unlock()
}
func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]byte, int) {
filename = filepath.ToSlash(filename)
lines, ok := l.cache[l.version][filename]
if !ok {
// Cache whatever we managed to find (or nil if nothing, so we don't try again)
defer func() {
l.cache[l.version][filename] = lines
}()
knownPrefixes := []string{"/lib/", "/cmd/"}
var idx int
for _, pref := range knownPrefixes {
idx = strings.Index(filename, pref)
if idx >= 0 {
break
}
}
if idx == -1 {
return nil, 0
}
url := urlPrefix + l.version + filename[idx:]
resp, err := l.client.Get(url)
if err != nil || resp.StatusCode != http.StatusOK {
fmt.Println("Loading source:", err.Error())
return nil, 0
}
data, err := ioutil.ReadAll(resp.Body)
_ = resp.Body.Close()
if err != nil {
fmt.Println("Loading source:", err.Error())
return nil, 0
}
lines = bytes.Split(data, []byte{'\n'})
}
return getLineFromLines(lines, line, context)
}
func getLineFromLines(lines [][]byte, line, context int) ([][]byte, int) {
if lines == nil {
// cached error from a previous failed load: return no lines
return nil, 0
}
line-- // stack trace lines are 1-indexed
start := line - context
var idx int
if start < 0 {
start = 0
idx = line
} else {
idx = context
}
end := line + context + 1
if line >= len(lines) {
return nil, 0
}
if end > len(lines) {
end = len(lines)
}
return lines[start:end], idx
}
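// Example call, given the definition above: for lines {"a", "b", "c", "d"},
// getLineFromLines(lines, 3, 1) returns {"b", "c", "d"} and index 1, i.e.
// one line of context on each side of (1-indexed) line 3.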

View File

@@ -86,9 +86,9 @@ func getStatus(w http.ResponseWriter, r *http.Request) {
}
type rateCalculator struct {
counter   *int64 // atomic, must remain 64-bit aligned
rates     []int64
prev      int64
startTime time.Time
}

View File

@@ -1,69 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"encoding/json"
"io"
"github.com/syncthing/syncthing/lib/events"
)
// The auditService subscribes to events and writes these in JSON format, one
// event per line, to the specified writer.
type auditService struct {
w io.Writer // audit destination
stop chan struct{} // signals time to stop
started chan struct{} // signals startup complete
stopped chan struct{} // signals stop complete
}
func newAuditService(w io.Writer) *auditService {
return &auditService{
w: w,
stop: make(chan struct{}),
started: make(chan struct{}),
stopped: make(chan struct{}),
}
}
// Serve runs the audit service.
func (s *auditService) Serve() {
defer close(s.stopped)
sub := events.Default.Subscribe(events.AllEvents)
defer events.Default.Unsubscribe(sub)
enc := json.NewEncoder(s.w)
// We're ready to start processing events.
close(s.started)
for {
select {
case ev := <-sub.C():
enc.Encode(ev)
case <-s.stop:
return
}
}
}
// Stop stops the audit service.
func (s *auditService) Stop() {
close(s.stop)
}
// WaitForStart returns once the audit service is ready to receive events, or
// immediately if it's already running.
func (s *auditService) WaitForStart() {
<-s.started
}
// WaitForStop returns once the audit service has stopped.
// (Needed by the tests.)
func (s *auditService) WaitForStop() {
<-s.stopped
}

View File

@@ -25,32 +25,23 @@ import (
"runtime/pprof"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/syncthing/syncthing/lib/api"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/connections"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/dialer"
"github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/locations"
"github.com/syncthing/syncthing/lib/logger"
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/syncthing"
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syncthing/syncthing/lib/upgrade"
"github.com/syncthing/syncthing/lib/ur"
"github.com/pkg/errors"
"github.com/thejerf/suture"
)
const (
@@ -69,8 +60,6 @@ const (
maxSystemLog = 250
)
var myID protocol.DeviceID
const (
usage = "syncthing [options]"
extraUsage = `
@@ -158,15 +147,14 @@ The following are valid values for the STTRACE variable:
// Environment options
var (
noUpgradeFromEnv = os.Getenv("STNOUPGRADE") != ""
innerProcess = os.Getenv("STNORESTART") != "" || os.Getenv("STMONITORED") != ""
noDefaultFolder = os.Getenv("STNODEFAULTFOLDER") != ""
)
type RuntimeOptions struct {
syncthing.Options
confDir string
resetDatabase bool
resetDeltaIdxs bool
showVersion bool
showPaths bool
showDeviceId bool
@@ -179,15 +167,12 @@ type RuntimeOptions struct {
logFile string
auditEnabled bool
auditFile string
verbose bool
paused bool
unpaused bool
guiAddress string
guiAPIKey string
generateDir string
noRestart bool
profiler string
assetDir string
cpuProfile bool
stRestarting bool
logFlags int
@@ -197,9 +182,12 @@ type RuntimeOptions struct {
func defaultRuntimeOptions() RuntimeOptions {
options := RuntimeOptions{
Options: syncthing.Options{
AssetDir: os.Getenv("STGUIASSETS"),
NoUpgrade: os.Getenv("STNOUPGRADE") != "",
ProfilerURL: os.Getenv("STPROFILER"),
},
noRestart: os.Getenv("STNORESTART") != "",
profiler: os.Getenv("STPROFILER"),
assetDir: os.Getenv("STGUIASSETS"),
cpuProfile: os.Getenv("STCPUPROFILE") != "",
stRestarting: os.Getenv("STRESTART") != "",
logFlags: log.Ltime,
@@ -232,7 +220,7 @@ func parseCommandLineOptions() RuntimeOptions {
flag.BoolVar(&options.browserOnly, "browser-only", false, "Open GUI in browser")
flag.BoolVar(&options.noRestart, "no-restart", options.noRestart, "Disable monitor process, managed restarts and log file writing")
flag.BoolVar(&options.resetDatabase, "reset-database", false, "Reset the database, forcing a full rescan and resync")
flag.BoolVar(&options.resetDeltaIdxs, "reset-deltas", false, "Reset delta index IDs, forcing a full index exchange")
flag.BoolVar(&options.ResetDeltaIdxs, "reset-deltas", false, "Reset delta index IDs, forcing a full index exchange")
flag.BoolVar(&options.doUpgrade, "upgrade", false, "Perform upgrade")
flag.BoolVar(&options.doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
flag.BoolVar(&options.showVersion, "version", false, "Show version")
@@ -241,7 +229,7 @@ func parseCommandLineOptions() RuntimeOptions {
flag.BoolVar(&options.showDeviceId, "device-id", false, "Show the device ID")
flag.StringVar(&options.upgradeTo, "upgrade-to", options.upgradeTo, "Force upgrade directly from specified URL")
flag.BoolVar(&options.auditEnabled, "audit", false, "Write events to audit file")
flag.BoolVar(&options.verbose, "verbose", false, "Print verbose log output")
flag.BoolVar(&options.Verbose, "verbose", false, "Print verbose log output")
flag.BoolVar(&options.paused, "paused", false, "Start with all devices and folders paused")
flag.BoolVar(&options.unpaused, "unpaused", false, "Start with all devices and folders unpaused")
flag.StringVar(&options.logFile, "logfile", options.logFile, "Log file name (still always logs to stdout). Cannot be used together with -no-restart/STNORESTART environment variable.")
@@ -264,33 +252,6 @@ func parseCommandLineOptions() RuntimeOptions {
return options
}
// exiter implements api.Controller
type exiter struct {
stop chan int
}
func (e *exiter) Restart() {
l.Infoln("Restarting")
e.stop <- exitRestarting
}
func (e *exiter) Shutdown() {
l.Infoln("Shutting down")
e.stop <- exitSuccess
}
func (e *exiter) ExitUpgrading() {
l.Infoln("Shutting down after upgrade")
e.stop <- exitUpgrading
}
// waitForExit must be called synchronously.
func (e *exiter) waitForExit() int {
return <-e.stop
}
var exit = &exiter{make(chan int)}
func main() {
options := parseCommandLineOptions()
l.SetFlags(options.logFlags)
@@ -339,10 +300,10 @@ func main() {
options.logFile = locations.Get(locations.LogFile)
}
if options.assetDir == "" {
if options.AssetDir == "" {
// The asset dir is blank if STGUIASSETS wasn't set, in which case we
// should look for extra assets in the default place.
options.assetDir = locations.Get(locations.GUIAssets)
options.AssetDir = locations.Get(locations.GUIAssets)
}
if options.showVersion {
@@ -370,13 +331,12 @@ func main() {
os.Exit(exitError)
}
myID = protocol.NewDeviceID(cert.Certificate[0])
fmt.Println(myID)
fmt.Println(protocol.NewDeviceID(cert.Certificate[0]))
return
}
if options.browserOnly {
if err := openGUI(); err != nil {
if err := openGUI(protocol.EmptyDeviceID); err != nil {
l.Warnln("Failed to open web UI:", err)
os.Exit(exitError)
}
@@ -433,8 +393,8 @@ func main() {
}
}
func openGUI() error {
cfg, err := loadOrDefaultConfig()
func openGUI(myID protocol.DeviceID) error {
cfg, err := loadOrDefaultConfig(myID)
if err != nil {
return err
}
@@ -458,31 +418,26 @@ func generate(generateDir string) error {
return err
}
var myID protocol.DeviceID
certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err == nil {
l.Warnln("Key exists; will not overwrite.")
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
} else {
cert, err = tlsutil.NewCertificate(certFile, keyFile, tlsDefaultCommonName)
if err != nil {
return errors.Wrap(err, "create certificate")
}
myID = protocol.NewDeviceID(cert.Certificate[0])
if err != nil {
return errors.Wrap(err, "load certificate")
}
if err == nil {
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
}
}
myID = protocol.NewDeviceID(cert.Certificate[0])
l.Infoln("Device ID:", myID)
cfgFile := filepath.Join(dir, "config.xml")
if _, err := os.Stat(cfgFile); err == nil {
l.Warnln("Config exists; will not overwrite.")
return nil
}
cfg, err := defaultConfig(cfgFile)
cfg, err := defaultConfig(cfgFile, myID)
if err != nil {
return err
}
@@ -516,7 +471,7 @@ func debugFacilities() string {
}
func checkUpgrade() upgrade.Release {
cfg, _ := loadOrDefaultConfig()
cfg, _ := loadOrDefaultConfig(protocol.EmptyDeviceID)
opts := cfg.Options()
release, err := upgrade.LatestRelease(opts.ReleasesURL, build.Version, opts.UpgradeToPreReleases)
if err != nil {
@@ -536,7 +491,7 @@ func checkUpgrade() upgrade.Release {
func performUpgrade(release upgrade.Release) {
// Use leveldb database locks to protect against concurrent upgrades
_, err := db.Open(locations.Get(locations.Database))
_, err := syncthing.OpenGoleveldb(locations.Get(locations.Database))
if err == nil {
err = upgrade.To(release)
if err != nil {
@@ -557,7 +512,7 @@ func performUpgrade(release upgrade.Release) {
}
func upgradeViaRest() error {
cfg, _ := loadOrDefaultConfig()
cfg, _ := loadOrDefaultConfig(protocol.EmptyDeviceID)
u, err := url.Parse(cfg.GUI().URL())
if err != nil {
return err
@@ -593,47 +548,13 @@ func upgradeViaRest() error {
}
func syncthingMain(runtimeOptions RuntimeOptions) {
setupSignalHandling()
// Create a main service manager. We'll add things to this as we go along.
// We want any logging it does to go through our log system.
mainService := suture.New("main", suture.Spec{
Log: func(line string) {
l.Debugln(line)
},
PassThroughPanics: true,
})
mainService.ServeBackground()
// Set a log prefix similar to the ID we will have later on, or early log
// lines look ugly.
l.SetPrefix("[start] ")
if runtimeOptions.auditEnabled {
startAuditing(mainService, runtimeOptions.auditFile)
}
if runtimeOptions.verbose {
mainService.Add(newVerboseService())
}
errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)
// Event subscription for the API; must start early to catch the early
// events. The LocalChangeDetected event might overwhelm the event
// receiver in some situations so we will not subscribe to it here.
defaultSub := events.NewBufferedSubscription(events.Default.Subscribe(api.DefaultEventMask), api.EventSubBufferSize)
diskSub := events.NewBufferedSubscription(events.Default.Subscribe(api.DiskEventMask), api.EventSubBufferSize)
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
// Attempt to increase the limit on number of open files to the maximum
// allowed, in case we have many peers. We don't really care enough to
// report the error if there is one.
osutil.MaximizeOpenFileLimit()
// Print our version information up front, so any crash that happens
// early etc. will have it available.
l.Infoln(build.LongVersion)
// Ensure that we have a certificate and key.
cert, err := tls.LoadX509KeyPair(
@@ -648,190 +569,46 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
tlsDefaultCommonName,
)
if err != nil {
l.Infoln("Failed to generate certificate:", err)
os.Exit(exitError)
l.Warnln("Failed to generate certificate:", err)
os.Exit(1)
}
}
myID := protocol.NewDeviceID(cert.Certificate[0])
myID = protocol.NewDeviceID(cert.Certificate[0])
l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))
l.Infoln(build.LongVersion)
l.Infoln("My ID:", myID)
// Select SHA256 implementation and report. Affected by the
// STHASHING environment variable.
sha256.SelectAlgo()
sha256.Report()
// Emit the Starting event, now that we know who we are.
events.Default.Log(events.Starting, map[string]string{
"home": locations.GetBaseDir(locations.ConfigBaseDir),
"myID": myID.String(),
})
cfg, err := loadConfigAtStartup(runtimeOptions.allowNewerConfig)
cfg, err := loadConfigAtStartup(runtimeOptions.allowNewerConfig, myID)
if err != nil {
l.Warnln("Failed to initialize config:", err)
os.Exit(exitError)
}
if err := checkShortIDs(cfg); err != nil {
l.Warnln("Short device IDs are in conflict. Unlucky!\n Regenerate the device ID of one of the following:\n ", err)
os.Exit(exitError)
}
if len(runtimeOptions.profiler) > 0 {
go func() {
l.Debugln("Starting profiler on", runtimeOptions.profiler)
runtime.SetBlockProfileRate(1)
err := http.ListenAndServe(runtimeOptions.profiler, nil)
if err != nil {
l.Warnln(err)
os.Exit(exitError)
}
}()
}
perf := ur.CpuBench(3, 150*time.Millisecond, true)
l.Infof("Hashing performance is %.02f MB/s", perf)
dbFile := locations.Get(locations.Database)
ldb, err := db.Open(dbFile)
if err != nil {
l.Warnln("Error opening database:", err)
os.Exit(exitError)
}
if err := db.UpdateSchema(ldb); err != nil {
l.Warnln("Database schema:", err)
os.Exit(exitError)
}
if runtimeOptions.resetDeltaIdxs {
l.Infoln("Reinitializing delta index IDs")
db.DropDeltaIndexIDs(ldb)
}
protectedFiles := []string{
locations.Get(locations.Database),
locations.Get(locations.ConfigFile),
locations.Get(locations.CertFile),
locations.Get(locations.KeyFile),
}
// Remove database entries for folders that no longer exist in the config
folders := cfg.Folders()
for _, folder := range ldb.ListFolders() {
if _, ok := folders[folder]; !ok {
l.Infof("Cleaning data for dropped folder %q", folder)
db.DropFolder(ldb, folder)
}
}
// Grab the previously running version string from the database.
miscDB := db.NewMiscDataNamespace(ldb)
prevVersion, _ := miscDB.String("prevVersion")
// Strip away prerelease/beta stuff and just compare the release
// numbers. 0.14.44 to 0.14.45-banana is an upgrade, 0.14.45-banana to
// 0.14.45-pineapple is not.
prevParts := strings.Split(prevVersion, "-")
curParts := strings.Split(build.Version, "-")
if prevParts[0] != curParts[0] {
if prevVersion != "" {
l.Infoln("Detected upgrade from", prevVersion, "to", build.Version)
}
// Drop delta indexes in case we've changed random stuff we
// shouldn't have. We will resend our index on next connect.
db.DropDeltaIndexIDs(ldb)
// Remember the new version.
miscDB.PutString("prevVersion", build.Version)
}
m := model.NewModel(cfg, myID, "syncthing", build.Version, ldb, protectedFiles)
if t := os.Getenv("STDEADLOCKTIMEOUT"); t != "" {
if secs, _ := strconv.Atoi(t); secs > 0 {
m.StartDeadlockDetector(time.Duration(secs) * time.Second)
}
} else if !build.IsRelease || build.IsBeta {
m.StartDeadlockDetector(20 * time.Minute)
}
if runtimeOptions.unpaused {
setPauseState(cfg, false)
} else if runtimeOptions.paused {
setPauseState(cfg, true)
}
// Add and start folders
for _, folderCfg := range cfg.Folders() {
if folderCfg.Paused {
folderCfg.CreateRoot()
continue
}
m.AddFolder(folderCfg)
m.StartFolder(folderCfg.ID)
dbFile := locations.Get(locations.Database)
ldb, err := syncthing.OpenGoleveldb(dbFile)
if err != nil {
l.Warnln("Error opening database:", err)
os.Exit(1)
}
mainService.Add(m)
// Start discovery
cachedDiscovery := discover.NewCachingMux()
mainService.Add(cachedDiscovery)
// The TLS configuration is used for both the listening socket and outgoing
// connections.
tlsCfg := tlsutil.SecureDefault()
tlsCfg.Certificates = []tls.Certificate{cert}
tlsCfg.NextProtos = []string{bepProtocolName}
tlsCfg.ClientAuth = tls.RequestClientCert
tlsCfg.SessionTicketsDisabled = true
tlsCfg.InsecureSkipVerify = true
// Start connection management
connectionsService := connections.NewService(cfg, myID, m, tlsCfg, cachedDiscovery, bepProtocolName, tlsDefaultCommonName)
mainService.Add(connectionsService)
if cfg.Options().GlobalAnnEnabled {
for _, srv := range cfg.GlobalDiscoveryServers() {
l.Infoln("Using discovery server", srv)
gd, err := discover.NewGlobal(srv, cert, connectionsService)
if err != nil {
l.Warnln("Global discovery:", err)
continue
}
// Each global discovery server gets its results cached for five
// minutes, and is not asked again for a minute when it's returned
// unsuccessfully.
cachedDiscovery.Add(gd, 5*time.Minute, time.Minute)
}
appOpts := runtimeOptions.Options
if runtimeOptions.auditEnabled {
appOpts.AuditWriter = auditWriter(runtimeOptions.auditFile)
}
if t := os.Getenv("STDEADLOCKTIMEOUT"); t != "" {
secs, _ := strconv.Atoi(t)
appOpts.DeadlockTimeoutS = secs
}
if cfg.Options().LocalAnnEnabled {
// v4 broadcasts
bcd, err := discover.NewLocal(myID, fmt.Sprintf(":%d", cfg.Options().LocalAnnPort), connectionsService)
if err != nil {
l.Warnln("IPv4 local discovery:", err)
} else {
cachedDiscovery.Add(bcd, 0, 0)
}
// v6 multicasts
mcd, err := discover.NewLocal(myID, cfg.Options().LocalAnnMCAddr, connectionsService)
if err != nil {
l.Warnln("IPv6 local discovery:", err)
} else {
cachedDiscovery.Add(mcd, 0, 0)
}
app := syncthing.New(cfg, ldb, cert, appOpts)
setupSignalHandling(app)
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
if runtimeOptions.cpuProfile {
@@ -846,49 +623,15 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
}
}
// Candidate builds always run with usage reporting.
if opts := cfg.Options(); build.IsCandidate {
l.Infoln("Anonymous usage reporting is always enabled for candidate releases.")
if opts.URAccepted != ur.Version {
opts.URAccepted = ur.Version
cfg.SetOptions(opts)
cfg.Save()
// Unique ID will be set and config saved below if necessary.
}
}
// If we are going to do usage reporting, ensure we have a valid unique ID.
if opts := cfg.Options(); opts.URAccepted > 0 && opts.URUniqueID == "" {
opts.URUniqueID = rand.String(8)
cfg.SetOptions(opts)
cfg.Save()
}
usageReportingSvc := ur.New(cfg, m, connectionsService, noUpgradeFromEnv)
mainService.Add(usageReportingSvc)
// GUI
setupGUI(mainService, cfg, m, defaultSub, diskSub, cachedDiscovery, connectionsService, usageReportingSvc, errors, systemLog, runtimeOptions)
myDev, _ := cfg.Device(myID)
l.Infof(`My name is "%v"`, myDev.Name)
for _, device := range cfg.Devices() {
if device.DeviceID != myID {
l.Infof(`Device %s is "%v" at %v`, device.DeviceID, device.Name, device.Addresses)
}
}
if opts := cfg.Options(); opts.RestartOnWakeup {
go standbyMonitor()
go standbyMonitor(app)
}
// Candidate builds should auto upgrade. Make sure the option is set,
// unless we are in a build where it's disabled or the STNOUPGRADE
// environment variable is set.
if build.IsCandidate && !upgrade.DisabledByCompilation && !noUpgradeFromEnv {
if build.IsCandidate && !upgrade.DisabledByCompilation && !runtimeOptions.NoUpgrade {
l.Infoln("Automatic upgrade is always enabled for candidate releases.")
if opts := cfg.Options(); opts.AutoUpgradeIntervalH == 0 || opts.AutoUpgradeIntervalH > 24 {
opts.AutoUpgradeIntervalH = 12
@@ -902,54 +645,33 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
}
if opts := cfg.Options(); opts.AutoUpgradeIntervalH > 0 {
if noUpgradeFromEnv {
if runtimeOptions.NoUpgrade {
l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
} else {
go autoUpgrade(cfg)
go autoUpgrade(cfg, app)
}
}
if isSuperUser() {
l.Warnln("Syncthing should not run as a privileged or system user. Please consider using a normal user account.")
}
events.Default.Log(events.StartupComplete, map[string]string{
"myID": myID.String(),
})
app.Start()
cleanConfigDirectory()
if cfg.Options().SetLowPriority {
if err := osutil.SetLowPriority(); err != nil {
l.Warnln("Failed to lower process priority:", err)
}
if cfg.Options().StartBrowser && !runtimeOptions.noBrowser && !runtimeOptions.stRestarting {
// Can potentially block if the utility we are invoking doesn't
// fork, and just execs, hence keep it in its own routine.
go func() { _ = openURL(cfg.GUI().URL()) }()
}
code := exit.waitForExit()
mainService.Stop()
done := make(chan struct{})
go func() {
ldb.Close()
close(done)
}()
select {
case <-done:
case <-time.After(10 * time.Second):
l.Warnln("Database failed to stop within 10s")
}
l.Infoln("Exiting")
status := app.Wait()
if runtimeOptions.cpuProfile {
pprof.StopCPUProfile()
}
os.Exit(code)
os.Exit(int(status))
}
func setupSignalHandling() {
func setupSignalHandling(app *syncthing.App) {
// Exit cleanly with "restarting" code on SIGHUP.
restartSign := make(chan os.Signal, 1)
@@ -957,7 +679,7 @@ func setupSignalHandling() {
signal.Notify(restartSign, sigHup)
go func() {
<-restartSign
exit.Restart()
app.Stop(syncthing.ExitRestart)
}()
// Exit with "success" code (no restart) on INT/TERM
@@ -967,26 +689,26 @@ func setupSignalHandling() {
signal.Notify(stopSign, os.Interrupt, sigTerm)
go func() {
<-stopSign
exit.Shutdown()
app.Stop(syncthing.ExitSuccess)
}()
}
func loadOrDefaultConfig() (config.Wrapper, error) {
func loadOrDefaultConfig(myID protocol.DeviceID) (config.Wrapper, error) {
cfgFile := locations.Get(locations.ConfigFile)
cfg, err := config.Load(cfgFile, myID)
if err != nil {
cfg, err = defaultConfig(cfgFile)
cfg, err = defaultConfig(cfgFile, myID)
}
return cfg, err
}
func loadConfigAtStartup(allowNewerConfig bool) (config.Wrapper, error) {
func loadConfigAtStartup(allowNewerConfig bool, myID protocol.DeviceID) (config.Wrapper, error) {
cfgFile := locations.Get(locations.ConfigFile)
cfg, err := config.Load(cfgFile, myID)
if os.IsNotExist(err) {
cfg, err = defaultConfig(cfgFile)
cfg, err = defaultConfig(cfgFile, myID)
if err != nil {
return nil, errors.Wrap(err, "failed to generate default config")
}
@@ -1044,8 +766,7 @@ func copyFile(src, dst string) error {
return nil
}
func startAuditing(mainService *suture.Supervisor, auditFile string) {
func auditWriter(auditFile string) io.Writer {
var fd io.Writer
var err error
var auditDest string
@@ -1072,49 +793,12 @@ func startAuditing(mainService *suture.Supervisor, auditFile string) {
auditDest = auditFile
}
auditService := newAuditService(fd)
mainService.Add(auditService)
// We wait for the audit service to fully start before we return, to
// ensure we capture all events from the start.
auditService.WaitForStart()
l.Infoln("Audit log in", auditDest)
return fd
}
func setupGUI(mainService *suture.Supervisor, cfg config.Wrapper, m model.Model, defaultSub, diskSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connections.Service, urService *ur.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
guiCfg := cfg.GUI()
if !guiCfg.Enabled {
return
}
if guiCfg.InsecureAdminAccess {
l.Warnln("Insecure admin access is enabled.")
}
cpu := newCPUService()
mainService.Add(cpu)
summaryService := model.NewFolderSummaryService(cfg, m, myID)
mainService.Add(summaryService)
apiSvc := api.New(myID, cfg, runtimeOptions.assetDir, tlsDefaultCommonName, m, defaultSub, diskSub, discoverer, connectionsService, urService, summaryService, errors, systemLog, cpu, exit, noUpgradeFromEnv)
mainService.Add(apiSvc)
if err := apiSvc.WaitForStart(); err != nil {
l.Warnln("Failed starting API:", err)
os.Exit(exitError)
}
if cfg.Options().StartBrowser && !runtimeOptions.noBrowser && !runtimeOptions.stRestarting {
// Can potentially block if the utility we are invoking doesn't
// fork, and just execs, hence keep it in its own routine.
go func() { _ = openURL(guiCfg.URL()) }()
}
}
func defaultConfig(cfgFile string) (config.Wrapper, error) {
func defaultConfig(cfgFile string, myID protocol.DeviceID) (config.Wrapper, error) {
newCfg, err := config.NewWithFreePorts(myID)
if err != nil {
return nil, err
@@ -1157,7 +841,7 @@ func ensureDir(dir string, mode fs.FileMode) error {
return nil
}
func standbyMonitor() {
func standbyMonitor(app *syncthing.App) {
restartDelay := 60 * time.Second
now := time.Now()
for {
@@ -1170,14 +854,14 @@ func standbyMonitor() {
// things a moment to stabilize.
time.Sleep(restartDelay)
exit.Restart()
app.Stop(syncthing.ExitRestart)
return
}
now = time.Now()
}
}
func autoUpgrade(cfg config.Wrapper) {
func autoUpgrade(cfg config.Wrapper, app *syncthing.App) {
timer := time.NewTimer(0)
sub := events.Default.Subscribe(events.DeviceConnected)
for {
@@ -1228,7 +912,7 @@ func autoUpgrade(cfg config.Wrapper) {
events.Default.Unsubscribe(sub)
l.Warnf("Automatically upgraded to version %q. Restarting in 1 minute.", rel.Tag)
time.Sleep(time.Minute)
exit.ExitUpgrading()
app.Stop(syncthing.ExitUpgrade)
return
}
}
@@ -1276,28 +960,13 @@ func cleanConfigDirectory() {
}
}
// checkShortIDs verifies that the configuration won't result in duplicate
// short ID:s; that is, that the devices in the cluster all have unique
// initial 64 bits.
func checkShortIDs(cfg config.Wrapper) error {
exists := make(map[protocol.ShortID]protocol.DeviceID)
for deviceID := range cfg.Devices() {
shortID := deviceID.Short()
if otherID, ok := exists[shortID]; ok {
return fmt.Errorf("%v in conflict with %v", deviceID, otherID)
}
exists[shortID] = deviceID
}
return nil
}
func showPaths(options RuntimeOptions) {
fmt.Printf("Configuration file:\n\t%s\n\n", locations.Get(locations.ConfigFile))
fmt.Printf("Database directory:\n\t%s\n\n", locations.Get(locations.Database))
fmt.Printf("Device private key & certificate files:\n\t%s\n\t%s\n\n", locations.Get(locations.KeyFile), locations.Get(locations.CertFile))
fmt.Printf("HTTPS private key & certificate files:\n\t%s\n\t%s\n\n", locations.Get(locations.HTTPSKeyFile), locations.Get(locations.HTTPSCertFile))
fmt.Printf("Log file:\n\t%s\n\n", options.logFile)
fmt.Printf("GUI override directory:\n\t%s\n\n", options.assetDir)
fmt.Printf("GUI override directory:\n\t%s\n\n", options.AssetDir)
fmt.Printf("Default sync folder directory:\n\t%s\n\n", locations.Get(locations.DefFolder))
}

View File

@@ -20,6 +20,7 @@ import (
"github.com/syncthing/syncthing/lib/locations"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
)
@@ -448,7 +449,7 @@ func childEnv() []string {
// panicUploadMaxWait uploading panics...
func maybeReportPanics() {
// Try to get a config to see if/where panics should be reported.
cfg, err := loadOrDefaultConfig()
cfg, err := loadOrDefaultConfig(protocol.EmptyDeviceID)
if err != nil {
l.Warnln("Couldn't load config; not reporting crash")
return

go.mod (10 changed lines)
View File

@@ -19,9 +19,9 @@ require (
github.com/jackpal/gateway v0.0.0-20161225004348-5795ac81146e
github.com/kballard/go-shellquote v0.0.0-20170619183022-cd60e84ee657
github.com/kr/pretty v0.1.0 // indirect
github.com/lib/pq v1.1.1
github.com/lib/pq v1.2.0
github.com/lucas-clemente/quic-go v0.11.2
github.com/maruel/panicparse v1.2.1
github.com/maruel/panicparse v1.3.0
github.com/mattn/go-isatty v0.0.7
github.com/minio/sha256-simd v0.0.0-20190117184323-cc1980cb0338
github.com/onsi/ginkgo v1.8.0 // indirect
@@ -33,14 +33,14 @@ require (
github.com/prometheus/client_golang v0.9.4
github.com/rcrowley/go-metrics v0.0.0-20171128170426-e181e095bae9
github.com/sasha-s/go-deadlock v0.2.0
github.com/syncthing/notify v0.0.0-20181107104724-4e389ea6c0d8
github.com/syncthing/notify v0.0.0-20190709140112-69c7a957d3e2
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
github.com/thejerf/suture v3.0.2+incompatible
github.com/urfave/cli v1.20.0
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980
golang.org/x/sys v0.0.0-20190613124609-5ed2794edfdc // indirect
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb // indirect
golang.org/x/text v0.3.2
golang.org/x/time v0.0.0-20170927054726-6dc17368e09b
gopkg.in/asn1-ber.v1 v1.0.0-20170511165959-379148ca0225 // indirect
@@ -48,3 +48,5 @@ require (
gopkg.in/ldap.v2 v2.5.1
gopkg.in/yaml.v2 v2.2.2 // indirect
)
go 1.12

go.sum (16 changed lines)
View File

@@ -1,7 +1,5 @@
github.com/AudriusButkevicius/go-nat-pmp v0.0.0-20160522074932-452c97607362 h1:l4qGIzSY0WhdXdR74XMYAtfc0Ri/RJVM4p6x/E/+WkA=
github.com/AudriusButkevicius/go-nat-pmp v0.0.0-20160522074932-452c97607362/go.mod h1:CEaBhA5lh1spxbPOELh5wNLKGsVQoahjUhVrJViVK8s=
github.com/AudriusButkevicius/pfilter v0.0.0-20190525131515-730b0de4d4de h1:w1VG0ehgPh2ucQGO7wL9TBmHLzMo4dduYwyp2lhs8+A=
github.com/AudriusButkevicius/pfilter v0.0.0-20190525131515-730b0de4d4de/go.mod h1:1N0EEx/irz4B1qV17wW82TFbjQrE7oX316Cki6eDY0Q=
github.com/AudriusButkevicius/pfilter v0.0.0-20190627213056-c55ef6137fc6 h1:Apvc4kyfdrOxG+F5dn8osz+45kwGJa6CySQn0tB38SU=
github.com/AudriusButkevicius/pfilter v0.0.0-20190627213056-c55ef6137fc6/go.mod h1:1N0EEx/irz4B1qV17wW82TFbjQrE7oX316Cki6eDY0Q=
github.com/AudriusButkevicius/recli v0.0.5 h1:xUa55PvWTHBm17T6RvjElRO3y5tALpdceH86vhzQ5wg=
@@ -74,18 +72,24 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lucas-clemente/quic-go v0.11.2 h1:Mop0ac3zALaBR3wGs6j8OYe/tcFvFsxTUFMkE/7yUOI=
github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
github.com/marten-seemann/qtls v0.2.3 h1:0yWJ43C62LsZt08vuQJDK1uC1czUc3FJeCLPoNAI4vA=
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
github.com/maruel/panicparse v1.2.1 h1:mNlHGiakrixj+AwF/qRpTwnj+zsWYPRLQ7wRqnJsfO0=
github.com/maruel/panicparse v1.2.1/go.mod h1:vszMjr5QQ4F5FSRfraldcIA/BCw5xrdLL+zEcU2nRBs=
github.com/maruel/panicparse v1.3.0 h1:1Ep/RaYoSL1r5rTILHQQbyzHG8T4UP5ZbQTYTo4bdDc=
github.com/maruel/panicparse v1.3.0/go.mod h1:vszMjr5QQ4F5FSRfraldcIA/BCw5xrdLL+zEcU2nRBs=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/minio/sha256-simd v0.0.0-20190117184323-cc1980cb0338 h1:USW1+zAUkUSvk097CAX/i8KR3r6f+DHNhk6Xe025Oyw=
github.com/minio/sha256-simd v0.0.0-20190117184323-cc1980cb0338/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
@@ -135,8 +139,8 @@ github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syncthing/notify v0.0.0-20181107104724-4e389ea6c0d8 h1:ewsMW/a4xDpqHyIteoD29ayMn6GdkFZc2T0PX2K6PAg=
github.com/syncthing/notify v0.0.0-20181107104724-4e389ea6c0d8/go.mod h1:Sn4ChoS7e4FxjCN1XHPVBT43AgnRLbuaB8pEc1Zcdjg=
github.com/syncthing/notify v0.0.0-20190709140112-69c7a957d3e2 h1:6tuEEEpg+mxM82E0YingzoXzXXISYR/o/7I9n573LWI=
github.com/syncthing/notify v0.0.0-20190709140112-69c7a957d3e2/go.mod h1:Sn4ChoS7e4FxjCN1XHPVBT43AgnRLbuaB8pEc1Zcdjg=
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw=
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/thejerf/suture v3.0.2+incompatible h1:GtMydYcnK4zBJ0KL6Lx9vLzl6Oozb65wh252FTBxrvM=
@@ -171,8 +175,8 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e h1:ZytStCyV048ZqDsWHiYDdoI2Vd4msMcrDECFxS+tL9c=
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190613124609-5ed2794edfdc h1:x+/QxSNkVFAC+v4pL1f6mZr1z+qgi+FoR8ccXZPVC10=
golang.org/x/sys v0.0.0-20190613124609-5ed2794edfdc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=

View File

@@ -325,7 +325,7 @@
<span ng-switch-when="idle"><span class="hidden-xs" translate>Up to Date</span><span class="visible-xs" aria-label="{{'Up to Date' | translate}}"><i class="fas fa-fw fa-check"></i></span></span>
<span ng-switch-when="syncing">
<span class="hidden-xs" translate>Syncing</span>
<span ng-show="syncRemaining(folder.id)">({{syncPercentage(folder.id) | percent}}, {{syncRemaining(folder.id) | binary}}B)</span>
<span>({{syncPercentage(folder.id) | percent}}, {{model[folder.id].needBytes | binary}}B)</span>
</span>
<span ng-switch-when="outofsync"><span class="hidden-xs" translate>Out of Sync</span><span class="visible-xs" aria-label="{{'Out of Sync' | translate}}"><i class="fas fa-fw fa-exclamation-circle"></i></span></span>
<span ng-switch-when="faileditems"><span class="hidden-xs" translate>Failed Items</span><span class="visible-xs" aria-label="{{'Failed Items' | translate}}"><i class="fas fa-fw fa-exclamation-circle"></i></span></span>

View File

@@ -816,22 +816,6 @@ angular.module('syncthing.core')
return Math.floor(pct);
};
$scope.syncRemaining = function (folder) {
// Remaining sync bytes
if (typeof $scope.model[folder] === 'undefined') {
return 0;
}
if ($scope.model[folder].globalBytes === 0) {
return 0;
}
var bytes = $scope.model[folder].globalBytes - $scope.model[folder].inSyncBytes;
if (isNaN(bytes) || bytes < 0) {
return 0;
}
return bytes;
};
$scope.scanPercentage = function (folder) {
if (!$scope.scanProgress[folder]) {
return undefined;

View File

@@ -94,7 +94,7 @@
<p class="help-block" ng-if="!upgradeInfo">
<span translate>Unavailable/Disabled by administrator or maintainer</span>
</p>
<p class="help-block" ng-if="version.isCandidate"">
<p class="help-block" ng-if="version.isCandidate && upgradeInfo"">
<span translate>Automatic upgrades are always enabled for candidate releases.</span>
</p>
</div>

View File

@@ -28,6 +28,10 @@ import (
"time"
metrics "github.com/rcrowley/go-metrics"
"github.com/thejerf/suture"
"github.com/vitrun/qart/qr"
"golang.org/x/crypto/bcrypt"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/connections"
@@ -44,9 +48,7 @@ import (
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syncthing/syncthing/lib/upgrade"
"github.com/syncthing/syncthing/lib/ur"
"github.com/thejerf/suture"
"github.com/vitrun/qart/qr"
"golang.org/x/crypto/bcrypt"
"github.com/syncthing/syncthing/lib/util"
)
// matches a bcrypt hash and not too much else
@@ -60,6 +62,8 @@ const (
)
type service struct {
suture.Service
id protocol.DeviceID
cfg config.Wrapper
statics *staticsServer
@@ -102,7 +106,7 @@ type Service interface {
}
func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonName string, m model.Model, defaultSub, diskSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connections.Service, urService *ur.Service, fss model.FolderSummaryService, errors, systemLog logger.Recorder, cpu Rater, contr Controller, noUpgrade bool) Service {
return &service{
s := &service{
id: id,
cfg: cfg,
statics: newStaticsServer(cfg.GUI().Theme, assetDir),
@@ -123,10 +127,11 @@ func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonNam
contr: contr,
noUpgrade: noUpgrade,
tlsDefaultCommonName: tlsDefaultCommonName,
stop: make(chan struct{}),
configChanged: make(chan struct{}),
startedOnce: make(chan struct{}),
}
s.Service = util.AsService(s.serve)
return s
}
func (s *service) WaitForStart() error {
@@ -190,7 +195,7 @@ func sendJSON(w http.ResponseWriter, jsonObject interface{}) {
fmt.Fprintf(w, "%s\n", bs)
}
func (s *service) Serve() {
func (s *service) serve(stop chan struct{}) {
listener, err := s.getListener(s.cfg.GUI())
if err != nil {
select {
@@ -360,7 +365,7 @@ func (s *service) Serve() {
// Wait for stop, restart or error signals
select {
case <-s.stop:
case <-stop:
// Shutting down permanently
l.Debugln("shutting down (stop)")
case <-s.configChanged:
@@ -378,17 +383,11 @@ func (s *service) Complete() bool {
select {
case <-s.startedOnce:
return s.startupErr != nil
case <-s.stop:
return true
default:
}
return false
}
func (s *service) Stop() {
close(s.stop)
}
func (s *service) String() string {
return fmt.Sprintf("api.service@%p", s)
}

View File

@@ -666,7 +666,10 @@ func TestConfigPostOK(t *testing.T) {
cfg := bytes.NewBuffer([]byte(`{
"version": 15,
"folders": [
{"id": "foo"}
{
"id": "foo",
"path": "TestConfigPostOK"
}
]
}`))
@@ -677,6 +680,7 @@ func TestConfigPostOK(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Error("Expected 200 OK, not", resp.Status)
}
os.RemoveAll("TestConfigPostOK")
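// Folder paths are now mandatory (see the lib/config validation change at
// the end of this comparison), so the test supplies one and cleans it up.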
}
func TestConfigPostDupFolder(t *testing.T) {

View File

@@ -8,7 +8,6 @@ package beacon
import (
"net"
stdsync "sync"
"github.com/thejerf/suture"
)
@@ -24,21 +23,3 @@ type Interface interface {
Recv() ([]byte, net.Addr)
Error() error
}
type errorHolder struct {
err error
mut stdsync.Mutex // uses stdlib sync as I want this to be trivially embeddable, and there is no risk of blocking
}
func (e *errorHolder) setError(err error) {
e.mut.Lock()
e.err = err
e.mut.Unlock()
}
func (e *errorHolder) Error() error {
e.mut.Lock()
err := e.err
e.mut.Unlock()
return err
}

View File

@@ -11,8 +11,9 @@ import (
"net"
"time"
"github.com/syncthing/syncthing/lib/sync"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/util"
)
type Broadcast struct {
@@ -44,16 +45,16 @@ func NewBroadcast(port int) *Broadcast {
}
b.br = &broadcastReader{
port: port,
outbox: b.outbox,
connMut: sync.NewMutex(),
}
b.br.ServiceWithError = util.AsServiceWithError(b.br.serve)
b.Add(b.br)
b.bw = &broadcastWriter{
port: port,
inbox: b.inbox,
connMut: sync.NewMutex(),
}
b.bw.ServiceWithError = util.AsServiceWithError(b.bw.serve)
b.Add(b.bw)
return b
@@ -76,34 +77,42 @@ func (b *Broadcast) Error() error {
}
type broadcastWriter struct {
port int
inbox chan []byte
conn *net.UDPConn
connMut sync.Mutex
errorHolder
util.ServiceWithError
}
func (w *broadcastWriter) Serve() {
func (w *broadcastWriter) serve(stop chan struct{}) error {
l.Debugln(w, "starting")
defer l.Debugln(w, "stopping")
conn, err := net.ListenUDP("udp4", nil)
if err != nil {
l.Debugln(err)
w.setError(err)
return
return err
}
defer conn.Close()
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-stop:
case <-done:
}
conn.Close()
}()
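// The goroutine above ties the socket's lifetime to the service: when
// either stop (service termination) or done (serve returning) closes,
// conn.Close() unblocks any pending read or write on the socket.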
w.connMut.Lock()
w.conn = conn
w.connMut.Unlock()
for {
var bs []byte
select {
case bs = <-w.inbox:
case <-stop:
return nil
}
for bs := range w.inbox {
addrs, err := net.InterfaceAddrs()
if err != nil {
l.Debugln(err)
w.setError(err)
w.SetError(err)
continue
}
@@ -134,14 +143,13 @@ func (w *broadcastWriter) Serve() {
// Write timeouts should not happen. We treat it as a fatal
// error on the socket.
l.Debugln(err)
w.setError(err)
return
return err
}
if err != nil {
// Some other error that we don't expect. Debug and continue.
l.Debugln(err)
w.setError(err)
w.SetError(err)
continue
}
@@ -150,57 +158,49 @@ func (w *broadcastWriter) Serve() {
}
if success > 0 {
w.setError(nil)
w.SetError(nil)
}
}
}
func (w *broadcastWriter) Stop() {
w.connMut.Lock()
if w.conn != nil {
w.conn.Close()
}
w.connMut.Unlock()
}
func (w *broadcastWriter) String() string {
return fmt.Sprintf("broadcastWriter@%p", w)
}
type broadcastReader struct {
port int
outbox chan recv
conn *net.UDPConn
connMut sync.Mutex
errorHolder
util.ServiceWithError
}
func (r *broadcastReader) Serve() {
func (r *broadcastReader) serve(stop chan struct{}) error {
l.Debugln(r, "starting")
defer l.Debugln(r, "stopping")
conn, err := net.ListenUDP("udp4", &net.UDPAddr{Port: r.port})
if err != nil {
l.Debugln(err)
r.setError(err)
return
return err
}
defer conn.Close()
r.connMut.Lock()
r.conn = conn
r.connMut.Unlock()
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-stop:
case <-done:
}
conn.Close()
}()
bs := make([]byte, 65536)
for {
n, addr, err := conn.ReadFrom(bs)
if err != nil {
l.Debugln(err)
r.setError(err)
return
return err
}
r.setError(nil)
r.SetError(nil)
l.Debugf("recv %d bytes from %s", n, addr)
@@ -208,19 +208,12 @@ func (r *broadcastReader) Serve() {
copy(c, bs)
select {
case r.outbox <- recv{c, addr}:
case <-stop:
return nil
default:
l.Debugln("dropping message")
}
}
}
func (r *broadcastReader) Stop() {
r.connMut.Lock()
if r.conn != nil {
r.conn.Close()
}
r.connMut.Unlock()
}
func (r *broadcastReader) String() string {

View File

@@ -14,6 +14,8 @@ import (
"github.com/thejerf/suture"
"golang.org/x/net/ipv6"
"github.com/syncthing/syncthing/lib/util"
)
type Multicast struct {
@@ -45,15 +47,15 @@ func NewMulticast(addr string) *Multicast {
m.mr = &multicastReader{
addr: addr,
outbox: m.outbox,
stop: make(chan struct{}),
}
m.mr.ServiceWithError = util.AsServiceWithError(m.mr.serve)
m.Add(m.mr)
m.mw = &multicastWriter{
addr: addr,
inbox: m.inbox,
stop: make(chan struct{}),
}
m.mw.ServiceWithError = util.AsServiceWithError(m.mw.serve)
m.Add(m.mw)
return m
@@ -76,29 +78,35 @@ func (m *Multicast) Error() error {
}
type multicastWriter struct {
util.ServiceWithError
addr string
inbox <-chan []byte
errorHolder
stop chan struct{}
}
func (w *multicastWriter) Serve() {
func (w *multicastWriter) serve(stop chan struct{}) error {
l.Debugln(w, "starting")
defer l.Debugln(w, "stopping")
gaddr, err := net.ResolveUDPAddr("udp6", w.addr)
if err != nil {
l.Debugln(err)
w.setError(err)
return
return err
}
conn, err := net.ListenPacket("udp6", ":0")
if err != nil {
l.Debugln(err)
w.setError(err)
return
return err
}
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-stop:
case <-done:
}
conn.Close()
}()
pconn := ipv6.NewPacketConn(conn)
@@ -106,12 +114,18 @@ func (w *multicastWriter) Serve() {
HopLimit: 1,
}
for bs := range w.inbox {
for {
var bs []byte
select {
case bs = <-w.inbox:
case <-stop:
return nil
}
intfs, err := net.Interfaces()
if err != nil {
l.Debugln(err)
w.setError(err)
return
return err
}
success := 0
@@ -123,62 +137,66 @@ func (w *multicastWriter) Serve() {
if err != nil {
l.Debugln(err, "on write to", gaddr, intf.Name)
w.setError(err)
w.SetError(err)
continue
}
l.Debugf("sent %d bytes to %v on %s", len(bs), gaddr, intf.Name)
success++
select {
case <-stop:
return nil
default:
}
}
if success > 0 {
w.setError(nil)
} else {
l.Debugln(err)
w.setError(err)
w.SetError(nil)
}
}
}
func (w *multicastWriter) Stop() {
close(w.stop)
}
func (w *multicastWriter) String() string {
return fmt.Sprintf("multicastWriter@%p", w)
}
type multicastReader struct {
util.ServiceWithError
addr string
outbox chan<- recv
errorHolder
stop chan struct{}
}
func (r *multicastReader) Serve() {
func (r *multicastReader) serve(stop chan struct{}) error {
l.Debugln(r, "starting")
defer l.Debugln(r, "stopping")
gaddr, err := net.ResolveUDPAddr("udp6", r.addr)
if err != nil {
l.Debugln(err)
r.setError(err)
return
return err
}
conn, err := net.ListenPacket("udp6", r.addr)
if err != nil {
l.Debugln(err)
r.setError(err)
return
return err
}
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-stop:
case <-done:
}
conn.Close()
}()
intfs, err := net.Interfaces()
if err != nil {
l.Debugln(err)
r.setError(err)
return
return err
}
pconn := ipv6.NewPacketConn(conn)
@@ -195,16 +213,20 @@ func (r *multicastReader) Serve() {
if joined == 0 {
l.Debugln("no multicast interfaces available")
r.setError(errors.New("no multicast interfaces available"))
return
return errors.New("no multicast interfaces available")
}
bs := make([]byte, 65536)
for {
select {
case <-stop:
return nil
default:
}
n, _, addr, err := pconn.ReadFrom(bs)
if err != nil {
l.Debugln(err)
r.setError(err)
r.SetError(err)
continue
}
l.Debugf("recv %d bytes from %s", n, addr)
@@ -219,10 +241,6 @@ func (r *multicastReader) Serve() {
}
}
func (r *multicastReader) Stop() {
close(r.stop)
}
func (r *multicastReader) String() string {
return fmt.Sprintf("multicastReader@%p", r)
}

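Both the broadcast and multicast readers above rely on the same unblock-on-stop idiom: a blocking socket read can only be interrupted by closing the socket, so a helper goroutine closes it when either stop fires or serve returns on its own. A self-contained sketch (names illustrative):

package main

import (
	"fmt"
	"net"
)

// readLoop blocks in ReadFrom; the helper goroutine closes the socket
// when stop is closed (external shutdown) or when done is closed
// (readLoop returned for its own reasons), so the read always unblocks.
func readLoop(conn net.PacketConn, stop chan struct{}) error {
	done := make(chan struct{})
	defer close(done)
	go func() {
		select {
		case <-stop:
		case <-done:
		}
		conn.Close()
	}()
	buf := make([]byte, 65536)
	for {
		n, addr, err := conn.ReadFrom(buf)
		if err != nil {
			return err // after stop this is the expected "closed connection" error
		}
		fmt.Printf("recv %d bytes from %s\n", n, addr)
	}
}

func main() {
	conn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	stop := make(chan struct{})
	close(stop) // immediate shutdown for the demo
	fmt.Println(readLoop(conn, stop))
}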
View File

@@ -10,6 +10,7 @@ package config
import (
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -91,6 +92,12 @@ var (
}
)
var (
errFolderIDEmpty = errors.New("folder has empty ID")
errFolderIDDuplicate = errors.New("folder has duplicate ID")
errFolderPathEmpty = errors.New("folder has empty path")
)
func New(myID protocol.DeviceID) Configuration {
var cfg Configuration
cfg.Version = CurrentVersion
@@ -263,6 +270,16 @@ found:
func (cfg *Configuration) clean() error {
util.FillNilSlices(&cfg.Options)
// Ensure that the device list is
// - free from duplicates
// - free from devices with empty IDs
// - sorted by ID
// This must happen before preparing folders, as that requires a correct device list.
cfg.Devices = ensureNoDuplicateOrEmptyIDDevices(cfg.Devices)
sort.Slice(cfg.Devices, func(a, b int) bool {
return cfg.Devices[a].DeviceID.Compare(cfg.Devices[b].DeviceID) == -1
})
// Prepare folders and check for duplicates. Duplicates are bad and
// dangerous, can't currently be resolved in the GUI, and shouldn't
// happen when configured by the GUI. We return with an error in that
@@ -273,12 +290,17 @@ func (cfg *Configuration) clean() error {
folder.prepare()
if folder.ID == "" {
return fmt.Errorf("folder with empty ID in configuration")
return errFolderIDEmpty
}
if folder.Path == "" {
return fmt.Errorf("folder %q: %v", folder.ID, errFolderPathEmpty)
}
if _, ok := existingFolders[folder.ID]; ok {
return fmt.Errorf("duplicate folder ID %q in configuration", folder.ID)
return fmt.Errorf("folder %q: %v", folder.ID, errFolderIDDuplicate)
}
existingFolders[folder.ID] = folder
}
@@ -298,14 +320,6 @@ func (cfg *Configuration) clean() error {
existingDevices[device.DeviceID] = true
}
// Ensure that the device list is
// - free from duplicates
// - sorted by ID
cfg.Devices = ensureNoDuplicateDevices(cfg.Devices)
sort.Slice(cfg.Devices, func(a, b int) bool {
return cfg.Devices[a].DeviceID.Compare(cfg.Devices[b].DeviceID) == -1
})
// Ensure that the folder list is sorted by ID
sort.Slice(cfg.Folders, func(a, b int) bool {
return cfg.Folders[a].ID < cfg.Folders[b].ID
@@ -464,14 +478,14 @@ loop:
return devices[0:count]
}
func ensureNoDuplicateDevices(devices []DeviceConfiguration) []DeviceConfiguration {
func ensureNoDuplicateOrEmptyIDDevices(devices []DeviceConfiguration) []DeviceConfiguration {
count := len(devices)
i := 0
seenDevices := make(map[protocol.DeviceID]bool)
loop:
for i < count {
id := devices[i].DeviceID
if _, ok := seenDevices[id]; ok {
if _, ok := seenDevices[id]; ok || id == protocol.EmptyDeviceID {
devices[i] = devices[count-1]
count--
continue loop

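The new errFolder* sentinels are wrapped with fmt.Errorf("folder %q: %v", ...), which flattens the sentinel into plain text; that is presumably why the updated tests below match on the error string rather than using errors.Is. A small sketch of the distinction, using standard-library semantics:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errFolderPathEmpty = errors.New("folder has empty path")

func check(id, path string) error {
	if path == "" {
		// %v flattens the sentinel into text; %w would keep the chain.
		return fmt.Errorf("folder %q: %v", id, errFolderPathEmpty)
	}
	return nil
}

func main() {
	err := check("f1", "")
	fmt.Println(errors.Is(err, errFolderPathEmpty))                        // false, the chain is lost
	fmt.Println(strings.Contains(err.Error(), errFolderPathEmpty.Error())) // true, what the tests check
}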
View File

@@ -711,23 +711,19 @@ func TestDuplicateFolders(t *testing.T) {
// Duplicate folders are a loading error
_, err := Load("testdata/dupfolders.xml", device1)
if err == nil || !strings.HasPrefix(err.Error(), "duplicate folder ID") {
if err == nil || !strings.Contains(err.Error(), errFolderIDDuplicate.Error()) {
t.Fatal(`Expected error to mention "duplicate folder ID":`, err)
}
}
func TestEmptyFolderPaths(t *testing.T) {
// Empty folder paths are allowed at the loading stage, and should not
// Empty folder paths are not allowed at the loading stage, and should not
// get messed up by the prepare steps (e.g., become the current dir or
// get a slash added so that it becomes the root directory or similar).
wrapper, err := Load("testdata/nopath.xml", device1)
if err != nil {
t.Fatal(err)
}
folder := wrapper.Folders()["f1"]
if folder.cachedFilesystem != nil {
t.Errorf("Expected %q to be empty", folder.cachedFilesystem)
_, err := Load("testdata/nopath.xml", device1)
if err == nil || !strings.Contains(err.Error(), errFolderPathEmpty.Error()) {
t.Fatal("Expected error due to empty folder path, got", err)
}
}
@@ -929,6 +925,7 @@ func TestIssue4219(t *testing.T) {
"folders": [
{
"id": "abcd123",
"path": "testdata",
"devices":[
{"deviceID": "GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY"}
]
@@ -1103,6 +1100,32 @@ func TestDeviceConfigObservedNotNil(t *testing.T) {
}
}
func TestRemoveDeviceWithEmptyID(t *testing.T) {
cfg := Configuration{
Devices: []DeviceConfiguration{
{
Name: "foo",
},
},
Folders: []FolderConfiguration{
{
ID: "foo",
Path: "testdata",
Devices: []FolderDeviceConfiguration{{}},
},
},
}
cfg.clean()
if len(cfg.Devices) != 0 {
t.Error("Expected device with empty ID to be removed from config:", cfg.Devices)
}
if len(cfg.Folders[0].Devices) != 0 {
t.Error("Expected device with empty ID to be removed from folder")
}
}
// defaultConfigAsMap returns a valid default config as a JSON-decoded
// map[string]interface{}. This is useful to override random elements and
// re-encode into JSON.

View File

@@ -92,7 +92,7 @@ func (f FolderConfiguration) Copy() FolderConfiguration {
func (f FolderConfiguration) Filesystem() fs.Filesystem {
// This is intentionally not a pointer method, because things like
// cfg.Folders["default"].Filesystem() should be valid.
if f.cachedFilesystem == nil && f.Path != "" {
if f.cachedFilesystem == nil {
l.Infoln("bug: uncached filesystem call (should only happen in tests)")
return fs.NewFilesystem(f.FilesystemType, f.Path)
}
@@ -209,9 +209,7 @@ func (f *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
}
func (f *FolderConfiguration) prepare() {
if f.Path != "" {
f.cachedFilesystem = fs.NewFilesystem(f.FilesystemType, f.Path)
}
f.cachedFilesystem = fs.NewFilesystem(f.FilesystemType, f.Path)
if f.RescanIntervalS > MaxRescanIntervalS {
f.RescanIntervalS = MaxRescanIntervalS

View File

@@ -15,7 +15,7 @@
<!-- duplicate, will be removed -->
<address>192.0.2.5</address>
</device>
<folder id="f2" directory="testdata/">
<folder id="f2" path="testdata/">
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR"></device>
<device id="GYRZZQBIRNPV4T7TC52WEQYJ3TFDQW6MWDFLMU4SSSU6EMFBK2VA"></device>
<!-- duplicate device, will be removed -->

View File

@@ -1,6 +1,6 @@
<configuration version="15">
<folder id="f1" directory="testdata/">
<folder id="f1" path="testdata/">
</folder>
<folder id="f1" directory="testdata/">
<folder id="f1" path="testdata/">
</folder>
</configuration>

View File

@@ -8,7 +8,7 @@
<ignoredFolder id="folder1"/>
<ignoredFolder id="folder2"/>
</device>
<folder id="folder1" directory="testdata/">
<folder id="folder1" path="testdata/">
<device id="GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY"></device>
</folder>
</configuration>

View File

@@ -1,25 +1,25 @@
<configuration version="10">
<folder id="f1" directory="testdata/">
<folder id="f1" path="testdata/">
</folder>
<folder id="f2" directory="testdata/">
<folder id="f2" path="testdata/">
<order>random</order>
</folder>
<folder id="f3" directory="testdata/">
<folder id="f3" path="testdata/">
<order>alphabetic</order>
</folder>
<folder id="f4" directory="testdata/">
<folder id="f4" path="testdata/">
<order>whatever</order>
</folder>
<folder id="f5" directory="testdata/">
<folder id="f5" path="testdata/">
<order>smallestFirst</order>
</folder>
<folder id="f6" directory="testdata/">
<folder id="f6" path="testdata/">
<order>largestFirst</order>
</folder>
<folder id="f7" directory="testdata/">
<folder id="f7" path="testdata/">
<order>oldestFirst</order>
</folder>
<folder id="f8" directory="testdata/">
<folder id="f8" path="testdata/">
<order>newestFirst</order>
</folder>
</configuration>

View File

@@ -23,6 +23,7 @@ import (
"github.com/syncthing/syncthing/lib/connections/registry"
"github.com/syncthing/syncthing/lib/nat"
"github.com/syncthing/syncthing/lib/stun"
"github.com/syncthing/syncthing/lib/util"
)
func init() {
@@ -33,6 +34,7 @@ func init() {
}
type quicListener struct {
util.ServiceWithError
nat atomic.Value
onAddressesChangedNotifier
@@ -40,12 +42,10 @@ type quicListener struct {
uri *url.URL
cfg config.Wrapper
tlsCfg *tls.Config
stop chan struct{}
conns chan internalConn
factory listenerFactory
address *url.URL
err error
mut sync.Mutex
}
@@ -77,20 +77,13 @@ func (t *quicListener) OnExternalAddressChanged(address *stun.Host, via string)
}
}
func (t *quicListener) Serve() {
t.mut.Lock()
t.err = nil
t.mut.Unlock()
func (t *quicListener) serve(stop chan struct{}) error {
network := strings.Replace(t.uri.Scheme, "quic", "udp", -1)
packetConn, err := net.ListenPacket(network, t.uri.Host)
if err != nil {
t.mut.Lock()
t.err = err
t.mut.Unlock()
l.Infoln("Listen (BEP/quic):", err)
return
return err
}
defer func() { _ = packetConn.Close() }()
@@ -105,11 +98,8 @@ func (t *quicListener) Serve() {
listener, err := quic.Listen(conn, t.tlsCfg, quicConfig)
if err != nil {
t.mut.Lock()
t.err = err
t.mut.Unlock()
l.Infoln("Listen (BEP/quic):", err)
return
return err
}
l.Infof("QUIC listener (%v) starting", packetConn.LocalAddr())
@@ -118,7 +108,7 @@ func (t *quicListener) Serve() {
// Accept is forever, so handle stops externally.
go func() {
select {
case <-t.stop:
case <-stop:
_ = listener.Close()
}
}()
@@ -128,11 +118,11 @@ func (t *quicListener) Serve() {
session, err := listener.Accept()
select {
case <-t.stop:
case <-stop:
if err == nil {
_ = session.Close()
}
return
return nil
default:
}
if err != nil {
@@ -150,7 +140,7 @@ func (t *quicListener) Serve() {
select {
case <-ok:
return
case <-t.stop:
case <-stop:
_ = session.Close()
case <-time.After(10 * time.Second):
l.Debugln("timed out waiting for AcceptStream on", session.RemoteAddr())
@@ -170,10 +160,6 @@ func (t *quicListener) Serve() {
}
}
func (t *quicListener) Stop() {
close(t.stop)
}
func (t *quicListener) URI() *url.URL {
return t.uri
}
@@ -192,13 +178,6 @@ func (t *quicListener) LANAddresses() []*url.URL {
return []*url.URL{t.uri}
}
func (t *quicListener) Error() error {
t.mut.Lock()
err := t.err
t.mut.Unlock()
return err
}
func (t *quicListener) String() string {
return t.uri.String()
}
@@ -227,9 +206,9 @@ func (f *quicListenerFactory) New(uri *url.URL, cfg config.Wrapper, tlsCfg *tls.
cfg: cfg,
tlsCfg: tlsCfg,
conns: conns,
stop: make(chan struct{}),
factory: f,
}
l.ServiceWithError = util.AsServiceWithError(l.serve)
l.nat.Store(stun.NATUnknown)
return l
}

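The QUIC listener keeps the "Accept is forever" workaround but now takes the stop channel as a parameter. The shutdown-safe accept loop reduces to the following sketch (the accept and handle functions are illustrative stand-ins):

package main

import "io"

// acceptLoop mirrors the QUIC listener's pattern: after a blocking
// accept returns, check stop first, so a session accepted while
// shutting down is closed instead of handled.
func acceptLoop(accept func() (io.Closer, error), handle func(io.Closer), stop chan struct{}) error {
	for {
		session, err := accept()
		select {
		case <-stop:
			if err == nil {
				_ = session.Close()
			}
			return nil // clean shutdown, not an error
		default:
		}
		if err != nil {
			return err // returning an error lets the supervisor restart the listener
		}
		go handle(session)
	}
}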
View File

@@ -184,16 +184,22 @@ func NewService(cfg config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *t
// the common handling regardless of whether the connection was
// incoming or outgoing.
service.Add(serviceFunc(service.connect))
service.Add(serviceFunc(service.handle))
service.Add(util.AsService(service.connect))
service.Add(util.AsService(service.handle))
service.Add(service.listenerSupervisor)
return service
}
func (s *service) handle() {
next:
for c := range s.conns {
func (s *service) handle(stop chan struct{}) {
var c internalConn
for {
select {
case <-stop:
return
case c = <-s.conns:
}
cs := c.ConnectionState()
// We should have negotiated the next level protocol "bep/1.0" as part
@@ -298,7 +304,7 @@ next:
// config. Warn instead of Info.
l.Warnf("Bad certificate from %s at %s: %v", remoteID, c, err)
c.Close()
continue next
continue
}
// Wrap the connection in rate limiters. The limiter itself will
@@ -313,11 +319,11 @@ next:
l.Infof("Established secure connection to %s at %s", remoteID, c)
s.model.AddConnection(modelConn, hello)
continue next
continue
}
}
func (s *service) connect() {
func (s *service) connect(stop chan struct{}) {
nextDial := make(map[string]time.Time)
// Used as delay for the first few connection attempts, increases
@@ -465,11 +471,16 @@ func (s *service) connect() {
if initialRampup < sleep {
l.Debugln("initial rampup; sleep", initialRampup, "and update to", initialRampup*2)
time.Sleep(initialRampup)
sleep = initialRampup
initialRampup *= 2
} else {
l.Debugln("sleep until next dial", sleep)
time.Sleep(sleep)
}
select {
case <-time.After(sleep):
case <-stop:
return
}
}
}

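The connect loop change swaps an uninterruptible time.Sleep for a select on a timer and the stop channel, so shutdown no longer has to wait out the dial delay. Minimal sketch:

package main

import (
	"fmt"
	"time"
)

// waitOrStop sleeps for d but returns early, reporting false, if stop
// is closed first. time.After leaks its timer until it fires, which is
// acceptable in a long-lived loop like connect.
func waitOrStop(d time.Duration, stop chan struct{}) bool {
	select {
	case <-time.After(d):
		return true
	case <-stop:
		return false
	}
}

func main() {
	stop := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(stop) }()
	fmt.Println(waitOrStop(time.Minute, stop)) // false: interrupted by stop
}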
View File

@@ -191,13 +191,6 @@ type Model interface {
GetHello(protocol.DeviceID) protocol.HelloIntf
}
// serviceFunc wraps a function to create a suture.Service without stop
// functionality.
type serviceFunc func()
func (f serviceFunc) Serve() { f() }
func (f serviceFunc) Stop() {}
type onAddressesChangedNotifier struct {
callbacks []func(genericListener)
}

View File

@@ -16,6 +16,7 @@ import (
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/dialer"
"github.com/syncthing/syncthing/lib/nat"
"github.com/syncthing/syncthing/lib/util"
)
func init() {
@@ -26,43 +27,32 @@ func init() {
}
type tcpListener struct {
util.ServiceWithError
onAddressesChangedNotifier
uri *url.URL
cfg config.Wrapper
tlsCfg *tls.Config
stop chan struct{}
conns chan internalConn
factory listenerFactory
natService *nat.Service
mapping *nat.Mapping
err error
mut sync.RWMutex
}
func (t *tcpListener) Serve() {
t.mut.Lock()
t.err = nil
t.mut.Unlock()
func (t *tcpListener) serve(stop chan struct{}) error {
tcaddr, err := net.ResolveTCPAddr(t.uri.Scheme, t.uri.Host)
if err != nil {
t.mut.Lock()
t.err = err
t.mut.Unlock()
l.Infoln("Listen (BEP/tcp):", err)
return
return err
}
listener, err := net.ListenTCP(t.uri.Scheme, tcaddr)
if err != nil {
t.mut.Lock()
t.err = err
t.mut.Unlock()
l.Infoln("Listen (BEP/tcp):", err)
return
return err
}
defer listener.Close()
@@ -86,14 +76,14 @@ func (t *tcpListener) Serve() {
listener.SetDeadline(time.Now().Add(time.Second))
conn, err := listener.Accept()
select {
case <-t.stop:
case <-stop:
if err == nil {
conn.Close()
}
t.mut.Lock()
t.mapping = nil
t.mut.Unlock()
return
return nil
default:
}
if err != nil {
@@ -104,7 +94,7 @@ func (t *tcpListener) Serve() {
if acceptFailures > maxAcceptFailures {
// Return to restart the listener, because something
// seems permanently damaged.
return
return err
}
// Slightly increased delay for each failure.
@@ -137,10 +127,6 @@ func (t *tcpListener) Serve() {
}
}
func (t *tcpListener) Stop() {
close(t.stop)
}
func (t *tcpListener) URI() *url.URL {
return t.uri
}
@@ -174,13 +160,6 @@ func (t *tcpListener) LANAddresses() []*url.URL {
return []*url.URL{t.uri}
}
func (t *tcpListener) Error() error {
t.mut.RLock()
err := t.err
t.mut.RUnlock()
return err
}
func (t *tcpListener) String() string {
return t.uri.String()
}
@@ -196,15 +175,16 @@ func (t *tcpListener) NATType() string {
type tcpListenerFactory struct{}
func (f *tcpListenerFactory) New(uri *url.URL, cfg config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
return &tcpListener{
l := &tcpListener{
uri: fixupPort(uri, config.DefaultTCPPort),
cfg: cfg,
tlsCfg: tlsCfg,
conns: conns,
natService: natService,
stop: make(chan struct{}),
factory: f,
}
l.ServiceWithError = util.AsServiceWithError(l.serve)
return l
}
func (tcpListenerFactory) Valid(_ config.Configuration) error {

View File

@@ -172,7 +172,8 @@ func (db *instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator)
if shouldDebug() {
if seq := db.keyer.SequenceFromSequenceKey(dbi.Key()); f.Sequence != seq {
panic(fmt.Sprintf("sequence index corruption (folder %v, file %v): sequence %d != expected %d", string(folder), f.Name, f.Sequence, seq))
l.Warnf("Sequence index corruption (folder %v, file %v): sequence %d != expected %d", string(folder), f.Name, f.Sequence, seq)
panic("sequence index corruption")
}
}
if !fn(f) {

View File

@@ -19,19 +19,22 @@ import (
stdsync "sync"
"time"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/dialer"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/util"
)
type globalClient struct {
suture.Service
server string
addrList AddressLister
announceClient httpClient
queryClient httpClient
noAnnounce bool
noLookup bool
stop chan struct{}
errorHolder
}
@@ -122,8 +125,8 @@ func NewGlobal(server string, cert tls.Certificate, addrList AddressLister) (Fin
queryClient: queryClient,
noAnnounce: opts.noAnnounce,
noLookup: opts.noLookup,
stop: make(chan struct{}),
}
cl.Service = util.AsService(cl.serve)
if !opts.noAnnounce {
// If we are supposed to announce, it's an error until we've done so.
cl.setError(errors.New("not announced"))
@@ -183,11 +186,11 @@ func (c *globalClient) String() string {
return "global@" + c.server
}
func (c *globalClient) Serve() {
func (c *globalClient) serve(stop chan struct{}) {
if c.noAnnounce {
// We're configured to not do announcements, only lookups. To maintain
// the same interface, we just pause here if Serve() is run.
<-c.stop
<-stop
return
}
@@ -207,7 +210,7 @@ func (c *globalClient) Serve() {
case <-timer.C:
c.sendAnnouncement(timer)
case <-c.stop:
case <-stop:
return
}
}
@@ -276,10 +279,6 @@ func (c *globalClient) sendAnnouncement(timer *time.Timer) {
timer.Reset(defaultReannounceInterval)
}
func (c *globalClient) Stop() {
close(c.stop)
}
func (c *globalClient) Cache() map[protocol.DeviceID]CacheEntry {
// The globalClient doesn't do caching
return nil

View File

@@ -11,7 +11,9 @@ package fs
import "github.com/syncthing/notify"
const (
subEventMask = notify.NoteDelete | notify.NoteWrite | notify.NoteRename
permEventMask = notify.NoteAttrib
// The platform-independent notify.Create is required, as kqueue has no
// event signalling file creation; notify generates those events internally.
subEventMask = notify.NoteDelete | notify.NoteWrite | notify.NoteRename | notify.Create
permEventMask = notify.NoteAttrib | notify.NoteExtend
rmEventMask = notify.NoteDelete | notify.NoteRename
)

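For context on how such a mask is consumed: the upstream notify API that lib/fs wraps takes a recursive path and an OR-ed event set. A sketch against the cross-platform constants (API shape assumed from the rjeczalik/notify package that github.com/syncthing/notify forks):

package main

import (
	"log"

	"github.com/syncthing/notify"
)

func main() {
	events := make(chan notify.EventInfo, 16)
	// The trailing "..." requests recursive watching; notify.Create is
	// the platform-independent creation event added to the mask above.
	err := notify.Watch("some/dir/...", events, notify.Create|notify.Remove|notify.Rename|notify.Write)
	if err != nil {
		log.Fatal(err)
	}
	defer notify.Stop(events)
	for ev := range events {
		log.Println(ev.Event(), ev.Path())
	}
}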
View File

@@ -8,7 +8,6 @@ package model
import (
"context"
"errors"
"fmt"
"math/rand"
"path/filepath"
@@ -28,14 +27,15 @@ import (
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/watchaggregator"
"github.com/thejerf/suture"
)
// scanLimiter limits the number of concurrent scans. A limit of zero means no limit.
var scanLimiter = newByteSemaphore(0)
var errWatchNotStarted = errors.New("not started")
type folder struct {
suture.Service
stateTracker
config.FolderConfiguration
*stats.FolderStatisticsReference
@@ -54,7 +54,6 @@ type folder struct {
scanNow chan rescanRequest
scanDelay chan time.Duration
initialScanFinished chan struct{}
stopped chan struct{}
scanErrors []FileError
scanErrorsMut sync.Mutex
@@ -98,7 +97,6 @@ func newFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher, cfg conf
scanNow: make(chan rescanRequest),
scanDelay: make(chan time.Duration),
initialScanFinished: make(chan struct{}),
stopped: make(chan struct{}),
scanErrorsMut: sync.NewMutex(),
pullScheduled: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a pull if we're busy when it comes.
@@ -109,7 +107,7 @@ func newFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher, cfg conf
}
}
func (f *folder) Serve() {
func (f *folder) serve(_ chan struct{}) {
atomic.AddInt32(&f.model.foldersRunning, 1)
defer atomic.AddInt32(&f.model.foldersRunning, -1)
@@ -119,7 +117,6 @@ func (f *folder) Serve() {
defer func() {
f.scanTimer.Stop()
f.setState(FolderIdle)
close(f.stopped)
}()
pause := f.basePause()
@@ -220,8 +217,8 @@ func (f *folder) SchedulePull() {
}
}
func (f *folder) Jobs() ([]string, []string) {
return nil, nil
func (f *folder) Jobs(_, _ int) ([]string, []string, int) {
return nil, nil, 0
}
func (f *folder) Scan(subdirs []string) error {
@@ -256,7 +253,7 @@ func (f *folder) Delay(next time.Duration) {
func (f *folder) Stop() {
f.cancel()
<-f.stopped
f.Service.Stop()
}
// CheckHealth checks the folder for common errors, updates the folder state
@@ -426,6 +423,12 @@ func (f *folder) scanSubdirs(subDirs []string) error {
var iterError error
f.fset.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
select {
case <-f.ctx.Done():
return false
default:
}
file := fi.(db.FileInfoTruncated)
if err := batch.flushIfFull(); err != nil {
@@ -510,6 +513,12 @@ func (f *folder) scanSubdirs(subDirs []string) error {
return true
})
select {
case <-f.ctx.Done():
return f.ctx.Err()
default:
}
if iterError == nil && len(toIgnore) > 0 {
for _, file := range toIgnore {
l.Debugln("marking file as ignored", f)
@@ -564,19 +573,8 @@ func (f *folder) WatchError() error {
func (f *folder) stopWatch() {
f.watchMut.Lock()
f.watchCancel()
prevErr := f.watchErr
f.watchErr = errWatchNotStarted
f.watchMut.Unlock()
if prevErr != errWatchNotStarted {
data := map[string]interface{}{
"folder": f.ID,
"to": errWatchNotStarted.Error(),
}
if prevErr != nil {
data["from"] = prevErr.Error()
}
events.Default.Log(events.FolderWatchStateChanged, data)
}
f.setWatchError(nil)
}
// scheduleWatchRestart makes sure watching is restarted from the main for loop
@@ -641,7 +639,6 @@ func (f *folder) monitorWatch(ctx context.Context) {
if _, ok := err.(*fs.ErrWatchEventOutsideRoot); ok {
l.Warnln(err)
warnedOutside = true
return
}
}
aggrCancel()
@@ -676,17 +673,18 @@ func (f *folder) setWatchError(err error) {
if err == nil {
return
}
if prevErr == errWatchNotStarted {
l.Infof("Error while trying to start filesystem watcher for folder %s, trying again in 1min: %v", f.Description(), err)
msg := fmt.Sprintf("Error while trying to start filesystem watcher for folder %s, trying again in 1min: %v", f.Description(), err)
if prevErr != err {
l.Infof(msg)
return
}
l.Debugf("Repeat error while trying to start filesystem watcher for folder %s, trying again in 1min: %v", f.Description(), err)
l.Debugf(msg)
}
// scanOnWatchErr schedules a full scan immediately if an error occurred while watching.
func (f *folder) scanOnWatchErr() {
f.watchMut.Lock()
if f.watchErr != nil && f.watchErr != errWatchNotStarted {
if f.watchErr != nil {
f.Delay(0)
}
f.watchMut.Unlock()

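The cancellation checks added inside the scanSubdirs iteration callbacks follow a standard shape: poll ctx.Done() once per item so a folder stop does not have to wait for a full database sweep. Reduced, runnable sketch (names illustrative):

package main

import (
	"context"
	"fmt"
)

// sweep polls ctx.Done() per item, mirroring the checks the diff adds
// inside the WithPrefixedHaveTruncated callback.
func sweep(ctx context.Context, items []string, visit func(string) bool) error {
	for _, it := range items {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if !visit(it) {
			break
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // already cancelled: sweep returns immediately
	err := sweep(ctx, []string{"a", "b"}, func(s string) bool { fmt.Println(s); return true })
	fmt.Println(err) // context.Canceled
}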
View File

@@ -12,6 +12,7 @@ import (
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/util"
"github.com/syncthing/syncthing/lib/versioner"
)
@@ -28,6 +29,7 @@ func newSendOnlyFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher,
folder: newFolder(model, fset, ignores, cfg),
}
f.folder.puller = f
f.folder.Service = util.AsService(f.serve)
return f
}
@@ -66,7 +68,7 @@ func (f *sendOnlyFolder) pull() bool {
curFile, ok := f.fset.Get(protocol.LocalDeviceID, intf.FileName())
if !ok {
if intf.IsDeleted() {
panic("Should never get a deleted file as needed when we don't have it")
l.Debugln("Should never get a deleted file as needed when we don't have it")
}
return true
}

View File

@@ -28,6 +28,7 @@ import (
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/util"
"github.com/syncthing/syncthing/lib/versioner"
"github.com/syncthing/syncthing/lib/weakhash"
)
@@ -116,6 +117,7 @@ func newSendReceiveFolder(model *model, fset *db.FileSet, ignores *ignore.Matche
pullErrorsMut: sync.NewMutex(),
}
f.folder.puller = f
f.folder.Service = util.AsService(f.serve)
if f.Copiers == 0 {
f.Copiers = defaultCopiers
@@ -577,14 +579,10 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo, dbUpdateChan chan<
case err == nil && !info.IsDir():
// Check that it is what we have in the database.
curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)
if changed, err := f.itemChanged(info, curFile, hasCurFile, scanChan); err != nil {
if err := f.scanIfItemChanged(info, curFile, hasCurFile, scanChan); err != nil {
err = errors.Wrap(err, "handling dir")
f.newPullError(file.Name, err)
return
} else if changed {
l.Debugln("item changed on disk compared to db; not replacing with dir:", file.Name)
scanChan <- curFile.Name
f.newPullError(file.Name, errModified)
return
}
// Remove it to replace with the dir.
@@ -735,14 +733,10 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo, dbUpdateChan c
if info, err := f.fs.Lstat(file.Name); err == nil {
// Check that it is what we have in the database.
curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)
if changed, err := f.itemChanged(info, curFile, hasCurFile, scanChan); err != nil {
if err := f.scanIfItemChanged(info, curFile, hasCurFile, scanChan); err != nil {
err = errors.Wrap(err, "handling symlink")
f.newPullError(file.Name, err)
return
} else if changed {
l.Debugln("item changed on disk compared to db; not replacing with symlink:", file.Name)
scanChan <- curFile.Name
f.newPullError(file.Name, errModified)
return
}
// Remove it to replace with the symlink. This also handles the
// "change symlink type" path.
@@ -1262,7 +1256,7 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
if len(hashesToFind) > 0 {
file, err = f.fs.Open(state.file.Name)
if err == nil {
weakHashFinder, err = weakhash.NewFinder(f.ctx, file, int(state.file.BlockSize()), hashesToFind)
weakHashFinder, err = weakhash.NewFinder(f.ctx, file, state.file.BlockSize(), hashesToFind)
if err != nil {
l.Debugln("weak hasher", err)
}
@@ -1514,12 +1508,10 @@ func (f *sendReceiveFolder) performFinish(file, curFile protocol.FileInfo, hasCu
// There is an old file or directory already in place. We need to
// handle that.
if changed, err := f.itemChanged(stat, curFile, hasCurFile, scanChan); err != nil {
if err := f.scanIfItemChanged(stat, curFile, hasCurFile, scanChan); err != nil {
err = errors.Wrap(err, "handling file")
f.newPullError(file.Name, err)
return err
} else if changed {
l.Debugln("file changed on disk compared to db; not finishing:", file.Name)
scanChan <- curFile.Name
return errModified
}
if !curFile.IsDirectory() && !curFile.IsSymlink() && f.inConflict(curFile.Version, file.Version) {
@@ -1599,8 +1591,8 @@ func (f *sendReceiveFolder) BringToFront(filename string) {
f.queue.BringToFront(filename)
}
func (f *sendReceiveFolder) Jobs() ([]string, []string) {
return f.queue.Jobs()
func (f *sendReceiveFolder) Jobs(page, perpage int) ([]string, []string, int) {
return f.queue.Jobs(page, perpage)
}
// dbUpdaterRoutine aggregates db updates and commits them in batches no
@@ -1903,18 +1895,19 @@ func (f *sendReceiveFolder) deleteDirOnDisk(dir string, scanChan chan<- string)
return err
}
// itemChanged returns true if the given disk file differs from the information
// in the database and schedules that file for scanning
func (f *sendReceiveFolder) itemChanged(stat fs.FileInfo, item protocol.FileInfo, hasItem bool, scanChan chan<- string) (changed bool, err error) {
// scanIfItemChanged schedules the given file for scanning and returns errModified
// if it differs from the information in the database. Returns nil if the file has
// not changed.
func (f *sendReceiveFolder) scanIfItemChanged(stat fs.FileInfo, item protocol.FileInfo, hasItem bool, scanChan chan<- string) (err error) {
defer func() {
if changed {
if err == errModified {
scanChan <- item.Name
}
}()
if !hasItem || item.Deleted {
// The item appeared from nowhere
return true, nil
return errModified
}
// Check that the item on disk is what we expect it to be according
@@ -1923,10 +1916,14 @@ func (f *sendReceiveFolder) itemChanged(stat fs.FileInfo, item protocol.FileInfo
// touching the item.
statItem, err := scanner.CreateFileInfo(stat, item.Name, f.fs)
if err != nil {
return false, errors.Wrap(err, "comparing item on disk to db")
return errors.Wrap(err, "comparing item on disk to db")
}
return !statItem.IsEquivalentOptional(item, f.IgnorePerms, true, protocol.LocalAllFlags), nil
if !statItem.IsEquivalentOptional(item, f.IgnorePerms, true, protocol.LocalAllFlags) {
return errModified
}
return nil
}
// checkToBeDeleted makes sure the file on disk is compatible with what there is
@@ -1943,14 +1940,7 @@ func (f *sendReceiveFolder) checkToBeDeleted(cur protocol.FileInfo, scanChan cha
// do not delete.
return err
}
changed, err := f.itemChanged(stat, cur, true, scanChan)
if err != nil {
return err
}
if changed {
return errModified
}
return nil
return f.scanIfItemChanged(stat, cur, true, scanChan)
}
func (f *sendReceiveFolder) maybeCopyOwner(path string) error {

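The itemChanged to scanIfItemChanged refactor collapses a (changed bool, err error) pair into a single error, with errModified as the sentinel and the rescan scheduled in a defer exactly when that sentinel is returned. A reduced sketch (errModified and the channel shape taken from the diff, the rest illustrative):

package main

import (
	"errors"
	"fmt"
)

var errModified = errors.New("file modified but not rescanned; will try again later")

// scanIfItemChanged reports errModified when disk and database disagree;
// the deferred check schedules a rescan in exactly that case, keeping
// every return path consistent.
func scanIfItemChanged(name string, changed bool, scanChan chan<- string) (err error) {
	defer func() {
		if err == errModified {
			scanChan <- name
		}
	}()
	if changed {
		return errModified
	}
	return nil
}

func main() {
	scanChan := make(chan string, 1)
	fmt.Println(scanIfItemChanged("foo", true, scanChan)) // errModified
	fmt.Println(<-scanChan)                               // "foo" queued for rescan
}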
View File

@@ -11,11 +11,13 @@ import (
"strings"
"time"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/util"
)
const minSummaryInterval = time.Minute
@@ -34,7 +36,6 @@ type folderSummaryService struct {
cfg config.Wrapper
model Model
id protocol.DeviceID
stop chan struct{}
immediate chan string
// For keeping track of folders to recalculate for
@@ -54,24 +55,18 @@ func NewFolderSummaryService(cfg config.Wrapper, m Model, id protocol.DeviceID)
cfg: cfg,
model: m,
id: id,
stop: make(chan struct{}),
immediate: make(chan string),
folders: make(map[string]struct{}),
foldersMut: sync.NewMutex(),
lastEventReqMut: sync.NewMutex(),
}
service.Add(serviceFunc(service.listenForUpdates))
service.Add(serviceFunc(service.calculateSummaries))
service.Add(util.AsService(service.listenForUpdates))
service.Add(util.AsService(service.calculateSummaries))
return service
}
func (c *folderSummaryService) Stop() {
c.Supervisor.Stop()
close(c.stop)
}
func (c *folderSummaryService) String() string {
return fmt.Sprintf("FolderSummaryService@%p", c)
}
@@ -148,8 +143,8 @@ func (c *folderSummaryService) OnEventRequest() {
// listenForUpdates subscribes to the event bus and makes note of folders that
// need their data recalculated.
func (c *folderSummaryService) listenForUpdates() {
sub := events.Default.Subscribe(events.LocalIndexUpdated | events.RemoteIndexUpdated | events.StateChanged | events.RemoteDownloadProgress | events.DeviceConnected | events.FolderWatchStateChanged)
func (c *folderSummaryService) listenForUpdates(stop chan struct{}) {
sub := events.Default.Subscribe(events.LocalIndexUpdated | events.RemoteIndexUpdated | events.StateChanged | events.RemoteDownloadProgress | events.DeviceConnected | events.FolderWatchStateChanged | events.DownloadProgress)
defer events.Default.Unsubscribe(sub)
for {
@@ -157,71 +152,87 @@ func (c *folderSummaryService) listenForUpdates() {
select {
case ev := <-sub.C():
if ev.Type == events.DeviceConnected {
// When a device connects we schedule a refresh of all
// folders shared with that device.
data := ev.Data.(map[string]string)
deviceID, _ := protocol.DeviceIDFromString(data["id"])
c.foldersMut.Lock()
nextFolder:
for _, folder := range c.cfg.Folders() {
for _, dev := range folder.Devices {
if dev.DeviceID == deviceID {
c.folders[folder.ID] = struct{}{}
continue nextFolder
}
}
}
c.foldersMut.Unlock()
continue
}
// The other events all have a "folder" attribute that they
// affect. Whenever the local or remote index is updated for a
// given folder we make a note of it.
data := ev.Data.(map[string]interface{})
folder := data["folder"].(string)
switch ev.Type {
case events.StateChanged:
if data["to"].(string) == "idle" && data["from"].(string) == "syncing" {
// The folder changed to idle from syncing. We should do an
// immediate refresh to update the GUI. The send to
// c.immediate must be nonblocking so that we can continue
// handling events.
c.foldersMut.Lock()
select {
case c.immediate <- folder:
delete(c.folders, folder)
default:
c.folders[folder] = struct{}{}
}
c.foldersMut.Unlock()
}
default:
// This folder needs to be refreshed whenever we do the next
// refresh.
c.foldersMut.Lock()
c.folders[folder] = struct{}{}
c.foldersMut.Unlock()
}
case <-c.stop:
c.processUpdate(ev)
case <-stop:
return
}
}
}
func (c *folderSummaryService) processUpdate(ev events.Event) {
var folder string
switch ev.Type {
case events.DeviceConnected:
// When a device connects we schedule a refresh of all
// folders shared with that device.
data := ev.Data.(map[string]string)
deviceID, _ := protocol.DeviceIDFromString(data["id"])
c.foldersMut.Lock()
nextFolder:
for _, folder := range c.cfg.Folders() {
for _, dev := range folder.Devices {
if dev.DeviceID == deviceID {
c.folders[folder.ID] = struct{}{}
continue nextFolder
}
}
}
c.foldersMut.Unlock()
return
case events.DownloadProgress:
data := ev.Data.(map[string]map[string]*pullerProgress)
c.foldersMut.Lock()
for folder := range data {
c.folders[folder] = struct{}{}
}
c.foldersMut.Unlock()
return
case events.StateChanged:
data := ev.Data.(map[string]interface{})
if !(data["to"].(string) == "idle" && data["from"].(string) == "syncing") {
return
}
// The folder changed to idle from syncing. We should do an
// immediate refresh to update the GUI. The send to
// c.immediate must be nonblocking so that we can continue
// handling events.
folder = data["folder"].(string)
select {
case c.immediate <- folder:
c.foldersMut.Lock()
delete(c.folders, folder)
c.foldersMut.Unlock()
return
default:
// Refresh whenever we do the next summary.
}
default:
// The other events all have a "folder" attribute that they
// affect. Whenever the local or remote index is updated for a
// given folder we make a note of it.
// This folder needs to be refreshed whenever we do the next
// refresh.
folder = ev.Data.(map[string]interface{})["folder"].(string)
}
c.foldersMut.Lock()
c.folders[folder] = struct{}{}
c.foldersMut.Unlock()
}
// calculateSummaries periodically recalculates folder summaries and
// completion percentage, and sends the results on the event bus.
func (c *folderSummaryService) calculateSummaries() {
func (c *folderSummaryService) calculateSummaries(stop chan struct{}) {
const pumpInterval = 2 * time.Second
pump := time.NewTimer(pumpInterval)
@@ -242,7 +253,7 @@ func (c *folderSummaryService) calculateSummaries() {
case folder := <-c.immediate:
c.sendSummary(folder)
case <-c.stop:
case <-stop:
return
}
}
@@ -303,10 +314,3 @@ func (c *folderSummaryService) sendSummary(folder string) {
events.Default.Log(events.FolderCompletion, comp)
}
}
// serviceFunc wraps a function to create a suture.Service without stop
// functionality.
type serviceFunc func()
func (f serviceFunc) Serve() { f() }
func (f serviceFunc) Stop() {}

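processUpdate's StateChanged branch keeps the event loop nonblocking: it offers the folder to the summary goroutine and, if that send would block, simply marks it for the next periodic pass. Reduced sketch (locking elided; the real code holds foldersMut):

package main

import "fmt"

// requestRefresh tries an immediate refresh and otherwise defers the
// folder to the next pump tick; it never blocks the event loop.
func requestRefresh(folder string, immediate chan<- string, pending map[string]struct{}) {
	select {
	case immediate <- folder:
		delete(pending, folder)
	default:
		pending[folder] = struct{}{}
	}
}

func main() {
	immediate := make(chan string) // unbuffered with no reader: a send would block
	pending := map[string]struct{}{}
	requestRefresh("default", immediate, pending)
	fmt.Println(pending) // map[default:{}], deferred instead of blocking
}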
View File

@@ -32,6 +32,7 @@ import (
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/upgrade"
"github.com/syncthing/syncthing/lib/util"
"github.com/syncthing/syncthing/lib/versioner"
"github.com/thejerf/suture"
)
@@ -47,8 +48,8 @@ type service interface {
Override()
Revert()
DelayScan(d time.Duration)
SchedulePull() // something relevant changed, we should try a pull
Jobs() ([]string, []string) // In progress, Queued
SchedulePull() // something relevant changed, we should try a pull
Jobs(page, perpage int) ([]string, []string, int) // In progress, Queued, skipped
Scan(subs []string) error
Serve()
Stop()
@@ -221,16 +222,8 @@ func (m *model) Stop() {
for id := range devs {
ids = append(ids, id)
}
m.pmut.RLock()
closed := make([]chan struct{}, 0, len(m.closed))
for _, c := range m.closed {
closed = append(closed, c)
}
m.pmut.RUnlock()
m.closeConns(ids, errStopped)
for _, c := range closed {
<-c
}
w := m.closeConns(ids, errStopped)
w.Wait()
}
// StartDeadlockDetector starts a deadlock detector on the models locks which
@@ -255,10 +248,10 @@ func (m *model) StartFolder(folder string) {
// Need to hold lock on m.fmut when calling this.
func (m *model) startFolderLocked(cfg config.FolderConfiguration) {
if err := m.checkFolderRunningLocked(cfg.ID); err == errFolderMissing {
panic("cannot start nonexistent folder " + cfg.Description())
} else if err == nil {
panic("cannot start already running folder " + cfg.Description())
_, ok := m.folderRunners[cfg.ID]
if ok {
l.Warnln("Cannot start already running folder", cfg.Description())
panic("cannot start already running folder")
}
folderFactory, ok := folderFactories[cfg.Type]
@@ -370,17 +363,20 @@ func (m *model) AddFolder(cfg config.FolderConfiguration) {
panic("cannot add empty folder path")
}
// Creating the fileset can take a long time (metadata calculation) so
// we do it outside of the lock.
fset := db.NewFileSet(cfg.ID, cfg.Filesystem(), m.db)
m.fmut.Lock()
defer m.fmut.Unlock()
m.addFolderLocked(cfg)
m.addFolderLocked(cfg, fset)
}
func (m *model) addFolderLocked(cfg config.FolderConfiguration) {
func (m *model) addFolderLocked(cfg config.FolderConfiguration, fset *db.FileSet) {
m.folderCfgs[cfg.ID] = cfg
folderFs := cfg.Filesystem()
m.folderFiles[cfg.ID] = db.NewFileSet(cfg.ID, folderFs, m.db)
m.folderFiles[cfg.ID] = fset
ignores := ignore.New(folderFs, ignore.WithCache(m.cacheIgnoredFiles))
ignores := ignore.New(cfg.Filesystem(), ignore.WithCache(m.cacheIgnoredFiles))
if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
l.Warnln("Loading ignores:", err)
}
@@ -410,12 +406,16 @@ func (m *model) tearDownFolderLocked(cfg config.FolderConfiguration, err error)
// Close connections to affected devices
// Must happen before stopping the folder service to abort ongoing
// transmissions and thus allow timely service termination.
m.closeConns(cfg.DeviceIDs(), err)
w := m.closeConns(cfg.DeviceIDs(), err)
for _, id := range tokens {
m.RemoveAndWait(id, 0)
}
// Wait for the connections to stop, ensuring that no more calls happen
// to methods expecting this folder to exist (e.g. IndexUpdate).
w.Wait()
m.fmut.Lock()
// Clean up our config maps
@@ -431,7 +431,8 @@ func (m *model) RestartFolder(from, to config.FolderConfiguration) {
panic("bug: cannot restart empty folder ID")
}
if to.ID != from.ID {
panic(fmt.Sprintf("bug: folder restart cannot change ID %q -> %q", from.ID, to.ID))
l.Warnf("bug: folder restart cannot change ID %q -> %q", from.ID, to.ID)
panic("bug: folder restart cannot change ID")
}
// This mutex protects the entirety of the restart operation, preventing
@@ -463,7 +464,12 @@ func (m *model) RestartFolder(from, to config.FolderConfiguration) {
m.tearDownFolderLocked(from, fmt.Errorf("%v folder %v", errMsg, to.Description()))
if !to.Paused {
m.addFolderLocked(to)
// Creating the fileset can take a long time (metadata calculation)
// so we do it outside of the lock.
m.fmut.Unlock()
fset := db.NewFileSet(to.ID, to.Filesystem(), m.db)
m.fmut.Lock()
m.addFolderLocked(to, fset)
m.startFolderLocked(to)
}
l.Infof("%v folder %v (%v)", infoMsg, to.Description(), to.Type)
@@ -708,7 +714,7 @@ func (m *model) Completion(device protocol.DeviceID, folder string) FolderComple
}
// This might be more than it really is, because some blocks can be of a smaller size.
downloaded = int64(counts[ft.Name] * int(ft.BlockSize()))
downloaded = int64(counts[ft.Name]) * int64(ft.BlockSize())
fileNeed = ft.FileSize() - downloaded
if fileNeed < 0 {
@@ -791,7 +797,8 @@ func (m *model) ReceiveOnlyChangedSize(folder string) db.Counts {
return db.Counts{}
}
// NeedSize returns the number and total size of currently needed files.
// NeedSize returns the number of currently needed files and their total size
// minus the amount that has already been downloaded.
func (m *model) NeedSize(folder string) db.Counts {
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
@@ -815,8 +822,7 @@ func (m *model) NeedSize(folder string) db.Counts {
}
// NeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
// progress, queued, and to be queued on next puller iteration.
func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
m.fmut.RLock()
rf, rfOk := m.folderFiles[folder]
@@ -835,11 +841,7 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
get := perpage
if runnerOk {
allProgressNames, allQueuedNames := runner.Jobs()
var progressNames, queuedNames []string
progressNames, skip, get = getChunk(allProgressNames, skip, get)
queuedNames, skip, get = getChunk(allQueuedNames, skip, get)
progressNames, queuedNames, skipped := runner.Jobs(page, perpage)
progress = make([]db.FileInfoTruncated, len(progressNames))
queued = make([]db.FileInfoTruncated, len(queuedNames))
@@ -858,6 +860,12 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
seen[name] = struct{}{}
}
}
get -= len(seen)
if get == 0 {
return progress, queued, nil
}
skip -= skipped
}
rest = make([]db.FileInfoTruncated, 0, perpage)
@@ -986,7 +994,8 @@ func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []prot
m.fmut.RUnlock()
if !existing {
panic(fmt.Sprintf("%v for nonexistent folder %q", op, folder))
l.Warnf("%v for nonexistent folder %q", op, folder)
panic("handling index for nonexistent folder")
}
if running {
@@ -994,7 +1003,8 @@ func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []prot
} else if update {
// Runner may legitimately not be set if this is the "cleanup" Index
// message at startup.
panic(fmt.Sprintf("%v for not running folder %q", op, folder))
l.Warnf("%v for not running folder %q", op, folder)
panic("handling index for not running folder")
}
m.pmut.RLock()
@@ -1161,19 +1171,19 @@ func (m *model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
}
}
// The token isn't tracked as the service stops when the connection
// terminates and is automatically removed from supervisor (by
// implementing suture.IsCompletable).
m.Add(&indexSender{
is := &indexSender{
conn: conn,
connClosed: closed,
folder: folder.ID,
fset: fs,
prevSequence: startSequence,
dropSymlinks: dropSymlinks,
stop: make(chan struct{}),
stopped: make(chan struct{}),
})
}
is.Service = util.AsService(is.serve)
// The token isn't tracked as the service stops when the connection
// terminates and is automatically removed from supervisor (by
// implementing suture.IsCompletable).
m.Add(is)
}
m.pmut.Lock()
@@ -1426,23 +1436,39 @@ func (m *model) Closed(conn protocol.Connection, err error) {
close(closed)
}
// closeConns will close the underlying connection for given devices
func (m *model) closeConns(devs []protocol.DeviceID, err error) {
// closeConns will close the underlying connection for given devices and return
// a waiter that will return once all the connections are finished closing.
func (m *model) closeConns(devs []protocol.DeviceID, err error) config.Waiter {
conns := make([]connections.Connection, 0, len(devs))
closed := make([]chan struct{}, 0, len(devs))
m.pmut.Lock()
for _, dev := range devs {
if conn, ok := m.conn[dev]; ok {
conns = append(conns, conn)
closed = append(closed, m.closed[dev])
}
}
m.pmut.Unlock()
for _, conn := range conns {
conn.Close(err)
}
return &channelWaiter{chans: closed}
}
func (m *model) closeConn(dev protocol.DeviceID, err error) {
m.closeConns([]protocol.DeviceID{dev}, err)
// closeConn closes the underlying connection for the given device and returns
// a waiter that will return once the connection is finished closing.
func (m *model) closeConn(dev protocol.DeviceID, err error) config.Waiter {
return m.closeConns([]protocol.DeviceID{dev}, err)
}
type channelWaiter struct {
chans []chan struct{}
}
func (w *channelWaiter) Wait() {
for _, c := range w.chans {
<-c
}
}
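closeConns now returns immediately after asking each connection to close, handing back a Waiter; callers that need the old synchronous behaviour, like Stop and folder teardown above, call Wait on it. The same construct in isolation:

package main

import "fmt"

type channelWaiter struct{ chans []chan struct{} }

// Wait blocks until every channel is closed, i.e. until every
// connection has finished its shutdown handler.
func (w *channelWaiter) Wait() {
	for _, c := range w.chans {
		<-c
	}
}

func main() {
	a, b := make(chan struct{}), make(chan struct{})
	w := &channelWaiter{chans: []chan struct{}{a, b}}
	go func() { close(a); close(b) }() // two connections finishing
	w.Wait()
	fmt.Println("all connections closed")
}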
// Implements protocol.RequestResponse
@@ -1609,7 +1635,7 @@ func (m *model) recheckFile(deviceID protocol.DeviceID, folderFs fs.Filesystem,
return
}
blockIndex := int(offset) / cf.BlockSize()
blockIndex := int(offset / int64(cf.BlockSize()))
if blockIndex >= len(cf.Blocks) {
l.Debugf("%v recheckFile: %s: %q / %q i=%d: block index too far", m, deviceID, folder, name, blockIndex)
return
@@ -1888,6 +1914,7 @@ func (m *model) deviceWasSeen(deviceID protocol.DeviceID) {
}
type indexSender struct {
suture.Service
conn protocol.Connection
folder string
dev string
@@ -1895,13 +1922,9 @@ type indexSender struct {
prevSequence int64
dropSymlinks bool
connClosed chan struct{}
stop chan struct{}
stopped chan struct{}
}
func (s *indexSender) Serve() {
defer close(s.stopped)
func (s *indexSender) serve(stop chan struct{}) {
var err error
l.Debugf("Starting indexSender for %s to %s at %s (slv=%d)", s.folder, s.dev, s.conn, s.prevSequence)
@@ -1922,7 +1945,7 @@ func (s *indexSender) Serve() {
for err == nil {
select {
case <-s.stop:
case <-stop:
return
case <-s.connClosed:
return
@@ -1935,7 +1958,7 @@ func (s *indexSender) Serve() {
// sending for.
if s.fset.Sequence(protocol.LocalDeviceID) <= s.prevSequence {
select {
case <-s.stop:
case <-stop:
return
case <-s.connClosed:
return
@@ -1955,11 +1978,6 @@ func (s *indexSender) Serve() {
}
}
func (s *indexSender) Stop() {
close(s.stop)
<-s.stopped
}
// Complete implements the suture.IsCompletable interface. When Serve terminates
// before Stop is called, the supervisor will check for this method and if it
// returns true removes the service instead of restarting it. Here it always
@@ -2220,7 +2238,7 @@ func (m *model) WatchError(folder string) error {
m.fmut.RLock()
defer m.fmut.RUnlock()
if err := m.checkFolderRunningLocked(folder); err != nil {
return err
return nil // If the folder isn't running, there's no error to report.
}
return m.folderRunners[folder].WatchError()
}
@@ -2613,20 +2631,6 @@ func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
return m
}
// Skips `skip` elements and retrieves up to `get` elements from a given slice.
// Returns the resulting slice, plus how many elements are left to skip or
// copy to satisfy the requested values when the slice is not big enough.
func getChunk(data []string, skip, get int) ([]string, int, int) {
l := len(data)
if l <= skip {
return []string{}, skip - l, get
} else if l < skip+get {
return data[skip:l], 0, get - (l - skip)
}
return data[skip : skip+get], 0, 0
}
func readOffsetIntoBuf(fs fs.Filesystem, file string, offset int64, buf []byte) error {
fd, err := fs.Open(file)
if err != nil {

View File

@@ -26,6 +26,7 @@ import (
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
@@ -3303,3 +3304,31 @@ func TestConnCloseOnRestart(t *testing.T) {
t.Fatal("Timed out before connection was closed")
}
}
func TestDevicePause(t *testing.T) {
sub := events.Default.Subscribe(events.DevicePaused)
defer events.Default.Unsubscribe(sub)
m, _, fcfg := setupModelWithConnection()
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
m.pmut.RLock()
closed := m.closed[device1]
m.pmut.RUnlock()
dev := m.cfg.Devices()[device1]
dev.Paused = true
m.cfg.SetDevice(dev)
timeout := time.NewTimer(5 * time.Second)
select {
case <-sub.C():
select {
case <-closed:
case <-timeout.C:
t.Fatal("Timed out before connection was closed")
}
case <-timeout.C:
t.Fatal("Timed out before device was paused")
}
}

View File

@@ -10,13 +10,18 @@ import (
"fmt"
"time"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/util"
)
type ProgressEmitter struct {
suture.Service
registry map[string]map[string]*sharedPullerState // folder: name: puller
interval time.Duration
minBlocks int
@@ -27,15 +32,12 @@ type ProgressEmitter struct {
mut sync.Mutex
timer *time.Timer
stop chan struct{}
}
// NewProgressEmitter creates a new progress emitter which emits
// DownloadProgress events every interval.
func NewProgressEmitter(cfg config.Wrapper) *ProgressEmitter {
t := &ProgressEmitter{
stop: make(chan struct{}),
registry: make(map[string]map[string]*sharedPullerState),
timer: time.NewTimer(time.Millisecond),
sentDownloadStates: make(map[protocol.DeviceID]*sentDownloadState),
@@ -43,6 +45,7 @@ func NewProgressEmitter(cfg config.Wrapper) *ProgressEmitter {
foldersByConns: make(map[protocol.DeviceID][]string),
mut: sync.NewMutex(),
}
t.Service = util.AsService(t.serve)
t.CommitConfiguration(config.Configuration{}, cfg.RawCopy())
cfg.Subscribe(t)
@@ -50,14 +53,14 @@ func NewProgressEmitter(cfg config.Wrapper) *ProgressEmitter {
return t
}
// Serve starts the progress emitter which starts emitting DownloadProgress
// serve starts the progress emitter which starts emitting DownloadProgress
// events as the progress happens.
func (t *ProgressEmitter) Serve() {
func (t *ProgressEmitter) serve(stop chan struct{}) {
var lastUpdate time.Time
var lastCount, newCount int
for {
select {
case <-t.stop:
case <-stop:
l.Debugln("progress emitter: stopping")
return
case <-t.timer.C:
@@ -212,11 +215,6 @@ func (t *ProgressEmitter) CommitConfiguration(from, to config.Configuration) boo
return true
}
// Stop stops the emitter.
func (t *ProgressEmitter) Stop() {
t.stop <- struct{}{}
}
// Register a puller with the emitter which will start broadcasting pullers
// progress.
func (t *ProgressEmitter) Register(s *sharedPullerState) {

View File

@@ -84,19 +84,46 @@ func (q *jobQueue) Done(file string) {
}
}
func (q *jobQueue) Jobs() ([]string, []string) {
// Jobs returns a paginated list of files currently being pulled and files
// queued to be pulled. It also returns how many items were skipped.
func (q *jobQueue) Jobs(page, perpage int) ([]string, []string, int) {
q.mut.Lock()
defer q.mut.Unlock()
progress := make([]string, len(q.progress))
copy(progress, q.progress)
toSkip := (page - 1) * perpage
plen := len(q.progress)
qlen := len(q.queued)
queued := make([]string, len(q.queued))
for i := range q.queued {
queued[i] = q.queued[i].name
if tot := plen + qlen; tot <= toSkip {
return nil, nil, tot
}
return progress, queued
if plen >= toSkip+perpage {
progress := make([]string, perpage)
copy(progress, q.progress[toSkip:toSkip+perpage])
return progress, nil, toSkip
}
var progress []string
if plen > toSkip {
progress = make([]string, plen-toSkip)
copy(progress, q.progress[toSkip:plen])
toSkip = 0
} else {
toSkip -= plen
}
var queued []string
if qlen-toSkip < perpage-len(progress) {
queued = make([]string, qlen-toSkip)
} else {
queued = make([]string, perpage-len(progress))
}
for i := range queued {
queued[i] = q.queued[i+toSkip].name
}
return progress, queued, (page - 1) * perpage
}
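To make the arithmetic above easier to follow, here is the same pagination reduced to plain slices: skip (page-1)*perpage items across the concatenated progress and queued lists, take up to perpage, and report the skip count. Function and variable names are illustrative:

package main

import "fmt"

// paginate mirrors Jobs: page is 1-based, perpage caps the combined
// result, and the last return value is how many items were skipped.
func paginate(progress, queued []string, page, perpage int) ([]string, []string, int) {
	toSkip := (page - 1) * perpage
	if tot := len(progress) + len(queued); tot <= toSkip {
		return nil, nil, tot
	}
	var p []string
	if len(progress) > toSkip {
		end := toSkip + perpage
		if end > len(progress) {
			end = len(progress)
		}
		p = progress[toSkip:end]
		toSkip = 0
	} else {
		toSkip -= len(progress)
	}
	want := perpage - len(p)
	if rem := len(queued) - toSkip; rem < want {
		want = rem
	}
	q := queued[toSkip : toSkip+want]
	return p, q, (page - 1) * perpage
}

func main() {
	progress := []string{"a", "b", "c"}
	queued := []string{"d", "e", "f", "g"}
	p, q, skipped := paginate(progress, queued, 2, 3)
	fmt.Println(p, q, skipped) // [] [d e f] 3
}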
func (q *jobQueue) Shuffle() {

View File

@@ -22,9 +22,9 @@ func TestJobQueue(t *testing.T) {
q.Push("f3", 0, time.Time{})
q.Push("f4", 0, time.Time{})
progress, queued := q.Jobs()
progress, queued, _ := q.Jobs(1, 100)
if len(progress) != 0 || len(queued) != 4 {
t.Fatal("Wrong length")
t.Fatal("Wrong length", len(progress), len(queued))
}
for i := 1; i < 5; i++ {
@@ -32,7 +32,7 @@ func TestJobQueue(t *testing.T) {
if !ok || n != fmt.Sprintf("f%d", i) {
t.Fatal("Wrong element")
}
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 1 || len(queued) != 3 {
t.Log(progress)
t.Log(queued)
@@ -40,19 +40,19 @@ func TestJobQueue(t *testing.T) {
}
q.Done(n)
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 0 || len(queued) != 3 {
t.Fatal("Wrong length", len(progress), len(queued))
}
q.Push(n, 0, time.Time{})
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 0 || len(queued) != 4 {
t.Fatal("Wrong length")
}
q.Done("f5") // Does not exist
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 0 || len(queued) != 4 {
t.Fatal("Wrong length")
}
@@ -63,7 +63,7 @@ func TestJobQueue(t *testing.T) {
}
for i := 4; i > 0; i-- {
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 4-i || len(queued) != i {
t.Fatal("Wrong length")
}
@@ -71,7 +71,7 @@ func TestJobQueue(t *testing.T) {
s := fmt.Sprintf("f%d", i)
q.BringToFront(s)
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 4-i || len(queued) != i {
t.Fatal("Wrong length")
}
@@ -80,13 +80,13 @@ func TestJobQueue(t *testing.T) {
if !ok || n != s {
t.Fatal("Wrong element")
}
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 5-i || len(queued) != i-1 {
t.Fatal("Wrong length")
}
q.Done("f5") // Does not exist
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 5-i || len(queued) != i-1 {
t.Fatal("Wrong length")
}
@@ -108,13 +108,13 @@ func TestJobQueue(t *testing.T) {
t.Fatal("Wrong length")
}
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 0 || len(queued) != 0 {
t.Fatal("Wrong length")
}
q.BringToFront("")
q.Done("f5") // Does not exist
progress, queued = q.Jobs()
progress, queued, _ = q.Jobs(1, 100)
if len(progress) != 0 || len(queued) != 0 {
t.Fatal("Wrong length")
}
@@ -127,35 +127,35 @@ func TestBringToFront(t *testing.T) {
q.Push("f3", 0, time.Time{})
q.Push("f4", 0, time.Time{})
_, queued := q.Jobs()
_, queued, _ := q.Jobs(1, 100)
if diff, equal := messagediff.PrettyDiff([]string{"f1", "f2", "f3", "f4"}, queued); !equal {
t.Errorf("Order does not match. Diff:\n%s", diff)
}
q.BringToFront("f1") // corner case: does nothing
_, queued = q.Jobs()
_, queued, _ = q.Jobs(1, 100)
if diff, equal := messagediff.PrettyDiff([]string{"f1", "f2", "f3", "f4"}, queued); !equal {
t.Errorf("Order does not match. Diff:\n%s", diff)
}
q.BringToFront("f3")
_, queued = q.Jobs()
_, queued, _ = q.Jobs(1, 100)
if diff, equal := messagediff.PrettyDiff([]string{"f3", "f1", "f2", "f4"}, queued); !equal {
t.Errorf("Order does not match. Diff:\n%s", diff)
}
q.BringToFront("f2")
_, queued = q.Jobs()
_, queued, _ = q.Jobs(1, 100)
if diff, equal := messagediff.PrettyDiff([]string{"f2", "f3", "f1", "f4"}, queued); !equal {
t.Errorf("Order does not match. Diff:\n%s", diff)
}
q.BringToFront("f4") // corner case: last element
_, queued = q.Jobs()
_, queued, _ = q.Jobs(1, 100)
if diff, equal := messagediff.PrettyDiff([]string{"f4", "f2", "f3", "f1"}, queued); !equal {
t.Errorf("Order does not match. Diff:\n%s", diff)
}
@@ -171,9 +171,9 @@ func TestShuffle(t *testing.T) {
// This test will fail once in eight million times (1 / (4!)^5) :)
for i := 0; i < 5; i++ {
q.Shuffle()
_, queued := q.Jobs()
_, queued, _ := q.Jobs(1, 100)
if l := len(queued); l != 4 {
t.Fatalf("Weird length %d returned from Jobs()", l)
t.Fatalf("Weird length %d returned from jobs(1, 100)", l)
}
t.Logf("%v", queued)
@@ -195,9 +195,9 @@ func TestSortBySize(t *testing.T) {
q.SortSmallestFirst()
_, actual := q.Jobs()
_, actual, _ := q.Jobs(1, 100)
if l := len(actual); l != 4 {
t.Fatalf("Weird length %d returned from Jobs()", l)
t.Fatalf("Weird length %d returned from jobs(1, 100)", l)
}
expected := []string{"f4", "f1", "f3", "f2"}
@@ -207,9 +207,9 @@ func TestSortBySize(t *testing.T) {
q.SortLargestFirst()
_, actual = q.Jobs()
_, actual, _ = q.Jobs(1, 100)
if l := len(actual); l != 4 {
t.Fatalf("Weird length %d returned from Jobs()", l)
t.Fatalf("Weird length %d returned from jobs(1, 100)", l)
}
expected = []string{"f2", "f3", "f1", "f4"}
@@ -227,9 +227,9 @@ func TestSortByAge(t *testing.T) {
q.SortOldestFirst()
_, actual := q.Jobs()
_, actual, _ := q.Jobs(1, 100)
if l := len(actual); l != 4 {
t.Fatalf("Weird length %d returned from Jobs()", l)
t.Fatalf("Weird length %d returned from jobs(1, 100)", l)
}
expected := []string{"f4", "f1", "f3", "f2"}
@@ -239,9 +239,9 @@ func TestSortByAge(t *testing.T) {
q.SortNewestFirst()
_, actual = q.Jobs()
_, actual, _ = q.Jobs(1, 100)
if l := len(actual); l != 4 {
t.Fatalf("Weird length %d returned from Jobs()", l)
t.Fatalf("Weird length %d returned from jobs(1, 100)", l)
}
expected = []string{"f2", "f3", "f1", "f4"}
@@ -280,3 +280,136 @@ func BenchmarkJobQueuePushPopDone10k(b *testing.B) {
}
}
func TestQueuePagination(t *testing.T) {
q := newJobQueue()
// Ten items in the queue
names := make([]string, 10)
for i := 0; i < 10; i++ {
names[i] = fmt.Sprint("f", i)
q.Push(names[i], 0, time.Time{})
}
progress, queued, skip := q.Jobs(1, 100)
if len(progress) != 0 || len(queued) != 10 || skip != 0 {
t.Error("Wrong length", len(progress), len(queued), skip)
}
progress, queued, skip = q.Jobs(1, 5)
if len(progress) != 0 || len(queued) != 5 || skip != 0 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(queued, names[:5]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[:5])
}
progress, queued, skip = q.Jobs(2, 5)
if len(progress) != 0 || len(queued) != 5 || skip != 5 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(queued, names[5:]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[5:])
}
progress, queued, skip = q.Jobs(2, 7)
if len(progress) != 0 || len(queued) != 3 || skip != 7 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(queued, names[7:]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[7:])
}
progress, queued, skip = q.Jobs(3, 5)
if len(progress) != 0 || len(queued) != 0 || skip != 10 {
t.Error("Wrong length", len(progress), len(queued), skip)
}
n, ok := q.Pop()
if !ok || n != names[0] {
t.Fatal("Wrong element")
}
progress, queued, skip = q.Jobs(1, 100)
if len(progress) != 1 || len(queued) != 9 || skip != 0 {
t.Error("Wrong length", len(progress), len(queued), skip)
}
progress, queued, skip = q.Jobs(1, 5)
if len(progress) != 1 || len(queued) != 4 || skip != 0 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(progress, names[:1]) {
t.Errorf("Wrong elements in progress, got %v, expected %v", progress, names[:1])
} else if !equalStrings(queued, names[1:5]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[1:5])
}
progress, queued, skip = q.Jobs(2, 5)
if len(progress) != 0 || len(queued) != 5 || skip != 5 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(queued, names[5:]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[5:])
}
progress, queued, skip = q.Jobs(2, 7)
if len(progress) != 0 || len(queued) != 3 || skip != 7 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(queued, names[7:]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[7:])
}
progress, queued, skip = q.Jobs(3, 5)
if len(progress) != 0 || len(queued) != 0 || skip != 10 {
t.Error("Wrong length", len(progress), len(queued), skip)
}
for i := 1; i < 8; i++ {
n, ok := q.Pop()
if !ok || n != names[i] {
t.Fatal("Wrong element")
}
}
progress, queued, skip = q.Jobs(1, 100)
if len(progress) != 8 || len(queued) != 2 || skip != 0 {
t.Error("Wrong length", len(progress), len(queued), skip)
}
progress, queued, skip = q.Jobs(1, 5)
if len(progress) != 5 || len(queued) != 0 || skip != 0 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(progress, names[:5]) {
t.Errorf("Wrong elements in progress, got %v, expected %v", progress, names[:5])
}
progress, queued, skip = q.Jobs(2, 5)
if len(progress) != 3 || len(queued) != 2 || skip != 5 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(progress, names[5:8]) {
t.Errorf("Wrong elements in progress, got %v, expected %v", progress, names[5:8])
} else if !equalStrings(queued, names[8:]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[8:])
}
progress, queued, skip = q.Jobs(2, 7)
if len(progress) != 1 || len(queued) != 2 || skip != 7 {
t.Error("Wrong length", len(progress), len(queued), skip)
} else if !equalStrings(progress, names[7:8]) {
t.Errorf("Wrong elements in progress, got %v, expected %v", progress, names[7:8])
} else if !equalStrings(queued, names[8:]) {
t.Errorf("Wrong elements in queued, got %v, expected %v", queued, names[8:])
}
progress, queued, skip = q.Jobs(3, 5)
if len(progress) != 0 || len(queued) != 0 || skip != 10 {
t.Error("Wrong length", len(progress), len(queued), skip)
}
}
func equalStrings(first, second []string) bool {
if len(first) != len(second) {
return false
}
for i := range first {
if first[i] != second[i] {
return false
}
}
return true
}
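For reference, the paging contract these tests pin down can be sketched as a small standalone helper (hypothetical, over a single list; the real Jobs spans both the progress and queued lists): pages are 1-based, perpage caps the slice, and skip reports how many items precede the returned page.
// paginate is a hypothetical illustration of the Jobs(page, perpage)
// semantics, not the actual jobQueue implementation.
func paginate(items []string, page, perpage int) ([]string, int) {
	skip := (page - 1) * perpage
	if skip >= len(items) {
		return nil, len(items) // page past the end: nothing left, all skipped
	}
	end := skip + perpage
	if end > len(items) {
		end = len(items)
	}
	return items[skip:end], skip
}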

View File

@@ -13,6 +13,7 @@ import (
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"testing"
"time"
@@ -947,3 +948,46 @@ func TestRequestDeleteChanged(t *testing.T) {
}
}
}
func TestNeedFolderFiles(t *testing.T) {
m, fc, fcfg := setupModelWithConnection()
tfs := fcfg.Filesystem()
tmpDir := tfs.URI()
defer cleanupModelAndRemoveDir(m, tmpDir)
sub := events.Default.Subscribe(events.RemoteIndexUpdated)
defer events.Default.Unsubscribe(sub)
errPreventSync := errors.New("you aren't getting any of this")
fc.mut.Lock()
fc.requestFn = func(string, string, int64, int, []byte, bool) ([]byte, error) {
return nil, errPreventSync
}
fc.mut.Unlock()
data := []byte("foo")
num := 20
for i := 0; i < num; i++ {
fc.addFile(strconv.Itoa(i), 0644, protocol.FileInfoTypeFile, data)
}
fc.sendIndexUpdate()
select {
case <-sub.C():
case <-time.After(5 * time.Second):
t.Fatal("Timed out before receiving index")
}
progress, queued, rest := m.NeedFolderFiles(fcfg.ID, 1, 100)
if got := len(progress) + len(queued) + len(rest); got != num {
t.Errorf("Got %v needed items, expected %v", got, num)
}
exp := 10
for page := 1; page < 3; page++ {
progress, queued, rest := m.NeedFolderFiles(fcfg.ID, page, exp)
if got := len(progress) + len(queued) + len(rest); got != exp {
t.Errorf("Got %v needed items on page %v, expected %v", got, page, exp)
}
}
}

View File

@@ -19,7 +19,7 @@ func Register(provider DiscoverFunc) {
providers = append(providers, provider)
}
func discoverAll(renewal, timeout time.Duration) map[string]Device {
func discoverAll(renewal, timeout time.Duration, stop chan struct{}) map[string]Device {
wg := &sync.WaitGroup{}
wg.Add(len(providers))
@@ -28,20 +28,32 @@ func discoverAll(renewal, timeout time.Duration) map[string]Device {
for _, discoverFunc := range providers {
go func(f DiscoverFunc) {
defer wg.Done()
for _, dev := range f(renewal, timeout) {
c <- dev
select {
case c <- dev:
case <-stop:
return
}
}
wg.Done()
}(discoverFunc)
}
nats := make(map[string]Device)
go func() {
for dev := range c {
nats[dev.ID()] = dev
defer close(done)
for {
select {
case dev, ok := <-c:
if !ok {
return
}
nats[dev.ID()] = dev
case <-stop:
return
}
}
close(done)
}()
wg.Wait()
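The reworked discoverAll amounts to a stop-aware fan-in: each producer selects between sending and stopping, and the collector exits on channel close or on stop, so no goroutine is left blocked after shutdown. A self-contained sketch of the same shape (simplified types, not the discover package's API):
package example

import "sync"

func fanIn(producers []func() []string, stop chan struct{}) map[string]bool {
	wg := &sync.WaitGroup{}
	c := make(chan string)
	for _, produce := range producers {
		wg.Add(1)
		go func(produce func() []string) {
			defer wg.Done() // runs even when we bail out on stop
			for _, v := range produce() {
				select {
				case c <- v:
				case <-stop:
					return
				}
			}
		}(produce)
	}
	go func() {
		wg.Wait()
		close(c) // all producers done; lets the collector finish
	}()
	seen := make(map[string]bool)
	for {
		select {
		case v, ok := <-c:
			if !ok {
				return seen
			}
			seen[v] = true
		case <-stop:
			return seen
		}
	}
}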

View File

@@ -14,17 +14,21 @@ import (
stdsync "sync"
"time"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/util"
)
// Service runs a loop for discovery of IGDs (Internet Gateway Devices) and
// setup/renewal of a port mapping.
type Service struct {
id protocol.DeviceID
cfg config.Wrapper
stop chan struct{}
suture.Service
id protocol.DeviceID
cfg config.Wrapper
mappings []*Mapping
timer *time.Timer
@@ -32,27 +36,28 @@ type Service struct {
}
func NewService(id protocol.DeviceID, cfg config.Wrapper) *Service {
return &Service{
s := &Service{
id: id,
cfg: cfg,
timer: time.NewTimer(0),
mut: sync.NewRWMutex(),
}
s.Service = util.AsService(s.serve)
return s
}
func (s *Service) Serve() {
func (s *Service) serve(stop chan struct{}) {
announce := stdsync.Once{}
s.mut.Lock()
s.timer.Reset(0)
s.stop = make(chan struct{})
s.mut.Unlock()
for {
select {
case <-s.timer.C:
if found := s.process(); found != -1 {
if found := s.process(stop); found != -1 {
announce.Do(func() {
suffix := "s"
if found == 1 {
@@ -61,7 +66,7 @@ func (s *Service) Serve() {
l.Infoln("Detected", found, "NAT service"+suffix)
})
}
case <-s.stop:
case <-stop:
s.timer.Stop()
s.mut.RLock()
for _, mapping := range s.mappings {
@@ -73,7 +78,7 @@ func (s *Service) Serve() {
}
}
func (s *Service) process() int {
func (s *Service) process(stop chan struct{}) int {
// toRenew are mappings which are due for renewal
// toUpdate are the remaining mappings, which will only be updated if one of
// the old IGDs has gone away, or a new IGD has appeared, but only if we
@@ -115,25 +120,19 @@ func (s *Service) process() int {
return -1
}
nats := discoverAll(time.Duration(s.cfg.Options().NATRenewalM)*time.Minute, time.Duration(s.cfg.Options().NATTimeoutS)*time.Second)
nats := discoverAll(time.Duration(s.cfg.Options().NATRenewalM)*time.Minute, time.Duration(s.cfg.Options().NATTimeoutS)*time.Second, stop)
for _, mapping := range toRenew {
s.updateMapping(mapping, nats, true)
s.updateMapping(mapping, nats, true, stop)
}
for _, mapping := range toUpdate {
s.updateMapping(mapping, nats, false)
s.updateMapping(mapping, nats, false, stop)
}
return len(nats)
}
func (s *Service) Stop() {
s.mut.RLock()
close(s.stop)
s.mut.RUnlock()
}
func (s *Service) NewMapping(protocol Protocol, ip net.IP, port int) *Mapping {
mapping := &Mapping{
protocol: protocol,
@@ -178,17 +177,17 @@ func (s *Service) RemoveMapping(mapping *Mapping) {
// acquire mappings for natds which the mapping was unaware of before.
// Optionally takes renew flag which indicates whether or not we should renew
// mappings with existing natds
func (s *Service) updateMapping(mapping *Mapping, nats map[string]Device, renew bool) {
func (s *Service) updateMapping(mapping *Mapping, nats map[string]Device, renew bool, stop chan struct{}) {
var added, removed []Address
renewalTime := time.Duration(s.cfg.Options().NATRenewalM) * time.Minute
mapping.expires = time.Now().Add(renewalTime)
newAdded, newRemoved := s.verifyExistingMappings(mapping, nats, renew)
newAdded, newRemoved := s.verifyExistingMappings(mapping, nats, renew, stop)
added = append(added, newAdded...)
removed = append(removed, newRemoved...)
newAdded, newRemoved = s.acquireNewMappings(mapping, nats)
newAdded, newRemoved = s.acquireNewMappings(mapping, nats, stop)
added = append(added, newAdded...)
removed = append(removed, newRemoved...)
@@ -197,12 +196,18 @@ func (s *Service) updateMapping(mapping *Mapping, nats map[string]Device, renew
}
}
func (s *Service) verifyExistingMappings(mapping *Mapping, nats map[string]Device, renew bool) ([]Address, []Address) {
func (s *Service) verifyExistingMappings(mapping *Mapping, nats map[string]Device, renew bool, stop chan struct{}) ([]Address, []Address) {
var added, removed []Address
leaseTime := time.Duration(s.cfg.Options().NATLeaseM) * time.Minute
for id, address := range mapping.addressMap() {
select {
case <-stop:
return nil, nil
default:
}
// Delete addresses for NATDevices that do not exist anymore
nat, ok := nats[id]
if !ok {
@@ -220,7 +225,7 @@ func (s *Service) verifyExistingMappings(mapping *Mapping, nats map[string]Devic
l.Debugf("Renewing %s -> %s mapping on %s", mapping, address, id)
addr, err := s.tryNATDevice(nat, mapping.address.Port, address.Port, leaseTime)
addr, err := s.tryNATDevice(nat, mapping.address.Port, address.Port, leaseTime, stop)
if err != nil {
l.Debugf("Failed to renew %s -> mapping on %s", mapping, address, id)
mapping.removeAddress(id)
@@ -242,13 +247,19 @@ func (s *Service) verifyExistingMappings(mapping *Mapping, nats map[string]Devic
return added, removed
}
func (s *Service) acquireNewMappings(mapping *Mapping, nats map[string]Device) ([]Address, []Address) {
func (s *Service) acquireNewMappings(mapping *Mapping, nats map[string]Device, stop chan struct{}) ([]Address, []Address) {
var added, removed []Address
leaseTime := time.Duration(s.cfg.Options().NATLeaseM) * time.Minute
addrMap := mapping.addressMap()
for id, nat := range nats {
select {
case <-stop:
return nil, nil
default:
}
if _, ok := addrMap[id]; ok {
continue
}
@@ -263,7 +274,7 @@ func (s *Service) acquireNewMappings(mapping *Mapping, nats map[string]Device) (
l.Debugf("Acquiring %s mapping on %s", mapping, id)
addr, err := s.tryNATDevice(nat, mapping.address.Port, 0, leaseTime)
addr, err := s.tryNATDevice(nat, mapping.address.Port, 0, leaseTime, stop)
if err != nil {
l.Debugf("Failed to acquire %s mapping on %s", mapping, id)
continue
@@ -280,7 +291,7 @@ func (s *Service) acquireNewMappings(mapping *Mapping, nats map[string]Device) (
// tryNATDevice tries to acquire a port mapping for the given internal address to
// the given external port. If external port is 0, picks a pseudo-random port.
func (s *Service) tryNATDevice(natd Device, intPort, extPort int, leaseTime time.Duration) (Address, error) {
func (s *Service) tryNATDevice(natd Device, intPort, extPort int, leaseTime time.Duration, stop chan struct{}) (Address, error) {
var err error
var port int
@@ -301,6 +312,12 @@ func (s *Service) tryNATDevice(natd Device, intPort, extPort int, leaseTime time
}
for i := 0; i < 10; i++ {
select {
case <-stop:
return Address{}, nil
default:
}
// Then try up to ten random ports.
extPort = 1024 + predictableRand.Intn(65535-1024)
name := fmt.Sprintf("syncthing-%d", extPort)

View File

@@ -10,8 +10,8 @@ import (
type countingReader struct {
io.Reader
tot int64 // bytes
last int64 // unix nanos
tot int64 // bytes (atomic, must remain 64-bit aligned)
last int64 // unix nanos (atomic, must remain 64-bit aligned)
}
var (
@@ -37,8 +37,8 @@ func (c *countingReader) Last() time.Time {
type countingWriter struct {
io.Writer
tot int64 // bytes
last int64 // unix nanos
tot int64 // bytes (atomic, must remain 64-bit aligned)
last int64 // unix nanos (atomic, must remain 64-bit aligned)
}
func (c *countingWriter) Write(bs []byte) (int, error) {
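The rule these comments encode: on ARM, x86-32 and 32-bit MIPS, sync/atomic's 64-bit operations require 64-bit-aligned operands, and only the first word of an allocated struct is guaranteed aligned, so the atomically accessed fields must stay first. A minimal sketch of the pattern:
package example

import (
	"sync/atomic"
	"time"
)

// counter keeps its atomically accessed int64 fields first, so they are
// 64-bit aligned even on 32-bit platforms.
type counter struct {
	tot  int64 // bytes (atomic)
	last int64 // unix nanos (atomic)
	name string
}

func (c *counter) add(n int64) {
	atomic.AddInt64(&c.tot, n)
	atomic.StoreInt64(&c.last, time.Now().UnixNano())
}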

View File

@@ -187,6 +187,7 @@ type rawConnection struct {
closeBox chan asyncMessage
clusterConfigBox chan *ClusterConfig
dispatcherLoopStopped chan struct{}
preventSends chan struct{}
closed chan struct{}
closeOnce sync.Once
sendCloseOnce sync.Once
@@ -240,6 +241,7 @@ func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiv
closeBox: make(chan asyncMessage),
clusterConfigBox: make(chan *ClusterConfig),
dispatcherLoopStopped: make(chan struct{}),
preventSends: make(chan struct{}),
closed: make(chan struct{}),
compression: compress,
}
@@ -662,12 +664,13 @@ func (c *rawConnection) send(msg message, done chan struct{}) bool {
select {
case c.outbox <- asyncMessage{msg, done}:
return true
case <-c.preventSends:
case <-c.closed:
if done != nil {
close(done)
}
return false
}
if done != nil {
close(done)
}
return false
}
func (c *rawConnection) writerLoop() {
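The added preventSends channel gives send a second way to bail out besides closed; on either path the done channel is closed so waiters are released. Reduced to a sketch with placeholder types:
// trySend is a hypothetical reduction of the pattern above: enqueue unless
// either cancellation channel has fired, and always release the waiter.
func trySend(outbox chan<- string, msg string, done, preventSends, closed chan struct{}) bool {
	select {
	case outbox <- msg:
		return true
	case <-preventSends:
	case <-closed:
	}
	if done != nil {
		close(done)
	}
	return false
}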

View File

@@ -9,6 +9,10 @@ import (
"time"
"github.com/syncthing/syncthing/lib/relay/protocol"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/util"
"github.com/thejerf/suture"
)
type relayClientFactory func(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation, timeout time.Duration) RelayClient
@@ -22,8 +26,7 @@ var (
)
type RelayClient interface {
Serve()
Stop()
suture.Service
Error() error
Latency() time.Duration
String() string
@@ -39,3 +42,42 @@ func NewClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.
return factory(uri, certs, invitations, timeout), nil
}
type commonClient struct {
util.ServiceWithError
invitations chan protocol.SessionInvitation
closeInvitationsOnFinish bool
mut sync.RWMutex
}
func newCommonClient(invitations chan protocol.SessionInvitation, serve func(chan struct{}) error) commonClient {
c := commonClient{
invitations: invitations,
mut: sync.NewRWMutex(),
}
newServe := func(stop chan struct{}) error {
defer c.cleanup()
return serve(stop)
}
c.ServiceWithError = util.AsServiceWithError(newServe)
if c.invitations == nil {
c.closeInvitationsOnFinish = true
c.invitations = make(chan protocol.SessionInvitation)
}
return c
}
func (c *commonClient) cleanup() {
c.mut.Lock()
if c.closeInvitationsOnFinish {
close(c.invitations)
}
c.mut.Unlock()
}
func (c *commonClient) Invitations() chan protocol.SessionInvitation {
c.mut.RLock()
defer c.mut.RUnlock()
return c.invitations
}

View File

@@ -14,45 +14,29 @@ import (
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/relay/protocol"
"github.com/syncthing/syncthing/lib/sync"
)
type dynamicClient struct {
pooladdr *url.URL
certs []tls.Certificate
invitations chan protocol.SessionInvitation
closeInvitationsOnFinish bool
timeout time.Duration
commonClient
pooladdr *url.URL
certs []tls.Certificate
timeout time.Duration
mut sync.RWMutex
err error
client RelayClient
stop chan struct{}
}
func newDynamicClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation, timeout time.Duration) RelayClient {
closeInvitationsOnFinish := false
if invitations == nil {
closeInvitationsOnFinish = true
invitations = make(chan protocol.SessionInvitation)
}
return &dynamicClient{
pooladdr: uri,
certs: certs,
invitations: invitations,
closeInvitationsOnFinish: closeInvitationsOnFinish,
timeout: timeout,
mut: sync.NewRWMutex(),
c := &dynamicClient{
pooladdr: uri,
certs: certs,
timeout: timeout,
}
c.commonClient = newCommonClient(invitations, c.serve)
return c
}
func (c *dynamicClient) Serve() {
defer c.cleanup()
c.mut.Lock()
c.stop = make(chan struct{})
c.mut.Unlock()
func (c *dynamicClient) serve(stop chan struct{}) error {
uri := *c.pooladdr
// Trim off the `dynamic+` prefix
@@ -63,8 +47,7 @@ func (c *dynamicClient) Serve() {
data, err := http.Get(uri.String())
if err != nil {
l.Debugln(c, "failed to lookup dynamic relays", err)
c.setError(err)
return
return err
}
var ann dynamicAnnouncement
@@ -72,8 +55,7 @@ func (c *dynamicClient) Serve() {
data.Body.Close()
if err != nil {
l.Debugln(c, "failed to lookup dynamic relays", err)
c.setError(err)
return
return err
}
var addrs []string
@@ -87,22 +69,18 @@ func (c *dynamicClient) Serve() {
addrs = append(addrs, ruri.String())
}
for _, addr := range relayAddressesOrder(addrs) {
for _, addr := range relayAddressesOrder(addrs, stop) {
select {
case <-c.stop:
case <-stop:
l.Debugln(c, "stopping")
c.setError(nil)
return
return nil
default:
ruri, err := url.Parse(addr)
if err != nil {
l.Debugln(c, "skipping relay", addr, err)
continue
}
client, err := NewClient(ruri, c.certs, c.invitations, c.timeout)
if err != nil {
continue
}
client := newStaticClient(ruri, c.certs, c.invitations, c.timeout)
c.mut.Lock()
c.client = client
c.mut.Unlock()
@@ -115,24 +93,23 @@ func (c *dynamicClient) Serve() {
}
}
l.Debugln(c, "could not find a connectable relay")
c.setError(fmt.Errorf("could not find a connectable relay"))
return fmt.Errorf("could not find a connectable relay")
}
func (c *dynamicClient) Stop() {
c.mut.RLock()
defer c.mut.RUnlock()
close(c.stop)
if c.client == nil {
return
if c.client != nil {
c.client.Stop()
}
c.client.Stop()
c.mut.RUnlock()
c.commonClient.Stop()
}
func (c *dynamicClient) Error() error {
c.mut.RLock()
defer c.mut.RUnlock()
if c.client == nil {
return c.err
return c.commonClient.Error()
}
return c.client.Error()
}
@@ -159,28 +136,6 @@ func (c *dynamicClient) URI() *url.URL {
return c.client.URI()
}
func (c *dynamicClient) Invitations() chan protocol.SessionInvitation {
c.mut.RLock()
inv := c.invitations
c.mut.RUnlock()
return inv
}
func (c *dynamicClient) cleanup() {
c.mut.Lock()
if c.closeInvitationsOnFinish {
close(c.invitations)
c.invitations = make(chan protocol.SessionInvitation)
}
c.mut.Unlock()
}
func (c *dynamicClient) setError(err error) {
c.mut.Lock()
c.err = err
c.mut.Unlock()
}
// This is the announcement received from the relay server;
// {"relays": [{"url": "relay://10.20.30.40:5060"}, ...]}
type dynamicAnnouncement struct {
@@ -193,7 +148,7 @@ type dynamicAnnouncement struct {
// the closest 50ms, and puts them in buckets of 50ms latency ranges. Then
// shuffles each bucket, and returns all addresses starting with the ones from
// the lowest latency bucket, ending with the highest latency bucket.
func relayAddressesOrder(input []string) []string {
func relayAddressesOrder(input []string, stop chan struct{}) []string {
buckets := make(map[int][]string)
for _, relay := range input {
@@ -205,6 +160,12 @@ func relayAddressesOrder(input []string) []string {
id := int(latency/time.Millisecond) / 50
buckets[id] = append(buckets[id], relay)
select {
case <-stop:
return nil
default:
}
}
var ids []int
@@ -215,8 +176,7 @@ func relayAddressesOrder(input []string) []string {
sort.Ints(ids)
addresses := make([]string, len(input))
addresses := make([]string, 0, len(input))
for _, id := range ids {
addresses = append(addresses, buckets[id]...)
}
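Two fixes land here: the ordering loop is now interruptible via stop, and the result slice is created with zero length and full capacity, where the old make([]string, len(input)) left len(input) empty strings ahead of the appended addresses. A compact sketch of the bucketing (with a hypothetical measure helper standing in for the real latency probe, and the per-bucket shuffle omitted):
package example

import (
	"sort"
	"time"
)

func orderByLatency(input []string, measure func(string) (time.Duration, error)) []string {
	buckets := make(map[int][]string)
	for _, addr := range input {
		lat, err := measure(addr)
		if err != nil {
			continue // unreachable relays are dropped
		}
		id := int(lat/time.Millisecond) / 50 // 50 ms wide buckets
		buckets[id] = append(buckets[id], addr)
	}
	ids := make([]int, 0, len(buckets))
	for id := range buckets {
		ids = append(ids, id)
	}
	sort.Ints(ids)
	out := make([]string, 0, len(input)) // zero length, full capacity
	for _, id := range ids {
		out = append(out, buckets[id]...)
	}
	return out
}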

View File

@@ -12,88 +12,54 @@ import (
"github.com/syncthing/syncthing/lib/dialer"
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/relay/protocol"
"github.com/syncthing/syncthing/lib/sync"
)
type staticClient struct {
uri *url.URL
invitations chan protocol.SessionInvitation
commonClient
closeInvitationsOnFinish bool
uri *url.URL
config *tls.Config
messageTimeout time.Duration
connectTimeout time.Duration
stop chan struct{}
stopped chan struct{}
stopMut sync.RWMutex
conn *tls.Conn
mut sync.RWMutex
err error
connected bool
latency time.Duration
}
func newStaticClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation, timeout time.Duration) RelayClient {
closeInvitationsOnFinish := false
if invitations == nil {
closeInvitationsOnFinish = true
invitations = make(chan protocol.SessionInvitation)
}
stopped := make(chan struct{})
close(stopped) // not yet started, don't block on Stop()
return &staticClient{
uri: uri,
invitations: invitations,
closeInvitationsOnFinish: closeInvitationsOnFinish,
c := &staticClient{
uri: uri,
config: configForCerts(certs),
messageTimeout: time.Minute * 2,
connectTimeout: timeout,
stop: make(chan struct{}),
stopped: stopped,
stopMut: sync.NewRWMutex(),
mut: sync.NewRWMutex(),
}
c.commonClient = newCommonClient(invitations, c.serve)
return c
}
func (c *staticClient) Serve() {
defer c.cleanup()
c.stopMut.Lock()
c.stop = make(chan struct{})
c.stopped = make(chan struct{})
c.stopMut.Unlock()
defer close(c.stopped)
func (c *staticClient) serve(stop chan struct{}) error {
if err := c.connect(); err != nil {
l.Infof("Could not connect to relay %s: %s", c.uri, err)
c.setError(err)
return
return err
}
l.Debugln(c, "connected", c.conn.RemoteAddr())
defer c.disconnect()
if err := c.join(); err != nil {
c.conn.Close()
l.Infof("Could not join relay %s: %s", c.uri, err)
c.setError(err)
return
return err
}
if err := c.conn.SetDeadline(time.Time{}); err != nil {
c.conn.Close()
l.Infoln("Relay set deadline:", err)
c.setError(err)
return
return err
}
l.Infof("Joined relay %s://%s", c.uri.Scheme, c.uri.Host)
@@ -106,12 +72,10 @@ func (c *staticClient) Serve() {
messages := make(chan interface{})
errors := make(chan error, 1)
go messageReader(c.conn, messages, errors)
go messageReader(c.conn, messages, errors, stop)
timeout := time.NewTimer(c.messageTimeout)
c.stopMut.RLock()
defer c.stopMut.RUnlock()
for {
select {
case message := <-messages:
@@ -122,11 +86,9 @@ func (c *staticClient) Serve() {
case protocol.Ping:
if err := protocol.WriteMessage(c.conn, protocol.Pong{}); err != nil {
l.Infoln("Relay write:", err)
c.setError(err)
c.disconnect()
} else {
l.Debugln(c, "sent pong")
return err
}
l.Debugln(c, "sent pong")
case protocol.SessionInvitation:
ip := net.IP(msg.Address)
@@ -137,52 +99,28 @@ func (c *staticClient) Serve() {
case protocol.RelayFull:
l.Infof("Disconnected from relay %s due to it becoming full.", c.uri)
c.setError(fmt.Errorf("Relay full"))
c.disconnect()
return fmt.Errorf("relay full")
default:
l.Infoln("Relay: protocol error: unexpected message %v", msg)
c.setError(fmt.Errorf("protocol error: unexpected message %v", msg))
c.disconnect()
return fmt.Errorf("protocol error: unexpected message %v", msg)
}
case <-c.stop:
case <-stop:
l.Debugln(c, "stopping")
c.setError(nil)
c.disconnect()
return nil
// We always exit via this branch of the select, to make sure the
// reader routine exits.
case err := <-errors:
close(errors)
close(messages)
c.mut.Lock()
if c.connected {
c.conn.Close()
c.connected = false
l.Infof("Disconnecting from relay %s due to error: %s", c.uri, err)
c.err = err
} else {
c.err = nil
}
c.mut.Unlock()
return
l.Infof("Disconnecting from relay %s due to error: %s", c.uri, err)
return err
case <-timeout.C:
l.Debugln(c, "timed out")
c.disconnect()
c.setError(fmt.Errorf("timed out"))
return fmt.Errorf("timed out")
}
}
}
func (c *staticClient) Stop() {
c.stopMut.RLock()
close(c.stop)
<-c.stopped
c.stopMut.RUnlock()
}
func (c *staticClient) StatusOK() bool {
c.mut.RLock()
con := c.connected
@@ -205,25 +143,9 @@ func (c *staticClient) URI() *url.URL {
return c.uri
}
func (c *staticClient) Invitations() chan protocol.SessionInvitation {
c.mut.RLock()
inv := c.invitations
c.mut.RUnlock()
return inv
}
func (c *staticClient) cleanup() {
c.mut.Lock()
if c.closeInvitationsOnFinish {
close(c.invitations)
c.invitations = make(chan protocol.SessionInvitation)
}
c.mut.Unlock()
}
func (c *staticClient) connect() error {
if c.uri.Scheme != "relay" {
return fmt.Errorf("Unsupported relay schema: %v", c.uri.Scheme)
return fmt.Errorf("unsupported relay scheme: %v", c.uri.Scheme)
}
t0 := time.Now()
@@ -261,19 +183,6 @@ func (c *staticClient) disconnect() {
c.conn.Close()
}
func (c *staticClient) setError(err error) {
c.mut.Lock()
c.err = err
c.mut.Unlock()
}
func (c *staticClient) Error() error {
c.mut.RLock()
err := c.err
c.mut.RUnlock()
return err
}
func (c *staticClient) join() error {
if err := protocol.WriteMessage(c.conn, protocol.JoinRelayRequest{}); err != nil {
return err
@@ -287,7 +196,7 @@ func (c *staticClient) join() error {
switch msg := message.(type) {
case protocol.Response:
if msg.Code != 0 {
return fmt.Errorf("Incorrect response code %d: %s", msg.Code, msg.Message)
return fmt.Errorf("incorrect response code %d: %s", msg.Code, msg.Message)
}
case protocol.RelayFull:
@@ -332,13 +241,17 @@ func performHandshakeAndValidation(conn *tls.Conn, uri *url.URL) error {
return nil
}
func messageReader(conn net.Conn, messages chan<- interface{}, errors chan<- error) {
func messageReader(conn net.Conn, messages chan<- interface{}, errors chan<- error, stop chan struct{}) {
for {
msg, err := protocol.ReadMessage(conn)
if err != nil {
errors <- err
return
}
messages <- msg
select {
case messages <- msg:
case <-stop:
return
}
}
}
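Threading stop into messageReader matters because the serve loop can return while the reader still holds a message; without the select, messages <- msg would block forever and leak the goroutine. The pattern in isolation:
// readLoop is a sketch: read until error, but never block on a consumer
// that has already gone away. errs is assumed buffered (capacity 1), as in
// the code above, so the error send cannot block.
func readLoop(read func() (string, error), out chan<- string, errs chan<- error, stop chan struct{}) {
	for {
		msg, err := read()
		if err != nil {
			errs <- err
			return
		}
		select {
		case out <- msg:
		case <-stop:
			return
		}
	}
}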

View File

@@ -537,7 +537,7 @@ func (w *walker) handleError(ctx context.Context, context, path string, err erro
// A byteCounter gets bytes added to it via Update() and then provides the
// Total() and one minute moving average Rate() in bytes per second.
type byteCounter struct {
total int64
total int64 // atomic, must remain 64-bit aligned
metrics.EWMA
stop chan struct{}
}

View File

@@ -13,7 +13,10 @@ import (
"github.com/AudriusButkevicius/pfilter"
"github.com/ccding/go-stun/stun"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/util"
)
const stunRetryInterval = 5 * time.Minute
@@ -36,7 +39,7 @@ const (
)
type writeTrackingPacketConn struct {
lastWrite int64
lastWrite int64 // atomic, must remain 64-bit aligned
net.PacketConn
}
@@ -56,6 +59,8 @@ type Subscriber interface {
}
type Service struct {
suture.Service
name string
cfg config.Wrapper
subscriber Subscriber
@@ -66,8 +71,6 @@ type Service struct {
natType NATType
addr *Host
stop chan struct{}
}
func New(cfg config.Wrapper, subscriber Subscriber, conn net.PacketConn) (*Service, net.PacketConn) {
@@ -88,7 +91,7 @@ func New(cfg config.Wrapper, subscriber Subscriber, conn net.PacketConn) (*Servi
client.SetSoftwareName("") // Explicitly unset this, seems to freak some servers out.
// Return the service and the other conn to the client
return &Service{
s := &Service{
name: "Stun@" + conn.LocalAddr().Network() + "://" + conn.LocalAddr().String(),
cfg: cfg,
@@ -100,16 +103,17 @@ func New(cfg config.Wrapper, subscriber Subscriber, conn net.PacketConn) (*Servi
natType: NATUnknown,
addr: nil,
stop: make(chan struct{}),
}, otherDataConn
}
s.Service = util.AsService(s.serve)
return s, otherDataConn
}
func (s *Service) Stop() {
close(s.stop)
_ = s.stunConn.Close()
s.Service.Stop()
}
func (s *Service) Serve() {
func (s *Service) serve(stop chan struct{}) {
for {
disabled:
s.setNATType(NATUnknown)
@@ -117,7 +121,7 @@ func (s *Service) Serve() {
if s.cfg.Options().IsStunDisabled() {
select {
case <-s.stop:
case <-stop:
return
case <-time.After(time.Second):
continue
@@ -130,12 +134,12 @@ func (s *Service) Serve() {
// This blocks until we hit an exit condition or there are issues with the STUN server.
// This returns a boolean signifying if a different STUN server should be tried
// (as opposed to the whole thing shutting down and this winding itself down).
if !s.runStunForServer(addr) {
if !s.runStunForServer(addr, stop) {
// Check exit conditions.
// Have we been asked to stop?
select {
case <-s.stop:
case <-stop:
return
default:
}
@@ -159,11 +163,15 @@ func (s *Service) Serve() {
// We failed to contact all provided stun servers or the nat is not punchable.
// Chillout for a while.
time.Sleep(stunRetryInterval)
select {
case <-time.After(stunRetryInterval):
case <-stop:
return
}
}
}
func (s *Service) runStunForServer(addr string) (tryNext bool) {
func (s *Service) runStunForServer(addr string, stop chan struct{}) (tryNext bool) {
l.Debugf("Running stun for %s via %s", s, addr)
// Resolve the address, so that in case the server advertises two
@@ -201,10 +209,10 @@ func (s *Service) runStunForServer(addr string) (tryNext bool) {
return false
}
return s.stunKeepAlive(addr, extAddr)
return s.stunKeepAlive(addr, extAddr, stop)
}
func (s *Service) stunKeepAlive(addr string, extAddr *Host) (tryNext bool) {
func (s *Service) stunKeepAlive(addr string, extAddr *Host, stop chan struct{}) (tryNext bool) {
var err error
nextSleep := time.Duration(s.cfg.Options().StunKeepaliveStartS) * time.Second
@@ -247,7 +255,7 @@ func (s *Service) stunKeepAlive(addr string, extAddr *Host) (tryNext bool) {
select {
case <-time.After(sleepFor):
case <-s.stop:
case <-stop:
l.Debugf("%s stopping, aborting stun", s)
return false
}
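Swapping time.Sleep for a select is what makes the shutdown timely: a plain Sleep would pin the goroutine for the full retry interval even after Stop. As a reusable sketch:
package example

import "time"

// sleepOrStop waits for d, but returns early (false) if stop closes first.
func sleepOrStop(d time.Duration, stop chan struct{}) bool {
	select {
	case <-time.After(d):
		return true
	case <-stop:
		return false
	}
}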

View File

@@ -0,0 +1,54 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing
import (
"encoding/json"
"io"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/util"
)
// The auditService subscribes to events and writes these in JSON format, one
// event per line, to the specified writer.
type auditService struct {
suture.Service
w io.Writer // audit destination
sub *events.Subscription
}
func newAuditService(w io.Writer) *auditService {
s := &auditService{
w: w,
sub: events.Default.Subscribe(events.AllEvents),
}
s.Service = util.AsService(s.serve)
return s
}
// serve runs the audit service.
func (s *auditService) serve(stop chan struct{}) {
enc := json.NewEncoder(s.w)
for {
select {
case ev := <-s.sub.C():
enc.Encode(ev)
case <-stop:
return
}
}
}
// Stop stops the audit service.
func (s *auditService) Stop() {
s.Service.Stop()
events.Default.Unsubscribe(s.sub)
}

View File

@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
package syncthing
import (
"bytes"
@@ -17,13 +17,12 @@ import (
func TestAuditService(t *testing.T) {
buf := new(bytes.Buffer)
service := newAuditService(buf)
// Event sent before start, will not be logged
// Event sent before construction, will not be logged
events.Default.Log(events.ConfigSaved, "the first event")
service := newAuditService(buf)
go service.Serve()
service.WaitForStart()
// Event that should end up in the audit log
events.Default.Log(events.ConfigSaved, "the second event")
@@ -32,7 +31,6 @@ func TestAuditService(t *testing.T) {
time.Sleep(10 * time.Millisecond)
service.Stop()
service.WaitForStop()
// This event should not be logged, since we have stopped.
events.Default.Log(events.ConfigSaved, "the third event")

View File

@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
package syncthing
import (
"math"

View File

@@ -6,7 +6,7 @@
//+build solaris
package main
package syncthing
import (
"encoding/binary"

View File

@@ -6,7 +6,7 @@
//+build !windows,!solaris
package main
package syncthing
import "syscall"
import "time"

View File

@@ -6,7 +6,7 @@
//+build windows
package main
package syncthing
import "syscall"
import "time"

lib/syncthing/debug.go (new file)
View File

@@ -0,0 +1,22 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing
import (
"os"
"strings"
"github.com/syncthing/syncthing/lib/logger"
)
var (
l = logger.DefaultLogger.NewFacility("app", "Main run facility")
)
func init() {
l.SetDebug("app", strings.Contains(os.Getenv("STTRACE"), "app") || os.Getenv("STTRACE") == "all")
}

View File

@@ -6,7 +6,7 @@
// +build !windows
package main
package syncthing
import (
"os"

View File

@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
package syncthing
import "syscall"

lib/syncthing/syncthing.go (new file)
View File

@@ -0,0 +1,482 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package syncthing
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"runtime"
"strings"
"sync"
"time"
"github.com/syncthing/syncthing/lib/api"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/connections"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/locations"
"github.com/syncthing/syncthing/lib/logger"
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syncthing/syncthing/lib/ur"
"github.com/thejerf/suture"
)
const (
bepProtocolName = "bep/1.0"
tlsDefaultCommonName = "syncthing"
maxSystemErrors = 5
initialSystemLog = 10
maxSystemLog = 250
)
type ExitStatus int
const (
ExitSuccess ExitStatus = 0
ExitError ExitStatus = 1
ExitRestart ExitStatus = 3
ExitUpgrade ExitStatus = 4
)
type Options struct {
AssetDir string
AuditWriter io.Writer
DeadlockTimeoutS int
NoUpgrade bool
ProfilerURL string
ResetDeltaIdxs bool
Verbose bool
}
type App struct {
myID protocol.DeviceID
mainService *suture.Supervisor
cfg config.Wrapper
ll *db.Lowlevel
cert tls.Certificate
opts Options
exitStatus ExitStatus
err error
startOnce sync.Once
stop chan struct{}
stopped chan struct{}
}
func New(cfg config.Wrapper, ll *db.Lowlevel, cert tls.Certificate, opts Options) *App {
return &App{
cfg: cfg,
ll: ll,
opts: opts,
cert: cert,
stop: make(chan struct{}),
stopped: make(chan struct{}),
}
}
// Run does the same as Start, but then does not return until the app stops. It
// is equivalent to calling Start and then Wait.
func (a *App) Run() ExitStatus {
a.Start()
return a.Wait()
}
// Start executes the app and returns once all the startup operations are done,
// e.g. the API is ready for use.
func (a *App) Start() {
a.startOnce.Do(func() {
if err := a.startup(); err != nil {
close(a.stop)
a.exitStatus = ExitError
a.err = err
close(a.stopped)
return
}
go a.run()
})
}
func (a *App) startup() error {
// Create a main service manager. We'll add things to this as we go along.
// We want any logging it does to go through our log system.
a.mainService = suture.New("main", suture.Spec{
Log: func(line string) {
l.Debugln(line)
},
PassThroughPanics: true,
})
a.mainService.ServeBackground()
if a.opts.AuditWriter != nil {
a.mainService.Add(newAuditService(a.opts.AuditWriter))
}
if a.opts.Verbose {
a.mainService.Add(newVerboseService())
}
errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)
// Event subscription for the API; must start early to catch the early
// events. The LocalChangeDetected event might overwhelm the event
// receiver in some situations so we will not subscribe to it here.
defaultSub := events.NewBufferedSubscription(events.Default.Subscribe(api.DefaultEventMask), api.EventSubBufferSize)
diskSub := events.NewBufferedSubscription(events.Default.Subscribe(api.DiskEventMask), api.EventSubBufferSize)
// Attempt to increase the limit on number of open files to the maximum
// allowed, in case we have many peers. We don't really care enough to
// report the error if there is one.
osutil.MaximizeOpenFileLimit()
// Figure out our device ID, set it as the log prefix and log it.
a.myID = protocol.NewDeviceID(a.cert.Certificate[0])
l.SetPrefix(fmt.Sprintf("[%s] ", a.myID.String()[:5]))
l.Infoln("My ID:", a.myID)
// Select SHA256 implementation and report. Affected by the
// STHASHING environment variable.
sha256.SelectAlgo()
sha256.Report()
// Emit the Starting event, now that we know who we are.
events.Default.Log(events.Starting, map[string]string{
"home": locations.GetBaseDir(locations.ConfigBaseDir),
"myID": a.myID.String(),
})
if err := checkShortIDs(a.cfg); err != nil {
l.Warnln("Short device IDs are in conflict. Unlucky!\n Regenerate the device ID of one of the following:\n ", err)
return err
}
if len(a.opts.ProfilerURL) > 0 {
go func() {
l.Debugln("Starting profiler on", a.opts.ProfilerURL)
runtime.SetBlockProfileRate(1)
err := http.ListenAndServe(a.opts.ProfilerURL, nil)
if err != nil {
l.Warnln(err)
return
}
}()
}
perf := ur.CpuBench(3, 150*time.Millisecond, true)
l.Infof("Hashing performance is %.02f MB/s", perf)
if err := db.UpdateSchema(a.ll); err != nil {
l.Warnln("Database schema:", err)
return err
}
if a.opts.ResetDeltaIdxs {
l.Infoln("Reinitializing delta index IDs")
db.DropDeltaIndexIDs(a.ll)
}
protectedFiles := []string{
locations.Get(locations.Database),
locations.Get(locations.ConfigFile),
locations.Get(locations.CertFile),
locations.Get(locations.KeyFile),
}
// Remove database entries for folders that no longer exist in the config
folders := a.cfg.Folders()
for _, folder := range a.ll.ListFolders() {
if _, ok := folders[folder]; !ok {
l.Infof("Cleaning data for dropped folder %q", folder)
db.DropFolder(a.ll, folder)
}
}
// Grab the previously running version string from the database.
miscDB := db.NewMiscDataNamespace(a.ll)
prevVersion, _ := miscDB.String("prevVersion")
// Strip away prerelease/beta stuff and just compare the release
// numbers. 0.14.44 to 0.14.45-banana is an upgrade, 0.14.45-banana to
// 0.14.45-pineapple is not.
prevParts := strings.Split(prevVersion, "-")
curParts := strings.Split(build.Version, "-")
if prevParts[0] != curParts[0] {
if prevVersion != "" {
l.Infoln("Detected upgrade from", prevVersion, "to", build.Version)
}
// Drop delta indexes in case we've changed random stuff we
// shouldn't have. We will resend our index on next connect.
db.DropDeltaIndexIDs(a.ll)
// Remember the new version.
miscDB.PutString("prevVersion", build.Version)
}
m := model.NewModel(a.cfg, a.myID, "syncthing", build.Version, a.ll, protectedFiles)
if a.opts.DeadlockTimeoutS > 0 {
m.StartDeadlockDetector(time.Duration(a.opts.DeadlockTimeoutS) * time.Second)
} else if !build.IsRelease || build.IsBeta {
m.StartDeadlockDetector(20 * time.Minute)
}
// Add and start folders
for _, folderCfg := range a.cfg.Folders() {
if folderCfg.Paused {
folderCfg.CreateRoot()
continue
}
m.AddFolder(folderCfg)
m.StartFolder(folderCfg.ID)
}
a.mainService.Add(m)
// Start discovery
cachedDiscovery := discover.NewCachingMux()
a.mainService.Add(cachedDiscovery)
// The TLS configuration is used for both the listening socket and outgoing
// connections.
tlsCfg := tlsutil.SecureDefault()
tlsCfg.Certificates = []tls.Certificate{a.cert}
tlsCfg.NextProtos = []string{bepProtocolName}
tlsCfg.ClientAuth = tls.RequestClientCert
tlsCfg.SessionTicketsDisabled = true
tlsCfg.InsecureSkipVerify = true
// Start connection management
connectionsService := connections.NewService(a.cfg, a.myID, m, tlsCfg, cachedDiscovery, bepProtocolName, tlsDefaultCommonName)
a.mainService.Add(connectionsService)
if a.cfg.Options().GlobalAnnEnabled {
for _, srv := range a.cfg.GlobalDiscoveryServers() {
l.Infoln("Using discovery server", srv)
gd, err := discover.NewGlobal(srv, a.cert, connectionsService)
if err != nil {
l.Warnln("Global discovery:", err)
continue
}
// Each global discovery server gets its results cached for five
// minutes, and is not asked again for a minute when it's returned
// unsuccessfully.
cachedDiscovery.Add(gd, 5*time.Minute, time.Minute)
}
}
if a.cfg.Options().LocalAnnEnabled {
// v4 broadcasts
bcd, err := discover.NewLocal(a.myID, fmt.Sprintf(":%d", a.cfg.Options().LocalAnnPort), connectionsService)
if err != nil {
l.Warnln("IPv4 local discovery:", err)
} else {
cachedDiscovery.Add(bcd, 0, 0)
}
// v6 multicasts
mcd, err := discover.NewLocal(a.myID, a.cfg.Options().LocalAnnMCAddr, connectionsService)
if err != nil {
l.Warnln("IPv6 local discovery:", err)
} else {
cachedDiscovery.Add(mcd, 0, 0)
}
}
// Candidate builds always run with usage reporting.
if opts := a.cfg.Options(); build.IsCandidate {
l.Infoln("Anonymous usage reporting is always enabled for candidate releases.")
if opts.URAccepted != ur.Version {
opts.URAccepted = ur.Version
a.cfg.SetOptions(opts)
a.cfg.Save()
// Unique ID will be set and config saved below if necessary.
}
}
// If we are going to do usage reporting, ensure we have a valid unique ID.
if opts := a.cfg.Options(); opts.URAccepted > 0 && opts.URUniqueID == "" {
opts.URUniqueID = rand.String(8)
a.cfg.SetOptions(opts)
a.cfg.Save()
}
usageReportingSvc := ur.New(a.cfg, m, connectionsService, a.opts.NoUpgrade)
a.mainService.Add(usageReportingSvc)
// GUI
if err := a.setupGUI(m, defaultSub, diskSub, cachedDiscovery, connectionsService, usageReportingSvc, errors, systemLog); err != nil {
l.Warnln("Failed starting API:", err)
return err
}
myDev, _ := a.cfg.Device(a.myID)
l.Infof(`My name is "%v"`, myDev.Name)
for _, device := range a.cfg.Devices() {
if device.DeviceID != a.myID {
l.Infof(`Device %s is "%v" at %v`, device.DeviceID, device.Name, device.Addresses)
}
}
if isSuperUser() {
l.Warnln("Syncthing should not run as a privileged or system user. Please consider using a normal user account.")
}
events.Default.Log(events.StartupComplete, map[string]string{
"myID": a.myID.String(),
})
if a.cfg.Options().SetLowPriority {
if err := osutil.SetLowPriority(); err != nil {
l.Warnln("Failed to lower process priority:", err)
}
}
return nil
}
func (a *App) run() {
<-a.stop
a.mainService.Stop()
done := make(chan struct{})
go func() {
a.ll.Close()
close(done)
}()
select {
case <-done:
case <-time.After(10 * time.Second):
l.Warnln("Database failed to stop within 10s")
}
l.Infoln("Exiting")
close(a.stopped)
}
// Wait blocks until the app stops running.
func (a *App) Wait() ExitStatus {
<-a.stopped
return a.exitStatus
}
// Error returns an error if one occurred while running the app. It does not wait
// for the app to stop before returning.
func (a *App) Error() error {
select {
case <-a.stopped:
return nil
default:
}
return a.err
}
// Stop stops the app and sets its exit status to the given reason, unless the app
// was already stopped before. In any case it returns the effective exit status.
func (a *App) Stop(stopReason ExitStatus) ExitStatus {
select {
case <-a.stopped:
case <-a.stop:
default:
// ExitSuccess is the default value for a.exitStatus. If another status
// was already set, ignore the stop reason given as argument to Stop.
if a.exitStatus == ExitSuccess {
a.exitStatus = stopReason
}
close(a.stop)
<-a.stopped
}
return a.exitStatus
}
func (a *App) setupGUI(m model.Model, defaultSub, diskSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connections.Service, urService *ur.Service, errors, systemLog logger.Recorder) error {
guiCfg := a.cfg.GUI()
if !guiCfg.Enabled {
return nil
}
if guiCfg.InsecureAdminAccess {
l.Warnln("Insecure admin access is enabled.")
}
cpu := newCPUService()
a.mainService.Add(cpu)
summaryService := model.NewFolderSummaryService(a.cfg, m, a.myID)
a.mainService.Add(summaryService)
apiSvc := api.New(a.myID, a.cfg, a.opts.AssetDir, tlsDefaultCommonName, m, defaultSub, diskSub, discoverer, connectionsService, urService, summaryService, errors, systemLog, cpu, &controller{a}, a.opts.NoUpgrade)
a.mainService.Add(apiSvc)
if err := apiSvc.WaitForStart(); err != nil {
return err
}
return nil
}
// checkShortIDs verifies that the configuration won't result in duplicate
// short IDs; that is, that the devices in the cluster all have unique
// initial 64 bits.
func checkShortIDs(cfg config.Wrapper) error {
exists := make(map[protocol.ShortID]protocol.DeviceID)
for deviceID := range cfg.Devices() {
shortID := deviceID.Short()
if otherID, ok := exists[shortID]; ok {
return fmt.Errorf("%v in conflict with %v", deviceID, otherID)
}
exists[shortID] = deviceID
}
return nil
}
// Implements api.Controller
type controller struct{ *App }
func (e *controller) Restart() {
e.Stop(ExitRestart)
}
func (e *controller) Shutdown() {
e.Stop(ExitSuccess)
}
func (e *controller) ExitUpgrading() {
e.Stop(ExitUpgrade)
}
func LoadCertificate(certFile, keyFile string) (tls.Certificate, error) {
return tls.LoadX509KeyPair(certFile, keyFile)
}
func LoadConfig(path string, cert tls.Certificate) (config.Wrapper, error) {
return config.Load(path, protocol.NewDeviceID(cert.Certificate[0]))
}
func OpenGoleveldb(path string) (*db.Lowlevel, error) {
return db.Open(path)
}
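Taken together, these exported entry points let an embedding program drive Syncthing without going through cmd/syncthing. A minimal caller might look like this (a sketch; file paths are placeholders and error handling is abbreviated):
package main

import (
	"log"
	"os"

	"github.com/syncthing/syncthing/lib/syncthing"
)

func main() {
	cert, err := syncthing.LoadCertificate("cert.pem", "key.pem")
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := syncthing.LoadConfig("config.xml", cert)
	if err != nil {
		log.Fatal(err)
	}
	ll, err := syncthing.OpenGoleveldb("index-db")
	if err != nil {
		log.Fatal(err)
	}
	app := syncthing.New(cfg, ll, cert, syncthing.Options{})
	os.Exit(int(app.Run())) // blocks until the app stops
}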

View File

@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
package syncthing
import (
"testing"

View File

@@ -4,50 +4,42 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
package syncthing
import (
"fmt"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/util"
)
// The verbose logging service subscribes to events and prints these in
// verbose format to the console using INFO level.
type verboseService struct {
stop chan struct{} // signals time to stop
started chan struct{} // signals startup complete
suture.Service
sub *events.Subscription
}
func newVerboseService() *verboseService {
return &verboseService{
stop: make(chan struct{}),
started: make(chan struct{}),
s := &verboseService{
sub: events.Default.Subscribe(events.AllEvents),
}
s.Service = util.AsService(s.serve)
return s
}
// Serve runs the verbose logging service.
func (s *verboseService) Serve() {
sub := events.Default.Subscribe(events.AllEvents)
defer events.Default.Unsubscribe(sub)
select {
case <-s.started:
// The started channel has already been closed; do nothing.
default:
// This is the first time around. Indicate that we're ready to start
// processing events.
close(s.started)
}
// serve runs the verbose logging service.
func (s *verboseService) serve(stop chan struct{}) {
for {
select {
case ev := <-sub.C():
case ev := <-s.sub.C():
formatted := s.formatEvent(ev)
if formatted != "" {
l.Verboseln(formatted)
}
case <-s.stop:
case <-stop:
return
}
}
@@ -55,13 +47,9 @@ func (s *verboseService) Serve() {
// Stop stops the verbose logging service.
func (s *verboseService) Stop() {
close(s.stop)
}
s.Service.Stop()
events.Default.Unsubscribe(s.sub)
// WaitForStart returns once the verbose logging service is ready to receive
// events, or immediately if it's already running.
func (s *verboseService) WaitForStart() {
<-s.started
}
func (s *verboseService) formatEvent(ev events.Event) string {

View File

@@ -17,7 +17,6 @@ import (
"runtime"
"sort"
"strings"
"sync"
"time"
"github.com/syncthing/syncthing/lib/build"
@@ -28,6 +27,9 @@ import (
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/upgrade"
"github.com/syncthing/syncthing/lib/util"
"github.com/thejerf/suture"
)
// Current version number of the usage report, for acceptance purposes. If
@@ -38,14 +40,12 @@ const Version = 3
var StartTime = time.Now()
type Service struct {
suture.Service
cfg config.Wrapper
model model.Model
connectionsService connections.Service
noUpgrade bool
forceRun chan struct{}
stop chan struct{}
stopped chan struct{}
stopMut sync.RWMutex
}
func New(cfg config.Wrapper, m model.Model, connectionsService connections.Service, noUpgrade bool) *Service {
@@ -54,11 +54,9 @@ func New(cfg config.Wrapper, m model.Model, connectionsService connections.Servi
model: m,
connectionsService: connectionsService,
noUpgrade: noUpgrade,
forceRun: make(chan struct{}),
stop: make(chan struct{}),
stopped: make(chan struct{}),
forceRun: make(chan struct{}, 1), // Buffered to prevent blocking
}
close(svc.stopped) // Not yet running, don't block on Stop()
svc.Service = util.AsService(svc.serve)
cfg.Subscribe(svc)
return svc
}
@@ -385,20 +383,11 @@ func (s *Service) sendUsageReport() error {
return err
}
func (s *Service) Serve() {
s.stopMut.Lock()
s.stop = make(chan struct{})
s.stopped = make(chan struct{})
s.stopMut.Unlock()
func (s *Service) serve(stop chan struct{}) {
t := time.NewTimer(time.Duration(s.cfg.Options().URInitialDelayS) * time.Second)
s.stopMut.RLock()
defer func() {
close(s.stopped)
s.stopMut.RUnlock()
}()
for {
select {
case <-s.stop:
case <-stop:
return
case <-s.forceRun:
t.Reset(0)
@@ -422,23 +411,16 @@ func (s *Service) VerifyConfiguration(from, to config.Configuration) error {
func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
if from.Options.URAccepted != to.Options.URAccepted || from.Options.URUniqueID != to.Options.URUniqueID || from.Options.URURL != to.Options.URURL {
s.stopMut.RLock()
select {
case s.forceRun <- struct{}{}:
case <-s.stop:
default:
// s.forceRun is buffered (size one), so even though nothing
// was sent, a run will still happen after this point.
}
s.stopMut.RUnlock()
}
return true
}
func (s *Service) Stop() {
s.stopMut.RLock()
close(s.stop)
<-s.stopped
s.stopMut.RUnlock()
}
func (*Service) String() string {
return "ur.Service"
}
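The buffered forceRun channel is the simplification that removes the stop/stopped bookkeeping: a send either lands in the empty buffer or finds a run already pending, so CommitConfiguration never blocks and repeated triggers coalesce. In isolation:
// fire is a sketch of the coalescing trigger: non-blocking because the
// channel has a one-element buffer.
func fire(forceRun chan struct{}) {
	select {
	case forceRun <- struct{}{}:
	default:
		// A run is already pending; nothing to do.
	}
}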

View File

@@ -12,6 +12,10 @@ import (
"reflect"
"strconv"
"strings"
"github.com/syncthing/syncthing/lib/sync"
"github.com/thejerf/suture"
)
type defaultParser interface {
@@ -170,3 +174,80 @@ func Address(network, host string) string {
}
return u.String()
}
// AsService wraps the given function to implement suture.Service by calling
// that function on Serve and closing the passed channel when Stop is called.
func AsService(fn func(stop chan struct{})) suture.Service {
return AsServiceWithError(func(stop chan struct{}) error {
fn(stop)
return nil
})
}
type ServiceWithError interface {
suture.Service
Error() error
SetError(error)
}
// AsServiceWithError does the same as AsService, except that it keeps track
// of an error returned by the given function.
func AsServiceWithError(fn func(stop chan struct{}) error) ServiceWithError {
s := &service{
serve: fn,
stop: make(chan struct{}),
stopped: make(chan struct{}),
mut: sync.NewMutex(),
}
close(s.stopped) // not yet started, don't block on Stop()
return s
}
type service struct {
serve func(stop chan struct{}) error
stop chan struct{}
stopped chan struct{}
err error
mut sync.Mutex
}
func (s *service) Serve() {
s.mut.Lock()
select {
case <-s.stop:
s.mut.Unlock()
return
default:
}
s.err = nil
s.stopped = make(chan struct{})
s.mut.Unlock()
var err error
defer func() {
s.mut.Lock()
s.err = err
close(s.stopped)
s.mut.Unlock()
}()
err = s.serve(s.stop)
}
func (s *service) Stop() {
s.mut.Lock()
close(s.stop)
s.mut.Unlock()
<-s.stopped
}
func (s *service) Error() error {
s.mut.Lock()
defer s.mut.Unlock()
return s.err
}
func (s *service) SetError(err error) {
s.mut.Lock()
s.err = err
s.mut.Unlock()
}
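This helper is what most of the changeset converts onto: a service supplies only serve(stop), and AsService provides the suture.Service surface. A converted service ends up shaped like this (a hypothetical periodic worker, mirroring the versioners and nat.Service above):
package example

import (
	"time"

	"github.com/syncthing/syncthing/lib/util"
	"github.com/thejerf/suture"
)

type worker struct {
	suture.Service
}

func newWorker() *worker {
	w := &worker{}
	w.Service = util.AsService(w.serve)
	return w
}

func (w *worker) serve(stop chan struct{}) {
	t := time.NewTicker(time.Minute)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			// periodic work goes here
		case <-stop:
			return
		}
	}
}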

View File

@@ -77,9 +77,11 @@ func (v External) Archive(filePath string) error {
}
for i, word := range words {
if replacement, ok := context[word]; ok {
words[i] = replacement
for key, val := range context {
word = strings.Replace(word, key, val, -1)
}
words[i] = word
}
cmd := exec.Command(words[0], words[1:]...)
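The behavioral change: placeholders are now replaced as substrings inside each word, so a template word like --file=%FILE_PATH% expands, where the old whole-word lookup only matched a bare %FILE_PATH% token. A sketch, assuming %FILE_PATH%-style keys in context:
package example

import "strings"

// expand applies every context replacement inside every word, so keys may
// appear as substrings, not only as whole words.
func expand(words []string, context map[string]string) []string {
	out := make([]string, len(words))
	for i, word := range words {
		for key, val := range context {
			word = strings.Replace(word, key, val, -1)
		}
		out[i] = word
	}
	return out
}

// expand([]string{"archive", "--file=%FILE_PATH%"},
//         map[string]string{"%FILE_PATH%": "docs/a.txt"})
// yields []string{"archive", "--file=docs/a.txt"}.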

View File

@@ -11,8 +11,11 @@ import (
"strconv"
"time"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/util"
)
func init() {
@@ -26,13 +29,13 @@ type Interval struct {
}
type Staggered struct {
suture.Service
cleanInterval int64
folderFs fs.Filesystem
versionsFs fs.Filesystem
interval [4]Interval
mutex sync.Mutex
stop chan struct{}
testCleanDone chan struct{}
}
@@ -61,14 +64,14 @@ func NewStaggered(folderID string, folderFs fs.Filesystem, params map[string]str
{604800, maxAge}, // next year -> 1 week between versions
},
mutex: sync.NewMutex(),
stop: make(chan struct{}),
}
s.Service = util.AsService(s.serve)
l.Debugf("instantiated %#v", s)
return s
}
func (v *Staggered) Serve() {
func (v *Staggered) serve(stop chan struct{}) {
v.clean()
if v.testCleanDone != nil {
close(v.testCleanDone)
@@ -80,16 +83,12 @@ func (v *Staggered) Serve() {
select {
case <-tck.C:
v.clean()
case <-v.stop:
case <-stop:
return
}
}
}
func (v *Staggered) Stop() {
close(v.stop)
}
func (v *Staggered) clean() {
l.Debugln("Versioner clean: Waiting for lock on", v.versionsFs)
v.mutex.Lock()

View File

@@ -11,7 +11,10 @@ import (
"strconv"
"time"
"github.com/thejerf/suture"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/util"
)
func init() {
@@ -20,10 +23,10 @@ func init() {
}
type Trashcan struct {
suture.Service
folderFs fs.Filesystem
versionsFs fs.Filesystem
cleanoutDays int
stop chan struct{}
}
func NewTrashcan(folderID string, folderFs fs.Filesystem, params map[string]string) Versioner {
@@ -34,8 +37,8 @@ func NewTrashcan(folderID string, folderFs fs.Filesystem, params map[string]stri
folderFs: folderFs,
versionsFs: fsFromParams(folderFs, params),
cleanoutDays: cleanoutDays,
stop: make(chan struct{}),
}
s.Service = util.AsService(s.serve)
l.Debugf("instantiated %#v", s)
return s
@@ -49,7 +52,7 @@ func (t *Trashcan) Archive(filePath string) error {
})
}
func (t *Trashcan) Serve() {
func (t *Trashcan) serve(stop chan struct{}) {
l.Debugln(t, "starting")
defer l.Debugln(t, "stopping")
@@ -59,7 +62,7 @@ func (t *Trashcan) Serve() {
for {
select {
case <-t.stop:
case <-stop:
return
case <-timer.C:
@@ -75,10 +78,6 @@ func (t *Trashcan) Serve() {
}
}
func (t *Trashcan) Stop() {
close(t.stop)
}
func (t *Trashcan) String() string {
return fmt.Sprintf("trashcan@%p", t)
}