mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-04 11:59:12 -05:00)

Compare commits

48 Commits:

5f8c0ca932, 2953fe40d1, ff0a83fe5b, dc42db444b, a9c221189b, b2966957e0,
0d30166357, d65f1fb08a, 622b614f31, b1ade6d0c0, 46becc5338, 20fac4bb80,
d84b6bb822, 55b63941b8, e70003737b, f98c21b68e, c704ba9ef9, dfad6a2aa9,
fb7264a663, 889814a1af, 694a7de59d, 059185b325, 1e9e9cbebb, cdbb32d0f0,
becbb3b123, 06da67e6fc, 2f08f8021f, 9d3f3847ed, 74c8d34805, 5ec1490be0,
2760d032ca, 813e6ddf83, 6ffb95f6c8, 0b5c11bf93, 5aade9a4a5, 9717c3d292,
a365ae51c4, 97222797a0, e588bb29b9, 2dd9450793, 3ee12464b4, 59ebcea356,
27d4896a13, 1088eb12ea, f40d219370, 429cc20eb7, e85ce7c94e, 283c8d95e2
AUTHORS (1 changed line)

```diff
@@ -107,6 +107,7 @@ Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tomas Cerveny (kozec) <kozec@kozec.com>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
```
```diff
@@ -1,11 +1,17 @@
Do not report security issues in this bug tracker. Instead, contact
security@syncthing.net directly - see https://syncthing.net/security.html
for more information.
### DO NOT REPORT SECURITY ISSUES IN THIS ISSUE TRACKER

If your issue is a support request ("How do I get my devices to connect?"
or similar), please use the support forum at https://forum.syncthing.net/
where a large number of helpful people hang out. This issue tracker is for
reporting bugs or feature requests directly to the developers.
Instead, contact security@syncthing.net directly - see
https://syncthing.net/security.html for more information.

### DO NOT POST SUPPORT REQUESTS OR GENERAL QUESTIONS IN THIS ISSUE TRACKER

Please use the forum at https://forum.syncthing.net/ where a large number of
helpful people hang out. This issue tracker is for reporting bugs or feature
requests directly to the developers. Worst case you might get a short
"that's a bug, please report it on GitHub" response on the forum, in which
case we thank you for your patience and following our advice. :)

### Please do post actual bug reports and feature requests.

If your issue is a bug report, replace this boilerplate with a description
of the problem, being sure to include at least:

```
NICKS (1 changed line)

```diff
@@ -129,6 +129,7 @@ Stefan-Code <Stefan.github@gmail.com>
timabell <tim@timwise.co.uk>
timhowes <timhowes@berkeley.edu>
tnn2 <tnn@nygren.pp.se>
tobiastom <t.tom@succont.de>
tojrobinson <tully@tojr.org>
tpng <benny.tpng@gmail.com>
tylerbrazier <tyler@tylerbrazier.com>
```
build.go (30 changed lines)

```diff
@@ -45,6 +45,7 @@ var (
noBuildGopath bool
extraTags string
installSuffix string
+pkgdir string
)

type target struct {
@@ -203,7 +204,7 @@ func main() {
}()
}

-if gopath() == "" {
+if gopath := gopath(); gopath == "" {
gopath, err := temporaryBuildDir()
if err != nil {
log.Fatal(err)
@@ -216,6 +217,20 @@ func main() {
}
os.Setenv("GOPATH", gopath)
log.Println("GOPATH is", gopath)
} else {
inside := false
wd, _ := os.Getwd()
wd, _ = filepath.EvalSymlinks(wd)
for _, p := range filepath.SplitList(gopath) {
p, _ = filepath.EvalSymlinks(p)
if filepath.Join(p, "src/github.com/syncthing/syncthing") == wd {
inside = true
break
}
}
if !inside {
fmt.Println("You seem to have GOPATH set but the Syncthing source not placed correctly within it, which may cause problems.")
}
}

// Set path to $GOPATH/bin:$PATH so that we can for sure find tools we
@@ -342,6 +357,7 @@ func parseFlags() {
flag.BoolVar(&noBuildGopath, "no-build-gopath", noBuildGopath, "Don't build GOPATH, assume it's OK")
flag.StringVar(&extraTags, "tags", extraTags, "Extra tags, space separated")
flag.StringVar(&installSuffix, "installsuffix", installSuffix, "Install suffix, optional")
+flag.StringVar(&pkgdir, "pkgdir", "", "Set -pkgdir parameter for `go build`")
flag.Parse()
}

@@ -359,9 +375,9 @@ func setup() {
"github.com/tsenart/deadcode",
"golang.org/x/net/html",
"golang.org/x/tools/cmd/cover",
-"honnef.co/go/simple/cmd/gosimple",
-"honnef.co/go/staticcheck/cmd/staticcheck",
-"honnef.co/go/unused/cmd/unused",
+"honnef.co/go/tools/cmd/gosimple",
+"honnef.co/go/tools/cmd/staticcheck",
+"honnef.co/go/tools/cmd/unused",
}
for _, pkg := range packages {
fmt.Println(pkg)
@@ -404,6 +420,9 @@ func install(target target, tags []string) {
}
os.Setenv("GOBIN", filepath.Join(cwd, "bin"))
args := []string{"install", "-v", "-ldflags", ldflags()}
+if pkgdir != "" {
+args = append(args, "-pkgdir", pkgdir)
+}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, " "))
}
@@ -427,6 +446,9 @@ func build(target target, tags []string) {

rmr(target.BinaryName())
args := []string{"build", "-i", "-v", "-ldflags", ldflags()}
+if pkgdir != "" {
+args = append(args, "-pkgdir", pkgdir)
+}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, " "))
}
```
```diff
@@ -100,6 +100,7 @@ type modelIntf interface {
CurrentSequence(folder string) (int64, bool)
RemoteSequence(folder string) (int64, bool)
State(folder string) (string, time.Time, error)
+UsageReportingStats(version int) map[string]interface{}
}

type configIntf interface {
@@ -119,6 +120,7 @@ type configIntf interface {

type connectionsIntf interface {
Status() map[string]interface{}
+NATType() string
}

type rater interface {
@@ -332,7 +334,7 @@ func (s *apiService) Serve() {
}

// Add the CORS handling
-handler = corsMiddleware(handler)
+handler = corsMiddleware(handler, guiCfg.InsecureAllowFrameLoading)

if addressIsLocalhost(guiCfg.Address()) && !guiCfg.InsecureSkipHostCheck {
// Verify source host
@@ -459,7 +461,7 @@ func debugMiddleware(h http.Handler) http.Handler {
})
}

-func corsMiddleware(next http.Handler) http.Handler {
+func corsMiddleware(next http.Handler, allowFrameLoading bool) http.Handler {
// Handle CORS headers and CORS OPTIONS request.
// CORS OPTIONS request are typically sent by browser during AJAX preflight
// when the browser initiate a POST request.
@@ -486,6 +488,27 @@ func corsMiddleware(next http.Handler) http.Handler {
return
}

+// Other security related headers that should be present.
+// https://www.owasp.org/index.php/Security_Headers
+
+if !allowFrameLoading {
+// We don't want to be rendered in an <iframe>,
+// <frame> or <object>. (Unless we do it ourselves.
+// This is also an escape hatch for people who serve
+// Syncthing GUI as part of their own website
+// through a proxy, so they don't need to set the
+// allowFrameLoading bool.)
+w.Header().Set("X-Frame-Options", "SAMEORIGIN")
+}
+
+// If the browser senses an XSS attack it's allowed to take
+// action. (How this would not always be the default I
+// don't fully understand.)
+w.Header().Set("X-XSS-Protection", "1; mode=block")
+
+// Our content type headers are correct. Don't guess.
+w.Header().Set("X-Content-Type-Options", "nosniff")
+
// For everything else, pass to the next handler
next.ServeHTTP(w, r)
return
```
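The headers added above are easy to try out in isolation. Below is a minimal, self-contained sketch of a middleware in the same spirit; it is not Syncthing's actual corsMiddleware, and the handler, port and function names are made up for illustration.

```go
// A sketch of a response-header middleware similar to the change above.
// secureHeaders, the demo handler and the listen address are illustrative only.
package main

import (
	"fmt"
	"log"
	"net/http"
)

// secureHeaders wraps next and sets the same headers the diff adds.
func secureHeaders(next http.Handler, allowFrameLoading bool) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !allowFrameLoading {
			// Refuse to be embedded in frames on other origins.
			w.Header().Set("X-Frame-Options", "SAMEORIGIN")
		}
		// Ask browsers to block reflected XSS and not to sniff Content-Type.
		w.Header().Set("X-XSS-Protection", "1; mode=block")
		w.Header().Set("X-Content-Type-Options", "nosniff")
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", secureHeaders(mux, false)))
}
```

Wrapping the top-level mux keeps the header logic in one place, which is the same design the diff applies to the API handler chain.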
```diff
@@ -779,18 +802,6 @@ func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
}
}

-// Fixup usage reporting settings
-
-if curAcc := s.cfg.Options().URAccepted; to.Options.URAccepted > curAcc {
-// UR was enabled
-to.Options.URAccepted = usageReportVersion
-to.Options.URUniqueID = rand.String(8)
-} else if to.Options.URAccepted < curAcc {
-// UR was disabled
-to.Options.URAccepted = -1
-to.Options.URUniqueID = ""
-}
-
// Activate and save

if err := s.cfg.Replace(to); err != nil {
@@ -882,6 +893,7 @@ func (s *apiService) getSystemStatus(w http.ResponseWriter, r *http.Request) {
// gives us percent
res["cpuPercent"] = s.cpu.Rate() / 10 / float64(runtime.NumCPU())
res["pathSeparator"] = string(filepath.Separator)
+res["urVersionMax"] = usageReportVersion
res["uptime"] = int(time.Since(startTime).Seconds())
res["startTime"] = startTime

@@ -960,7 +972,11 @@ func (s *apiService) getSystemDiscovery(w http.ResponseWriter, r *http.Request)
}

func (s *apiService) getReport(w http.ResponseWriter, r *http.Request) {
-sendJSON(w, reportData(s.cfg, s.model))
+version := usageReportVersion
+if val, _ := strconv.Atoi(r.URL.Query().Get("version")); val > 0 {
+version = val
+}
+sendJSON(w, reportData(s.cfg, s.model, s.connectionsService, version))
}

func (s *apiService) getRandomString(w http.ResponseWriter, r *http.Request) {
```
```diff
@@ -141,17 +141,17 @@ func (s *staticsServer) serveThemes(w http.ResponseWriter, r *http.Request) {
func (s *staticsServer) mimeTypeForFile(file string) string {
// We use a built in table of the common types since the system
// TypeByExtension might be unreliable. But if we don't know, we delegate
-// to the system.
+// to the system. All our files are UTF-8.
ext := filepath.Ext(file)
switch ext {
case ".htm", ".html":
-return "text/html"
+return "text/html; charset=utf-8"
case ".css":
-return "text/css"
+return "text/css; charset=utf-8"
case ".js":
-return "application/javascript"
+return "application/javascript; charset=utf-8"
case ".json":
-return "application/json"
+return "application/json; charset=utf-8"
case ".png":
return "image/png"
case ".ttf":
@@ -159,7 +159,7 @@ func (s *staticsServer) mimeTypeForFile(file string) string {
case ".woff":
return "application/x-font-woff"
case ".svg":
-return "image/svg+xml"
+return "image/svg+xml; charset=utf-8"
default:
return mime.TypeByExtension(ext)
}
```
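The lookup pattern above, a small table of known types with a fallback to mime.TypeByExtension, can be sketched independently of Syncthing; the table and file names below are illustrative only, not the project's actual code.

```go
// A sketch of the "known table first, stdlib fallback" MIME lookup above.
package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

var knownTypes = map[string]string{
	".html": "text/html; charset=utf-8",
	".css":  "text/css; charset=utf-8",
	".js":   "application/javascript; charset=utf-8",
	".json": "application/json; charset=utf-8",
	".png":  "image/png",
}

func mimeTypeForFile(file string) string {
	ext := filepath.Ext(file)
	if t, ok := knownTypes[ext]; ok {
		return t
	}
	// Unknown extension: delegate to the system/stdlib table.
	return mime.TypeByExtension(ext)
}

func main() {
	for _, f := range []string{"index.html", "app.js", "font.woff"} {
		fmt.Printf("%-12s -> %s\n", f, mimeTypeForFile(f))
	}
}
```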
```diff
@@ -236,6 +236,7 @@ type RuntimeOptions struct {
resetDeltaIdxs bool
showVersion bool
showPaths bool
+showDeviceId bool
doUpgrade bool
doUpgradeCheck bool
upgradeTo string
@@ -301,6 +302,7 @@ func parseCommandLineOptions() RuntimeOptions {
flag.BoolVar(&options.doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
flag.BoolVar(&options.showVersion, "version", false, "Show version")
flag.BoolVar(&options.showPaths, "paths", false, "Show configuration paths")
+flag.BoolVar(&options.showDeviceId, "device-id", false, "Show the device ID")
flag.StringVar(&options.upgradeTo, "upgrade-to", options.upgradeTo, "Force upgrade directly from specified URL")
flag.BoolVar(&options.auditEnabled, "audit", false, "Write events to audit file")
flag.BoolVar(&options.verbose, "verbose", false, "Print verbose log output")
@@ -390,6 +392,17 @@ func main() {
return
}

+if options.showDeviceId {
+cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
+if err != nil {
+l.Fatalln("Error reading device ID:", err)
+}
+
+myID = protocol.NewDeviceID(cert.Certificate[0])
+fmt.Println(myID)
+return
+}
+
if options.browserOnly {
openGUI()
return
@@ -436,7 +449,7 @@ func main() {
}

func openGUI() {
-cfg, _ := loadConfig()
+cfg, _ := loadOrDefaultConfig()
if cfg.GUI().Enabled {
openURL(cfg.GUI().URL())
} else {
@@ -475,9 +488,7 @@ func generate(generateDir string) {
l.Warnln("Config exists; will not overwrite.")
return
}
-var myName, _ = os.Hostname()
-var newCfg = defaultConfig(myName)
-var cfg = config.Wrap(cfgFile, newCfg)
+var cfg = defaultConfig(cfgFile)
err = cfg.Save()
if err != nil {
l.Warnln("Failed to save config", err)
@@ -507,7 +518,7 @@ func debugFacilities() string {
}

func checkUpgrade() upgrade.Release {
-cfg, _ := loadConfig()
+cfg, _ := loadOrDefaultConfig()
opts := cfg.Options()
release, err := upgrade.LatestRelease(opts.ReleasesURL, Version, opts.UpgradeToPreReleases)
if err != nil {
@@ -545,7 +556,7 @@ func performUpgrade(release upgrade.Release) {
}

func upgradeViaRest() error {
-cfg, _ := loadConfig()
+cfg, _ := loadOrDefaultConfig()
u, err := url.Parse(cfg.GUI().URL())
if err != nil {
return err
@@ -650,7 +661,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
"myID": myID.String(),
})

-cfg := loadOrCreateConfig()
+cfg := loadConfigAtStartup()

if err := checkShortIDs(cfg); err != nil {
l.Fatalln("Short device IDs are in conflict. Unlucky!\n Regenerate the device ID of one of the following:\n ", err)
@@ -869,24 +880,14 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
// Unique ID will be set and config saved below if necessary.
}

if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
l.Infoln("Anonymous usage report has changed; revoking acceptance")
opts.URAccepted = 0
opts.URUniqueID = ""
cfg.SetOptions(opts)
}

if opts.URAccepted >= usageReportVersion && opts.URUniqueID == "" {
// Generate and save a new unique ID if it is missing.
if opts.URUniqueID == "" {
opts.URUniqueID = rand.String(8)
cfg.SetOptions(opts)
cfg.Save()
}

// The usageReportingManager registers itself to listen to configuration
// changes, and there's nothing more we need to tell it from the outside.
// Hence we don't keep the returned pointer.
newUsageReportingManager(cfg, m)
usageReportingSvc := newUsageReportingService(cfg, m, connectionsService)
mainService.Add(usageReportingSvc)

if opts.RestartOnWakeup {
go standbyMonitor()
@@ -962,26 +963,28 @@ func setupSignalHandling() {
}()
}

-func loadConfig() (*config.Wrapper, error) {
+func loadOrDefaultConfig() (*config.Wrapper, error) {
cfgFile := locations[locConfigFile]
cfg, err := config.Load(cfgFile, myID)

if err != nil {
-myName, _ := os.Hostname()
-newCfg := defaultConfig(myName)
-cfg = config.Wrap(cfgFile, newCfg)
+cfg = defaultConfig(cfgFile)
}

return cfg, err
}

-func loadOrCreateConfig() *config.Wrapper {
-cfg, err := loadConfig()
+func loadConfigAtStartup() *config.Wrapper {
+cfgFile := locations[locConfigFile]
+cfg, err := config.Load(cfgFile, myID)
if os.IsNotExist(err) {
cfg = defaultConfig(cfgFile)
cfg.Save()
-l.Infof("Defaults saved. Edit %s to taste or use the GUI\n", cfg.ConfigPath())
+l.Infof("Default config saved. Edit %s to taste or use the GUI\n", cfg.ConfigPath())
} else if err == io.EOF {
l.Fatalln("Failed to load config: unexpected end of file. Truncated or empty configuration?")
} else if err != nil {
-l.Fatalln("Config:", err)
+l.Fatalln("Failed to load config:", err)
}

if cfg.RawCopy().OriginalVersion != config.CurrentVersion {
@@ -1084,7 +1087,9 @@ func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Mode
}
}

-func defaultConfig(myName string) config.Configuration {
+func defaultConfig(cfgFile string) *config.Wrapper {
+myName, _ := os.Hostname()

var defaultFolder config.FolderConfiguration

if !noDefaultFolder {
@@ -1092,6 +1097,7 @@ func defaultConfig(myName string) config.Configuration {
defaultFolder = config.NewFolderConfiguration("default", fs.FilesystemTypeBasic, locations[locDefFolder])
defaultFolder.Label = "Default Folder"
defaultFolder.RescanIntervalS = 60
+defaultFolder.FSWatcherDelayS = 10
defaultFolder.MinDiskFree = config.Size{Value: 1, Unit: "%"}
defaultFolder.Devices = []config.FolderDeviceConfiguration{{DeviceID: myID}}
defaultFolder.AutoNormalize = true
@@ -1128,7 +1134,7 @@ func defaultConfig(myName string) config.Configuration {
}
}

-return newCfg
+return config.Wrap(cfgFile, newCfg)
}

func resetDB() error {
```
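The new -device-id branch is essentially "load the TLS key pair, derive the device ID from the certificate, print it". A standalone sketch of that flow, assuming placeholder cert.pem/key.pem paths and using the same lib/protocol helper the diff itself calls, might look like this.

```go
// Sketch of computing a Syncthing device ID from an existing certificate/key
// pair, mirroring the -device-id branch above. The file paths are placeholders.
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/syncthing/syncthing/lib/protocol"
)

func main() {
	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
	if err != nil {
		log.Fatalln("Error reading device ID:", err)
	}
	// The device ID is derived from the DER-encoded leaf certificate.
	fmt.Println(protocol.NewDeviceID(cert.Certificate[0]))
}
```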
```diff
@@ -11,3 +11,7 @@ type mockedConnections struct{}
func (m *mockedConnections) Status() map[string]interface{} {
return nil
}
+
+func (m *mockedConnections) NATType() string {
+return ""
+}
```

```diff
@@ -114,3 +114,7 @@ func (m *mockedModel) RemoteSequence(folder string) (int64, bool) {
func (m *mockedModel) State(folder string) (string, time.Time, error) {
return "", time.Time{}, nil
}
+
+func (m *mockedModel) UsageReportingStats(version int) map[string]interface{} {
+return nil
+}
```
```diff
@@ -12,7 +12,7 @@ import (
"crypto/rand"
"crypto/tls"
"encoding/json"
"fmt"
"net"
"net/http"
"runtime"
"sort"
@@ -20,71 +20,25 @@ import (
"time"

"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/connections"
"github.com/syncthing/syncthing/lib/dialer"
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/upgrade"
"github.com/thejerf/suture"
)

// Current version number of the usage report, for acceptance purposes. If
// fields are added or changed this integer must be incremented so that users
// are prompted for acceptance of the new report.
const usageReportVersion = 2

type usageReportingManager struct {
cfg *config.Wrapper
model *model.Model
sup *suture.Supervisor
}

func newUsageReportingManager(cfg *config.Wrapper, m *model.Model) *usageReportingManager {
mgr := &usageReportingManager{
cfg: cfg,
model: m,
}

// Start UR if it's enabled.
mgr.CommitConfiguration(config.Configuration{}, cfg.RawCopy())

// Listen to future config changes so that we can start and stop as
// appropriate.
cfg.Subscribe(mgr)

return mgr
}

func (m *usageReportingManager) VerifyConfiguration(from, to config.Configuration) error {
return nil
}

func (m *usageReportingManager) CommitConfiguration(from, to config.Configuration) bool {
if to.Options.URAccepted >= usageReportVersion && m.sup == nil {
// Usage reporting was turned on; lets start it.
service := newUsageReportingService(m.cfg, m.model)
m.sup = suture.NewSimple("usageReporting")
m.sup.Add(service)
m.sup.ServeBackground()
} else if to.Options.URAccepted < usageReportVersion && m.sup != nil {
// Usage reporting was turned off
m.sup.Stop()
m.sup = nil
}

return true
}

func (m *usageReportingManager) String() string {
return fmt.Sprintf("usageReportingManager@%p", m)
}
const usageReportVersion = 3

// reportData returns the data to be sent in a usage report. It's used in
// various places, so not part of the usageReportingManager object.
-func reportData(cfg configIntf, m modelIntf) map[string]interface{} {
+func reportData(cfg configIntf, m modelIntf, connectionsService connectionsIntf, version int) map[string]interface{} {
opts := cfg.Options()
res := make(map[string]interface{})
-res["urVersion"] = usageReportVersion
+res["urVersion"] = version
res["uniqueID"] = opts.URUniqueID
res["version"] = Version
res["longVersion"] = LongVersion
@@ -227,25 +181,162 @@ func reportData(cfg configIntf, m modelIntf) map[string]interface{} {
res["upgradeAllowedAuto"] = !(upgrade.DisabledByCompilation || noUpgradeFromEnv) && opts.AutoUpgradeIntervalH > 0
res["upgradeAllowedPre"] = !(upgrade.DisabledByCompilation || noUpgradeFromEnv) && opts.AutoUpgradeIntervalH > 0 && opts.UpgradeToPreReleases

if version >= 3 {
res["uptime"] = int(time.Now().Sub(startTime).Seconds())
res["natType"] = connectionsService.NATType()
res["alwaysLocalNets"] = len(opts.AlwaysLocalNets) > 0
res["cacheIgnoredFiles"] = opts.CacheIgnoredFiles
res["overwriteRemoteDeviceNames"] = opts.OverwriteRemoteDevNames
res["progressEmitterEnabled"] = opts.ProgressUpdateIntervalS > -1
res["customDefaultFolderPath"] = opts.DefaultFolderPath != "~"
res["weakHashSelection"] = opts.WeakHashSelectionMethod.String()
res["customTrafficClass"] = opts.TrafficClass != 0
res["customTempIndexMinBlocks"] = opts.TempIndexMinBlocks != 10
res["temporariesDisabled"] = opts.KeepTemporariesH == 0
res["temporariesCustom"] = opts.KeepTemporariesH != 24
res["limitBandwidthInLan"] = opts.LimitBandwidthInLan
res["customReleaseURL"] = opts.ReleasesURL != "https://upgrades.syncthing.net/meta.json"
res["restartOnWakeup"] = opts.RestartOnWakeup
res["customStunServers"] = len(opts.StunServers) == 0 || opts.StunServers[0] != "default" || len(opts.StunServers) > 1

folderUsesV3 := map[string]int{
"scanProgressDisabled": 0,
"conflictsDisabled": 0,
"conflictsUnlimited": 0,
"conflictsOther": 0,
"disableSparseFiles": 0,
"disableTempIndexes": 0,
"alwaysWeakHash": 0,
"customWeakHashThreshold": 0,
"fsWatcherEnabled": 0,
}
pullOrder := make(map[string]int)
filesystemType := make(map[string]int)
var fsWatcherDelays []int
for _, cfg := range cfg.Folders() {
if cfg.ScanProgressIntervalS < 0 {
folderUsesV3["scanProgressDisabled"]++
}
if cfg.MaxConflicts == 0 {
folderUsesV3["conflictsDisabled"]++
} else if cfg.MaxConflicts < 0 {
folderUsesV3["conflictsUnlimited"]++
} else {
folderUsesV3["conflictsOther"]++
}
if cfg.DisableSparseFiles {
folderUsesV3["disableSparseFiles"]++
}
if cfg.DisableTempIndexes {
folderUsesV3["disableTempIndexes"]++
}
if cfg.WeakHashThresholdPct < 0 {
folderUsesV3["alwaysWeakHash"]++
} else if cfg.WeakHashThresholdPct != 25 {
folderUsesV3["customWeakHashThreshold"]++
}
if cfg.FSWatcherEnabled {
folderUsesV3["fsWatcherEnabled"]++
}
pullOrder[cfg.Order.String()]++
filesystemType[cfg.FilesystemType.String()]++
fsWatcherDelays = append(fsWatcherDelays, cfg.FSWatcherDelayS)
}
sort.Ints(fsWatcherDelays)
folderUsesV3Interface := map[string]interface{}{
"pullOrder": pullOrder,
"filesystemType": filesystemType,
"fsWatcherDelays": fsWatcherDelays,
}
for key, value := range folderUsesV3 {
folderUsesV3Interface[key] = value
}
res["folderUsesV3"] = folderUsesV3Interface

guiCfg := cfg.GUI()
// Anticipate multiple GUI configs in the future, hence store counts.
guiStats := map[string]int{
"enabled": 0,
"useTLS": 0,
"useAuth": 0,
"insecureAdminAccess": 0,
"debugging": 0,
"insecureSkipHostCheck": 0,
"insecureAllowFrameLoading": 0,
"listenLocal": 0,
"listenUnspecified": 0,
}
theme := make(map[string]int)
if guiCfg.Enabled {
guiStats["enabled"]++
if guiCfg.UseTLS() {
guiStats["useTLS"]++
}
if len(guiCfg.User) > 0 && len(guiCfg.Password) > 0 {
guiStats["useAuth"]++
}
if guiCfg.InsecureAdminAccess {
guiStats["insecureAdminAccess"]++
}
if guiCfg.Debugging {
guiStats["debugging"]++
}
if guiCfg.InsecureSkipHostCheck {
guiStats["insecureSkipHostCheck"]++
}
if guiCfg.InsecureAllowFrameLoading {
guiStats["insecureAllowFrameLoading"]++
}

addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address())
if err == nil {
if addr.IP.IsLoopback() {
guiStats["listenLocal"]++
} else if addr.IP.IsUnspecified() {
guiStats["listenUnspecified"]++
}
}

theme[guiCfg.Theme]++
}
guiStatsInterface := map[string]interface{}{
"theme": theme,
}
for key, value := range guiStats {
guiStatsInterface[key] = value
}
res["guiStats"] = guiStatsInterface
}

for key, value := range m.UsageReportingStats(version) {
res[key] = value
}

return res
}

type usageReportingService struct {
-cfg *config.Wrapper
-model *model.Model
-stop chan struct{}
+cfg *config.Wrapper
+model *model.Model
+connectionsService *connections.Service
+forceRun chan struct{}
+stop chan struct{}
}

-func newUsageReportingService(cfg *config.Wrapper, model *model.Model) *usageReportingService {
-return &usageReportingService{
-cfg: cfg,
-model: model,
-stop: make(chan struct{}),
+func newUsageReportingService(cfg *config.Wrapper, model *model.Model, connectionsService *connections.Service) *usageReportingService {
+svc := &usageReportingService{
+cfg: cfg,
+model: model,
+connectionsService: connectionsService,
+forceRun: make(chan struct{}),
+stop: make(chan struct{}),
}
cfg.Subscribe(svc)
return svc
}

func (s *usageReportingService) sendUsageReport() error {
-d := reportData(s.cfg, s.model)
+d := reportData(s.cfg, s.model, s.connectionsService, s.cfg.Options().URAccepted)
var b bytes.Buffer
json.NewEncoder(&b).Encode(d)

@@ -264,27 +355,45 @@ func (s *usageReportingService) sendUsageReport() error {

func (s *usageReportingService) Serve() {
s.stop = make(chan struct{})

l.Infoln("Starting usage reporting")
defer l.Infoln("Stopping usage reporting")

-t := time.NewTimer(time.Duration(s.cfg.Options().URInitialDelayS) * time.Second) // time to initial report at start
+t := time.NewTimer(time.Duration(s.cfg.Options().URInitialDelayS) * time.Second)
for {
select {
case <-s.stop:
return
case <-s.forceRun:
t.Reset(0)
case <-t.C:
-err := s.sendUsageReport()
-if err != nil {
-l.Infoln("Usage report:", err)
+if s.cfg.Options().URAccepted >= 2 {
+err := s.sendUsageReport()
+if err != nil {
+l.Infoln("Usage report:", err)
+} else {
+l.Infof("Sent usage report (version %d)", s.cfg.Options().URAccepted)
+}
}
t.Reset(24 * time.Hour) // next report tomorrow
}
}
}

func (s *usageReportingService) VerifyConfiguration(from, to config.Configuration) error {
return nil
}

func (s *usageReportingService) CommitConfiguration(from, to config.Configuration) bool {
if from.Options.URAccepted != to.Options.URAccepted || from.Options.URUniqueID != to.Options.URUniqueID || from.Options.URURL != to.Options.URURL {
s.forceRun <- struct{}{}
}
return true
}

func (s *usageReportingService) Stop() {
close(s.stop)
close(s.forceRun)
}

func (usageReportingService) String() string {
return "usageReportingService"
}

// cpuBench returns CPU performance as a measure of single threaded SHA-256 MiB/s
```
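The reworked Serve loop is a small, reusable pattern: an initial delay, a recurring timer, a forceRun channel so a configuration change can trigger an early run, and a stop channel. A minimal standalone sketch of that pattern (with shortened intervals and a stubbed send function, not Syncthing's real service) follows.

```go
// A sketch of the timer/forceRun/stop loop used by the usage reporting
// service above. Intervals and send() are stand-ins for illustration.
package main

import (
	"log"
	"time"
)

type reporter struct {
	forceRun chan struct{}
	stop     chan struct{}
}

func (r *reporter) send() error {
	log.Println("report sent")
	return nil
}

func (r *reporter) Serve() {
	t := time.NewTimer(2 * time.Second) // initial delay
	for {
		select {
		case <-r.stop:
			return
		case <-r.forceRun:
			t.Reset(0) // run almost immediately on the next tick
		case <-t.C:
			if err := r.send(); err != nil {
				log.Println("report:", err)
			}
			t.Reset(10 * time.Second) // next run later
		}
	}
}

func main() {
	r := &reporter{forceRun: make(chan struct{}), stop: make(chan struct{})}
	go r.Serve()
	r.forceRun <- struct{}{} // e.g. the configuration changed
	time.Sleep(5 * time.Second)
	close(r.stop)
}
```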
etc/freebsd-rc/README.md (new file, 16 lines)

````markdown
This directory contains an example for running Syncthing with an `rc.d` script on FreeBSD.

* Install `syncthing` as `/usr/local/bin/syncthing`.
* Copy the `syncthing` rc.d script to `/usr/local/etc/rc.d/syncthing`.
* To automatically start `syncthing` at boot time, add the following line to `/etc/rc.conf`:
  ```
  syncthing_enable=YES
  ```
* Optional configuration options are:
  ```
  syncthing_home=</path/to/syncthing/config/dir>
  syncthing_log_file=</path/to/syncthing/log/file>
  syncthing_user=<syncthing_user>
  syncthing_group=<syncthing_group>
  ```

See the rc.d script for more information.
````
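For example, a filled-in `/etc/rc.conf` block using the optional variables could look like the following; the values shown simply restate the script's defaults, so adjust the paths, user and group to taste.

```
syncthing_enable=YES
syncthing_home=/usr/local/etc/syncthing
syncthing_log_file=/var/log/syncthing.log
syncthing_user=syncthing
syncthing_group=syncthing
```

With that in place, the service can be started with `service syncthing start`.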
etc/freebsd-rc/syncthing (new file, 54 lines)

```sh
#!/bin/sh
#
#
# PROVIDE: syncthing
# REQUIRE: DAEMON
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf to enable this service:
#
# syncthing_enable:    Set to NO by default. Set it to YES to enable it.
# syncthing_home:      Directory where syncthing configuration
#                      data is stored.
#                      Default: /usr/local/etc/syncthing
# syncthing_log_file:  Syncthing log file
#                      Default: /var/log/syncthing.log
# syncthing_user:      The user account the syncthing daemon runs as.
#                      Default: syncthing
# syncthing_group:     The group account the syncthing daemon runs as.
#                      Default: syncthing

. /etc/rc.subr

name=syncthing
rcvar=syncthing_enable

start_cmd="${name}_start"

load_rc_config $name

: ${syncthing_enable:=NO}
: ${syncthing_home=/usr/local/etc/syncthing}
: ${syncthing_log_file=/var/log/syncthing.log}
: ${syncthing_user:=syncthing}
syncthing_group=${syncthing_group:-$syncthing_user}


command=/usr/local/bin/syncthing
pidfile=/var/run/syncthing.pid
syncthing_flags="${syncthing_home:+-home=${syncthing_home}} ${syncthing_log_file:+-logfile=${syncthing_log_file}}"

syncthing_start() {
	echo "Starting syncthing"
	touch ${pidfile} && chown ${syncthing_user} ${pidfile}
	touch ${syncthing_log_file} && chown ${syncthing_user} ${syncthing_log_file}
	/usr/sbin/daemon -cf -p ${pidfile} -u ${syncthing_user} ${command} ${syncthing_flags}
}

syncthing_cleanup() {
	[ -f ${pidfile} ] && rm ${pidfile}
}

run_rc_command $1
```
```diff
@@ -183,7 +183,7 @@
"Save": "Speichern",
"Scan Time Remaining": "Zeit für Scan verbleibend",
"Scanning": "Scannen",
-"See external versioner help for supported templated command line parameters.": "See external versioner help for supported templated command line parameters.",
+"See external versioner help for supported templated command line parameters.": "Siehe externe Versionshilfe für unterstützte Befehlszeilenparameter.",
"Select the devices to share this folder with.": "Wähle die Geräte aus, mit denen Du diesen Ordner teilen willst.",
"Select the folders to share with this device.": "Wähle die Ordner aus, die Du mit diesem Gerät teilen möchtest",
"Send & Receive": "Senden & empfangen",
```

```diff
@@ -24,6 +24,7 @@
"An external command handles the versioning. It has to remove the file from the shared folder.": "An external command handles the versioning. It has to remove the file from the shared folder.",
"An external command handles the versioning. It has to remove the file from the synced folder.": "An external command handles the versioning. It has to remove the file from the synced folder.",
"Anonymous Usage Reporting": "Anonymous Usage Reporting",
+"Anonymous usage report format has changed. Would you like to move to the new format?": "Anonymous usage report format has changed. Would you like to move to the new format?",
"Any devices configured on an introducer device will be added to this device as well.": "Any devices configured on an introducer device will be added to this device as well.",
"Automatic upgrade now offers the choice between stable releases and release candidates.": "Automatic upgrade now offers the choice between stable releases and release candidates.",
"Automatic upgrades": "Automatic upgrades",
@@ -53,6 +54,7 @@
"Device Identification": "Device Identification",
"Device Name": "Device Name",
"Devices": "Devices",
+"Disabled": "Disabled",
"Disconnected": "Disconnected",
"Discovered": "Discovered",
"Discovery": "Discovery",
@@ -184,6 +186,8 @@
"Scan Time Remaining": "Scan Time Remaining",
"Scanning": "Scanning",
"See external versioner help for supported templated command line parameters.": "See external versioner help for supported templated command line parameters.",
+"See external versioning help for supported templated command line parameters.": "See external versioning help for supported templated command line parameters.",
+"Select a version": "Select a version",
"Select the devices to share this folder with.": "Select the devices to share this folder with.",
"Select the folders to share with this device.": "Select the folders to share with this device.",
"Send \u0026 Receive": "Send \u0026 Receive",
@@ -197,6 +201,7 @@
"Shared With": "Shared With",
"Show ID": "Show ID",
"Show QR": "Show QR",
+"Show diff with previous version": "Show diff with previous version",
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.",
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.",
"Shutdown": "Shutdown",
@@ -252,6 +257,7 @@
"Time": "Time",
"Trash Can File Versioning": "Trash Can File Versioning",
"Type": "Type",
+"Undecided (will prompt)": "Undecided (will prompt)",
"Unknown": "Unknown",
"Unshared": "Unshared",
"Unused": "Unused",
```

```diff
@@ -51,7 +51,7 @@
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "\"{{name}}\" ({{device}}), appareil actuellement à {{address}}, demande à se connecter.\nAcceptez-vous de l'ajouter à votre liste d'appareils connus ?",
"Device ID": "ID de l'appareil",
"Device Identification": "Identifiant de l'appareil",
-"Device Name": "Nom de l'appareil",
+"Device Name": "Nom convivial local de l'appareil",
"Devices": "Appareils",
"Disconnected": "Déconnecté",
"Discovered": "Découvert",
@@ -142,7 +142,7 @@
"OK": "OK",
"Off": "Désactivé(e)",
"Oldest First": "Les plus anciens en premier",
-"Optional descriptive label for the folder. Can be different on each device.": "Nom convivial et optionnel du partage, à votre guise. il peut être différent sur chaque appareil.",
+"Optional descriptive label for the folder. Can be different on each device.": "Nom local, convivial et optionnel du partage, à votre guise. il peut être différent sur chaque appareil. Par notification initiale, il sera proposé tel quel aux nouveaux participants.\nAstuce : comme il est modifiable ultérieurement, pensez à indiquer un nom parlant pour les invités, puis renommez-le quand ils l'auront accepté (exemple d'un partage à deux membres où l'initiateur commence par donner son propre nom au partage, puis le renomme plus tard au nom du partenaire quand celui-ci l'a enregistré). Évitez les erreurs d'orthographe car ce nom servira aussi de base au chemin proposé en création (local et distant) et ce chemin est difficilement modifiable.",
"Options": "Options",
"Out of Sync": "Désynchronisé",
"Out of Sync Items": "Éléments non synchronisés",
@@ -170,7 +170,7 @@
"Release candidates contain the latest features and fixes. They are similar to the traditional bi-weekly Syncthing releases.": "Les versions préliminaires contiennent les dernières fonctionnalités et derniers correctifs. Elles sont identiques aux traditionnelles mises à jour bimensuelles.",
"Remote Devices": "Autres appareils",
"Remove": "Supprimer",
-"Required identifier for the folder. Must be the same on all cluster devices.": "Identifiant du partage. Doit être le même sur tous les appareils concernés.",
+"Required identifier for the folder. Must be the same on all cluster devices.": "Identifiant du partage. Doit être le même sur tous les appareils concernés (généré aléatoirement, mais modifiable à la création).",
"Rescan": "Réanalyser",
"Rescan All": "Tout réanalyser",
"Rescan Interval": "Intervalle d'analyse",
@@ -198,7 +198,7 @@
"Show ID": "Afficher mon ID",
"Show QR": "Afficher le QR",
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Affiché à la place de l'ID de l'appareil dans l'état du groupe. Sera diffusé aux autres appareils comme nom convivial optionnel par défaut.",
-"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Affiché à la place de l'ID de l'appareil dans l'état du groupe. Si laissé vide, il sera renseigné par le nom convivial proposé par l'appareil distant.",
+"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Nom convivial local affiché à la place de l'ID de l'appareil dans la plupart des écrans. Si laissé vide, c'est le nom convivial local de l'appareil distant qui sera utilisé. (Modifiable ultérieurement).",
"Shutdown": "Arrêter",
"Shutdown Complete": "Arrêté !",
"Simple File Versioning": "Suivi simplifié des versions",
```

```diff
@@ -183,7 +183,7 @@
"Save": "저장",
"Scan Time Remaining": "탐색 남은 시간",
"Scanning": "탐색중",
-"See external versioner help for supported templated command line parameters.": "See external versioner help for supported templated command line parameters.",
+"See external versioner help for supported templated command line parameters.": "지원되는 템플릿 명령 행 매개 변수에 대해서는 외부 버전 도움말을 참조하십시오.",
"Select the devices to share this folder with.": "이 폴더를 공유할 장치를 선택합니다.",
"Select the folders to share with this device.": "이 장치와 공유할 폴더를 선택합니다.",
"Send & Receive": "송신 & 수신",
```

```diff
@@ -43,7 +43,7 @@
"Copied from elsewhere": "Gekopieerd vanaf elders",
"Copied from original": "Gekopieerd van het origineel",
"Copyright © 2014-2016 the following Contributors:": "Copyright © 2014-2016 voor de volgende bijdragers:",
-"Copyright © 2014-2017 the following Contributors:": "Copyright © 2014-2017 de volgende bijdragers:",
+"Copyright © 2014-2017 the following Contributors:": "Copyright © 2014-2017 voor de volgende bijdragers:",
"Creating ignore patterns, overwriting an existing file at {%path%}.": "Negeerpatronen worden aangemaakt, bestaand bestand wordt overschreven op {{path}}.",
"Danger!": "Let op!",
"Deleted": "Verwijderd",
@@ -183,7 +183,7 @@
"Save": "Bewaar",
"Scan Time Remaining": "Resterende scantijd",
"Scanning": "Aan het zoeken",
-"See external versioner help for supported templated command line parameters.": "See external versioner help for supported templated command line parameters.",
+"See external versioner help for supported templated command line parameters.": "Zie de documentatie van de externe versie voor ondersteunde command line parameters.",
"Select the devices to share this folder with.": "Selecteer de apparaten om deze map mee te delen.",
"Select the folders to share with this device.": "Selecteer de mappen om met dit apparaat te delen.",
"Send & Receive": "Verzenden & Ontvangen",
```

```diff
@@ -158,8 +158,8 @@
"Please consult the release notes before performing a major upgrade.": "Por favor, consulte as notas de lançamento antes de atualizar para uma versão \"major\".",
"Please set a GUI Authentication User and Password in the Settings dialog.": "Por favor, defina um nome de usuário e senha para acesso à interface web, nas configurações.",
"Please wait": "Aguarde",
-"Prefix indicating that the file can be deleted if preventing directory removal": "Prefix indicating that the file can be deleted if preventing directory removal",
-"Prefix indicating that the pattern should be matched without case sensitivity": "Prefix indicating that the pattern should be matched without case sensitivity",
+"Prefix indicating that the file can be deleted if preventing directory removal": "Prefixo indicando que o arquivo pode ser removido caso esteja impedindo a remoção do seu diretório",
+"Prefix indicating that the pattern should be matched without case sensitivity": "Prefixo indicando que o filtro deve ser igualado sem distinção entre maiúsculas e minúsculas",
"Preview": "Visualizar",
"Preview Usage Report": "Visualizar relatório de uso",
"Quick guide to supported patterns": "Guia rápido dos padrões suportados",
@@ -248,7 +248,7 @@
"This Device": "Este dispositivo",
"This can easily give hackers access to read and change any files on your computer.": "Isto pode dar a hackers poder de leitura e escrita de qualquer arquivo em seu dispositivo.",
"This is a major version upgrade.": "Esta é uma atualização para uma versão \"major\".",
-"This setting controls the free space required on the home (i.e., index database) disk.": "This setting controls the free space required on the home (i.e., index database) disk.",
+"This setting controls the free space required on the home (i.e., index database) disk.": "Este ajuste controla o espaço livre necessário no disco que contém o banco de dados do Syncthing.",
"Time": "Hora",
"Trash Can File Versioning": "Lixeira",
"Type": "Tipo",
```

```diff
@@ -5,7 +5,7 @@
"API Key": "API-nyckel",
"About": "Om",
"Action": "Åtgärd",
-"Actions": "Funktioner",
+"Actions": "Åtgärder",
"Add": "Lägg till",
"Add Device": "Lägg till enhet",
"Add Folder": "Lägg till mapp",
```

```diff
@@ -183,7 +183,7 @@
"Save": "保存",
"Scan Time Remaining": "扫描剩余时间",
"Scanning": "扫描中",
-"See external versioner help for supported templated command line parameters.": "See external versioner help for supported templated command line parameters.",
+"See external versioner help for supported templated command line parameters.": "有关支持的命令行参数模板,请参阅外部的版本控制器帮助。",
"Select the devices to share this folder with.": "选择将本文件夹共享给哪些设备。",
"Select the folders to share with this device.": "选择与该设备共享的文件夹。",
"Send & Receive": "发送与接收",
```
gui/default/assets/lang/lang-zh-TW.json (new file, 288 lines)

@@ -0,0 +1,288 @@
{
|
||||
"A device with that ID is already added.": "該裝置識別碼已被新增。",
|
||||
"A negative number of days doesn't make sense.": "一個負的天數並不合理。",
|
||||
"A new major version may not be compatible with previous versions.": "新的主要版本可能與以前的版本不相容。",
|
||||
"API Key": "API 金鑰",
|
||||
"About": "關於",
|
||||
"Action": "動作",
|
||||
"Actions": "操作",
|
||||
"Add": "增加",
|
||||
"Add Device": "增加裝置",
|
||||
"Add Folder": "增加資料夾",
|
||||
"Add Remote Device": "新增遠端裝置",
|
||||
"Add devices from the introducer to our device list, for mutually shared folders.": "對於共用的資料夾,匯入引入者的裝置清單。",
|
||||
"Add new folder?": "新增資料夾?",
|
||||
"Address": "位址",
|
||||
"Addresses": "位址",
|
||||
"Advanced": "進階",
|
||||
"Advanced Configuration": "進階配置",
|
||||
"Advanced settings": "進階設定",
|
||||
"All Data": "全部資料",
|
||||
"Allow Anonymous Usage Reporting?": "允許匿名的使用資訊回報?",
|
||||
"Allowed Networks": "允許的網路",
|
||||
"Alphabetic": "字母順序",
|
||||
"An external command handles the versioning. It has to remove the file from the shared folder.": "處理版本的外部指令。其必須從資料夾中刪除檔案。",
|
||||
"An external command handles the versioning. It has to remove the file from the synced folder.": "處理版本的外部指令。其必須從資料夾中刪除檔案。",
|
||||
"Anonymous Usage Reporting": "匿名的使用資訊回報",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "任何在引入者裝置所設置的裝置將會一併新增至此裝置",
|
||||
"Automatic upgrade now offers the choice between stable releases and release candidates.": "自動更新目前有穩定發行版及發行候選版可供選擇。",
|
||||
"Automatic upgrades": "自動升級",
|
||||
"Be careful!": "請小心!",
|
||||
"Bugs": "程式錯誤",
|
||||
"CPU Utilization": "CPU 使用",
|
||||
"Changelog": "更新日誌",
|
||||
"Clean out after": "於之後清空",
|
||||
"Click to see discovery failures": "點擊以查閱失敗的探索",
|
||||
"Close": "關閉",
|
||||
"Command": "指令",
|
||||
"Comment, when used at the start of a line": "註解,當輸入在一行的開頭時",
|
||||
"Compression": "壓縮",
|
||||
"Configured": "已設定",
|
||||
"Connection Error": "連線錯誤",
|
||||
"Connection Type": "連接類型",
|
||||
"Copied from elsewhere": "從別處複製",
|
||||
"Copied from original": "從原處複製",
|
||||
"Copyright © 2014-2016 the following Contributors:": "Copyright © 2014-2016 下列貢獻者:",
|
||||
"Copyright © 2014-2017 the following Contributors:": "Copyright © 2014-2017 下列貢獻者:",
|
||||
"Creating ignore patterns, overwriting an existing file at {%path%}.": "建立忽略樣式,覆蓋已存在的 {{path}}。",
|
||||
"Danger!": "危險!",
|
||||
"Deleted": "已刪除",
|
||||
"Device": "裝置",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "裝置 \"{{name}}\" ({{device}} 位於 {{address}}) 想要連線。 要增加新裝置嗎?",
|
||||
"Device ID": "裝置識別碼",
|
||||
"Device Identification": "裝置識別",
|
||||
"Device Name": "裝置名稱",
|
||||
"Devices": "裝置",
|
||||
"Disconnected": "斷線",
|
||||
"Discovered": "已發現",
|
||||
"Discovery": "探索",
|
||||
"Discovery Failures": "探索失敗",
|
||||
"Documentation": "說明文件",
|
||||
"Download Rate": "下載速率",
|
||||
"Downloaded": "已下載",
|
||||
"Downloading": "正在下載",
|
||||
"Edit": "編輯",
|
||||
"Edit Device": "編輯裝置",
|
||||
"Edit Folder": "編輯資料夾",
|
||||
"Editing": "正在編輯",
|
||||
"Editing {%path%}.": "正在編輯 {{path}} 。",
|
||||
"Enable NAT traversal": "啟用 NAT 穿透",
|
||||
"Enable Relaying": "啟用中繼",
|
||||
"Enter a non-negative number (e.g., \"2.35\") and select a unit. Percentages are as part of the total disk size.": "請輸入一非負數(如:\"2.35\")並選擇一個單位。百分比表示佔用磁碟容量的大小。",
|
||||
"Enter a non-privileged port number (1024 - 65535).": "輸入一個非特權通訊埠號 (1024 - 65535)。",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "輸入以半形逗號區隔的位址 (\"tcp://ip:port\", \"tcp://host:port\"),或輸入 \"dynamic\" 以進行位址的自動探索",
|
||||
"Enter ignore patterns, one per line.": "輸入忽略樣式,每行一種。",
|
||||
"Error": "錯誤",
|
||||
"External File Versioning": "外部檔案版本控制",
|
||||
"Failed Items": "失敗的項目",
|
||||
"Failure to connect to IPv6 servers is expected if there is no IPv6 connectivity.": "若未配置 IPv6,則無法連接 IPv6 伺服器係屬正常。",
|
||||
"File Pull Order": "提取檔案的順序",
|
||||
"File Versioning": "檔案版本控制",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "當改變時,檔案權限位元 File permission bits 會被忽略。用於 FAT 檔案系統上。",
|
||||
"Files are moved to .stversions directory when replaced or deleted by Syncthing.": "當檔案被 Syncthing 取代或刪除時,它們將被移至 .stversions 資料夾。",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "當檔案被 Syncthing 取代或刪除時,它們將被移至 .stversions 資料夾。",
|
||||
"Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "當檔案被 Syncthing 取代或刪除時,它們將被移至 .stversions 資料夾並添加日期戳記。",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "當檔案被 Syncthing 取代或刪除時,它們將被移至 .stversions 資料夾並添加日期戳記。",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "其他裝置做的改變不會影響到此裝置的檔案,但在此裝置上的變化將被發送到叢集中的其他部分。",
|
||||
"Folder": "資料夾",
|
||||
"Folder ID": "資料夾識別碼",
|
||||
"Folder Label": "資料夾標籤",
|
||||
"Folder Path": "資料夾路徑",
|
||||
"Folder Type": "資料夾類型",
|
||||
"Folders": "資料夾",
|
||||
"GUI": "GUI",
|
||||
"GUI Authentication Password": "GUI 認證密碼",
|
||||
"GUI Authentication User": "GUI 使用者認證名稱",
|
||||
"GUI Listen Address": "GUI 監聽位址",
|
||||
"GUI Listen Addresses": "GUI 監聽位址",
|
||||
"GUI Theme": "主題",
|
||||
"Generate": "產生",
|
||||
"Global Changes": "全域變動",
|
||||
"Global Discovery": "全域探索",
|
||||
"Global Discovery Servers": "全域探索伺服器",
|
||||
"Global State": "全域狀態",
|
||||
"Help": "說明",
|
||||
"Home page": "首頁",
|
||||
"Ignore": "忽略",
|
||||
"Ignore Patterns": "忽略樣式",
|
||||
"Ignore Permissions": "忽略權限",
|
||||
"Incoming Rate Limit (KiB/s)": "傳入速率限制 (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "不正確的設定可能會損壞您的資料夾內容,並導致 Syncthing 不正常運作。",
|
||||
"Introduced By": "引入自",
|
||||
"Introducer": "引入者",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "反轉給定條件 (即:不要排除)",
|
||||
"Keep Versions": "保留歷史版本數",
|
||||
"Largest First": "最大的優先",
|
||||
"Last File Received": "最後接收的檔案",
|
||||
"Last Scan": "最後掃描",
|
||||
"Last seen": "最後發現時間",
|
||||
"Later": "稍後",
|
||||
"Latest Change": "最近變動",
|
||||
"Learn more": "瞭解更多",
|
||||
"Listeners": "中繼",
|
||||
"Local Discovery": "本機探索",
|
||||
"Local State": "本機狀態",
|
||||
"Local State (Total)": "本機狀態 (總結)",
|
||||
"Major Upgrade": "重大更新",
|
||||
"Master": "Master",
|
||||
"Maximum Age": "最長保留時間",
|
||||
"Metadata Only": "僅中繼資料",
|
||||
"Minimum Free Disk Space": "最少閒置磁碟空間",
|
||||
"Move to top of queue": "移到隊列頂端",
|
||||
"Multi level wildcard (matches multiple directory levels)": "多階層萬用字元 (可比對多層資料夾)",
|
||||
"Never": "從未",
|
||||
"New Device": "新裝置",
|
||||
"New Folder": "新資料夾",
|
||||
"Newest First": "最新的優先",
|
||||
"No": "否",
|
||||
"No File Versioning": "無檔案版本控制",
|
||||
"No upgrades": "不更新",
|
||||
"Normal": "Normal",
|
||||
"Notice": "注意",
|
||||
"OK": "確定",
|
||||
"Off": "關閉",
|
||||
"Oldest First": "最舊的優先",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "資料夾的說明標籤(選擇性)。在不同裝置上可不一致。",
|
||||
"Options": "選項",
|
||||
"Out of Sync": "不同步",
|
||||
"Out of Sync Items": "不同步物件",
|
||||
"Outgoing Rate Limit (KiB/s)": "連出速率限制 (KiB/s)",
|
||||
"Override Changes": "置換改變",
|
||||
"Path": "路徑",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "資料夾在本機的路徑。若資料夾不存在則會建立。波浪符號 (~) 可用作下列資料夾的捷徑:",
|
||||
"Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "儲存歷史版本的路徑(若為空,則預設使用資料夾中的 .stversions 資料夾)。",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "儲存歷史版本的路徑 (若為空,則預設使用資料夾中的 .stversions 資料夾)。",
|
||||
"Pause": "暫停",
|
||||
"Pause All": "全部暫停",
|
||||
"Paused": "暫停",
|
||||
"Please consult the release notes before performing a major upgrade.": "執行重大升級前請先參閱版本資訊。",
|
||||
"Please set a GUI Authentication User and Password in the Settings dialog.": "請在設定對話方塊內設置 GUI 使用者認證名稱及密碼。",
|
||||
"Please wait": "請稍後",
|
||||
"Prefix indicating that the file can be deleted if preventing directory removal": "前綴表示當此檔案阻礙了資料夾刪除時,可一併刪除此檔",
|
||||
"Prefix indicating that the pattern should be matched without case sensitivity": "前綴表示此樣式不區分大小寫",
|
||||
"Preview": "預覽",
|
||||
"Preview Usage Report": "預覽使用資訊報告",
|
||||
"Quick guide to supported patterns": "可支援樣式的快速指南",
|
||||
"RAM Utilization": "記憶體使用",
|
||||
"Random": "隨機",
|
||||
"Reduced by ignore patterns": "已由忽略樣式縮減",
|
||||
"Release Notes": "版本資訊",
|
||||
"Release candidates contain the latest features and fixes. They are similar to the traditional bi-weekly Syncthing releases.": "發行候選版包含最新的功能及修補。與傳統 Syncthing 雙週發行版相似。",
|
||||
"Remote Devices": "遠端裝置",
|
||||
"Remove": "移除",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "資料夾的識別字。必須在叢集內所有的裝置上皆相同。",
|
||||
"Rescan": "重新掃描",
|
||||
"Rescan All": "全部重新掃描",
|
||||
"Rescan Interval": "重新掃描間隔",
|
||||
"Restart": "重新啟動",
|
||||
"Restart Needed": "需要重新啟動",
|
||||
"Restarting": "正在重新啟動",
|
||||
"Resume": "繼續",
|
||||
"Resume All": "全部繼續",
|
||||
"Reused": "重用",
|
||||
"Save": "儲存",
|
||||
"Scan Time Remaining": "剩餘掃描時間",
|
||||
"Scanning": "正在掃描",
|
||||
"See external versioner help for supported templated command line parameters.": "關於命令列模板參數請參閱外部版本管理說明。",
|
||||
"Select the devices to share this folder with.": "選擇要共享這個資料夾的裝置。",
|
||||
"Select the folders to share with this device.": "選擇要共享這個資料夾的裝置。",
|
||||
"Send & Receive": "傳送及接收",
|
||||
"Send Only": "僅傳送",
|
||||
"Settings": "設定",
|
||||
"Share": "分享",
|
||||
"Share Folder": "分享資料夾",
|
||||
"Share Folders With Device": "與裝置共享資料夾",
|
||||
"Share With Devices": "與這些裝置共享",
|
||||
"Share this folder?": "分享此資料夾?",
|
||||
"Shared With": "與誰共享",
|
||||
"Show ID": "顯示識別碼",
|
||||
"Show QR": "顯示 QR 碼",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "代替裝置識別碼顯示在叢集狀態中。這段文字將會廣播到其他的裝置作為一個可選的預設名稱。",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "代替裝置識別碼顯示在叢集狀態中。本欄若未填寫則將被更新為此裝置所廣播的名稱。",
|
||||
"Shutdown": "關閉",
|
||||
"Shutdown Complete": "關閉完成",
|
||||
"Simple File Versioning": "簡單檔案版本控制",
|
||||
"Single level wildcard (matches within a directory only)": "單階層萬用字元 (只在單個資料夾階層內比對)",
|
||||
"Smallest First": "最小的優先",
|
||||
"Source Code": "原始碼",
|
||||
"Stable releases and release candidates": "穩定發行版及發行候選版",
|
||||
"Stable releases are delayed by about two weeks. During this time they go through testing as release candidates.": "穩定發行版大約延遲兩週發佈。這段期間將作為發行候選版來測試。",
|
||||
"Stable releases only": "僅穩定發行版",
|
||||
"Staggered File Versioning": "變動式檔案版本控制",
|
||||
"Start Browser": "啟動瀏覽器",
|
||||
"Statistics": "統計",
|
||||
"Stopped": "已停止",
|
||||
"Support": "支援",
|
||||
"Sync Protocol Listen Addresses": "同步通訊協定監聽位址",
|
||||
"Syncing": "正在同步",
|
||||
"Syncthing has been shut down.": "Syncthing 已經關閉。",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing 包括以下軟體或其中的一部分:",
|
||||
"Syncthing is restarting.": "Syncthing 正在重新啟動。",
|
||||
"Syncthing is upgrading.": "Syncthing 正在進行升級。",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing 似乎離線了,或者您的網際網路連線出現問題。正在重試...",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing 在處理您的請求時似乎遇到了問題。請重新整理本頁面,若問題持續發生,請重新啟動 Syncthing。",
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "The Syncthing admin interface is configured to allow remote access without a password.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "匯總統計資訊可於下方網址取得。",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "組態已經儲存但尚未啟用。Syncthing 必須重新啟動以便啟用新的組態。",
|
||||
"The device ID cannot be blank.": "裝置識別碼不能為空白。",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "輸入裝置識別碼,可在其它裝置的 \"動作 > 顯示識別碼\" 對話框找到。空白及連接符號可省略。",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "經過加密的使用資訊報告會每天傳送。報告是用來追蹤常用的平台、資料夾的大小以及應用程式的版本。若傳送的資料集有異動,您會再次看到這個對話框。",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "輸入的裝置識別碼似乎無效。它應該為一串包含半形英文字母及數字,並可能會含有空白或連接符號的字串,且長度為 52 或 56 個字元。",
|
||||
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "The first command line parameter is the folder path and the second parameter is the relative path in the folder.",
|
||||
"The folder ID cannot be blank.": "資料夾識別碼不能為空白。",
|
||||
"The folder ID must be unique.": "資料夾識別碼必須為獨一無二的。",
|
||||
"The folder path cannot be blank.": "資料夾路徑不能空白。",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "使用下列的間隔:在第一個小時內每 30 秒保留一個版本,在第一天內每小時保留一個版本,在第 30 天內每一天保留一個版本,在達到最長保留時間前每一星期保留一個版本。",
|
||||
"The following items could not be synchronized.": "以下項目不能被同步。",
|
||||
"The maximum age must be a number and cannot be blank.": "最長保留時間必須為一個數字且不得為空。",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "一個版本被保留的最長時間 (單位為天,若設定為 0 則表示永遠保留)。",
|
||||
"The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).": "The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).",
|
||||
"The number of days must be a number and cannot be blank.": "天數必須必須為一個數字且不得為空。",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "檔案在 trash can 中保留的日子。零表示永遠地保留。",
|
||||
"The number of old versions to keep, per file.": "每個檔案要保留的舊版本數量。",
|
||||
"The number of versions must be a number and cannot be blank.": "每個檔案要保留的舊版本數量必須是數字且不能為空白。",
|
||||
"The path cannot be blank.": "路徑不能空白。",
|
||||
"The rate limit must be a non-negative number (0: no limit)": "限制速率必須為非負的數字 (0: 不設限制)",
|
||||
"The rescan interval must be a non-negative number of seconds.": "重新掃描間隔必須為一個非負數的秒數。",
|
||||
"They are retried automatically and will be synced when the error is resolved.": "解決間題後,將會自動重試和同步。",
|
||||
"This Device": "本機",
|
||||
"This can easily give hackers access to read and change any files on your computer.": "This can easily give hackers access to read and change any files on your computer.",
|
||||
"This is a major version upgrade.": "這是一個主要版本更新。",
|
||||
"This setting controls the free space required on the home (i.e., index database) disk.": "此設定控制家目錄(即:索引資料庫)的必須可用空間。",
|
||||
"Time": "時間",
|
||||
"Trash Can File Versioning": "垃圾筒式檔案版本控制",
|
||||
"Type": "類型",
|
||||
"Unknown": "未知",
|
||||
"Unshared": "未共享",
|
||||
"Unused": "未使用",
|
||||
"Up to Date": "最新",
|
||||
"Updated": "已更新",
|
||||
"Upgrade": "升級",
|
||||
"Upgrade To {%version%}": "升級至 {{version}}",
|
||||
"Upgrading": "正在升級",
|
||||
"Upload Rate": "上載速率",
|
||||
"Uptime": "上線時間",
|
||||
"Usage reporting is always enabled for candidate releases.": "發行候選版永遠回報使用數據",
|
||||
"Use HTTPS for GUI": "為 GUI 使用 HTTPS",
|
||||
"Version": "版本",
|
||||
"Versions Path": "歷史版本路徑",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "當檔案歷史版本的存留時間大於設定的最大值,或是其數量在一段時間內超出允許值時,則會被刪除。",
|
||||
"Warning, this path is a parent directory of an existing folder \"{%otherFolder%}\".": "警告,此路徑是現存資料夾 \"{{otherFolder}}\" 的上級目錄。",
|
||||
"Warning, this path is a parent directory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "警告,此路徑是現存資料夾 \"{{otherFolderLabel}}\" ({{otherFolder}}) 的上級目錄。",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "警告,此路徑是現存資料夾 \"{{otherFolder}}\" 的下級目錄。",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "警告,此路徑是現存資料夾 \"{{otherFolderLabel}}\" ({{otherFolder}}) 的下級目錄。",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "當新增一個裝置時,務必記住,當前的這個裝置也同樣必須被添加至另一邊。",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "當新增一個資料夾時,請記住,資料夾識別碼是用來將裝置之間的資料夾綁定在一起的。它們有區分大小寫,且必須在所有裝置之間完全相同。",
|
||||
"Yes": "是",
|
||||
"You can also select one of these nearby devices:": "您亦可從這些附近裝置中擇一:",
|
||||
"You can change your choice at any time in the Settings dialog.": "您可以在設定對話框中隨時更改您的選擇。",
|
||||
"You can read more about the two release channels at the link below.": "您可於下方連結閱讀更多關於發行頻道的說明。",
|
||||
"You must keep at least one version.": "您必須保留至少一個版本。",
|
||||
"days": "日",
|
||||
"directories": "個目錄",
|
||||
"files": "個檔案",
|
||||
"full documentation": "完整說明文件",
|
||||
"items": "個項目",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} 想要分享資料夾 \"{{folder}}\"。",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} 想要分享資料夾 \"{{folderlabel}}\" ({{folder}})。"
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
var langPrettyprint = {"bg":"Bulgarian","ca@valencia":"Catalan (Valencian)","cs":"Czech","da":"Danish","de":"German","el":"Greek","en":"English","en-GB":"English (United Kingdom)","eo":"Esperanto","es":"Spanish","es-ES":"Spanish (Spain)","eu":"Basque","fi":"Finnish","fr":"French","fr-CA":"French (Canada)","fy":"Western Frisian","hu":"Hungarian","it":"Italian","ja":"Japanese","ko-KR":"Korean (Korea)","lt":"Lithuanian","nb":"Norwegian Bokmål","nl":"Dutch","nn":"Norwegian Nynorsk","pl":"Polish","pt-BR":"Portuguese (Brazil)","pt-PT":"Portuguese (Portugal)","ru":"Russian","sk":"Slovak","sv":"Swedish","tr":"Turkish","uk":"Ukrainian","vi":"Vietnamese","zh-CN":"Chinese (China)"}
|
||||
var langPrettyprint = {"bg":"Bulgarian","ca@valencia":"Catalan (Valencian)","cs":"Czech","da":"Danish","de":"German","el":"Greek","en":"English","en-GB":"English (United Kingdom)","eo":"Esperanto","es":"Spanish","es-ES":"Spanish (Spain)","eu":"Basque","fi":"Finnish","fr":"French","fr-CA":"French (Canada)","fy":"Western Frisian","hu":"Hungarian","it":"Italian","ja":"Japanese","ko-KR":"Korean (Korea)","lt":"Lithuanian","nb":"Norwegian Bokmål","nl":"Dutch","nn":"Norwegian Nynorsk","pl":"Polish","pt-BR":"Portuguese (Brazil)","pt-PT":"Portuguese (Portugal)","ru":"Russian","sk":"Slovak","sv":"Swedish","tr":"Turkish","uk":"Ukrainian","vi":"Vietnamese","zh-CN":"Chinese (China)","zh-TW":"Chinese (Taiwan)"}
|
||||
|
||||
@@ -1 +1 @@
|
||||
var validLangs = ["bg","ca@valencia","cs","da","de","el","en","en-GB","eo","es","es-ES","eu","fi","fr","fr-CA","fy","hu","it","ja","ko-KR","lt","nb","nl","nn","pl","pt-BR","pt-PT","ru","sk","sv","tr","uk","vi","zh-CN"]
|
||||
var validLangs = ["bg","ca@valencia","cs","da","de","el","en","en-GB","eo","es","es-ES","eu","fi","fr","fr-CA","fy","hu","it","ja","ko-KR","lt","nb","nl","nn","pl","pt-BR","pt-PT","ru","sk","sv","tr","uk","vi","zh-CN","zh-TW"]
|
||||
|
||||
@@ -368,7 +368,13 @@
|
||||
<span translate>Yes</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr ng-if="folder.rescanIntervalS != 60">
|
||||
<tr ng-if="folder.fsNotifications">
|
||||
<th><span class="fa fa-fw fa-bolt"></span> <span translate>Filesystem Notifications</span></th>
|
||||
<td class="text-right">
|
||||
<span translate>Yes</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr ng-if="(folder.rescanIntervalS != 60 && !folder.fsNotifications) || (folder.rescanIntervalS != 3600 && folder.fsNotifications)">
|
||||
<th><span class="fa fa-fw fa-refresh"></span> <span translate>Rescan Interval</span></th>
|
||||
<td class="text-right">{{folder.rescanIntervalS}} s</td>
|
||||
</tr>
|
||||
@@ -394,7 +400,7 @@
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="fa fa-fw fa-share-alt"></span> <span translate>Shared With</span></th>
|
||||
<td class="text-right" title="{{sharesFolder(folder)}}">{{sharesFolder(folder)}}</td>
|
||||
<td class="text-right" ng-attr-title="{{sharesFolder(folder)}}">{{sharesFolder(folder)}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="fa fa-fw fa-clock-o"></span> <span translate>Last Scan</span></th>
|
||||
@@ -645,7 +651,7 @@
|
||||
</tr>
|
||||
<tr ng-if="deviceFolders(deviceCfg).length > 0">
|
||||
<th><span class="fa fa-fw fa-folder"></span> <span translate>Folders</span></th>
|
||||
<td class="text-right" title="{{deviceFolders(deviceCfg).map(folderLabel).join(', ')}}">{{deviceFolders(deviceCfg).map(folderLabel).join(", ")}}</td>
|
||||
<td class="text-right" ng-attr-title="{{deviceFolders(deviceCfg).map(folderLabel).join(', ')}}">{{deviceFolders(deviceCfg).map(folderLabel).join(", ")}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
@@ -750,6 +756,7 @@
|
||||
<script type="text/javascript" src="syncthing/core/selectOnClickDirective.js"></script>
|
||||
<script type="text/javascript" src="syncthing/core/syncthingController.js"></script>
|
||||
<script type="text/javascript" src="syncthing/core/tooltipDirective.js"></script>
|
||||
<script type="text/javascript" src="syncthing/core/uncamelFilter.js"></script>
|
||||
<script type="text/javascript" src="syncthing/core/uniqueFolderDirective.js"></script>
|
||||
<script type="text/javascript" src="syncthing/core/validDeviceidDirective.js"></script>
|
||||
<script type="text/javascript" src="assets/lang/valid-langs.js"></script>
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
<p translate>Copyright © 2014-2017 the following Contributors:</p>
|
||||
<div class="row">
|
||||
<div class="col-md-12" id="contributor-list">
|
||||
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Simon Frei, Stefan Tatschner, Aaron Bieber, Adam Piggott, Adel Qalieh, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, Darshil Chanpura, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Heiko Zuerker, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jaya Chithra, Jens Diemer, Jochen Voss, Johan Vromans, Jose Manuel Delicado, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin White, Jr., Kurt Fitzner, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mark Pulford, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Niels Peter Roest, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Robert Carosi, Roman Zaynetdinov, Ross Smith II, Sacheendra Talluri, Scott Klupfel, Stefan Kuntz, Suhas Gundimeda, Tim Abell, Tim Howes, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Xavier O., Yannic A.
|
||||
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Simon Frei, Stefan Tatschner, Aaron Bieber, Adam Piggott, Adel Qalieh, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, Darshil Chanpura, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Heiko Zuerker, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jaya Chithra, Jens Diemer, Jochen Voss, Johan Vromans, Jose Manuel Delicado, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin White, Jr., Kurt Fitzner, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mark Pulford, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Niels Peter Roest, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Robert Carosi, Roman Zaynetdinov, Ross Smith II, Sacheendra Talluri, Scott Klupfel, Stefan Kuntz, Suhas Gundimeda, Tim Abell, Tim Howes, Tobias Nygren, Tobias Tom, Tomas Cerveny, Tully Robinson, Tyler Brazier, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Xavier O., Yannic A.
|
||||
</div>
|
||||
</div>
|
||||
<hr/>
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
angular.module('syncthing.core')
|
||||
.directive('modal', function () {
|
||||
return {
|
||||
// If you ever change any of the petroglyphs below, please search for $parent.$parent,
|
||||
// as some templates rely on the way scope is composed in this case.
|
||||
restrict: 'E',
|
||||
templateUrl: 'modal.html',
|
||||
replace: true,
|
||||
|
||||
@@ -2,7 +2,7 @@ angular.module('syncthing.core')
|
||||
.config(function($locationProvider) {
|
||||
$locationProvider.html5Mode({enabled: true, requireBase: false}).hashPrefix('!');
|
||||
})
|
||||
.controller('SyncthingController', function ($scope, $http, $location, LocaleService, Events, $filter) {
|
||||
.controller('SyncthingController', function ($scope, $http, $location, LocaleService, Events, $filter, $q) {
|
||||
'use strict';
|
||||
|
||||
// private/helper definitions
|
||||
@@ -33,6 +33,9 @@ angular.module('syncthing.core')
|
||||
$scope.folderRejections = {};
|
||||
$scope.protocolChanged = false;
|
||||
$scope.reportData = {};
|
||||
$scope.reportDataPreview = '';
|
||||
$scope.reportDataPreviewVersion = '';
|
||||
$scope.reportDataPreviewDiff = false;
|
||||
$scope.reportPreview = false;
|
||||
$scope.folders = {};
|
||||
$scope.seenError = '';
|
||||
@@ -47,7 +50,6 @@ angular.module('syncthing.core')
|
||||
$scope.neededPageSize = 10;
|
||||
$scope.failed = {};
|
||||
$scope.failedCurrentPage = 1;
|
||||
$scope.failedCurrentFolder = undefined;
|
||||
$scope.failedPageSize = 10;
|
||||
$scope.scanProgress = {};
|
||||
$scope.themes = [];
|
||||
@@ -63,6 +65,7 @@ angular.module('syncthing.core')
|
||||
selectedDevices: {},
|
||||
type: "readwrite",
|
||||
rescanIntervalS: 60,
|
||||
fsWatcherDelayS: 10,
|
||||
minDiskFree: {value: 1, unit: "%"},
|
||||
maxConflicts: 10,
|
||||
fsync: true,
|
||||
@@ -134,6 +137,10 @@ angular.module('syncthing.core')
|
||||
|
||||
$http.get(urlbase + '/svc/report').success(function (data) {
|
||||
$scope.reportData = data;
|
||||
if ($scope.system && $scope.config.options.urAccepted > -1 && $scope.config.options.urSeen < $scope.system.urVersionMax && $scope.config.options.urAccepted < $scope.system.urVersionMax) {
|
||||
// Usage reporting format has changed, prompt the user to re-accept.
|
||||
$('#ur').modal();
|
||||
}
|
||||
}).error($scope.emitHTTPError);
|
||||
|
||||
$http.get(urlbase + '/system/upgrade').success(function (data) {
|
||||
@@ -376,6 +383,7 @@ angular.module('syncthing.core')
|
||||
$scope.config = config;
|
||||
$scope.config.options._listenAddressesStr = $scope.config.options.listenAddresses.join(', ');
|
||||
$scope.config.options._globalAnnounceServersStr = $scope.config.options.globalAnnounceServers.join(', ');
|
||||
$scope.config.options._urAcceptedStr = "" + $scope.config.options.urAccepted;
|
||||
|
||||
$scope.devices = $scope.config.devices;
|
||||
$scope.devices.forEach(function (deviceCfg) {
|
||||
@@ -412,6 +420,10 @@ angular.module('syncthing.core')
|
||||
$scope.myID = data.myID;
|
||||
$scope.system = data;
|
||||
|
||||
if ($scope.reportDataPreviewVersion === '') {
|
||||
$scope.reportDataPreviewVersion = $scope.system.urVersionMax;
|
||||
}
|
||||
|
||||
var listenersFailed = [];
|
||||
for (var address in data.connectionServiceStatus) {
|
||||
if (data.connectionServiceStatus[address].error) {
|
||||
@@ -1058,7 +1070,6 @@ angular.module('syncthing.core')
|
||||
$scope.editSettings = function () {
|
||||
// Make a working copy
|
||||
$scope.tmpOptions = angular.copy($scope.config.options);
|
||||
$scope.tmpOptions.urEnabled = ($scope.tmpOptions.urAccepted > 0);
|
||||
$scope.tmpOptions.deviceName = $scope.thisDevice().name;
|
||||
$scope.tmpOptions.upgrades = "none";
|
||||
if ($scope.tmpOptions.autoUpgradeIntervalH > 0) {
|
||||
@@ -1088,18 +1099,31 @@ angular.module('syncthing.core')
|
||||
}).error($scope.emitHTTPError);
|
||||
};
|
||||
|
||||
$scope.urVersions = function() {
|
||||
var result = [];
|
||||
if ($scope.system) {
|
||||
for (var i = $scope.system.urVersionMax; i >= 2; i--) {
|
||||
result.push("" + i);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
||||
$scope.saveSettings = function () {
|
||||
// Make sure something changed
|
||||
var changed = !angular.equals($scope.config.options, $scope.tmpOptions) || !angular.equals($scope.config.gui, $scope.tmpGUI);
|
||||
var themeChanged = $scope.config.gui.theme !== $scope.tmpGUI.theme;
|
||||
if (changed) {
|
||||
// Angular has issues with selects with numeric values, so we handle strings here.
|
||||
$scope.tmpOptions.urAccepted = parseInt($scope.tmpOptions._urAcceptedStr);
|
||||
// Check if auto-upgrade has been enabled or disabled. This
|
||||
// also has an effect on usage reporting, so do the check
|
||||
// for that later.
|
||||
if ($scope.tmpOptions.upgrades == "candidate") {
|
||||
$scope.tmpOptions.autoUpgradeIntervalH = $scope.tmpOptions.autoUpgradeIntervalH || 12;
|
||||
$scope.tmpOptions.upgradeToPreReleases = true;
|
||||
$scope.tmpOptions.urEnabled = true;
|
||||
$scope.tmpOptions.urAccepted = $scope.system.urVersionMax;
|
||||
$scope.tmpOptions.urSeen = $scope.system.urVersionMax;
|
||||
} else if ($scope.tmpOptions.upgrades == "stable") {
|
||||
$scope.tmpOptions.autoUpgradeIntervalH = $scope.tmpOptions.autoUpgradeIntervalH || 12;
|
||||
$scope.tmpOptions.upgradeToPreReleases = false;
|
||||
@@ -1107,13 +1131,6 @@ angular.module('syncthing.core')
|
||||
$scope.tmpOptions.autoUpgradeIntervalH = 0;
|
||||
}
|
||||
|
||||
// Check if usage reporting has been enabled or disabled
|
||||
if ($scope.tmpOptions.urEnabled && $scope.tmpOptions.urAccepted <= 0) {
|
||||
$scope.tmpOptions.urAccepted = 1000;
|
||||
} else if (!$scope.tmpOptions.urEnabled && $scope.tmpOptions.urAccepted > 0) {
|
||||
$scope.tmpOptions.urAccepted = -1;
|
||||
}
|
||||
|
||||
// Check if protocol will need to be changed on restart
|
||||
if ($scope.config.gui.useTLS !== $scope.tmpGUI.useTLS) {
|
||||
$scope.protocolChanged = true;
|
||||
@@ -1691,13 +1708,17 @@ angular.module('syncthing.core')
|
||||
};
|
||||
|
||||
$scope.acceptUR = function () {
|
||||
$scope.config.options.urAccepted = 1000; // Larger than the largest existing report version
|
||||
$scope.config.options.urAccepted = $scope.system.urVersionMax;
|
||||
$scope.config.options.urSeen = $scope.system.urVersionMax;
|
||||
$scope.saveConfig();
|
||||
$('#ur').modal('hide');
|
||||
};
|
||||
|
||||
$scope.declineUR = function () {
|
||||
$scope.config.options.urAccepted = -1;
|
||||
if ($scope.config.options.urAccepted === 0) {
|
||||
$scope.config.options.urAccepted = -1;
|
||||
}
|
||||
$scope.config.options.urSeen = $scope.system.urVersionMax;
|
||||
$scope.saveConfig();
|
||||
$('#ur').modal('hide');
|
||||
};
|
||||
@@ -1747,6 +1768,31 @@ angular.module('syncthing.core')
|
||||
$scope.reportPreview = true;
|
||||
};
|
||||
|
||||
$scope.refreshReportDataPreview = function () {
|
||||
$scope.reportDataPreview = '';
|
||||
if (!$scope.reportDataPreviewVersion) {
|
||||
return;
|
||||
}
|
||||
var version = parseInt($scope.reportDataPreviewVersion);
|
||||
if ($scope.reportDataPreviewDiff && version > 2) {
|
||||
$q.all([
|
||||
$http.get(urlbase + '/svc/report?version=' + version),
|
||||
$http.get(urlbase + '/svc/report?version=' + (version-1)),
|
||||
]).then(function (responses) {
|
||||
var newReport = responses[0].data;
|
||||
var oldReport = responses[1].data;
|
||||
angular.forEach(oldReport, function(_, key) {
|
||||
delete newReport[key];
|
||||
});
|
||||
$scope.reportDataPreview = newReport;
|
||||
});
|
||||
} else {
|
||||
$http.get(urlbase + '/svc/report?version=' + version).success(function (data) {
|
||||
$scope.reportDataPreview = data;
|
||||
}).error($scope.emitHTTPError);
|
||||
}
|
||||
};
|
||||
|
||||
$scope.rescanAllFolders = function () {
|
||||
$http.post(urlbase + "/db/scan");
|
||||
};
|
||||
|
||||
gui/default/syncthing/core/uncamelFilter.js (new file, 27 lines)
@@ -0,0 +1,27 @@
angular.module('syncthing.core')
    .filter('uncamel', function () {
        return function (input) {
            input = input.replace(/(.)([A-Z][a-z]+)/g, '$1 $2').replace(/([a-z0-9])([A-Z])/g, '$1 $2');
            var parts = input.split(' ');
            var lastPart = parts.splice(-1)[0];
            switch (lastPart) {
                case "S":
                    parts.push('(seconds)');
                    break;
                case "M":
                    parts.push('(minutes)');
                    break;
                case "H":
                    parts.push('(hours)');
                    break;
                case "Ms":
                    parts.push('(milliseconds)');
                    break;
                default:
                    parts.push(lastPart);
                    break;
            }
            input = parts.join(' ');
            return input.charAt(0).toUpperCase() + input.slice(1);
        };
    });
|
||||
@@ -180,7 +180,7 @@
|
||||
<label translate for="externalCommand">Command</label>
|
||||
<input name="externalCommand" id="externalCommand" class="form-control" type="text" ng-model="currentFolder.externalCommand" required="" aria-required="true" />
|
||||
<p class="help-block">
|
||||
<span translate ng-if="folderEditor.externalCommand.$valid || folderEditor.externalCommand.$pristine">See external versioner help for supported templated command line parameters.</span>
|
||||
<span translate ng-if="folderEditor.externalCommand.$valid || folderEditor.externalCommand.$pristine">See external versioning help for supported templated command line parameters.</span>
|
||||
<span translate ng-if="folderEditor.externalCommand.$error.required && folderEditor.externalCommand.$dirty">The path cannot be blank.</span>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
<div class="panel-body">
|
||||
<form class="form-horizontal" role="form">
|
||||
<div ng-repeat="(key, value) in advancedConfig.gui" ng-init="type = inputTypeFor(key, value)" ng-if="type != 'skip'" class="form-group">
|
||||
<label for="guiInput{{$index}}" class="col-sm-4 control-label">{{key}}</label>
|
||||
<label for="guiInput{{$index}}" class="col-sm-4 control-label">{{key | uncamel}}</label>
|
||||
<div class="col-sm-8">
|
||||
<input ng-if="inputTypeFor(key, value) == 'list'" id="optionsInput{{$index}}" class="form-control" type="text" ng-model="advancedConfig.gui[key]" ng-list/>
|
||||
<input ng-if="inputTypeFor(key, value) != 'list'" id="optionsInput{{$index}}" class="form-control" type="{{inputTypeFor(key, value)}}" ng-model="advancedConfig.gui[key]" />
|
||||
@@ -35,7 +35,7 @@
|
||||
<div class="panel-body">
|
||||
<form class="form-horizontal" role="form">
|
||||
<div ng-repeat="(key, value) in advancedConfig.options" ng-if="inputTypeFor(key, value) != 'skip'" class="form-group">
|
||||
<label for="optionsInput{{$index}}" class="col-sm-4 control-label">{{key}}</label>
|
||||
<label for="optionsInput{{$index}}" class="col-sm-4 control-label">{{key | uncamel}}</label>
|
||||
<div class="col-sm-8">
|
||||
<input ng-if="inputTypeFor(key, value) == 'list'" id="optionsInput{{$index}}" class="form-control" type="text" ng-model="advancedConfig.options[key]" ng-list/>
|
||||
<input ng-if="inputTypeFor(key, value) != 'list'" id="optionsInput{{$index}}" class="form-control" type="{{inputTypeFor(key, value)}}" ng-model="advancedConfig.options[key]" />
|
||||
@@ -59,7 +59,7 @@
|
||||
<div class="panel-body">
|
||||
<form class="form-horizontal" role="form">
|
||||
<div ng-repeat="(key, value) in folder" ng-if="inputTypeFor(key, value) != 'skip'" class="form-group">
|
||||
<label for="folder{{$index}}Input{{$index}}" class="col-sm-4 control-label">{{key}}</label>
|
||||
<label for="folder{{$index}}Input{{$index}}" class="col-sm-4 control-label">{{key | uncamel}}</label>
|
||||
<div class="col-sm-8">
|
||||
<input ng-if="inputTypeFor(key, value) == 'list'" id="optionsInput{{$index}}" class="form-control" type="text" ng-model="folder[key]" ng-list/>
|
||||
<input ng-if="inputTypeFor(key, value) != 'list'" id="optionsInput{{$index}}" class="form-control" type="{{inputTypeFor(key, value)}}" ng-model="folder[key]" />
|
||||
@@ -80,7 +80,7 @@
|
||||
<div class="panel-body">
|
||||
<form class="form-horizontal" role="form">
|
||||
<div ng-repeat="(key, value) in device" ng-if="inputTypeFor(key, value) != 'skip'" class="form-group">
|
||||
<label for="device{{$index}}Input{{$index}}" class="col-sm-4 control-label">{{key}}</label>
|
||||
<label for="device{{$index}}Input{{$index}}" class="col-sm-4 control-label">{{key | uncamel}}</label>
|
||||
<div class="col-sm-8">
|
||||
<input ng-if="inputTypeFor(key, value) == 'list'" id="optionsInput{{$index}}" class="form-control" type="text" ng-model="device[key]" ng-list/>
|
||||
<input ng-if="inputTypeFor(key, value) != 'list'" id="optionsInput{{$index}}" class="form-control" type="{{inputTypeFor(key, value)}}" ng-model="device[key]" />
|
||||
|
||||
@@ -139,10 +139,14 @@
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<div class="checkbox" ng-if="tmpOptions.upgrades != 'candidate'">
|
||||
<label>
|
||||
<input id="UREnabled" type="checkbox" ng-model="tmpOptions.urEnabled"/> <span translate>Anonymous Usage Reporting</span> (<a href="" translate data-toggle="modal" data-target="#urPreview">Preview</a>)
|
||||
</label>
|
||||
<div ng-if="tmpOptions.upgrades != 'candidate'">
|
||||
<label translate for="urVersion">Anonymous Usage Reporting</label> (<a href="" translate data-toggle="modal" data-target="#urPreview">Preview</a>)
|
||||
<select class="form-control" id="urVersion" ng-model="tmpOptions._urAcceptedStr">
|
||||
<option ng-repeat="n in urVersions()" value="{{n}}">{{'Version' | translate}} {{n}}</option>
|
||||
<!-- 1 does not exist, as we did not support incremental formats back then. -->
|
||||
<option value="0" translate>Undecided (will prompt)</option>
|
||||
<option value="-1" translate>Disabled</option>
|
||||
</select>
|
||||
</div>
|
||||
<p class="help-block" ng-if="tmpOptions.upgrades == 'candidate'">
|
||||
<span translate>Usage reporting is always enabled for candidate releases.</span> (<a href="" translate data-toggle="modal" data-target="#urPreview">Preview</a>)
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
<modal id="ur" status="info" icon="bar-chart" heading="{{'Allow Anonymous Usage Reporting?' | translate}}" large="yes" closeable="no">
|
||||
<div class="modal-body">
|
||||
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
|
||||
<p translate>The aggregated statistics are publicly available at the URL below.</p>
|
||||
<p><a href="https://data.syncthing.net/" target="_blank">https://data.syncthing.net/</a></p>
|
||||
<div ng-if="config.options.urAccepted > 0 && config.options.urAccepted < system.urVersionMax">
|
||||
<p translate>Anonymous usage report format has changed. Would you like to move to the new format?</p>
|
||||
</div>
|
||||
<div ng-if="!(config.options.urAccepted > 0 && config.options.urAccepted < system.urVersionMax)">
|
||||
<p translate>The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.</p>
|
||||
<p translate>The aggregated statistics are publicly available at the URL below.</p>
|
||||
<p><a href="https://data.syncthing.net/" target="_blank">https://data.syncthing.net/</a></p>
|
||||
</div>
|
||||
<button type="button" class="btn btn-default btn-sm" ng-click="showReportPreview()" ng-show="!reportPreview">
|
||||
<span class="fa fa-file-text-o"></span> <span translate>Preview Usage Report</span>
|
||||
</button>
|
||||
|
||||
@@ -5,8 +5,20 @@
|
||||
</p>
|
||||
<p translate>The aggregated statistics are publicly available at the URL below.</p>
|
||||
<p><a href="https://data.syncthing.net/" target="_blank">https://data.syncthing.net/</a></p>
|
||||
<label translate>Version</label>
|
||||
<select id="urPreviewVersion" class="form-control" ng-model="$parent.$parent.reportDataPreviewVersion" ng-change="refreshReportDataPreview()" >
|
||||
<option selected value translate>Select a version</option>
|
||||
<option ng-repeat="n in urVersions()" value="{{n}}">{{'Version' | translate}} {{n}}</option>
|
||||
</select>
|
||||
<div class="checkbox" ng-if="$parent.$parent.reportDataPreviewVersion > 2">
|
||||
<label>
|
||||
<input type="checkbox" ng-model="$parent.$parent.$parent.reportDataPreviewDiff" ng-change="refreshReportDataPreview()"/>
|
||||
<span translate>Show diff with previous version</span>
|
||||
</label>
|
||||
</div>
|
||||
<hr>
|
||||
<form>
|
||||
<textarea class="form-control" rows="20">{{reportData | json}}</textarea>
|
||||
<textarea class="form-control" rows="20" ng-if="reportDataPreview">{{reportDataPreview | json}}</textarea>
|
||||
</form>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
|
||||
@@ -25,6 +25,9 @@ platforms=(
|
||||
darwin-amd64 darwin-386
|
||||
)
|
||||
|
||||
# Mac builds always require cgo
|
||||
export CGO_ENABLED=1
|
||||
|
||||
echo Building
|
||||
for plat in "${platforms[@]}"; do
|
||||
echo Building "$plat"
|
||||
|
||||
@@ -137,17 +137,11 @@ func (w *broadcastWriter) Serve() {
|
||||
return
|
||||
}
|
||||
|
||||
if err, ok := err.(net.Error); ok && err.Temporary() {
|
||||
// A transient error. Lets hope for better luck in the future.
|
||||
l.Debugln(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Some other error that we don't expect. Bail and retry.
|
||||
// Some other error that we don't expect. Debug and continue.
|
||||
l.Debugln(err)
|
||||
w.setError(err)
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
l.Debugf("sent %d bytes to %s", len(bs), dst)
|
||||
|
||||
@@ -32,7 +32,7 @@ import (
|
||||
|
||||
const (
|
||||
OldestHandledVersion = 10
|
||||
CurrentVersion = 23
|
||||
CurrentVersion = 25
|
||||
MaxRescanIntervalS = 365 * 24 * 60 * 60
|
||||
)
|
||||
|
||||
@@ -48,11 +48,8 @@ var (
|
||||
DefaultListenAddresses = []string{
|
||||
util.Address("tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(DefaultTCPPort))),
|
||||
"dynamic+https://relays.syncthing.net/endpoint",
|
||||
util.Address("kcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(DefaultKCPPort))),
|
||||
}
|
||||
// DefaultKCPListenAddress gets added to the default listen address set
|
||||
// when the appropriate feature flag is set. Feature flag stuff to be
|
||||
// removed later.
|
||||
DefaultKCPListenAddress = util.Address("kcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(DefaultKCPPort)))
|
||||
// DefaultDiscoveryServersV4 should be substituted when the configuration
|
||||
// contains <globalAnnounceServer>default-v4</globalAnnounceServer>.
|
||||
DefaultDiscoveryServersV4 = []string{
|
||||
@@ -326,6 +323,12 @@ func (cfg *Configuration) clean() error {
|
||||
if cfg.Version == 22 {
|
||||
convertV22V23(cfg)
|
||||
}
|
||||
if cfg.Version == 23 {
|
||||
convertV23V24(cfg)
|
||||
}
|
||||
if cfg.Version == 24 {
|
||||
convertV24V25(cfg)
|
||||
}
|
||||
|
||||
// Build a list of available devices
|
||||
existingDevices := make(map[protocol.DeviceID]bool)
|
||||
@@ -375,6 +378,20 @@ func (cfg *Configuration) clean() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertV24V25(cfg *Configuration) {
|
||||
for i := range cfg.Folders {
|
||||
cfg.Folders[i].FSWatcherDelayS = 10
|
||||
}
|
||||
|
||||
cfg.Version = 25
|
||||
}
|
||||
|
||||
func convertV23V24(cfg *Configuration) {
|
||||
cfg.Options.URSeen = 2
|
||||
|
||||
cfg.Version = 24
|
||||
}
|
||||
|
||||
func convertV22V23(cfg *Configuration) {
|
||||
permBits := fs.FileMode(0777)
|
||||
if runtime.GOOS == "windows" {
|
||||
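The hunks above bump CurrentVersion from 23 to 25 and add the matching steps to the migration chain in clean(), which walks a loaded configuration forward one version at a time. A minimal sketch, using only the functions shown in this diff, of what that chain does to a config loaded at version 23 (illustration only, not the full clean() logic):

```go
// Sketch inside the config package; clean() performs more steps than shown.
cfg := Configuration{Version: 23}
if cfg.Version == 23 {
	convertV23V24(&cfg) // sets Options.URSeen = 2, then Version = 24
}
if cfg.Version == 24 {
	convertV24V25(&cfg) // sets FSWatcherDelayS = 10 on every folder, then Version = 25
}
// cfg.Version now equals CurrentVersion (25).
```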
@@ -392,9 +409,10 @@ func convertV22V23(cfg *Configuration) {
|
||||
err = fs.Remove(".stfolder")
|
||||
if err == nil {
|
||||
err = fs.Mkdir(".stfolder", permBits)
|
||||
fs.Hide(".stfolder") // ignore error
|
||||
}
|
||||
if err != nil {
|
||||
l.Fatalln("failed to upgrade folder marker:", err)
|
||||
l.Infoln("Failed to upgrade folder marker:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,7 +68,6 @@ func TestDefaultValues(t *testing.T) {
|
||||
WeakHashSelectionMethod: WeakHashAuto,
|
||||
StunKeepaliveS: 24,
|
||||
StunServers: []string{"default"},
|
||||
DefaultKCPEnabled: false,
|
||||
KCPCongestionControl: true,
|
||||
KCPReceiveWindowSize: 128,
|
||||
KCPSendWindowSize: 128,
|
||||
@@ -103,18 +102,20 @@ func TestDeviceConfig(t *testing.T) {
|
||||
|
||||
expectedFolders := []FolderConfiguration{
|
||||
{
|
||||
ID: "test",
|
||||
FilesystemType: fs.FilesystemTypeBasic,
|
||||
Path: "testdata",
|
||||
Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
|
||||
Type: FolderTypeSendOnly,
|
||||
RescanIntervalS: 600,
|
||||
Copiers: 0,
|
||||
Pullers: 0,
|
||||
Hashers: 0,
|
||||
AutoNormalize: true,
|
||||
MinDiskFree: Size{1, "%"},
|
||||
MaxConflicts: -1,
|
||||
ID: "test",
|
||||
FilesystemType: fs.FilesystemTypeBasic,
|
||||
Path: "testdata",
|
||||
Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
|
||||
Type: FolderTypeSendOnly,
|
||||
RescanIntervalS: 600,
|
||||
FSWatcherEnabled: false,
|
||||
FSWatcherDelayS: 10,
|
||||
Copiers: 0,
|
||||
Pullers: 0,
|
||||
Hashers: 0,
|
||||
AutoNormalize: true,
|
||||
MinDiskFree: Size{1, "%"},
|
||||
MaxConflicts: -1,
|
||||
Versioning: VersioningConfiguration{
|
||||
Params: map[string]string{},
|
||||
},
|
||||
@@ -200,6 +201,7 @@ func TestOverriddenValues(t *testing.T) {
|
||||
ProgressUpdateIntervalS: 10,
|
||||
LimitBandwidthInLan: true,
|
||||
MinHomeDiskFree: Size{5.2, "%"},
|
||||
URSeen: 2,
|
||||
URURL: "https://localhost/newdata",
|
||||
URInitialDelayS: 800,
|
||||
URPostInsecurely: true,
|
||||
@@ -213,7 +215,6 @@ func TestOverriddenValues(t *testing.T) {
|
||||
WeakHashSelectionMethod: WeakHashNever,
|
||||
StunKeepaliveS: 10,
|
||||
StunServers: []string{"a.stun.com", "b.stun.com"},
|
||||
DefaultKCPEnabled: true,
|
||||
KCPCongestionControl: false,
|
||||
KCPReceiveWindowSize: 1280,
|
||||
KCPSendWindowSize: 1280,
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
@@ -14,6 +15,11 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
errPathMissing = errors.New("folder path missing")
|
||||
errMarkerMissing = errors.New("folder marker missing")
|
||||
)
|
||||
|
||||
type FolderConfiguration struct {
|
||||
ID string `xml:"id,attr" json:"id"`
|
||||
Label string `xml:"label,attr" json:"label"`
|
||||
@@ -22,6 +28,8 @@ type FolderConfiguration struct {
|
||||
Type FolderType `xml:"type,attr" json:"type"`
|
||||
Devices []FolderDeviceConfiguration `xml:"device" json:"devices"`
|
||||
RescanIntervalS int `xml:"rescanIntervalS,attr" json:"rescanIntervalS"`
|
||||
FSWatcherEnabled bool `xml:"fsWatcherEnabled,attr" json:"fsWatcherEnabled"`
|
||||
FSWatcherDelayS int `xml:"fsWatcherDelayS,attr" json:"fsWatcherDelayS"`
|
||||
IgnorePerms bool `xml:"ignorePerms,attr" json:"ignorePerms"`
|
||||
AutoNormalize bool `xml:"autoNormalize,attr" json:"autoNormalize"`
|
||||
MinDiskFree Size `xml:"minDiskFree" json:"minDiskFree"`
|
||||
@@ -80,34 +88,44 @@ func (f FolderConfiguration) Filesystem() fs.Filesystem {
|
||||
}
|
||||
|
||||
func (f *FolderConfiguration) CreateMarker() error {
|
||||
if !f.HasMarker() {
|
||||
permBits := fs.FileMode(0777)
|
||||
if runtime.GOOS == "windows" {
|
||||
// Windows has no umask so we must chose a safer set of bits to
|
||||
// begin with.
|
||||
permBits = 0700
|
||||
}
|
||||
fs := f.Filesystem()
|
||||
err := fs.Mkdir(".stfolder", permBits)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir, err := fs.Open("."); err == nil {
|
||||
if serr := dir.Sync(); err != nil {
|
||||
l.Infof("fsync %q failed: %v", ".", serr)
|
||||
}
|
||||
} else {
|
||||
l.Infof("fsync %q failed: %v", ".", err)
|
||||
}
|
||||
fs.Hide(".stfolder")
|
||||
if err := f.CheckPath(); err != errMarkerMissing {
|
||||
return err
|
||||
}
|
||||
|
||||
permBits := fs.FileMode(0777)
|
||||
if runtime.GOOS == "windows" {
|
||||
// Windows has no umask so we must chose a safer set of bits to
|
||||
// begin with.
|
||||
permBits = 0700
|
||||
}
|
||||
fs := f.Filesystem()
|
||||
err := fs.Mkdir(".stfolder", permBits)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir, err := fs.Open("."); err != nil {
|
||||
l.Debugln("folder marker: open . failed:", err)
|
||||
} else if err := dir.Sync(); err != nil {
|
||||
l.Debugln("folder marker: fsync . failed:", err)
|
||||
}
|
||||
fs.Hide(".stfolder")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FolderConfiguration) HasMarker() bool {
|
||||
_, err := f.Filesystem().Stat(".stfolder")
|
||||
return err == nil
|
||||
// CheckPath returns nil if the folder root exists and contains the marker file
|
||||
func (f *FolderConfiguration) CheckPath() error {
|
||||
fi, err := f.Filesystem().Stat(".")
|
||||
if err != nil || !fi.IsDir() {
|
||||
return errPathMissing
|
||||
}
|
||||
|
||||
_, err = f.Filesystem().Stat(".stfolder")
|
||||
if err != nil {
|
||||
return errMarkerMissing
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FolderConfiguration) CreateRoot() (err error) {
|
||||
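The refactor above splits marker handling into CheckPath, which reports errPathMissing or errMarkerMissing, and CreateMarker, which only creates the marker when the root already exists. A hedged sketch of how a caller could distinguish the two cases; folderCfg is a placeholder *FolderConfiguration and the comments describe plausible reactions, not code from this change set:

```go
switch err := folderCfg.CheckPath(); err {
case errPathMissing:
	// The folder root itself is missing – creating .stfolder inside it would be wrong.
case errMarkerMissing:
	_ = folderCfg.CreateMarker() // root exists, only the .stfolder marker is absent
case nil:
	// Root and marker are both present; nothing to do.
}
```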
@@ -157,6 +175,11 @@ func (f *FolderConfiguration) prepare() {
|
||||
f.RescanIntervalS = 0
|
||||
}
|
||||
|
||||
if f.FSWatcherDelayS <= 0 {
|
||||
f.FSWatcherEnabled = false
|
||||
f.FSWatcherDelayS = 10
|
||||
}
|
||||
|
||||
if f.Versioning.Params == nil {
|
||||
f.Versioning.Params = make(map[string]string)
|
||||
}
|
||||
@@ -179,3 +202,7 @@ func (l FolderDeviceConfigurationList) Swap(a, b int) {
|
||||
func (l FolderDeviceConfigurationList) Len() int {
|
||||
return len(l)
|
||||
}
|
||||
|
||||
func (f *FolderConfiguration) CheckFreeSpace() (err error) {
|
||||
return checkFreeSpace(f.MinDiskFree, f.Filesystem())
|
||||
}
|
||||
|
||||
@@ -13,16 +13,17 @@ import (
|
||||
)
|
||||
|
||||
type GUIConfiguration struct {
|
||||
Enabled bool `xml:"enabled,attr" json:"enabled" default:"true"`
|
||||
RawAddress string `xml:"address" json:"address" default:"127.0.0.1:8384"`
|
||||
User string `xml:"user,omitempty" json:"user"`
|
||||
Password string `xml:"password,omitempty" json:"password"`
|
||||
RawUseTLS bool `xml:"tls,attr" json:"useTLS"`
|
||||
APIKey string `xml:"apikey,omitempty" json:"apiKey"`
|
||||
InsecureAdminAccess bool `xml:"insecureAdminAccess,omitempty" json:"insecureAdminAccess"`
|
||||
Theme string `xml:"theme" json:"theme" default:"default"`
|
||||
Debugging bool `xml:"debugging,attr" json:"debugging"`
|
||||
InsecureSkipHostCheck bool `xml:"insecureSkipHostcheck,omitempty" json:"insecureSkipHostcheck"`
|
||||
Enabled bool `xml:"enabled,attr" json:"enabled" default:"true"`
|
||||
RawAddress string `xml:"address" json:"address" default:"127.0.0.1:8384"`
|
||||
User string `xml:"user,omitempty" json:"user"`
|
||||
Password string `xml:"password,omitempty" json:"password"`
|
||||
RawUseTLS bool `xml:"tls,attr" json:"useTLS"`
|
||||
APIKey string `xml:"apikey,omitempty" json:"apiKey"`
|
||||
InsecureAdminAccess bool `xml:"insecureAdminAccess,omitempty" json:"insecureAdminAccess"`
|
||||
Theme string `xml:"theme" json:"theme" default:"default"`
|
||||
Debugging bool `xml:"debugging,attr" json:"debugging"`
|
||||
InsecureSkipHostCheck bool `xml:"insecureSkipHostcheck,omitempty" json:"insecureSkipHostcheck"`
|
||||
InsecureAllowFrameLoading bool `xml:"insecureAllowFrameLoading,omitempty" json:"insecureAllowFrameLoading"`
|
||||
}
|
||||
|
||||
func (c GUIConfiguration) Address() string {
|
||||
|
||||
@@ -112,6 +112,7 @@ type OptionsConfiguration struct {
|
||||
NATRenewalM int `xml:"natRenewalMinutes" json:"natRenewalMinutes" default:"30"`
|
||||
NATTimeoutS int `xml:"natTimeoutSeconds" json:"natTimeoutSeconds" default:"10"`
|
||||
URAccepted int `xml:"urAccepted" json:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)
|
||||
URSeen int `xml:"urSeen" json:"urSeen"` // Report which the user has been prompted for.
|
||||
URUniqueID string `xml:"urUniqueID" json:"urUniqueId"` // Unique ID for reporting purposes, regenerated when UR is turned on.
|
||||
URURL string `xml:"urURL" json:"urURL" default:"https://data.syncthing.net/newdata"`
|
||||
URPostInsecurely bool `xml:"urPostInsecurely" json:"urPostInsecurely" default:"false"` // For testing
|
||||
@@ -133,7 +134,6 @@ type OptionsConfiguration struct {
|
||||
WeakHashSelectionMethod WeakHashSelectionMethod `xml:"weakHashSelectionMethod" json:"weakHashSelectionMethod"`
|
||||
StunServers []string `xml:"stunServer" json:"stunServers" default:"default"`
|
||||
StunKeepaliveS int `xml:"stunKeepaliveSeconds" json:"stunKeepaliveSeconds" default:"24"`
|
||||
DefaultKCPEnabled bool `xml:"defaultKCPEnabled" json:"defaultKCPEnabled" default:"false"`
|
||||
KCPNoDelay bool `xml:"kcpNoDelay" json:"kcpNoDelay" default:"false"`
|
||||
KCPUpdateIntervalMs int `xml:"kcpUpdateIntervalMs" json:"kcpUpdateIntervalMs" default:"25"`
|
||||
KCPFastResend bool `xml:"kcpFastResend" json:"kcpFastResend" default:"false"`
|
||||
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
)
|
||||
|
||||
type Size struct {
|
||||
@@ -73,3 +75,24 @@ func (s Size) String() string {
|
||||
func (Size) ParseDefault(s string) (interface{}, error) {
|
||||
return ParseSize(s)
|
||||
}
|
||||
|
||||
func checkFreeSpace(req Size, fs fs.Filesystem) error {
|
||||
val := req.BaseValue()
|
||||
if val <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
usage, err := fs.Usage(".")
|
||||
if req.Percentage() {
|
||||
freePct := (float64(usage.Free) / float64(usage.Total)) * 100
|
||||
if err == nil && freePct < val {
|
||||
return fmt.Errorf("insufficient space in %v %v: %f %% < %v", fs.Type(), fs.URI(), freePct, req)
|
||||
}
|
||||
} else {
|
||||
if err == nil && float64(usage.Free) < val {
|
||||
return fmt.Errorf("insufficient space in %v %v: %v < %v", fs.Type(), fs.URI(), usage.Free, req)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
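checkFreeSpace above handles both a percentage and an absolute minimum, and the two exported entry points added in this change set funnel into it. A brief hedged sketch of the call sites; folderCfg and cfgWrapper are placeholder names and the error-handling comments are illustrative only:

```go
// Per-folder check against the folder's minDiskFree setting.
if err := folderCfg.CheckFreeSpace(); err != nil {
	// err reads roughly like: insufficient space in basic /some/path: 0.600000 % < 1 %
}

// Home (index database) disk check against options.minHomeDiskFree.
if err := cfgWrapper.CheckHomeFreeSpace(); err != nil {
	// Same error shape, but for the directory holding the configuration and index.
}
```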
lib/config/testdata/overridenvalues.xml (vendored, 1 line changed)
@@ -38,7 +38,6 @@
|
||||
<stunKeepaliveSeconds>10</stunKeepaliveSeconds>
|
||||
<stunServer>a.stun.com</stunServer>
|
||||
<stunServer>b.stun.com</stunServer>
|
||||
<defaultKCPEnabled>true</defaultKCPEnabled>
|
||||
<kcpCongestionControl>false</kcpCongestionControl>
|
||||
<kcpReceiveWindowSize>1280</kcpReceiveWindowSize>
|
||||
<kcpSendWindowSize>1280</kcpSendWindowSize>
|
||||
|
||||
lib/config/testdata/v24.xml (new file, vendored, 16 lines)
@@ -0,0 +1,16 @@
|
||||
<configuration version="22">
|
||||
<folder id="test" path="testdata" type="readonly" ignorePerms="false" rescanIntervalS="600" autoNormalize="true">
|
||||
<filesystemType>basic</filesystemType>
|
||||
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR"></device>
|
||||
<device id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2"></device>
|
||||
<minDiskFree unit="%">1</minDiskFree>
|
||||
<maxConflicts>-1</maxConflicts>
|
||||
<fsync>true</fsync>
|
||||
</folder>
|
||||
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR" name="node one" compression="metadata">
|
||||
<address>tcp://a</address>
|
||||
</device>
|
||||
<device id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2" name="node two" compression="metadata">
|
||||
<address>tcp://b</address>
|
||||
</device>
|
||||
</configuration>
|
||||
lib/config/testdata/v25.xml (new file, vendored, 16 lines)
@@ -0,0 +1,16 @@
|
||||
<configuration version="25">
|
||||
<folder id="test" path="testdata" type="readonly" ignorePerms="false" rescanIntervalS="600" fsNotifications="false" notifyDelayS="10" autoNormalize="true">
|
||||
<filesystemType>basic</filesystemType>
|
||||
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR"></device>
|
||||
<device id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2"></device>
|
||||
<minDiskFree unit="%">1</minDiskFree>
|
||||
<maxConflicts>-1</maxConflicts>
|
||||
<fsync>true</fsync>
|
||||
</folder>
|
||||
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR" name="node one" compression="metadata">
|
||||
<address>tcp://a</address>
|
||||
</device>
|
||||
<device id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2" name="node two" compression="metadata">
|
||||
<address>tcp://b</address>
|
||||
</device>
|
||||
</configuration>
|
||||
@@ -8,9 +8,11 @@ package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
@@ -128,15 +130,26 @@ func (w *Wrapper) RawCopy() Configuration {
|
||||
return w.cfg.Copy()
|
||||
}
|
||||
|
||||
// ReplaceBlocking swaps the current configuration object for the given one,
|
||||
// and waits for subscribers to be notified.
|
||||
func (w *Wrapper) ReplaceBlocking(cfg Configuration) error {
|
||||
w.mut.Lock()
|
||||
wg := sync.NewWaitGroup()
|
||||
err := w.replaceLocked(cfg, wg)
|
||||
w.mut.Unlock()
|
||||
wg.Wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// Replace swaps the current configuration object for the given one.
|
||||
func (w *Wrapper) Replace(cfg Configuration) error {
|
||||
w.mut.Lock()
|
||||
defer w.mut.Unlock()
|
||||
|
||||
return w.replaceLocked(cfg)
|
||||
return w.replaceLocked(cfg, nil)
|
||||
}
|
||||
|
||||
func (w *Wrapper) replaceLocked(to Configuration) error {
|
||||
func (w *Wrapper) replaceLocked(to Configuration, wg sync.WaitGroup) error {
|
||||
from := w.cfg
|
||||
|
||||
if err := to.clean(); err != nil {
|
||||
@@ -155,14 +168,22 @@ func (w *Wrapper) replaceLocked(to Configuration) error {
|
||||
w.deviceMap = nil
|
||||
w.folderMap = nil
|
||||
|
||||
w.notifyListeners(from, to)
|
||||
w.notifyListeners(from, to, wg)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Wrapper) notifyListeners(from, to Configuration) {
|
||||
func (w *Wrapper) notifyListeners(from, to Configuration, wg sync.WaitGroup) {
|
||||
if wg != nil {
|
||||
wg.Add(len(w.subs))
|
||||
}
|
||||
for _, sub := range w.subs {
|
||||
go w.notifyListener(sub, from.Copy(), to.Copy())
|
||||
go func(commiter Committer) {
|
||||
w.notifyListener(commiter, from.Copy(), to.Copy())
|
||||
if wg != nil {
|
||||
wg.Done()
|
||||
}
|
||||
}(sub)
|
||||
}
|
||||
}
|
||||
|
||||
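The WaitGroup threaded through replaceLocked and notifyListeners above is what gives ReplaceBlocking its semantics. A hedged sketch of the difference from a caller's point of view; wrapper and newCfg are placeholder names:

```go
// Replace returns once the new configuration is stored; subscribers are
// notified asynchronously in their own goroutines.
if err := wrapper.Replace(newCfg); err != nil {
	return err
}

// ReplaceBlocking additionally waits until every subscribed Committer has
// been handed the old→new change before returning.
if err := wrapper.ReplaceBlocking(newCfg); err != nil {
	return err
}
```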
@@ -210,7 +231,7 @@ func (w *Wrapper) SetDevices(devs []DeviceConfiguration) error {
|
||||
}
|
||||
}
|
||||
|
||||
return w.replaceLocked(newCfg)
|
||||
return w.replaceLocked(newCfg, nil)
|
||||
}
|
||||
|
||||
// SetDevice adds a new device to the configuration, or overwrites an existing
|
||||
@@ -237,7 +258,7 @@ func (w *Wrapper) RemoveDevice(id protocol.DeviceID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return w.replaceLocked(newCfg)
|
||||
return w.replaceLocked(newCfg, nil)
|
||||
}
|
||||
|
||||
// Folders returns a map of folders. Folder structures should not be changed,
|
||||
@@ -273,7 +294,7 @@ func (w *Wrapper) SetFolder(fld FolderConfiguration) error {
|
||||
newCfg.Folders = append(w.cfg.Folders, fld)
|
||||
}
|
||||
|
||||
return w.replaceLocked(newCfg)
|
||||
return w.replaceLocked(newCfg, nil)
|
||||
}
|
||||
|
||||
// Options returns the current options configuration object.
|
||||
@@ -289,7 +310,7 @@ func (w *Wrapper) SetOptions(opts OptionsConfiguration) error {
|
||||
defer w.mut.Unlock()
|
||||
newCfg := w.cfg.Copy()
|
||||
newCfg.Options = opts
|
||||
return w.replaceLocked(newCfg)
|
||||
return w.replaceLocked(newCfg, nil)
|
||||
}
|
||||
|
||||
// GUI returns the current GUI configuration object.
|
||||
@@ -305,7 +326,7 @@ func (w *Wrapper) SetGUI(gui GUIConfiguration) error {
|
||||
defer w.mut.Unlock()
|
||||
newCfg := w.cfg.Copy()
|
||||
newCfg.GUI = gui
|
||||
return w.replaceLocked(newCfg)
|
||||
return w.replaceLocked(newCfg, nil)
|
||||
}
|
||||
|
||||
// IgnoredDevice returns whether or not connection attempts from the given
|
||||
@@ -404,9 +425,6 @@ func (w *Wrapper) ListenAddresses() []string {
|
||||
switch addr {
|
||||
case "default":
|
||||
addresses = append(addresses, DefaultListenAddresses...)
|
||||
if w.cfg.Options.DefaultKCPEnabled { // temporary feature flag
|
||||
addresses = append(addresses, DefaultKCPListenAddress)
|
||||
}
|
||||
default:
|
||||
addresses = append(addresses, addr)
|
||||
}
|
||||
@@ -452,3 +470,9 @@ func (w *Wrapper) MyName() string {
|
||||
cfg, _ := w.Device(myID)
|
||||
return cfg.Name
|
||||
}
|
||||
|
||||
// CheckHomeFreeSpace returns nil if the home disk has the required amount of
|
||||
// free space, or if home disk free space checking is disabled.
|
||||
func (w *Wrapper) CheckHomeFreeSpace() error {
|
||||
return checkFreeSpace(w.Options().MinHomeDiskFree, fs.NewFilesystem(fs.FilesystemTypeBasic, filepath.Dir(w.ConfigPath())))
|
||||
}
|
||||
|
||||
@@ -12,14 +12,16 @@ import (
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/AudriusButkevicius/kcp-go"
|
||||
"github.com/AudriusButkevicius/pfilter"
|
||||
"github.com/ccding/go-stun/stun"
|
||||
"github.com/xtaci/smux"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/nat"
|
||||
"github.com/xtaci/smux"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -38,6 +40,7 @@ type kcpListener struct {
|
||||
stop chan struct{}
|
||||
conns chan internalConn
|
||||
factory listenerFactory
|
||||
nat atomic.Value
|
||||
|
||||
address *url.URL
|
||||
err error
|
||||
@@ -183,12 +186,21 @@ func (t *kcpListener) Factory() listenerFactory {
|
||||
return t.factory
|
||||
}
|
||||
|
||||
func (t *kcpListener) NATType() string {
|
||||
v := t.nat.Load().(stun.NATType)
|
||||
if v == stun.NATUnknown || v == stun.NATError {
|
||||
return "unknown"
|
||||
}
|
||||
return v.String()
|
||||
}
|
||||
|
||||
func (t *kcpListener) stunRenewal(listener net.PacketConn) {
|
||||
client := stun.NewClientWithConnection(listener)
|
||||
client.SetSoftwareName("syncthing")
|
||||
|
||||
var natType stun.NATType
|
||||
var extAddr *stun.Host
|
||||
var udpAddr *net.UDPAddr
|
||||
var err error
|
||||
|
||||
oldType := stun.NATUnknown
|
||||
@@ -199,6 +211,7 @@ func (t *kcpListener) stunRenewal(listener net.PacketConn) {
|
||||
if t.cfg.Options().StunKeepaliveS < 1 {
|
||||
time.Sleep(time.Second)
|
||||
oldType = stun.NATUnknown
|
||||
t.nat.Store(stun.NATUnknown)
|
||||
t.mut.Lock()
|
||||
t.address = nil
|
||||
t.mut.Unlock()
|
||||
@@ -206,7 +219,17 @@ func (t *kcpListener) stunRenewal(listener net.PacketConn) {
|
||||
}
|
||||
|
||||
for _, addr := range t.cfg.StunServers() {
|
||||
client.SetServerAddr(addr)
|
||||
// Resolve the address, so that in case the server advertises two
|
||||
// IPs, we always hit the same one, as otherwise, the mapping might
|
||||
// expire as we hit the other address, and cause us to flip flop
|
||||
// between servers/external addresses, as a result flooding discovery
|
||||
// servers.
|
||||
udpAddr, err = net.ResolveUDPAddr("udp", addr)
|
||||
if err != nil {
|
||||
l.Debugf("%s stun addr resolution on %s: %s", t.uri, addr, err)
|
||||
continue
|
||||
}
|
||||
client.SetServerAddr(udpAddr.String())
|
||||
|
||||
natType, extAddr, err = client.Discover()
|
||||
if err != nil || extAddr == nil {
|
||||
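The comment in the hunk above explains why the STUN server address is resolved once up front: hitting a single resolved IP keeps the NAT mapping stable instead of flip-flopping between the server's addresses. A small illustration of that pinning, with a hypothetical server name; client is the go-stun client already created earlier in this function:

```go
udpAddr, err := net.ResolveUDPAddr("udp", "stun.example.com:3478")
if err != nil {
	// Unresolvable server – skip it and move on to the next configured one.
} else {
	// Reuse the single resolved IP:port for every request to this server,
	// so the mapping is refreshed against one external address only.
	client.SetServerAddr(udpAddr.String())
}
```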
@@ -222,6 +245,7 @@ func (t *kcpListener) stunRenewal(listener net.PacketConn) {
|
||||
|
||||
if oldType != natType {
|
||||
l.Infof("%s detected NAT type: %s", t.uri, natType)
|
||||
t.nat.Store(natType)
|
||||
}
|
||||
|
||||
for {
|
||||
@@ -273,7 +297,7 @@ func (t *kcpListener) stunRenewal(listener net.PacketConn) {
|
||||
type kcpListenerFactory struct{}
|
||||
|
||||
func (f *kcpListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
|
||||
return &kcpListener{
|
||||
l := &kcpListener{
|
||||
uri: fixupPort(uri, config.DefaultKCPPort),
|
||||
cfg: cfg,
|
||||
tlsCfg: tlsCfg,
|
||||
@@ -281,6 +305,8 @@ func (f *kcpListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.
|
||||
stop: make(chan struct{}),
|
||||
factory: f,
|
||||
}
|
||||
l.nat.Store(stun.NATUnknown)
|
||||
return l
|
||||
}
|
||||
|
||||
func (kcpListenerFactory) Enabled(cfg config.Configuration) bool {
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/AudriusButkevicius/kcp-go"
|
||||
"github.com/AudriusButkevicius/pfilter"
|
||||
"github.com/xtaci/smux"
|
||||
)
|
||||
@@ -24,6 +25,10 @@ var (
|
||||
filters filterList
|
||||
)
|
||||
|
||||
func init() {
|
||||
kcp.BlacklistDuration = 10 * time.Minute
|
||||
}
|
||||
|
||||
type filterList []*pfilter.PacketFilter
|
||||
|
||||
// Sort connections by whether they are unspecified or not, as connections
|
||||
|
||||
@@ -171,6 +171,10 @@ func (t *relayListener) String() string {
|
||||
return t.uri.String()
|
||||
}
|
||||
|
||||
func (t *relayListener) NATType() string {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
type relayListenerFactory struct{}
|
||||
|
||||
func (f *relayListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
|
||||
|
||||
@@ -135,6 +135,12 @@ func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *
	}
	cfg.Subscribe(service)

	raw := cfg.RawCopy()
	// Actually starts the listeners and NAT service
	// Need to start this before service.connect so that any dials that
	// try punch through already have a listener to cling on.
	service.CommitConfiguration(raw, raw)

	// There are several moving parts here; one routine per listening address
	// (handled in configuration changing) to handle incoming connections,
	// one routine to periodically attempt outgoing connections, one routine to
@@ -145,10 +151,6 @@ func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *
	service.Add(serviceFunc(service.handle))
	service.Add(service.listenerSupervisor)

	raw := cfg.RawCopy()
	// Actually starts the listeners and NAT service
	service.CommitConfiguration(raw, raw)

	return service
}

@@ -574,6 +576,18 @@ func (s *Service) Status() map[string]interface{} {
	return result
}

func (s *Service) NATType() string {
	s.listenersMut.RLock()
	defer s.listenersMut.RUnlock()
	for _, listener := range s.listeners {
		natType := listener.NATType()
		if natType != "unknown" {
			return natType
		}
	}
	return "unknown"
}

func (s *Service) getDialerFactory(cfg config.Configuration, uri *url.URL) (dialerFactory, error) {
	dialerFactory, ok := dialers[uri.Scheme]
	if !ok {
@@ -25,6 +25,7 @@ type Connection interface {
	protocol.Connection
	io.Closer
	Type() string
	Transport() string
	RemoteAddr() net.Addr
}

@@ -74,10 +75,27 @@ func (t connType) String() string {
	}
}

func (t connType) Transport() string {
	switch t {
	case connTypeRelayClient, connTypeRelayServer:
		return "relay"
	case connTypeTCPClient, connTypeTCPServer:
		return "tcp"
	case connTypeKCPClient, connTypeKCPServer:
		return "kcp"
	default:
		return "unknown"
	}
}

func (c internalConn) Type() string {
	return c.connType.String()
}

func (c internalConn) Transport() string {
	return c.connType.Transport()
}

func (c internalConn) String() string {
	return fmt.Sprintf("%s-%s/%s", c.LocalAddr(), c.RemoteAddr(), c.connType.String())
}
@@ -116,6 +134,7 @@ type genericListener interface {
	OnAddressesChanged(func(genericListener))
	String() string
	Factory() listenerFactory
	NATType() string
}

type Model interface {

@@ -176,6 +176,10 @@ func (t *tcpListener) Factory() listenerFactory {
	return t.factory
}

func (t *tcpListener) NATType() string {
	return "unknown"
}

type tcpListenerFactory struct{}

func (f *tcpListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
@@ -317,17 +317,6 @@ func (f fsFile) Stat() (FileInfo, error) {
	return fsFileInfo{info}, nil
}

func (f fsFile) Sync() error {
	err := f.File.Sync()
	// On Windows, fsyncing a directory returns a "handle is invalid"
	// So we swallow that and let things go through in order not to have to add
	// a separate way of syncing directories versus files.
	if err != nil && (runtime.GOOS != "windows" || !strings.Contains(err.Error(), "handle is invalid")) {
		return err
	}
	return nil
}

// fsFileInfo implements the fs.FileInfo interface on top of an os.FileInfo.
type fsFileInfo struct {
	os.FileInfo
@@ -7,12 +7,14 @@
package fs

import (
	"errors"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"syscall"
	"testing"
	"time"
)
@@ -484,3 +486,23 @@ func TestRooted(t *testing.T) {
		}
	}
}

func TestWatchErrorLinuxInterpretation(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("testing of linux specific error codes")
	}

	var errTooManyFiles syscall.Errno = 24
	var errNoSpace syscall.Errno = 28

	if !reachedMaxUserWatches(errTooManyFiles) {
		t.Errorf("Errno %v should be recognised to be about inotify limits.", errTooManyFiles)
	}
	if !reachedMaxUserWatches(errNoSpace) {
		t.Errorf("Errno %v should be recognised to be about inotify limits.", errNoSpace)
	}
	err := errors.New("Another error")
	if reachedMaxUserWatches(err) {
		t.Errorf("This error does not concern inotify limits: %#v", err)
	}
}
lib/fs/basicfs_watch.go (new file, 116 lines)
@@ -0,0 +1,116 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build !solaris,!darwin solaris,cgo darwin,cgo

package fs

import (
	"context"
	"errors"
	"path/filepath"

	"github.com/zillode/notify"
)

// Notify does not block on sending to channel, so the channel must be buffered.
// The actual number is magic.
// Not meant to be changed, but must be changeable for tests
var backendBuffer = 500

func (f *BasicFilesystem) Watch(name string, ignore Matcher, ctx context.Context, ignorePerms bool) (<-chan Event, error) {
	absName, err := f.rooted(name)
	if err != nil {
		return nil, err
	}

	absShouldIgnore := func(absPath string) bool {
		return ignore.ShouldIgnore(f.unrootedChecked(absPath))
	}

	outChan := make(chan Event)
	backendChan := make(chan notify.EventInfo, backendBuffer)

	eventMask := subEventMask
	if !ignorePerms {
		eventMask |= permEventMask
	}

	if err := notify.WatchWithFilter(filepath.Join(absName, "..."), backendChan, absShouldIgnore, eventMask); err != nil {
		notify.Stop(backendChan)
		if reachedMaxUserWatches(err) {
			err = errors.New("failed to install inotify handler. Please increase inotify limits, see https://github.com/syncthing/syncthing-inotify#troubleshooting-for-folders-with-many-files-on-linux for more information")
		}
		return nil, err
	}

	go f.watchLoop(name, absName, backendChan, outChan, ignore, ctx)

	return outChan, nil
}

func (f *BasicFilesystem) watchLoop(name string, absName string, backendChan chan notify.EventInfo, outChan chan<- Event, ignore Matcher, ctx context.Context) {
	for {
		// Detect channel overflow
		if len(backendChan) == backendBuffer {
		outer:
			for {
				select {
				case <-backendChan:
				default:
					break outer
				}
			}
			// When next scheduling a scan, do it on the entire folder as events have been lost.
			outChan <- Event{Name: name, Type: NonRemove}
			l.Debugln(f.Type(), f.URI(), "Watch: Event overflow, send \".\"")
		}

		select {
		case ev := <-backendChan:
			relPath := f.unrootedChecked(ev.Path())
			if ignore.ShouldIgnore(relPath) {
				l.Debugln(f.Type(), f.URI(), "Watch: Ignoring", relPath)
				continue
			}
			evType := f.eventType(ev.Event())
			select {
			case outChan <- Event{Name: relPath, Type: evType}:
				l.Debugln(f.Type(), f.URI(), "Watch: Sending", relPath, evType)
			case <-ctx.Done():
				notify.Stop(backendChan)
				l.Debugln(f.Type(), f.URI(), "Watch: Stopped")
				return
			}
		case <-ctx.Done():
			notify.Stop(backendChan)
			l.Debugln(f.Type(), f.URI(), "Watch: Stopped")
			return
		}
	}
}

func (f *BasicFilesystem) eventType(notifyType notify.Event) EventType {
	if notifyType&rmEventMask != 0 {
		return Remove
	}
	return NonRemove
}

// unrootedChecked returns the path relative to the folder root (same as
// unrooted). It panics if the given path is not a subpath and handles the
// special case when the given path is the folder root without a trailing
// pathseparator.
func (f *BasicFilesystem) unrootedChecked(absPath string) string {
	if absPath+string(PathSeparator) == f.root {
		return "."
	}
	relPath := f.unrooted(absPath)
	if relPath == absPath {
		panic("bug: Notify backend is processing a change outside of the watched path: " + absPath)
	}
	return relPath
}
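The buffered backendChan and the drain loop above implement a simple overflow policy: when the buffer fills up, discard the queued per-file events and fall back to one coarse event for the whole folder. A self-contained sketch of just that pattern, using a plain string channel rather than the notify types (helper and names are illustrative, not part of the diff):

```go
package main

import "fmt"

// drainAndCoalesce empties a full buffered channel without blocking and
// returns true, signalling that the caller should emit one coarse
// "rescan everything" event because individual events were lost.
func drainAndCoalesce(ch chan string, capacity int) bool {
	if len(ch) < capacity {
		return false
	}
outer:
	for {
		select {
		case <-ch:
		default:
			break outer
		}
	}
	return true
}

func main() {
	ch := make(chan string, 3)
	ch <- "a"
	ch <- "b"
	ch <- "c"
	if drainAndCoalesce(ch, 3) {
		fmt.Println("overflow: schedule a full folder scan")
	}
}
```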
lib/fs/basicfs_watch_errors_linux.go (new file, 18 lines)
@@ -0,0 +1,18 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build linux

package fs

import "syscall"

func reachedMaxUserWatches(err error) bool {
	if errno, ok := err.(syscall.Errno); ok {
		return errno == 24 || errno == 28
	}
	return false
}
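The magic values 24 and 28 are the Linux errno numbers for EMFILE (too many open files, returned when the inotify instance limit is exhausted) and ENOSPC (no space left on device, which inotify returns when max_user_watches is reached). A hedged sketch of the same check written against the named syscall constants, not part of the diff:

```go
// +build linux

package fs

import "syscall"

// reachedMaxUserWatchesNamed is equivalent to the check above, but spells out
// the errno constants instead of the raw values 24 (EMFILE) and 28 (ENOSPC).
func reachedMaxUserWatchesNamed(err error) bool {
	if errno, ok := err.(syscall.Errno); ok {
		return errno == syscall.EMFILE || errno == syscall.ENOSPC
	}
	return false
}
```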
lib/fs/basicfs_watch_errors_others.go (new file, 13 lines)
@@ -0,0 +1,13 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build !linux

package fs

func reachedMaxUserWatches(err error) bool {
	return false
}
lib/fs/basicfs_watch_eventtypes_fen.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Copyright (C) 2017 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build solaris,cgo
|
||||
|
||||
package fs
|
||||
|
||||
import "github.com/zillode/notify"
|
||||
|
||||
const (
|
||||
subEventMask = notify.Create | notify.FileModified | notify.FileRenameFrom | notify.FileDelete | notify.FileRenameTo
|
||||
permEventMask = notify.FileAttrib
|
||||
rmEventMask = notify.FileDelete | notify.FileRenameFrom
|
||||
)
|
||||
lib/fs/basicfs_watch_eventtypes_inotify.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Copyright (C) 2017 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fs
|
||||
|
||||
import "github.com/zillode/notify"
|
||||
|
||||
const (
|
||||
subEventMask = notify.InCreate | notify.InMovedTo | notify.InDelete | notify.InDeleteSelf | notify.InModify | notify.InMovedFrom | notify.InMoveSelf
|
||||
permEventMask = notify.InAttrib
|
||||
rmEventMask = notify.InDelete | notify.InDeleteSelf | notify.InMovedFrom | notify.InMoveSelf
|
||||
)
|
||||
lib/fs/basicfs_watch_eventtypes_kqueue.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Copyright (C) 2017 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build dragonfly freebsd netbsd openbsd
|
||||
|
||||
package fs
|
||||
|
||||
import "github.com/zillode/notify"
|
||||
|
||||
const (
|
||||
subEventMask = notify.NoteDelete | notify.NoteWrite | notify.NoteRename
|
||||
permEventMask = notify.NoteAttrib
|
||||
rmEventMask = notify.NoteDelete | notify.NoteRename
|
||||
)
|
||||
lib/fs/basicfs_watch_eventtypes_other.go (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
// Copyright (C) 2017 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build !linux,!windows,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris
|
||||
// +build !darwin darwin,cgo
|
||||
|
||||
// Catch all platforms that are not specifically handled to use the generic
|
||||
// event types.
|
||||
|
||||
package fs
|
||||
|
||||
import "github.com/zillode/notify"
|
||||
|
||||
const (
|
||||
subEventMask = notify.All
|
||||
permEventMask = 0
|
||||
rmEventMask = notify.Remove | notify.Rename
|
||||
)
|
||||
lib/fs/basicfs_watch_eventtypes_readdcw.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Copyright (C) 2017 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fs
|
||||
|
||||
import "github.com/zillode/notify"
|
||||
|
||||
const (
|
||||
subEventMask = notify.FileNotifyChangeFileName | notify.FileNotifyChangeDirName | notify.FileNotifyChangeSize | notify.FileNotifyChangeCreation
|
||||
permEventMask = notify.FileNotifyChangeAttributes
|
||||
rmEventMask = notify.FileActionRemoved | notify.FileActionRenamedOldName
|
||||
)
|
||||
lib/fs/basicfs_watch_test.go (new file, 295 lines)
@@ -0,0 +1,295 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build !solaris,!darwin solaris,cgo darwin,cgo
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/zillode/notify"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if err := os.RemoveAll(testDir); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
dir, err := filepath.Abs(".")
|
||||
if err != nil {
|
||||
panic("Cannot get absolute path to working dir")
|
||||
}
|
||||
dir, err = filepath.EvalSymlinks(dir)
|
||||
if err != nil {
|
||||
panic("Cannot get real path to working dir")
|
||||
}
|
||||
testDirAbs = filepath.Join(dir, testDir)
|
||||
testFs = newBasicFilesystem(testDirAbs)
|
||||
if l.ShouldDebug("filesystem") {
|
||||
testFs = &logFilesystem{testFs}
|
||||
}
|
||||
|
||||
backendBuffer = 10
|
||||
defer func() {
|
||||
backendBuffer = 500
|
||||
}()
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
const (
|
||||
testDir = "temporary_test_root"
|
||||
)
|
||||
|
||||
var (
|
||||
testDirAbs string
|
||||
testFs Filesystem
|
||||
)
|
||||
|
||||
func TestWatchIgnore(t *testing.T) {
|
||||
name := "ignore"
|
||||
|
||||
file := "file"
|
||||
ignored := "ignored"
|
||||
|
||||
testCase := func() {
|
||||
createTestFile(name, file)
|
||||
createTestFile(name, ignored)
|
||||
}
|
||||
|
||||
expectedEvents := []Event{
|
||||
{file, NonRemove},
|
||||
}
|
||||
|
||||
testScenario(t, name, testCase, expectedEvents, false, ignored)
|
||||
}
|
||||
|
||||
func TestWatchRename(t *testing.T) {
|
||||
name := "rename"
|
||||
|
||||
old := createTestFile(name, "oldfile")
|
||||
new := "newfile"
|
||||
|
||||
testCase := func() {
|
||||
renameTestFile(name, old, new)
|
||||
}
|
||||
|
||||
destEvent := Event{new, Remove}
|
||||
// Only on these platforms can the removed file be differentiated from
// the created file during renaming
|
||||
if runtime.GOOS == "windows" || runtime.GOOS == "linux" || runtime.GOOS == "solaris" {
|
||||
destEvent = Event{new, NonRemove}
|
||||
}
|
||||
expectedEvents := []Event{
|
||||
{old, Remove},
|
||||
destEvent,
|
||||
}
|
||||
|
||||
testScenario(t, name, testCase, expectedEvents, false, "")
|
||||
}
|
||||
|
||||
// TestWatchOutside checks that no changes from outside the folder make it in
|
||||
func TestWatchOutside(t *testing.T) {
|
||||
outChan := make(chan Event)
|
||||
backendChan := make(chan notify.EventInfo, backendBuffer)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// testFs is Filesystem, but we need BasicFilesystem here
|
||||
fs := newBasicFilesystem(testDirAbs)
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if recover() == nil {
|
||||
t.Fatalf("Watch did not panic on receiving event outside of folder")
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
fs.watchLoop(".", testDirAbs, backendChan, outChan, fakeMatcher{}, ctx)
|
||||
}()
|
||||
|
||||
backendChan <- fakeEventInfo(filepath.Join(filepath.Dir(testDirAbs), "outside"))
|
||||
}
|
||||
|
||||
func TestWatchSubpath(t *testing.T) {
|
||||
outChan := make(chan Event)
|
||||
backendChan := make(chan notify.EventInfo, backendBuffer)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// testFs is Filesystem, but we need BasicFilesystem here
|
||||
fs := newBasicFilesystem(testDirAbs)
|
||||
|
||||
abs, _ := fs.rooted("sub")
|
||||
go fs.watchLoop("sub", abs, backendChan, outChan, fakeMatcher{}, ctx)
|
||||
|
||||
backendChan <- fakeEventInfo(filepath.Join(abs, "file"))
|
||||
|
||||
timeout := time.NewTimer(2 * time.Second)
|
||||
select {
|
||||
case <-timeout.C:
|
||||
t.Errorf("Timed out before receiving an event")
|
||||
cancel()
|
||||
case ev := <-outChan:
|
||||
if ev.Name != filepath.Join("sub", "file") {
|
||||
t.Errorf("While watching a subfolder, received an event with unexpected path %v", ev.Name)
|
||||
}
|
||||
}
|
||||
|
||||
cancel()
|
||||
}
|
||||
|
||||
// TestWatchOverflow checks that an event at the root is sent when maxFiles is reached
|
||||
func TestWatchOverflow(t *testing.T) {
|
||||
name := "overflow"
|
||||
|
||||
testCase := func() {
|
||||
for i := 0; i < 5*backendBuffer; i++ {
|
||||
createTestFile(name, "file"+strconv.Itoa(i))
|
||||
}
|
||||
}
|
||||
|
||||
expectedEvents := []Event{
|
||||
{".", NonRemove},
|
||||
}
|
||||
|
||||
testScenario(t, name, testCase, expectedEvents, true, "")
|
||||
}
|
||||
|
||||
// path relative to folder root, also creates parent dirs if necessary
|
||||
func createTestFile(name string, file string) string {
|
||||
joined := filepath.Join(name, file)
|
||||
if err := testFs.MkdirAll(filepath.Dir(joined), 0755); err != nil {
|
||||
panic(fmt.Sprintf("Failed to create parent directory for %s: %s", joined, err))
|
||||
}
|
||||
handle, err := testFs.Create(joined)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to create test file %s: %s", joined, err))
|
||||
}
|
||||
handle.Close()
|
||||
return file
|
||||
}
|
||||
|
||||
func renameTestFile(name string, old string, new string) {
|
||||
old = filepath.Join(name, old)
|
||||
new = filepath.Join(name, new)
|
||||
if err := testFs.Rename(old, new); err != nil {
|
||||
panic(fmt.Sprintf("Failed to rename %s to %s: %s", old, new, err))
|
||||
}
|
||||
}
|
||||
|
||||
func sleepMs(ms int) {
|
||||
time.Sleep(time.Duration(ms) * time.Millisecond)
|
||||
}
|
||||
|
||||
func testScenario(t *testing.T, name string, testCase func(), expectedEvents []Event, allowOthers bool, ignored string) {
|
||||
if err := testFs.MkdirAll(name, 0755); err != nil {
|
||||
panic(fmt.Sprintf("Failed to create directory %s: %s", name, err))
|
||||
}
|
||||
|
||||
// Tests pick up the previously created files/dirs, probably because
|
||||
// they get flushed to disk with a delay.
|
||||
initDelayMs := 500
|
||||
if runtime.GOOS == "darwin" {
|
||||
initDelayMs = 2000
|
||||
}
|
||||
sleepMs(initDelayMs)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
if ignored != "" {
|
||||
ignored = filepath.Join(name, ignored)
|
||||
}
|
||||
|
||||
eventChan, err := testFs.Watch(name, fakeMatcher{ignored}, ctx, false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
go testWatchOutput(t, name, eventChan, expectedEvents, allowOthers, ctx, cancel)
|
||||
|
||||
timeout := time.NewTimer(2 * time.Second)
|
||||
|
||||
testCase()
|
||||
|
||||
select {
|
||||
case <-timeout.C:
|
||||
t.Errorf("Timed out before receiving all expected events")
|
||||
cancel()
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
if err := testFs.RemoveAll(name); err != nil {
|
||||
panic(fmt.Sprintf("Failed to remove directory %s: %s", name, err))
|
||||
}
|
||||
}
|
||||
|
||||
func testWatchOutput(t *testing.T, name string, in <-chan Event, expectedEvents []Event, allowOthers bool, ctx context.Context, cancel context.CancelFunc) {
|
||||
var expected = make(map[Event]struct{})
|
||||
for _, ev := range expectedEvents {
|
||||
ev.Name = filepath.Join(name, ev.Name)
|
||||
expected[ev] = struct{}{}
|
||||
}
|
||||
|
||||
var received Event
|
||||
var last Event
|
||||
for {
|
||||
if len(expected) == 0 {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case received = <-in:
|
||||
}
|
||||
|
||||
// apparently the backend sometimes sends repeat events
|
||||
if last == received {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := expected[received]; !ok {
|
||||
if allowOthers {
|
||||
sleepMs(100) // To facilitate overflow
|
||||
continue
|
||||
}
|
||||
t.Errorf("Received unexpected event %v expected one of %v", received, expected)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
delete(expected, received)
|
||||
last = received
|
||||
}
|
||||
}
|
||||
|
||||
type fakeMatcher struct{ match string }
|
||||
|
||||
func (fm fakeMatcher) ShouldIgnore(name string) bool {
|
||||
return name == fm.match
|
||||
}
|
||||
|
||||
type fakeEventInfo string
|
||||
|
||||
func (e fakeEventInfo) Path() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
func (e fakeEventInfo) Event() notify.Event {
|
||||
return notify.Write
|
||||
}
|
||||
|
||||
func (e fakeEventInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
lib/fs/basicfs_watch_unsupported.go (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build solaris,!cgo darwin,!cgo
|
||||
|
||||
package fs
|
||||
|
||||
import "context"
|
||||
|
||||
func (f *BasicFilesystem) Watch(path string, ignore Matcher, ctx context.Context, ignorePerms bool) (<-chan Event, error) {
|
||||
return nil, ErrWatchNotSupported
|
||||
}
|
||||
@@ -6,7 +6,10 @@
|
||||
|
||||
package fs
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
type errorFilesystem struct {
|
||||
err error
|
||||
@@ -39,3 +42,6 @@ func (fs *errorFilesystem) Roots() ([]string, error)
|
||||
func (fs *errorFilesystem) Usage(name string) (Usage, error) { return Usage{}, fs.err }
|
||||
func (fs *errorFilesystem) Type() FilesystemType { return fs.fsType }
|
||||
func (fs *errorFilesystem) URI() string { return fs.uri }
|
||||
func (fs *errorFilesystem) Watch(path string, ignore Matcher, ctx context.Context, ignorePerms bool) (<-chan Event, error) {
|
||||
return nil, fs.err
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
@@ -33,7 +34,8 @@ type Filesystem interface {
|
||||
Rename(oldname, newname string) error
|
||||
Stat(name string) (FileInfo, error)
|
||||
SymlinksSupported() bool
|
||||
Walk(root string, walkFn WalkFunc) error
|
||||
Walk(name string, walkFn WalkFunc) error
|
||||
Watch(path string, ignore Matcher, ctx context.Context, ignorePerms bool) (<-chan Event, error)
|
||||
Hide(name string) error
|
||||
Unhide(name string) error
|
||||
Glob(pattern string) ([]string, error)
|
||||
@@ -82,6 +84,42 @@ type Usage struct {
|
||||
Total int64
|
||||
}
|
||||
|
||||
type Matcher interface {
|
||||
ShouldIgnore(name string) bool
|
||||
}
|
||||
|
||||
type MatchResult interface {
|
||||
IsIgnored() bool
|
||||
}
|
||||
|
||||
type Event struct {
|
||||
Name string
|
||||
Type EventType
|
||||
}
|
||||
|
||||
type EventType int
|
||||
|
||||
const (
|
||||
NonRemove EventType = 1 + iota
|
||||
Remove
|
||||
Mixed // Should generally not be needed in filesystem interface implementations
|
||||
)
|
||||
|
||||
func (evType EventType) String() string {
|
||||
switch {
|
||||
case evType == NonRemove:
|
||||
return "non-remove"
|
||||
case evType == Remove:
|
||||
return "remove"
|
||||
case evType == Mixed:
|
||||
return "mixed"
|
||||
default:
|
||||
panic("bug: Unknown event type")
|
||||
}
|
||||
}
|
||||
|
||||
var ErrWatchNotSupported = errors.New("watching is not supported")
|
||||
|
||||
// Equivalents from os package.
|
||||
|
||||
const ModePerm = FileMode(os.ModePerm)
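For readers of the Filesystem interface and Event types above, a hedged sketch of how a caller might consume the Watch API; the helper name and the forwarding channel are assumptions for illustration, not part of the diff:

```go
package fs

import "context"

// watchFolder is a hypothetical consumer of the Watch API above: it forwards
// events until the context is cancelled or Watch fails.
func watchFolder(ctx context.Context, fsys Filesystem, ignore Matcher, changed chan<- Event) error {
	events, err := fsys.Watch(".", ignore, ctx, false)
	if err != nil {
		return err
	}
	for {
		select {
		case ev := <-events:
			// ev.Name is relative to the filesystem root; ev.Type is
			// Remove, NonRemove or Mixed.
			changed <- ev
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
```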
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
@@ -127,6 +128,12 @@ func (fs *logFilesystem) Walk(root string, walkFn WalkFunc) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (fs *logFilesystem) Watch(path string, ignore Matcher, ctx context.Context, ignorePerms bool) (<-chan Event, error) {
|
||||
evChan, err := fs.Filesystem.Watch(path, ignore, ctx, ignorePerms)
|
||||
l.Debugln(getCaller(), fs.Type(), fs.URI(), "Watch", path, ignore, ignorePerms, err)
|
||||
return evChan, err
|
||||
}
|
||||
|
||||
func (fs *logFilesystem) Unhide(name string) error {
|
||||
err := fs.Filesystem.Unhide(name)
|
||||
l.Debugln(getCaller(), fs.Type(), fs.URI(), "Unhide", name, err)
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/watchaggregator"
|
||||
)
|
||||
|
||||
type folder struct {
|
||||
@@ -22,6 +23,9 @@ type folder struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
initialScanFinished chan struct{}
|
||||
watchCancel context.CancelFunc
|
||||
watchChan chan []string
|
||||
restartWatchChan chan struct{}
|
||||
}
|
||||
|
||||
func newFolder(model *Model, cfg config.FolderConfiguration) folder {
|
||||
@@ -36,13 +40,27 @@ func newFolder(model *Model, cfg config.FolderConfiguration) folder {
|
||||
cancel: cancel,
|
||||
model: model,
|
||||
initialScanFinished: make(chan struct{}),
|
||||
watchCancel: func() {},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *folder) BringToFront(string) {}
|
||||
|
||||
func (f *folder) DelayScan(next time.Duration) {
|
||||
f.scan.Delay(next)
|
||||
}
|
||||
|
||||
func (f *folder) IndexUpdated() {
|
||||
}
|
||||
func (f *folder) DelayScan(next time.Duration) {
|
||||
f.scan.Delay(next)
|
||||
|
||||
func (f *folder) IgnoresUpdated() {
|
||||
if f.FSWatcherEnabled {
|
||||
f.scheduleWatchRestart()
|
||||
}
|
||||
}
|
||||
|
||||
func (f *folder) Jobs() ([]string, []string) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *folder) Scan(subdirs []string) error {
|
||||
@@ -54,17 +72,42 @@ func (f *folder) Stop() {
|
||||
f.cancel()
|
||||
}
|
||||
|
||||
func (f *folder) Jobs() ([]string, []string) {
|
||||
return nil, nil
|
||||
func (f *folder) BlockStats() map[string]int {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *folder) BringToFront(string) {}
|
||||
// CheckHealth checks the folder for common errors, updates the folder state
|
||||
// and returns the current folder error, or nil if the folder is healthy.
|
||||
func (f *folder) CheckHealth() error {
|
||||
err := f.getHealthError()
|
||||
f.setError(err)
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *folder) getHealthError() error {
|
||||
// Check for folder errors, with the most serious and specific first and
|
||||
// generic ones like out of space on the home disk later.
|
||||
|
||||
if err := f.CheckPath(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := f.CheckFreeSpace(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := f.model.cfg.CheckHomeFreeSpace(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *folder) scanSubdirs(subDirs []string) error {
|
||||
if err := f.model.internalScanFolderSubdirs(f.ctx, f.folderID, subDirs); err != nil {
|
||||
// Potentially sets the error twice, once in the scanner just
|
||||
// by doing a check, and once here, if the error returned is
|
||||
// the same one as returned by CheckFolderHealth, though
|
||||
// the same one as returned by CheckHealth, though
|
||||
// duplicate set is handled by setError.
|
||||
f.setError(err)
|
||||
return err
|
||||
@@ -88,3 +131,62 @@ func (f *folder) scanTimerFired() {
|
||||
|
||||
f.scan.Reschedule()
|
||||
}
|
||||
|
||||
func (f *folder) startWatch() {
|
||||
ctx, cancel := context.WithCancel(f.ctx)
|
||||
f.model.fmut.RLock()
|
||||
ignores := f.model.folderIgnores[f.folderID]
|
||||
f.model.fmut.RUnlock()
|
||||
eventChan, err := f.Filesystem().Watch(".", ignores, ctx, f.IgnorePerms)
|
||||
if err != nil {
|
||||
l.Warnf("Failed to start filesystem watcher for folder %s: %v", f.Description(), err)
|
||||
} else {
|
||||
f.watchChan = make(chan []string)
|
||||
f.watchCancel = cancel
|
||||
watchaggregator.Aggregate(eventChan, f.watchChan, f.FolderConfiguration, f.model.cfg, ctx)
|
||||
l.Infoln("Started filesystem watcher for folder", f.Description())
|
||||
}
|
||||
}
|
||||
|
||||
func (f *folder) restartWatch() {
|
||||
f.watchCancel()
|
||||
f.startWatch()
|
||||
f.Scan(nil)
|
||||
}
|
||||
|
||||
func (f *folder) scheduleWatchRestart() {
|
||||
select {
|
||||
case f.restartWatchChan <- struct{}{}:
|
||||
default:
|
||||
// We might be busy doing a pull and thus not reading from this
|
||||
// channel. The channel is 1-buffered, so one notification will be
|
||||
// queued to ensure we recheck after the pull.
|
||||
}
|
||||
}
|
||||
|
||||
func (f *folder) setError(err error) {
|
||||
_, _, oldErr := f.getState()
|
||||
if (err != nil && oldErr != nil && oldErr.Error() == err.Error()) || (err == nil && oldErr == nil) {
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if oldErr == nil {
|
||||
l.Warnf("Error on folder %s: %v", f.Description(), err)
|
||||
} else {
|
||||
l.Infof("Error on folder %s changed: %q -> %q", f.Description(), oldErr, err)
|
||||
}
|
||||
} else {
|
||||
l.Infoln("Cleared error on folder", f.Description())
|
||||
}
|
||||
|
||||
if f.FSWatcherEnabled {
|
||||
if err != nil {
|
||||
f.watchCancel()
|
||||
} else {
|
||||
f.scheduleWatchRestart()
|
||||
}
|
||||
}
|
||||
|
||||
f.stateTracker.setError(err)
|
||||
}
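scheduleWatchRestart above relies on a common Go idiom: a non-blocking send into a 1-buffered channel, so at most one restart request is queued while the receiver is busy and duplicates are dropped. A minimal standalone sketch of that idiom (names are illustrative):

```go
package main

import "fmt"

// notifyOnce queues at most one pending notification; extra requests are
// dropped because the single buffered slot already guarantees a recheck.
func notifyOnce(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	restart := make(chan struct{}, 1)
	notifyOnce(restart) // queued
	notifyOnce(restart) // dropped, a recheck is already pending
	fmt.Println("pending restarts:", len(restart))
}
```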
|
||||
|
||||
@@ -94,49 +94,32 @@ func (s *stateTracker) getState() (current folderState, changed time.Time, err e
|
||||
return
|
||||
}
|
||||
|
||||
// setError sets the folder state to FolderError with the specified error.
|
||||
// setError sets the folder state to FolderError with the specified error or
|
||||
// to FolderIdle if the error is nil
|
||||
func (s *stateTracker) setError(err error) {
|
||||
s.mut.Lock()
|
||||
if s.current != FolderError || s.err.Error() != err.Error() {
|
||||
eventData := map[string]interface{}{
|
||||
"folder": s.folderID,
|
||||
"to": FolderError.String(),
|
||||
"from": s.current.String(),
|
||||
"error": err.Error(),
|
||||
}
|
||||
defer s.mut.Unlock()
|
||||
|
||||
if !s.changed.IsZero() {
|
||||
eventData["duration"] = time.Since(s.changed).Seconds()
|
||||
}
|
||||
eventData := map[string]interface{}{
|
||||
"folder": s.folderID,
|
||||
"from": s.current.String(),
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
eventData["error"] = err.Error()
|
||||
s.current = FolderError
|
||||
s.err = err
|
||||
s.changed = time.Now()
|
||||
|
||||
events.Default.Log(events.StateChanged, eventData)
|
||||
}
|
||||
s.mut.Unlock()
|
||||
}
|
||||
|
||||
// clearError sets the folder state to FolderIdle and clears the error
|
||||
func (s *stateTracker) clearError() {
|
||||
s.mut.Lock()
|
||||
if s.current == FolderError {
|
||||
eventData := map[string]interface{}{
|
||||
"folder": s.folderID,
|
||||
"to": FolderIdle.String(),
|
||||
"from": s.current.String(),
|
||||
}
|
||||
|
||||
if !s.changed.IsZero() {
|
||||
eventData["duration"] = time.Since(s.changed).Seconds()
|
||||
}
|
||||
|
||||
} else {
|
||||
s.current = FolderIdle
|
||||
s.err = nil
|
||||
s.changed = time.Now()
|
||||
|
||||
events.Default.Log(events.StateChanged, eventData)
|
||||
}
|
||||
s.mut.Unlock()
|
||||
|
||||
eventData["to"] = s.current.String()
|
||||
|
||||
if !s.changed.IsZero() {
|
||||
eventData["duration"] = time.Since(s.changed).Seconds()
|
||||
}
|
||||
|
||||
s.err = err
|
||||
s.changed = time.Now()
|
||||
|
||||
events.Default.Log(events.StateChanged, eventData)
|
||||
}
|
||||
|
||||
@@ -48,14 +48,16 @@ type service interface {
|
||||
BringToFront(string)
|
||||
DelayScan(d time.Duration)
|
||||
IndexUpdated() // Remote index was updated notification
|
||||
IgnoresUpdated() // ignore matcher was updated notification
|
||||
Jobs() ([]string, []string) // In progress, Queued
|
||||
Scan(subs []string) error
|
||||
Serve()
|
||||
Stop()
|
||||
BlockStats() map[string]int
|
||||
CheckHealth() error
|
||||
|
||||
getState() (folderState, time.Time, error)
|
||||
setState(state folderState)
|
||||
clearError()
|
||||
setError(err error)
|
||||
}
|
||||
|
||||
@@ -80,7 +82,6 @@ type Model struct {
|
||||
clientVersion string
|
||||
|
||||
folderCfgs map[string]config.FolderConfiguration // folder -> cfg
|
||||
folderFs map[string]fs.Filesystem // folder -> fs
|
||||
folderFiles map[string]*db.FileSet // folder -> files
|
||||
folderDevices folderDeviceSet // folder -> deviceIDs
|
||||
deviceFolders map[protocol.DeviceID][]string // deviceID -> folders
|
||||
@@ -106,14 +107,12 @@ var (
|
||||
)
|
||||
|
||||
var (
|
||||
errFolderPathMissing = errors.New("folder path missing")
|
||||
errFolderMarkerMissing = errors.New("folder marker missing")
|
||||
errDeviceUnknown = errors.New("unknown device")
|
||||
errDevicePaused = errors.New("device is paused")
|
||||
errDeviceIgnored = errors.New("device is ignored")
|
||||
errFolderPaused = errors.New("folder is paused")
|
||||
errFolderMissing = errors.New("no such folder")
|
||||
errNetworkNotAllowed = errors.New("network not allowed")
|
||||
errDeviceUnknown = errors.New("unknown device")
|
||||
errDevicePaused = errors.New("device is paused")
|
||||
errDeviceIgnored = errors.New("device is ignored")
|
||||
errFolderPaused = errors.New("folder is paused")
|
||||
errFolderMissing = errors.New("no such folder")
|
||||
errNetworkNotAllowed = errors.New("network not allowed")
|
||||
)
|
||||
|
||||
// NewModel creates and starts a new model. The model starts in read-only mode,
|
||||
@@ -137,7 +136,6 @@ func NewModel(cfg *config.Wrapper, id protocol.DeviceID, clientName, clientVersi
|
||||
clientName: clientName,
|
||||
clientVersion: clientVersion,
|
||||
folderCfgs: make(map[string]config.FolderConfiguration),
|
||||
folderFs: make(map[string]fs.Filesystem),
|
||||
folderFiles: make(map[string]*db.FileSet),
|
||||
folderDevices: make(folderDeviceSet),
|
||||
deviceFolders: make(map[protocol.DeviceID][]string),
|
||||
@@ -253,7 +251,15 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
|
||||
}
|
||||
}
|
||||
|
||||
p := folderFactory(m, cfg, ver, fs.MtimeFS())
|
||||
ffs := fs.MtimeFS()
|
||||
|
||||
// These are our metadata files, and they should always be hidden.
|
||||
ffs.Hide(".stfolder")
|
||||
ffs.Hide(".stversions")
|
||||
ffs.Hide(".stignore")
|
||||
|
||||
p := folderFactory(m, cfg, ver, ffs)
|
||||
|
||||
m.folderRunners[folder] = p
|
||||
|
||||
m.warnAboutOverwritingProtectedFiles(folder)
|
||||
@@ -329,18 +335,15 @@ func (m *Model) addFolderLocked(cfg config.FolderConfiguration) {
|
||||
m.folderIgnores[cfg.ID] = ignores
|
||||
}
|
||||
|
||||
func (m *Model) RemoveFolder(folder string) {
|
||||
func (m *Model) RemoveFolder(cfg config.FolderConfiguration) {
|
||||
m.fmut.Lock()
|
||||
m.pmut.Lock()
|
||||
|
||||
// Delete syncthing specific files
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
fs := folderCfg.Filesystem()
|
||||
fs.RemoveAll(".stfolder")
|
||||
cfg.Filesystem().RemoveAll(".stfolder")
|
||||
|
||||
m.tearDownFolderLocked(folder)
|
||||
m.tearDownFolderLocked(cfg.ID)
|
||||
// Remove it from the database
|
||||
db.DropFolder(m.db, folder)
|
||||
db.DropFolder(m.db, cfg.ID)
|
||||
|
||||
m.pmut.Unlock()
|
||||
m.fmut.Unlock()
|
||||
@@ -393,6 +396,105 @@ func (m *Model) RestartFolder(cfg config.FolderConfiguration) {
|
||||
m.fmut.Unlock()
|
||||
}
|
||||
|
||||
func (m *Model) UsageReportingStats(version int) map[string]interface{} {
|
||||
stats := make(map[string]interface{})
|
||||
if version >= 3 {
|
||||
// Block stats
|
||||
m.fmut.Lock()
|
||||
blockStats := make(map[string]int)
|
||||
for _, folder := range m.folderRunners {
|
||||
for k, v := range folder.BlockStats() {
|
||||
blockStats[k] += v
|
||||
}
|
||||
}
|
||||
m.fmut.Unlock()
|
||||
stats["blockStats"] = blockStats
|
||||
|
||||
// Transport stats
|
||||
m.pmut.Lock()
|
||||
transportStats := make(map[string]int)
|
||||
for _, conn := range m.conn {
|
||||
transportStats[conn.Transport()]++
|
||||
}
|
||||
m.pmut.Unlock()
|
||||
stats["transportStats"] = transportStats
|
||||
|
||||
// Ignore stats
|
||||
ignoreStats := map[string]int{
|
||||
"lines": 0,
|
||||
"inverts": 0,
|
||||
"folded": 0,
|
||||
"deletable": 0,
|
||||
"rooted": 0,
|
||||
"includes": 0,
|
||||
"escapedIncludes": 0,
|
||||
"doubleStars": 0,
|
||||
"stars": 0,
|
||||
}
|
||||
var seenPrefix [3]bool
|
||||
for folder := range m.cfg.Folders() {
|
||||
lines, _, err := m.GetIgnores(folder)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
ignoreStats["lines"] += len(lines)
|
||||
|
||||
for _, line := range lines {
|
||||
// Allow prefixes to be specified in any order, but only once.
|
||||
for {
|
||||
if strings.HasPrefix(line, "!") && !seenPrefix[0] {
|
||||
seenPrefix[0] = true
|
||||
line = line[1:]
|
||||
ignoreStats["inverts"] += 1
|
||||
} else if strings.HasPrefix(line, "(?i)") && !seenPrefix[1] {
|
||||
seenPrefix[1] = true
|
||||
line = line[4:]
|
||||
ignoreStats["folded"] += 1
|
||||
} else if strings.HasPrefix(line, "(?d)") && !seenPrefix[2] {
|
||||
seenPrefix[2] = true
|
||||
line = line[4:]
|
||||
ignoreStats["deletable"] += 1
|
||||
} else {
|
||||
seenPrefix[0] = false
|
||||
seenPrefix[1] = false
|
||||
seenPrefix[2] = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Noops, remove
|
||||
if strings.HasSuffix(line, "**") {
|
||||
line = line[:len(line)-2]
|
||||
}
|
||||
if strings.HasPrefix(line, "**/") {
|
||||
line = line[3:]
|
||||
}
|
||||
|
||||
if strings.HasPrefix(line, "/") {
|
||||
ignoreStats["rooted"] += 1
|
||||
} else if strings.HasPrefix(line, "#include ") {
|
||||
ignoreStats["includes"] += 1
|
||||
if strings.Contains(line, "..") {
|
||||
ignoreStats["escapedIncludes"] += 1
|
||||
}
|
||||
}
|
||||
|
||||
if strings.Contains(line, "**") {
|
||||
ignoreStats["doubleStars"] += 1
|
||||
// Remove not to trip up star checks.
|
||||
strings.Replace(line, "**", "", -1)
|
||||
}
|
||||
|
||||
if strings.Contains(line, "*") {
|
||||
ignoreStats["stars"] += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
stats["ignoreStats"] = ignoreStats
|
||||
}
|
||||
return stats
|
||||
}
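The prefix loop in UsageReportingStats above strips each of !, (?i) and (?d) at most once, in any order, before classifying the rest of the pattern. A small standalone sketch of just that prefix-stripping step; the helper is illustrative and not the function the diff adds:

```go
package main

import (
	"fmt"
	"strings"
)

// stripIgnorePrefixes removes the !, (?i) and (?d) prefixes from one ignore
// pattern, each at most once and in any order, and counts what it saw.
func stripIgnorePrefixes(line string) (string, map[string]int) {
	counts := map[string]int{}
	var seen [3]bool
	for {
		switch {
		case strings.HasPrefix(line, "!") && !seen[0]:
			seen[0], line = true, line[1:]
			counts["inverts"]++
		case strings.HasPrefix(line, "(?i)") && !seen[1]:
			seen[1], line = true, line[4:]
			counts["folded"]++
		case strings.HasPrefix(line, "(?d)") && !seen[2]:
			seen[2], line = true, line[4:]
			counts["deletable"]++
		default:
			return line, counts
		}
	}
}

func main() {
	rest, counts := stripIgnorePrefixes("(?d)!(?i)/build/**")
	fmt.Println(rest, counts)
}
```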
|
||||
|
||||
type ConnectionInfo struct {
|
||||
protocol.Statistics
|
||||
Connected bool
|
||||
@@ -1257,7 +1359,7 @@ func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if err := m.checkFolderPath(cfg); err != nil {
|
||||
if err := cfg.CheckPath(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -1687,7 +1789,7 @@ func (m *Model) ScanFolders() map[string]error {
|
||||
|
||||
// Potentially sets the error twice, once in the scanner just
|
||||
// by doing a check, and once here, if the error returned is
|
||||
// the same one as returned by CheckFolderHealth, though
|
||||
// the same one as returned by CheckHealth, though
|
||||
// duplicate set is handled by setError.
|
||||
m.fmut.RLock()
|
||||
srv := m.folderRunners[folder]
|
||||
@@ -1756,7 +1858,7 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
|
||||
defer func() {
|
||||
if ignores.Hash() != oldHash {
|
||||
l.Debugln("Folder", folder, "ignore patterns changed; triggering puller")
|
||||
runner.IndexUpdated()
|
||||
runner.IgnoresUpdated()
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -1767,16 +1869,13 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
|
||||
return errFolderMissing
|
||||
}
|
||||
|
||||
if err := m.CheckFolderHealth(folder); err != nil {
|
||||
runner.setError(err)
|
||||
l.Infof("Stopping folder %s due to error: %s", folderCfg.Description(), err)
|
||||
if err := runner.CheckHealth(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
|
||||
err = fmt.Errorf("loading ignores: %v", err)
|
||||
runner.setError(err)
|
||||
l.Infof("Stopping folder %s due to error: %s", folderCfg.Description(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1810,7 +1909,7 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
|
||||
// The error we get here is likely an OS level error, which might not be
|
||||
// as readable as our health check errors. Check if we can get a health
|
||||
// check error first, and use that if it's available.
|
||||
if ferr := m.CheckFolderHealth(folder); ferr != nil {
|
||||
if ferr := runner.CheckHealth(); ferr != nil {
|
||||
err = ferr
|
||||
}
|
||||
runner.setError(err)
|
||||
@@ -1822,8 +1921,8 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
|
||||
|
||||
for f := range fchan {
|
||||
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
|
||||
if err := m.CheckFolderHealth(folder); err != nil {
|
||||
l.Infof("Stopping folder %s mid-scan due to folder error: %s", folderCfg.Description(), err)
|
||||
if err := runner.CheckHealth(); err != nil {
|
||||
l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
|
||||
return err
|
||||
}
|
||||
m.updateLocalsFromScanning(folder, batch)
|
||||
@@ -1834,8 +1933,8 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
|
||||
batchSizeBytes += f.ProtoSize()
|
||||
}
|
||||
|
||||
if err := m.CheckFolderHealth(folder); err != nil {
|
||||
l.Infof("Stopping folder %s mid-scan due to folder error: %s", folderCfg.Description(), err)
|
||||
if err := runner.CheckHealth(); err != nil {
|
||||
l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
|
||||
return err
|
||||
} else if len(batch) > 0 {
|
||||
m.updateLocalsFromScanning(folder, batch)
|
||||
@@ -1857,7 +1956,7 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
|
||||
fset.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
|
||||
f := fi.(db.FileInfoTruncated)
|
||||
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
|
||||
if err := m.CheckFolderHealth(folder); err != nil {
|
||||
if err := runner.CheckHealth(); err != nil {
|
||||
iterError = err
|
||||
return false
|
||||
}
|
||||
@@ -1916,13 +2015,13 @@ func (m *Model) internalScanFolderSubdirs(ctx context.Context, folder string, su
|
||||
})
|
||||
|
||||
if iterError != nil {
|
||||
l.Infof("Stopping folder %s mid-scan due to folder error: %s", folderCfg.Description(), iterError)
|
||||
l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
|
||||
return iterError
|
||||
}
|
||||
}
|
||||
|
||||
if err := m.CheckFolderHealth(folder); err != nil {
|
||||
l.Infof("Stopping folder %s mid-scan due to folder error: %s", folderCfg.Description(), err)
|
||||
if err := runner.CheckHealth(); err != nil {
|
||||
l.Debugln("Stopping scan of folder %s due to: %s", folderCfg.Description(), err)
|
||||
return err
|
||||
} else if len(batch) > 0 {
|
||||
m.updateLocalsFromScanning(folder, batch)
|
||||
@@ -2240,112 +2339,6 @@ func (m *Model) BringToFront(folder, file string) {
|
||||
}
|
||||
}
|
||||
|
||||
// CheckFolderHealth checks the folder for common errors and returns the
|
||||
// current folder error, or nil if the folder is healthy.
|
||||
func (m *Model) CheckFolderHealth(id string) error {
|
||||
folder, ok := m.cfg.Folders()[id]
|
||||
if !ok {
|
||||
return errFolderMissing
|
||||
}
|
||||
|
||||
// Check for folder errors, with the most serious and specific first and
|
||||
// generic ones like out of space on the home disk later. Note the
|
||||
// inverted error flow (err==nil checks) here.
|
||||
|
||||
err := m.checkFolderPath(folder)
|
||||
if err == nil {
|
||||
err = m.checkFolderFreeSpace(folder)
|
||||
}
|
||||
if err == nil {
|
||||
err = m.checkHomeDiskFree()
|
||||
}
|
||||
|
||||
// Set or clear the error on the runner, which also does logging and
|
||||
// generates events and stuff.
|
||||
m.runnerExchangeError(folder, err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// checkFolderPath returns nil if the folder path exists and has the marker file.
|
||||
func (m *Model) checkFolderPath(folder config.FolderConfiguration) error {
|
||||
fs := folder.Filesystem()
|
||||
|
||||
if fi, err := fs.Stat("."); err != nil || !fi.IsDir() {
|
||||
return errFolderPathMissing
|
||||
}
|
||||
|
||||
if !folder.HasMarker() {
|
||||
return errFolderMarkerMissing
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkFolderFreeSpace returns nil if the folder has the required amount of
|
||||
// free space, or if folder free space checking is disabled.
|
||||
func (m *Model) checkFolderFreeSpace(folder config.FolderConfiguration) error {
|
||||
return m.checkFreeSpace(folder.MinDiskFree, folder.Filesystem())
|
||||
}
|
||||
|
||||
// checkHomeDiskFree returns nil if the home disk has the required amount of
|
||||
// free space, or if home disk free space checking is disabled.
|
||||
func (m *Model) checkHomeDiskFree() error {
|
||||
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, filepath.Dir(m.cfg.ConfigPath()))
|
||||
return m.checkFreeSpace(m.cfg.Options().MinHomeDiskFree, fs)
|
||||
}
|
||||
|
||||
func (m *Model) checkFreeSpace(req config.Size, fs fs.Filesystem) error {
|
||||
val := req.BaseValue()
|
||||
if val <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
usage, err := fs.Usage(".")
|
||||
if req.Percentage() {
|
||||
freePct := (float64(usage.Free) / float64(usage.Total)) * 100
|
||||
if err == nil && freePct < val {
|
||||
return fmt.Errorf("insufficient space in %v %v: %f %% < %v", fs.Type(), fs.URI(), freePct, req)
|
||||
}
|
||||
} else {
|
||||
if err == nil && float64(usage.Free) < val {
|
||||
return fmt.Errorf("insufficient space in %v %v: %v < %v", fs.Type(), fs.URI(), usage.Free, req)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runnerExchangeError sets the given error (which may be nil) on the folder
|
||||
// runner. If the error differs from any previous error, logging and events
|
||||
// happen.
|
||||
func (m *Model) runnerExchangeError(folder config.FolderConfiguration, err error) {
|
||||
m.fmut.RLock()
|
||||
runner, runnerExists := m.folderRunners[folder.ID]
|
||||
m.fmut.RUnlock()
|
||||
|
||||
var oldErr error
|
||||
if runnerExists {
|
||||
_, _, oldErr = runner.getState()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if oldErr != nil && oldErr.Error() != err.Error() {
|
||||
l.Infof("Folder %s error changed: %q -> %q", folder.Description(), oldErr, err)
|
||||
} else if oldErr == nil {
|
||||
l.Warnf("Stopping folder %s - %v", folder.Description(), err)
|
||||
}
|
||||
if runnerExists {
|
||||
runner.setError(err)
|
||||
}
|
||||
} else if oldErr != nil {
|
||||
l.Infof("Folder %q error is cleared, restarting", folder.ID)
|
||||
if runnerExists {
|
||||
runner.clearError()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Model) ResetFolder(folder string) {
|
||||
l.Infof("Cleaning data for folder %q", folder)
|
||||
db.DropFolder(m.db, folder)
|
||||
@@ -2384,7 +2377,7 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
toCfg, ok := toFolders[folderID]
|
||||
if !ok {
|
||||
// The folder was removed.
|
||||
m.RemoveFolder(folderID)
|
||||
m.RemoveFolder(fromCfg)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -2437,6 +2430,7 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
// Some options don't require restart as those components handle it fine
|
||||
// by themselves.
|
||||
from.Options.URAccepted = to.Options.URAccepted
|
||||
from.Options.URSeen = to.Options.URSeen
|
||||
from.Options.URUniqueID = to.Options.URUniqueID
|
||||
from.Options.ListenAddresses = to.Options.ListenAddresses
|
||||
from.Options.RelaysEnabled = to.Options.RelaysEnabled
|
||||
|
||||
@@ -308,6 +308,9 @@ func (f *fakeConnection) RemoteAddr() net.Addr {
|
||||
func (f *fakeConnection) Type() string {
|
||||
return "fake"
|
||||
}
|
||||
func (f *fakeConnection) Transport() string {
|
||||
return "fake"
|
||||
}
|
||||
|
||||
func (f *fakeConnection) DownloadProgress(folder string, updates []protocol.FileDownloadProgressUpdate) {
|
||||
f.downloadProgressMessages = append(f.downloadProgressMessages, downloadProgressMessage{
|
||||
@@ -1902,6 +1905,72 @@ func TestIssue3164(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue4357(t *testing.T) {
|
||||
db := db.OpenMemory()
|
||||
cfg := defaultConfig.RawCopy()
|
||||
// Create a separate wrapper not to pollute other tests.
|
||||
wrapper := config.Wrap("/tmp/test", config.Configuration{})
|
||||
m := NewModel(wrapper, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
// Force the model to wire itself and add the folders
|
||||
if err := wrapper.ReplaceBlocking(cfg); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if _, ok := m.folderCfgs["default"]; !ok {
|
||||
t.Error("Folder should be running")
|
||||
}
|
||||
|
||||
newCfg := wrapper.RawCopy()
|
||||
newCfg.Folders[0].Paused = true
|
||||
|
||||
if err := wrapper.ReplaceBlocking(newCfg); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if _, ok := m.folderCfgs["default"]; ok {
|
||||
t.Error("Folder should not be running")
|
||||
}
|
||||
|
||||
if _, ok := m.cfg.Folder("default"); !ok {
|
||||
t.Error("should still have folder in config")
|
||||
}
|
||||
|
||||
if err := wrapper.ReplaceBlocking(config.Configuration{}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if _, ok := m.cfg.Folder("default"); ok {
|
||||
t.Error("should not have folder in config")
|
||||
}
|
||||
|
||||
// Add the folder back, should be running
|
||||
if err := wrapper.ReplaceBlocking(cfg); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if _, ok := m.folderCfgs["default"]; !ok {
|
||||
t.Error("Folder should be running")
|
||||
}
|
||||
if _, ok := m.cfg.Folder("default"); !ok {
|
||||
t.Error("should still have folder in config")
|
||||
}
|
||||
|
||||
// Should not panic when removing a running folder.
|
||||
if err := wrapper.ReplaceBlocking(config.Configuration{}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if _, ok := m.folderCfgs["default"]; ok {
|
||||
t.Error("Folder should not be running")
|
||||
}
|
||||
if _, ok := m.cfg.Folder("default"); ok {
|
||||
t.Error("should not have folder in config")
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanNoDatabaseWrite(t *testing.T) {
|
||||
// When scanning, nothing should be committed to database unless
|
||||
// something actually changed.
|
||||
@@ -1959,7 +2028,7 @@ func TestScanNoDatabaseWrite(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIssue2782(t *testing.T) {
|
||||
// CheckFolderHealth should accept a symlinked folder, when using tilde-expanded path.
|
||||
// CheckHealth should accept a symlinked folder, when using tilde-expanded path.
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("not reliable on Windows")
|
||||
@@ -2001,7 +2070,10 @@ func TestIssue2782(t *testing.T) {
|
||||
t.Error("scan error:", err)
|
||||
}
|
||||
|
||||
if err := m.CheckFolderHealth("default"); err != nil {
|
||||
m.fmut.Lock()
|
||||
runner, _ := m.folderRunners["default"]
|
||||
m.fmut.Unlock()
|
||||
if err := runner.CheckHealth(); err != nil {
|
||||
t.Error("health check error:", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,11 +34,18 @@ func (f *sendOnlyFolder) Serve() {
|
||||
f.scan.timer.Stop()
|
||||
}()
|
||||
|
||||
if f.FSWatcherEnabled && f.CheckHealth() == nil {
|
||||
f.startWatch()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
return
|
||||
|
||||
case <-f.restartWatchChan:
|
||||
f.restartWatch()
|
||||
|
||||
case <-f.scan.timer.C:
|
||||
l.Debugln(f, "Scanning subdirectories")
|
||||
f.scanTimerFired()
|
||||
@@ -48,6 +55,10 @@ func (f *sendOnlyFolder) Serve() {
|
||||
|
||||
case next := <-f.scan.delay:
|
||||
f.scan.timer.Reset(next)
|
||||
|
||||
case fsEvents := <-f.watchChan:
|
||||
l.Debugln(f, "filesystem notification rescan")
|
||||
f.scanSubdirs(fsEvents)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,6 +93,9 @@ type sendReceiveFolder struct {
|
||||
|
||||
errors map[string]string // path -> error string
|
||||
errorsMut sync.Mutex
|
||||
|
||||
blockStats map[string]int
|
||||
blockStatsMut sync.Mutex
|
||||
}
|
||||
|
||||
func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, fs fs.Filesystem) service {
|
||||
@@ -107,6 +110,9 @@ func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver vers
|
||||
remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.
|
||||
|
||||
errorsMut: sync.NewMutex(),
|
||||
|
||||
blockStats: make(map[string]int),
|
||||
blockStatsMut: sync.NewMutex(),
|
||||
}
|
||||
|
||||
f.configureCopiersAndPullers()
|
||||
@@ -158,6 +164,10 @@ func (f *sendReceiveFolder) Serve() {
|
||||
var prevSec int64
|
||||
var prevIgnoreHash string
|
||||
|
||||
if f.FSWatcherEnabled && f.CheckHealth() == nil {
|
||||
f.startWatch()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
@@ -198,8 +208,8 @@ func (f *sendReceiveFolder) Serve() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := f.model.CheckFolderHealth(f.folderID); err != nil {
|
||||
l.Infoln("Skipping pull of", f.Description(), "due to folder error:", err)
|
||||
if err := f.CheckHealth(); err != nil {
|
||||
l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
|
||||
f.pullTimer.Reset(f.sleep)
|
||||
continue
|
||||
}
|
||||
@@ -245,16 +255,13 @@ func (f *sendReceiveFolder) Serve() {
|
||||
// errors preventing us. Flag this with a warning and
|
||||
// wait a bit longer before retrying.
|
||||
if folderErrors := f.currentErrors(); len(folderErrors) > 0 {
|
||||
for path, err := range folderErrors {
|
||||
l.Infof("Puller (folder %q, dir %q): %v", f.Description(), path, err)
|
||||
}
|
||||
events.Default.Log(events.FolderErrors, map[string]interface{}{
|
||||
"folder": f.folderID,
|
||||
"errors": folderErrors,
|
||||
})
|
||||
}
|
||||
|
||||
l.Infof("Folder %q isn't making progress. Pausing puller for %v.", f.folderID, f.pause)
|
||||
l.Infof("Folder %v isn't making progress. Pausing puller for %v.", f.Description(), f.pause)
|
||||
l.Debugln(f, "next pull in", f.pause)
|
||||
|
||||
f.pullTimer.Reset(f.pause)
|
||||
@@ -275,6 +282,13 @@ func (f *sendReceiveFolder) Serve() {
|
||||
|
||||
case next := <-f.scan.delay:
|
||||
f.scan.timer.Reset(next)
|
||||
|
||||
case fsEvents := <-f.watchChan:
|
||||
l.Debugln(f, "filesystem notification rescan")
|
||||
f.scanSubdirs(fsEvents)
|
||||
|
||||
case <-f.restartWatchChan:
|
||||
f.restartWatch()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -363,7 +377,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
if err := fileValid(intf); err != nil {
|
||||
// The file isn't valid so we can't process it. Pretend that we
|
||||
// tried and set the error for the file.
|
||||
f.newError(intf.FileName(), err)
|
||||
f.newError("need", intf.FileName(), err)
|
||||
changed++
|
||||
return true
|
||||
}
|
||||
@@ -412,7 +426,7 @@ func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
// Verify that the thing we are handling lives inside a directory,
|
||||
// and not a symlink or empty space.
|
||||
if err := osutil.TraversesSymlink(f.fs, filepath.Dir(fi.Name)); err != nil {
|
||||
f.newError(fi.Name, err)
|
||||
f.newError("traverses d", fi.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -501,7 +515,7 @@ nextFile:
|
||||
// Verify that the thing we are handling lives inside a directory,
|
||||
// and not a symlink or empty space.
|
||||
if err := osutil.TraversesSymlink(f.fs, filepath.Dir(fi.Name)); err != nil {
|
||||
f.newError(fi.Name, err)
|
||||
f.newError("traverses q", fi.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -605,8 +619,7 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
|
||||
case err == nil && (!info.IsDir() || info.IsSymlink()):
|
||||
err = osutil.InWritableDir(f.fs.Remove, f.fs, file.Name)
|
||||
if err != nil {
|
||||
l.Debugf("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("dir replace", file.Name, err)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
@@ -636,15 +649,13 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
|
||||
if err = osutil.InWritableDir(mkdir, f.fs, file.Name); err == nil {
|
||||
f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
|
||||
} else {
|
||||
l.Debugf("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("dir mkdir", file.Name, err)
|
||||
}
|
||||
return
|
||||
// Weird error when stat()'ing the dir. Probably won't work to do
|
||||
// anything else with it if we can't even stat() it.
|
||||
case err != nil:
|
||||
l.Debugf("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("dir stat", file.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -656,8 +667,7 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
|
||||
} else if err := f.fs.Chmod(file.Name, mode|(fs.FileMode(info.Mode())&retainBits)); err == nil {
|
||||
f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
|
||||
} else {
|
||||
l.Debugf("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("dir chmod", file.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -693,8 +703,7 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
|
||||
// Index entry from a Syncthing predating the support for including
|
||||
// the link target in the index entry. We log this as an error.
|
||||
err = errors.New("incompatible symlink entry; rescan with newer Syncthing on source")
|
||||
l.Debugf("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("symlink", file.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -704,8 +713,7 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
|
||||
// path.
|
||||
err = osutil.InWritableDir(f.fs.Remove, f.fs, file.Name)
|
||||
if err != nil {
|
||||
l.Debugf("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("symlink remove", file.Name, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -719,8 +727,7 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
|
||||
if err = osutil.InWritableDir(createLink, f.fs, file.Name); err == nil {
|
||||
f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleSymlink}
|
||||
} else {
|
||||
l.Debugf("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("symlink create", file.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -768,8 +775,7 @@ func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Ma
|
||||
// file and not a directory etc) and that the delete is handled.
|
||||
f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
|
||||
} else {
|
||||
l.Debugf("Puller (folder %q, dir %q): delete: %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("delete dir", file.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -821,8 +827,7 @@ func (f *sendReceiveFolder) deleteFile(file protocol.FileInfo) {
|
||||
// not a directory etc) and that the delete is handled.
|
||||
f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
|
||||
} else {
|
||||
l.Debugf("Puller (folder %q, file %q): delete: %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("delete file", file.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -875,6 +880,11 @@ func (f *sendReceiveFolder) renameFile(source, target protocol.FileInfo) {
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
f.blockStatsMut.Lock()
|
||||
f.blockStats["total"] += len(target.Blocks)
|
||||
f.blockStats["renamed"] += len(target.Blocks)
|
||||
f.blockStatsMut.Unlock()
|
||||
|
||||
// The file was renamed, so we have handled both the necessary delete
|
||||
// of the source and the creation of the target. Fix-up the metadata,
|
||||
// and update the local index of the target file.
|
||||
@@ -883,8 +893,8 @@ func (f *sendReceiveFolder) renameFile(source, target protocol.FileInfo) {
|
||||
|
||||
err = f.shortcutFile(target)
|
||||
if err != nil {
|
||||
l.Debugf("Puller (folder %q, file %q): rename from %q metadata: %v", f.folderID, target.Name, source.Name, err)
|
||||
f.newError(target.Name, err)
|
||||
err = fmt.Errorf("from %s: %s", source.Name, err.Error())
|
||||
f.newError("rename shortcut", target.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -896,8 +906,8 @@ func (f *sendReceiveFolder) renameFile(source, target protocol.FileInfo) {
|
||||
|
||||
err = osutil.InWritableDir(f.fs.Remove, f.fs, source.Name)
|
||||
if err != nil {
|
||||
l.Debugf("Puller (folder %q, file %q): delete %q after failed rename: %v", f.folderID, target.Name, source.Name, err)
|
||||
f.newError(target.Name, err)
|
||||
err = fmt.Errorf("from %s: %s", source.Name, err.Error())
|
||||
f.newError("rename delete", target.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -971,8 +981,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
l.Debugln("Puller: shortcut:", err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("shortcut", file.Name, err)
|
||||
} else {
|
||||
f.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
|
||||
}
|
||||
@@ -1029,7 +1038,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
|
||||
if f.MinDiskFree.BaseValue() > 0 {
|
||||
if usage, err := f.fs.Usage("."); err == nil && usage.Free < blocksSize {
|
||||
l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.fs.URI(), file.Name, float64(usage.Free)/1024/1024, float64(blocksSize)/1024/1024)
|
||||
f.newError(file.Name, errors.New("insufficient space"))
|
||||
f.newError("disk space", file.Name, errors.New("insufficient space"))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1082,8 +1091,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
|
||||
func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo) error {
|
||||
if !f.ignorePermissions(file) {
|
||||
if err := f.fs.Chmod(file.Name, fs.FileMode(file.Permissions&0777)); err != nil {
|
||||
l.Debugf("Puller (folder %q, file %q): shortcut: chmod: %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
f.newError("shortcut chmod", file.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -1373,7 +1381,7 @@ func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
|
||||
// for this file up to ten times, but the last nine of those
|
||||
// scans will be cheap...
|
||||
go f.Scan([]string{state.curFile.Name})
|
||||
return nil
|
||||
return fmt.Errorf("file modified but not rescanned; will try again later")
|
||||
}
|
||||
|
||||
switch {
|
||||
@@ -1441,9 +1449,18 @@ func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState) {
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
l.Debugln("Puller: final:", err)
|
||||
f.newError(state.file.Name, err)
|
||||
f.newError("finisher", state.file.Name, err)
|
||||
} else {
|
||||
f.blockStatsMut.Lock()
|
||||
f.blockStats["total"] += state.reused + state.copyTotal + state.pullTotal
|
||||
f.blockStats["reused"] += state.reused
|
||||
f.blockStats["pulled"] += state.pullTotal
|
||||
f.blockStats["copyOrigin"] += state.copyOrigin
|
||||
f.blockStats["copyOriginShifted"] += state.copyOriginShifted
|
||||
f.blockStats["copyElsewhere"] += state.copyTotal - state.copyOrigin
|
||||
f.blockStatsMut.Unlock()
|
||||
}
|
||||
|
||||
events.Default.Log(events.ItemFinished, map[string]interface{}{
|
||||
"folder": f.folderID,
|
||||
"item": state.file.Name,
|
||||
@@ -1459,6 +1476,16 @@ func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState) {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *sendReceiveFolder) BlockStats() map[string]int {
|
||||
f.blockStatsMut.Lock()
|
||||
stats := make(map[string]int)
|
||||
for k, v := range f.blockStats {
|
||||
stats[k] = v
|
||||
}
|
||||
f.blockStatsMut.Unlock()
|
||||
return stats
|
||||
}
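
The new BlockStats accessor copies the counters while holding blockStatsMut, so callers get a consistent snapshot without keeping the folder locked. A minimal sketch of how such a snapshot could be consumed, using the keys written elsewhere in this diff ("total", "reused", "pulled", "copyOrigin", "copyOriginShifted", "copyElsewhere", "renamed"); the helper below is illustrative and not part of Syncthing:

package main

import "fmt"

// reuseRatio is a hypothetical helper (not part of this diff) showing how the
// counters returned by BlockStats() could be reduced to a single figure.
func reuseRatio(stats map[string]int) float64 {
	if stats["total"] == 0 {
		return 0
	}
	return 100 * float64(stats["reused"]) / float64(stats["total"])
}

func main() {
	// Hypothetical snapshot using the keys written by the finisher routine
	// and renameFile in this diff.
	stats := map[string]int{"total": 400, "reused": 100, "pulled": 250, "copyElsewhere": 50}
	fmt.Printf("%.1f%% of blocks were reused locally\n", reuseRatio(stats))
}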
|
||||
|
||||
// Moves the given filename to the front of the job queue
|
||||
func (f *sendReceiveFolder) BringToFront(filename string) {
|
||||
f.queue.BringToFront(filename)
|
||||
@@ -1513,11 +1540,11 @@ func (f *sendReceiveFolder) dbUpdaterRoutine() {
|
||||
delete(changedDirs, dir)
|
||||
fd, err := f.fs.Open(dir)
|
||||
if err != nil {
|
||||
l.Infof("fsync %q failed: %v", dir, err)
|
||||
l.Debugf("fsync %q failed: %v", dir, err)
|
||||
continue
|
||||
}
|
||||
if err := fd.Sync(); err != nil {
|
||||
l.Infof("fsync %q failed: %v", dir, err)
|
||||
l.Debugf("fsync %q failed: %v", dir, err)
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
@@ -1635,7 +1662,7 @@ func (f *sendReceiveFolder) moveForConflict(name string, lastModBy string) error
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *sendReceiveFolder) newError(path string, err error) {
|
||||
func (f *sendReceiveFolder) newError(context, path string, err error) {
|
||||
f.errorsMut.Lock()
|
||||
defer f.errorsMut.Unlock()
|
||||
|
||||
@@ -1645,8 +1672,8 @@ func (f *sendReceiveFolder) newError(path string, err error) {
|
||||
if _, ok := f.errors[path]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
f.errors[path] = err.Error()
|
||||
l.Infof("Puller (folder %q, file %q): %s: %v", f.Description(), path, context, err)
|
||||
f.errors[path] = fmt.Sprintf("%s: %s", context, err.Error())
|
||||
}
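
newError now takes a context string describing the call site ("dir mkdir", "symlink create", and so on) and stores it as a prefix on the error text, which is what later surfaces in the FolderErrors event. A small standalone sketch of the resulting format; composeError is an illustrative name, not a function in this diff:

package main

import (
	"errors"
	"fmt"
)

// composeError mirrors the formatting newError (and failLocked in
// sharedpullerstate.go) now uses: call-site context, a colon, then the error
// text. The function name is illustrative, not part of the Syncthing code.
func composeError(context string, err error) string {
	return fmt.Sprintf("%s: %s", context, err.Error())
}

func main() {
	err := errors.New("permission denied")
	// Roughly what would end up in f.errors["photos/2017"] if mkdir failed there.
	fmt.Println(composeError("dir mkdir", err))
}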
|
||||
|
||||
func (f *sendReceiveFolder) clearErrors() {
|
||||
@@ -1666,6 +1693,11 @@ func (f *sendReceiveFolder) currentErrors() []fileError {
|
||||
return errors
|
||||
}
|
||||
|
||||
func (f *sendReceiveFolder) IgnoresUpdated() {
|
||||
f.folder.IgnoresUpdated()
|
||||
f.IndexUpdated()
|
||||
}
|
||||
|
||||
// A []fileError is sent as part of an event and will be JSON serialized.
|
||||
type fileError struct {
|
||||
Path string `json:"path"`
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"time"
|
||||
@@ -164,6 +165,9 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Hide the temporary file
|
||||
s.fs.Hide(s.tempName)
|
||||
|
||||
// Don't truncate symlink files, as that will mean that the path will
|
||||
// contain a bunch of nulls.
|
||||
if s.sparse && !s.file.IsSymlink() {
|
||||
@@ -201,9 +205,8 @@ func (s *sharedPullerState) sourceFile() (fs.File, error) {
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
// earlyClose prints a warning message composed of the context and
|
||||
// error, and marks the sharedPullerState as failed. Is a no-op when called on
|
||||
// an already failed state.
|
||||
// fail sets the error on the puller state compose of error, and marks the
|
||||
// sharedPullerState as failed. Is a no-op when called on an already failed state.
|
||||
func (s *sharedPullerState) fail(context string, err error) {
|
||||
s.mut.Lock()
|
||||
defer s.mut.Unlock()
|
||||
@@ -212,12 +215,11 @@ func (s *sharedPullerState) fail(context string, err error) {
|
||||
}
|
||||
|
||||
func (s *sharedPullerState) failLocked(context string, err error) {
|
||||
if s.err != nil {
|
||||
if s.err != nil || err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
l.Infof("Puller (folder %q, file %q): %s: %v", s.folder, s.file.Name, context, err)
|
||||
s.err = err
|
||||
s.err = fmt.Errorf("%s: %s", context, err.Error())
|
||||
}
|
||||
|
||||
func (s *sharedPullerState) failed() error {
|
||||
@@ -307,6 +309,12 @@ func (s *sharedPullerState) finalClose() (bool, error) {
|
||||
|
||||
s.closed = true
|
||||
|
||||
// Unhide the temporary file when we close it, as it's likely to
|
||||
// immediately be renamed to the final name. If this is a failed temp
|
||||
// file we will also unhide it, but I'm fine with that as we're now
|
||||
// leaving it around for potentially quite a while.
|
||||
s.fs.Unhide(s.tempName)
|
||||
|
||||
return true, s.err
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/calmh/luhn"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
@@ -158,7 +157,7 @@ func luhnify(s string) (string, error) {
|
||||
for i := 0; i < 4; i++ {
|
||||
p := s[i*13 : (i+1)*13]
|
||||
copy(res[i*(13+1):], p)
|
||||
l, err := luhn.Base32.Generate(p)
|
||||
l, err := luhnBase32.generate(p)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -176,7 +175,7 @@ func unluhnify(s string) (string, error) {
|
||||
for i := 0; i < 4; i++ {
|
||||
p := s[i*(13+1) : (i+1)*(13+1)-1]
|
||||
copy(res[i*13:], p)
|
||||
l, err := luhn.Base32.Generate(p)
|
||||
l, err := luhnBase32.generate(p)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
53 lib/protocol/luhn.go (new file)
@@ -0,0 +1,53 @@
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// An alphabet is a string of N characters, representing the digits of a given
|
||||
// base N.
|
||||
type luhnAlphabet string
|
||||
|
||||
var (
|
||||
luhnBase32 luhnAlphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
|
||||
)
|
||||
|
||||
// generate returns a check digit for the string s, which should be composed
|
||||
// of characters from the Alphabet a.
|
||||
func (a luhnAlphabet) generate(s string) (rune, error) {
|
||||
factor := 1
|
||||
sum := 0
|
||||
n := len(a)
|
||||
|
||||
for i := range s {
|
||||
codepoint := strings.IndexByte(string(a), s[i])
|
||||
if codepoint == -1 {
|
||||
return 0, fmt.Errorf("Digit %q not valid in alphabet %q", s[i], a)
|
||||
}
|
||||
addend := factor * codepoint
|
||||
if factor == 2 {
|
||||
factor = 1
|
||||
} else {
|
||||
factor = 2
|
||||
}
|
||||
addend = (addend / n) + (addend % n)
|
||||
sum += addend
|
||||
}
|
||||
remainder := sum % n
|
||||
checkCodepoint := (n - remainder) % n
|
||||
return rune(a[checkCodepoint]), nil
|
||||
}
|
||||
|
||||
// luhnValidate returns true if the last character of the string s is correct, for
|
||||
// a string s composed of characters in the alphabet a.
|
||||
func (a luhnAlphabet) luhnValidate(s string) bool {
|
||||
t := s[:len(s)-1]
|
||||
c, err := a.generate(t)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return rune(s[len(s)-1]) == c
|
||||
}
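
The generate method implements the Luhn mod N check over an arbitrary alphabet; with the base-32 alphabet above it is what luhnify/unluhnify in the preceding hunk use to append one check character per 13-character group of a device ID. A rough usage sketch, written as if it sat in package protocol next to this file (the type and methods are unexported); the sample group string is made up:

// luhn_example.go - illustrative sketch, not part of this diff.
package protocol

import "fmt"

func exampleLuhnCheck() {
	group := "MFZWI3DBONSGY" // a made-up 13-character device ID group
	c, err := luhnBase32.generate(group)
	if err != nil {
		// a character outside the base-32 alphabet was passed in
		return
	}
	fmt.Printf("check character for %s is %c\n", group, c)
	fmt.Println(luhnBase32.luhnValidate(group + string(c))) // prints true
}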
|
||||
48 lib/protocol/luhn_test.go (new file)
@@ -0,0 +1,48 @@
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGenerate(t *testing.T) {
|
||||
// Base 6 Luhn
|
||||
a := luhnAlphabet("abcdef")
|
||||
c, err := a.generate("abcdef")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c != 'e' {
|
||||
t.Errorf("Incorrect check digit %c != e", c)
|
||||
}
|
||||
|
||||
// Base 10 Luhn
|
||||
a = luhnAlphabet("0123456789")
|
||||
c, err = a.generate("7992739871")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c != '3' {
|
||||
t.Errorf("Incorrect check digit %c != 3", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidString(t *testing.T) {
|
||||
a := luhnAlphabet("ABC")
|
||||
_, err := a.generate("7992739871")
|
||||
t.Log(err)
|
||||
if err == nil {
|
||||
t.Error("Unexpected nil error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
a := luhnAlphabet("abcdef")
|
||||
if !a.luhnValidate("abcdefe") {
|
||||
t.Errorf("Incorrect validation response for abcdefe")
|
||||
}
|
||||
if a.luhnValidate("abcdefd") {
|
||||
t.Errorf("Incorrect validation response for abcdefd")
|
||||
}
|
||||
}
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockSize is the standard ata block size (128 KiB)
|
||||
// BlockSize is the standard data block size (128 KiB)
|
||||
BlockSize = 128 << 10
|
||||
|
||||
// MaxMessageLen is the largest message size allowed on the wire. (500 MB)
|
||||
|
||||
438 lib/watchaggregator/aggregator.go (new file)
@@ -0,0 +1,438 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package watchaggregator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
)
|
||||
|
||||
// Not meant to be changed, but must be changeable for tests
|
||||
var (
|
||||
maxFiles = 512
|
||||
maxFilesPerDir = 128
|
||||
)
|
||||
|
||||
// aggregatedEvent represents potentially multiple events at and/or recursively
|
||||
// below one path until it times out and a scan is scheduled.
|
||||
type aggregatedEvent struct {
|
||||
firstModTime time.Time
|
||||
lastModTime time.Time
|
||||
evType fs.EventType
|
||||
}
|
||||
|
||||
// Stores pointers to both aggregated events directly within this directory and
|
||||
// child directories recursively containing aggregated events themselves.
|
||||
type eventDir struct {
|
||||
events map[string]*aggregatedEvent
|
||||
dirs map[string]*eventDir
|
||||
}
|
||||
|
||||
func newEventDir() *eventDir {
|
||||
return &eventDir{
|
||||
events: make(map[string]*aggregatedEvent),
|
||||
dirs: make(map[string]*eventDir),
|
||||
}
|
||||
}
|
||||
|
||||
func (dir *eventDir) eventCount() int {
|
||||
count := len(dir.events)
|
||||
for _, dir := range dir.dirs {
|
||||
count += dir.eventCount()
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (dir *eventDir) childCount() int {
|
||||
return len(dir.events) + len(dir.dirs)
|
||||
}
|
||||
|
||||
func (dir *eventDir) firstModTime() time.Time {
|
||||
if dir.childCount() == 0 {
|
||||
panic("bug: firstModTime must not be used on empty eventDir")
|
||||
}
|
||||
firstModTime := time.Now()
|
||||
for _, childDir := range dir.dirs {
|
||||
dirTime := childDir.firstModTime()
|
||||
if dirTime.Before(firstModTime) {
|
||||
firstModTime = dirTime
|
||||
}
|
||||
}
|
||||
for _, event := range dir.events {
|
||||
if event.firstModTime.Before(firstModTime) {
|
||||
firstModTime = event.firstModTime
|
||||
}
|
||||
}
|
||||
return firstModTime
|
||||
}
|
||||
|
||||
func (dir *eventDir) eventType() fs.EventType {
|
||||
if dir.childCount() == 0 {
|
||||
panic("bug: eventType must not be used on empty eventDir")
|
||||
}
|
||||
var evType fs.EventType
|
||||
for _, childDir := range dir.dirs {
|
||||
evType |= childDir.eventType()
|
||||
if evType == fs.Mixed {
|
||||
return fs.Mixed
|
||||
}
|
||||
}
|
||||
for _, event := range dir.events {
|
||||
evType |= event.evType
|
||||
if evType == fs.Mixed {
|
||||
return fs.Mixed
|
||||
}
|
||||
}
|
||||
return evType
|
||||
}
|
||||
|
||||
type aggregator struct {
|
||||
folderCfg config.FolderConfiguration
|
||||
folderCfgUpdate chan config.FolderConfiguration
|
||||
// Time after which an event is scheduled for scanning when no modifications occur.
|
||||
notifyDelay time.Duration
|
||||
// Time after which an event is scheduled for scanning even though modifications occur.
|
||||
notifyTimeout time.Duration
|
||||
notifyTimer *time.Timer
|
||||
notifyTimerNeedsReset bool
|
||||
notifyTimerResetChan chan time.Duration
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func new(folderCfg config.FolderConfiguration, ctx context.Context) *aggregator {
|
||||
a := &aggregator{
|
||||
folderCfgUpdate: make(chan config.FolderConfiguration),
|
||||
notifyTimerNeedsReset: false,
|
||||
notifyTimerResetChan: make(chan time.Duration),
|
||||
ctx: ctx,
|
||||
}
|
||||
|
||||
a.updateConfig(folderCfg)
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func Aggregate(in <-chan fs.Event, out chan<- []string, folderCfg config.FolderConfiguration, cfg *config.Wrapper, ctx context.Context) {
|
||||
a := new(folderCfg, ctx)
|
||||
|
||||
// Necessary for unit tests where the backend is mocked
|
||||
go a.mainLoop(in, out, cfg)
|
||||
}
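
Aggregate is the package's entry point: it builds an aggregator for the folder and starts its main loop on the supplied context. A minimal wiring sketch, assuming it sits inside this package and that the caller already has an fs.Event channel from a watcher backend and a channel its scan loop reads from; the function and channel names are assumptions, and the real call site in the model is not part of this hunk:

// Sketch only; not part of this diff.
func startAggregation(ctx context.Context, folderCfg config.FolderConfiguration, cfg *config.Wrapper, backendEvents <-chan fs.Event) <-chan []string {
	scanRequests := make(chan []string)
	// Batches of changed paths arrive on scanRequests once events settle.
	Aggregate(backendEvents, scanRequests, folderCfg, cfg, ctx)
	return scanRequests
}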
|
||||
|
||||
func (a *aggregator) mainLoop(in <-chan fs.Event, out chan<- []string, cfg *config.Wrapper) {
|
||||
a.notifyTimer = time.NewTimer(a.notifyDelay)
|
||||
defer a.notifyTimer.Stop()
|
||||
|
||||
inProgress := make(map[string]struct{})
|
||||
inProgressItemSubscription := events.Default.Subscribe(events.ItemStarted | events.ItemFinished)
|
||||
|
||||
cfg.Subscribe(a)
|
||||
|
||||
rootEventDir := newEventDir()
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-in:
|
||||
a.newEvent(event, rootEventDir, inProgress)
|
||||
case event := <-inProgressItemSubscription.C():
|
||||
updateInProgressSet(event, inProgress)
|
||||
case <-a.notifyTimer.C:
|
||||
a.actOnTimer(rootEventDir, out)
|
||||
case interval := <-a.notifyTimerResetChan:
|
||||
a.resetNotifyTimer(interval)
|
||||
case folderCfg := <-a.folderCfgUpdate:
|
||||
a.updateConfig(folderCfg)
|
||||
case <-a.ctx.Done():
|
||||
cfg.Unsubscribe(a)
|
||||
l.Debugln(a, "Stopped")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *aggregator) newEvent(event fs.Event, rootEventDir *eventDir, inProgress map[string]struct{}) {
|
||||
if _, ok := rootEventDir.events["."]; ok {
|
||||
l.Debugln(a, "Will scan entire folder anyway; dropping:", event.Name)
|
||||
return
|
||||
}
|
||||
if _, ok := inProgress[event.Name]; ok {
|
||||
l.Debugln(a, "Skipping path we modified:", event.Name)
|
||||
return
|
||||
}
|
||||
a.aggregateEvent(event, time.Now(), rootEventDir)
|
||||
}
|
||||
|
||||
func (a *aggregator) aggregateEvent(event fs.Event, evTime time.Time, rootEventDir *eventDir) {
|
||||
if event.Name == "." || rootEventDir.eventCount() == maxFiles {
|
||||
l.Debugln(a, "Scan entire folder")
|
||||
firstModTime := evTime
|
||||
if rootEventDir.childCount() != 0 {
|
||||
event.Type |= rootEventDir.eventType()
|
||||
firstModTime = rootEventDir.firstModTime()
|
||||
}
|
||||
rootEventDir.dirs = make(map[string]*eventDir)
|
||||
rootEventDir.events = make(map[string]*aggregatedEvent)
|
||||
rootEventDir.events["."] = &aggregatedEvent{
|
||||
firstModTime: firstModTime,
|
||||
lastModTime: evTime,
|
||||
evType: event.Type,
|
||||
}
|
||||
a.resetNotifyTimerIfNeeded()
|
||||
return
|
||||
}
|
||||
|
||||
parentDir := rootEventDir
|
||||
|
||||
// Check if any parent directory is already tracked or will exceed
|
||||
// events per directory limit bottom up
|
||||
pathSegments := strings.Split(filepath.ToSlash(event.Name), "/")
|
||||
|
||||
// As root dir cannot be further aggregated, allow up to maxFiles
|
||||
// children.
|
||||
localMaxFilesPerDir := maxFiles
|
||||
var currPath string
|
||||
for i, name := range pathSegments[:len(pathSegments)-1] {
|
||||
currPath = filepath.Join(currPath, name)
|
||||
|
||||
if ev, ok := parentDir.events[name]; ok {
|
||||
ev.lastModTime = evTime
|
||||
ev.evType |= event.Type
|
||||
l.Debugf("%v Parent %s (type %s) already tracked: %s", a, currPath, ev.evType, event.Name)
|
||||
return
|
||||
}
|
||||
|
||||
if parentDir.childCount() == localMaxFilesPerDir {
|
||||
l.Debugf("%v Parent dir %s already has %d children, tracking it instead: %s", a, currPath, localMaxFilesPerDir, event.Name)
|
||||
event.Name = filepath.Dir(currPath)
|
||||
a.aggregateEvent(event, evTime, rootEventDir)
|
||||
return
|
||||
}
|
||||
|
||||
// If there are no events below path, but we need to recurse
|
||||
// into that path, create eventDir at path.
|
||||
if newParent, ok := parentDir.dirs[name]; ok {
|
||||
parentDir = newParent
|
||||
} else {
|
||||
l.Debugln(a, "Creating eventDir at:", currPath)
|
||||
newParent = newEventDir()
|
||||
parentDir.dirs[name] = newParent
|
||||
parentDir = newParent
|
||||
}
|
||||
|
||||
// Reset allowed children count to maxFilesPerDir for non-root
|
||||
if i == 0 {
|
||||
localMaxFilesPerDir = maxFilesPerDir
|
||||
}
|
||||
}
|
||||
|
||||
name := pathSegments[len(pathSegments)-1]
|
||||
|
||||
if ev, ok := parentDir.events[name]; ok {
|
||||
ev.lastModTime = evTime
|
||||
ev.evType |= event.Type
|
||||
l.Debugf("%v Already tracked (type %v): %s", a, ev.evType, event.Name)
|
||||
return
|
||||
}
|
||||
|
||||
childDir, ok := parentDir.dirs[name]
|
||||
|
||||
// If a dir existed at path, it would be removed from dirs, thus
|
||||
// childCount would not increase.
|
||||
if !ok && parentDir.childCount() == localMaxFilesPerDir {
|
||||
l.Debugf("%v Parent dir already has %d children, tracking it instead: %s", a, localMaxFilesPerDir, event.Name)
|
||||
event.Name = filepath.Dir(event.Name)
|
||||
a.aggregateEvent(event, evTime, rootEventDir)
|
||||
return
|
||||
}
|
||||
|
||||
firstModTime := evTime
|
||||
if ok {
|
||||
firstModTime = childDir.firstModTime()
|
||||
event.Type |= childDir.eventType()
|
||||
delete(parentDir.dirs, name)
|
||||
}
|
||||
l.Debugf("%v Tracking (type %v): %s", a, event.Type, event.Name)
|
||||
parentDir.events[name] = &aggregatedEvent{
|
||||
firstModTime: firstModTime,
|
||||
lastModTime: evTime,
|
||||
evType: event.Type,
|
||||
}
|
||||
a.resetNotifyTimerIfNeeded()
|
||||
}
|
||||
|
||||
func (a *aggregator) resetNotifyTimerIfNeeded() {
|
||||
if a.notifyTimerNeedsReset {
|
||||
a.resetNotifyTimer(a.notifyDelay)
|
||||
}
|
||||
}
|
||||
|
||||
// resetNotifyTimer should only ever be called when notifyTimer has stopped
|
||||
// and notifyTimer.C been read from. Otherwise, call resetNotifyTimerIfNeeded.
|
||||
func (a *aggregator) resetNotifyTimer(duration time.Duration) {
|
||||
l.Debugln(a, "Resetting notifyTimer to", duration.String())
|
||||
a.notifyTimerNeedsReset = false
|
||||
a.notifyTimer.Reset(duration)
|
||||
}
|
||||
|
||||
func (a *aggregator) actOnTimer(rootEventDir *eventDir, out chan<- []string) {
|
||||
eventCount := rootEventDir.eventCount()
|
||||
if eventCount == 0 {
|
||||
l.Debugln(a, "No tracked events, waiting for new event.")
|
||||
a.notifyTimerNeedsReset = true
|
||||
return
|
||||
}
|
||||
oldevents := a.popOldEvents(rootEventDir, ".", time.Now())
|
||||
if len(oldevents) == 0 {
|
||||
l.Debugln(a, "No old fs events")
|
||||
a.resetNotifyTimer(a.notifyDelay)
|
||||
return
|
||||
}
|
||||
// Sending to channel might block for a long time, but we need to keep
|
||||
// reading from notify backend channel to avoid overflow
|
||||
go a.notify(oldevents, out)
|
||||
}
|
||||
|
||||
// Schedule scan for given events dispatching deletes last and reset notification
|
||||
// afterwards to set up for the next scan scheduling.
|
||||
func (a *aggregator) notify(oldEvents map[string]*aggregatedEvent, out chan<- []string) {
|
||||
timeBeforeSending := time.Now()
|
||||
l.Debugf("%v Notifying about %d fs events", a, len(oldEvents))
|
||||
separatedBatches := make(map[fs.EventType][]string)
|
||||
for path, event := range oldEvents {
|
||||
separatedBatches[event.evType] = append(separatedBatches[event.evType], path)
|
||||
}
|
||||
for _, evType := range [3]fs.EventType{fs.NonRemove, fs.Mixed, fs.Remove} {
|
||||
currBatch := separatedBatches[evType]
|
||||
if len(currBatch) != 0 {
|
||||
select {
|
||||
case out <- currBatch:
|
||||
case <-a.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
// If sending to channel blocked for a long time,
|
||||
// shorten next notifyDelay accordingly.
|
||||
duration := time.Since(timeBeforeSending)
|
||||
buffer := time.Millisecond
|
||||
var nextDelay time.Duration
|
||||
switch {
|
||||
case duration < a.notifyDelay/10:
|
||||
nextDelay = a.notifyDelay
|
||||
case duration+buffer > a.notifyDelay:
|
||||
nextDelay = buffer
|
||||
default:
|
||||
nextDelay = a.notifyDelay - duration
|
||||
}
|
||||
select {
|
||||
case a.notifyTimerResetChan <- nextDelay:
|
||||
case <-a.ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
// popOldEvents finds events that should be scheduled for scanning recursively in dirs,
|
||||
// removes those events and empty eventDirs and returns a map with all the removed
|
||||
// events referenced by their filesystem path
|
||||
func (a *aggregator) popOldEvents(dir *eventDir, dirPath string, currTime time.Time) map[string]*aggregatedEvent {
|
||||
oldEvents := make(map[string]*aggregatedEvent)
|
||||
for childName, childDir := range dir.dirs {
|
||||
for evPath, event := range a.popOldEvents(childDir, filepath.Join(dirPath, childName), currTime) {
|
||||
oldEvents[evPath] = event
|
||||
}
|
||||
if childDir.childCount() == 0 {
|
||||
delete(dir.dirs, childName)
|
||||
}
|
||||
}
|
||||
for name, event := range dir.events {
|
||||
if a.isOld(event, currTime) {
|
||||
oldEvents[filepath.Join(dirPath, name)] = event
|
||||
delete(dir.events, name)
|
||||
}
|
||||
}
|
||||
return oldEvents
|
||||
}
|
||||
|
||||
func (a *aggregator) isOld(ev *aggregatedEvent, currTime time.Time) bool {
|
||||
// Deletes should always be scanned last, therefore they are always
|
||||
// delayed by letting them time out (see below).
|
||||
// An event that has not registered any new modifications recently is scanned.
|
||||
// a.notifyDelay is the user facing value signifying the normal delay between
|
||||
// a picking up a modification and scanning it. As scheduling scans happens at
|
||||
// regular intervals of a.notifyDelay the delay of a single event is not exactly
|
||||
// a.notifyDelay, but lies in in the range of 0.5 to 1.5 times a.notifyDelay.
|
||||
if ev.evType == fs.NonRemove && 2*currTime.Sub(ev.lastModTime) > a.notifyDelay {
|
||||
return true
|
||||
}
|
||||
// When an event registers repeat modifications or involves removals it
|
||||
// is delayed to reduce resource usage, but after a certain time (notifyTimeout)
|
||||
// passed it is scanned anyway.
|
||||
return currTime.Sub(ev.firstModTime) > a.notifyTimeout
|
||||
}
|
||||
|
||||
func (a *aggregator) String() string {
|
||||
return fmt.Sprintf("aggregator/%s:", a.folderCfg.Description())
|
||||
}
|
||||
|
||||
func (a *aggregator) VerifyConfiguration(from, to config.Configuration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *aggregator) CommitConfiguration(from, to config.Configuration) bool {
|
||||
for _, folderCfg := range to.Folders {
|
||||
if folderCfg.ID == a.folderCfg.ID {
|
||||
select {
|
||||
case a.folderCfgUpdate <- folderCfg:
|
||||
case <-a.ctx.Done():
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
// Nothing to do, model will soon stop this
|
||||
return true
|
||||
}
|
||||
|
||||
func (a *aggregator) updateConfig(folderCfg config.FolderConfiguration) {
|
||||
a.notifyDelay = time.Duration(folderCfg.FSWatcherDelayS) * time.Second
|
||||
a.notifyTimeout = notifyTimeout(folderCfg.FSWatcherDelayS)
|
||||
a.folderCfg = folderCfg
|
||||
}
|
||||
|
||||
func updateInProgressSet(event events.Event, inProgress map[string]struct{}) {
|
||||
if event.Type == events.ItemStarted {
|
||||
path := event.Data.(map[string]string)["item"]
|
||||
inProgress[path] = struct{}{}
|
||||
} else if event.Type == events.ItemFinished {
|
||||
path := event.Data.(map[string]interface{})["item"].(string)
|
||||
delete(inProgress, path)
|
||||
}
|
||||
}
|
||||
|
||||
// Events that involve removals or continuously receive new modifications are
|
||||
// delayed but must time out at some point. The following numbers come out of thin
|
||||
// air, they were just considered as a sensible compromise between fast updates and
|
||||
// saving resources. For short delays the timeout is 6 times the delay, capped at 1
|
||||
// minute. For delays longer than 1 minute, the delay and timeout are equal.
|
||||
func notifyTimeout(eventDelayS int) time.Duration {
|
||||
shortDelayS := 10
|
||||
shortDelayMultiplicator := 6
|
||||
longDelayS := 60
|
||||
longDelayTimeout := time.Duration(1) * time.Minute
|
||||
if eventDelayS < shortDelayS {
|
||||
return time.Duration(eventDelayS*shortDelayMultiplicator) * time.Second
|
||||
}
|
||||
if eventDelayS < longDelayS {
|
||||
return longDelayTimeout
|
||||
}
|
||||
return time.Duration(eventDelayS) * time.Second
|
||||
}
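
To make the timeout rule above concrete, here is a hypothetical table-driven test (not part of this diff, assumed to sit in package watchaggregator with testing and time imported) pinning down the values notifyTimeout produces for a few delays:

func TestNotifyTimeoutExamples(t *testing.T) {
	cases := map[int]time.Duration{
		1:   6 * time.Second,   // short delay: six times the delay
		10:  time.Minute,       // medium delay: capped at one minute
		59:  time.Minute,
		120: 120 * time.Second, // long delay: timeout equals the delay
	}
	for delayS, want := range cases {
		if got := notifyTimeout(delayS); got != want {
			t.Errorf("notifyTimeout(%d) = %v, want %v", delayS, got, want)
		}
	}
}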
|
||||
281 lib/watchaggregator/aggregator_test.go (new file)
@@ -0,0 +1,281 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package watchaggregator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
maxFiles = 32
|
||||
maxFilesPerDir = 8
|
||||
defer func() {
|
||||
maxFiles = 512
|
||||
maxFilesPerDir = 128
|
||||
}()
|
||||
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
const (
|
||||
testNotifyDelayS = 1
|
||||
testNotifyTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
folderRoot = filepath.Clean("/home/someuser/syncthing")
|
||||
defaultFolderCfg = config.FolderConfiguration{
|
||||
FilesystemType: fs.FilesystemTypeBasic,
|
||||
Path: folderRoot,
|
||||
FSWatcherDelayS: testNotifyDelayS,
|
||||
}
|
||||
defaultCfg = config.Wrap("", config.Configuration{
|
||||
Folders: []config.FolderConfiguration{defaultFolderCfg},
|
||||
})
|
||||
)
|
||||
|
||||
type expectedBatch struct {
|
||||
paths []string
|
||||
afterMs int
|
||||
beforeMs int
|
||||
}
|
||||
|
||||
// TestAggregate checks whether maxFilesPerDir+1 events in one dir are
|
||||
// aggregated to parent dir
|
||||
func TestAggregate(t *testing.T) {
|
||||
evDir := newEventDir()
|
||||
inProgress := make(map[string]struct{})
|
||||
|
||||
folderCfg := defaultFolderCfg.Copy()
|
||||
folderCfg.ID = "Aggregate"
|
||||
ctx, _ := context.WithCancel(context.Background())
|
||||
a := new(folderCfg, ctx)
|
||||
|
||||
// checks whether maxFilesPerDir events in one dir are kept as is
|
||||
for i := 0; i < maxFilesPerDir; i++ {
|
||||
a.newEvent(fs.Event{filepath.Join("parent", strconv.Itoa(i)), fs.NonRemove}, evDir, inProgress)
|
||||
}
|
||||
if len(getEventPaths(evDir, ".", a)) != maxFilesPerDir {
|
||||
t.Errorf("Unexpected number of events stored")
|
||||
}
|
||||
|
||||
// checks whether maxFilesPerDir+1 events in one dir are aggregated to parent dir
|
||||
a.newEvent(fs.Event{filepath.Join("parent", "new"), fs.NonRemove}, evDir, inProgress)
|
||||
compareBatchToExpected(t, getEventPaths(evDir, ".", a), []string{"parent"})
|
||||
|
||||
// checks that adding an event below "parent" does not change anything
|
||||
a.newEvent(fs.Event{filepath.Join("parent", "extra"), fs.NonRemove}, evDir, inProgress)
|
||||
compareBatchToExpected(t, getEventPaths(evDir, ".", a), []string{"parent"})
|
||||
|
||||
// again test aggregation in "parent" but with event in subdirs
|
||||
evDir = newEventDir()
|
||||
for i := 0; i < maxFilesPerDir; i++ {
|
||||
a.newEvent(fs.Event{filepath.Join("parent", strconv.Itoa(i)), fs.NonRemove}, evDir, inProgress)
|
||||
}
|
||||
a.newEvent(fs.Event{filepath.Join("parent", "sub", "new"), fs.NonRemove}, evDir, inProgress)
|
||||
compareBatchToExpected(t, getEventPaths(evDir, ".", a), []string{"parent"})
|
||||
|
||||
// test aggregation in root
|
||||
evDir = newEventDir()
|
||||
for i := 0; i < maxFiles; i++ {
|
||||
a.newEvent(fs.Event{strconv.Itoa(i), fs.NonRemove}, evDir, inProgress)
|
||||
}
|
||||
if len(getEventPaths(evDir, ".", a)) != maxFiles {
|
||||
t.Errorf("Unexpected number of events stored in root")
|
||||
}
|
||||
a.newEvent(fs.Event{filepath.Join("parent", "sub", "new"), fs.NonRemove}, evDir, inProgress)
|
||||
compareBatchToExpected(t, getEventPaths(evDir, ".", a), []string{"."})
|
||||
|
||||
// checks that adding an event when "." is already stored is a noop
|
||||
a.newEvent(fs.Event{"anythingelse", fs.NonRemove}, evDir, inProgress)
|
||||
compareBatchToExpected(t, getEventPaths(evDir, ".", a), []string{"."})
|
||||
|
||||
// TestOverflow checks that the entire folder is scanned when maxFiles is reached
|
||||
evDir = newEventDir()
|
||||
filesPerDir := maxFilesPerDir / 2
|
||||
dirs := make([]string, maxFiles/filesPerDir+1)
|
||||
for i := 0; i < maxFiles/filesPerDir+1; i++ {
|
||||
dirs[i] = "dir" + strconv.Itoa(i)
|
||||
}
|
||||
for _, dir := range dirs {
|
||||
for i := 0; i < filesPerDir; i++ {
|
||||
a.newEvent(fs.Event{filepath.Join(dir, strconv.Itoa(i)), fs.NonRemove}, evDir, inProgress)
|
||||
}
|
||||
}
|
||||
compareBatchToExpected(t, getEventPaths(evDir, ".", a), []string{"."})
|
||||
}
|
||||
|
||||
// TestInProgress checks that ignoring files currently edited by Syncthing works
|
||||
func TestInProgress(t *testing.T) {
|
||||
testCase := func(c chan<- fs.Event) {
|
||||
events.Default.Log(events.ItemStarted, map[string]string{
|
||||
"item": "inprogress",
|
||||
})
|
||||
sleepMs(100)
|
||||
c <- fs.Event{Name: "inprogress", Type: fs.NonRemove}
|
||||
sleepMs(1000)
|
||||
events.Default.Log(events.ItemFinished, map[string]interface{}{
|
||||
"item": "inprogress",
|
||||
})
|
||||
sleepMs(100)
|
||||
c <- fs.Event{Name: "notinprogress", Type: fs.NonRemove}
|
||||
sleepMs(800)
|
||||
}
|
||||
|
||||
expectedBatches := []expectedBatch{
|
||||
{[]string{"notinprogress"}, 2000, 3500},
|
||||
}
|
||||
|
||||
testScenario(t, "InProgress", testCase, expectedBatches)
|
||||
}
|
||||
|
||||
// TestDelay checks that recurring changes to the same path are delayed
|
||||
// and different types separated and ordered correctly
|
||||
func TestDelay(t *testing.T) {
|
||||
file := filepath.Join("parent", "file")
|
||||
delayed := "delayed"
|
||||
del := "deleted"
|
||||
both := filepath.Join("parent", "sub", "both")
|
||||
testCase := func(c chan<- fs.Event) {
|
||||
sleepMs(200)
|
||||
c <- fs.Event{Name: file, Type: fs.NonRemove}
|
||||
delay := time.Duration(300) * time.Millisecond
|
||||
timer := time.NewTimer(delay)
|
||||
<-timer.C
|
||||
timer.Reset(delay)
|
||||
c <- fs.Event{Name: delayed, Type: fs.NonRemove}
|
||||
c <- fs.Event{Name: both, Type: fs.NonRemove}
|
||||
c <- fs.Event{Name: both, Type: fs.Remove}
|
||||
c <- fs.Event{Name: del, Type: fs.Remove}
|
||||
for i := 0; i < 9; i++ {
|
||||
<-timer.C
|
||||
timer.Reset(delay)
|
||||
c <- fs.Event{Name: delayed, Type: fs.NonRemove}
|
||||
}
|
||||
<-timer.C
|
||||
}
|
||||
|
||||
// batches that we expect to receive with time interval in milliseconds
|
||||
expectedBatches := []expectedBatch{
|
||||
{[]string{file}, 500, 2500},
|
||||
{[]string{delayed}, 2500, 4500},
|
||||
{[]string{both}, 2500, 4500},
|
||||
{[]string{del}, 2500, 4500},
|
||||
{[]string{delayed}, 3600, 6500},
|
||||
}
|
||||
|
||||
testScenario(t, "Delay", testCase, expectedBatches)
|
||||
}
|
||||
|
||||
func getEventPaths(dir *eventDir, dirPath string, a *aggregator) []string {
|
||||
var paths []string
|
||||
for childName, childDir := range dir.dirs {
|
||||
for _, path := range getEventPaths(childDir, filepath.Join(dirPath, childName), a) {
|
||||
paths = append(paths, path)
|
||||
}
|
||||
}
|
||||
for name := range dir.events {
|
||||
paths = append(paths, filepath.Join(dirPath, name))
|
||||
}
|
||||
return paths
|
||||
}
|
||||
|
||||
func sleepMs(ms int) {
|
||||
time.Sleep(time.Duration(ms) * time.Millisecond)
|
||||
}
|
||||
|
||||
func durationMs(ms int) time.Duration {
|
||||
return time.Duration(ms) * time.Millisecond
|
||||
}
|
||||
|
||||
func compareBatchToExpected(t *testing.T, batch []string, expectedPaths []string) {
|
||||
for _, expected := range expectedPaths {
|
||||
expected = filepath.Clean(expected)
|
||||
found := false
|
||||
for i, received := range batch {
|
||||
if expected == received {
|
||||
found = true
|
||||
batch = append(batch[:i], batch[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("Did not receive event %s", expected)
|
||||
}
|
||||
}
|
||||
for _, received := range batch {
|
||||
t.Errorf("Received unexpected event %s", received)
|
||||
}
|
||||
}
|
||||
|
||||
func testScenario(t *testing.T, name string, testCase func(c chan<- fs.Event), expectedBatches []expectedBatch) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
eventChan := make(chan fs.Event)
|
||||
watchChan := make(chan []string)
|
||||
|
||||
folderCfg := defaultFolderCfg.Copy()
|
||||
folderCfg.ID = name
|
||||
a := new(folderCfg, ctx)
|
||||
a.notifyTimeout = testNotifyTimeout
|
||||
|
||||
startTime := time.Now()
|
||||
go a.mainLoop(eventChan, watchChan, defaultCfg)
|
||||
|
||||
sleepMs(10)
|
||||
go testAggregatorOutput(t, watchChan, expectedBatches, startTime, ctx)
|
||||
|
||||
testCase(eventChan)
|
||||
|
||||
timeout := time.NewTimer(time.Duration(expectedBatches[len(expectedBatches)-1].beforeMs+100) * time.Millisecond)
|
||||
<-timeout.C
|
||||
cancel()
|
||||
}
|
||||
|
||||
func testAggregatorOutput(t *testing.T, fsWatchChan <-chan []string, expectedBatches []expectedBatch, startTime time.Time, ctx context.Context) {
|
||||
var received []string
|
||||
var elapsedTime time.Duration
|
||||
batchIndex := 0
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if batchIndex != len(expectedBatches) {
|
||||
t.Errorf("Received only %d batches (%d expected)", batchIndex, len(expectedBatches))
|
||||
}
|
||||
return
|
||||
case received = <-fsWatchChan:
|
||||
}
|
||||
|
||||
if batchIndex >= len(expectedBatches) {
|
||||
t.Errorf("Received batch %d (only %d expected)", batchIndex+1, len(expectedBatches))
|
||||
continue
|
||||
}
|
||||
|
||||
elapsedTime = time.Since(startTime)
|
||||
expected := expectedBatches[batchIndex]
|
||||
switch {
|
||||
case elapsedTime < durationMs(expected.afterMs):
|
||||
t.Errorf("Received batch %d after %v (too soon)", batchIndex+1, elapsedTime)
|
||||
|
||||
case elapsedTime > durationMs(expected.beforeMs):
|
||||
t.Errorf("Received batch %d after %v (too late)", batchIndex+1, elapsedTime)
|
||||
|
||||
case len(received) != len(expected.paths):
|
||||
t.Errorf("Received %v events instead of %v for batch %v", len(received), len(expected.paths), batchIndex+1)
|
||||
}
|
||||
compareBatchToExpected(t, received, expected.paths)
|
||||
batchIndex++
|
||||
}
|
||||
}
|
||||
24 lib/watchaggregator/debug.go (new file)
@@ -0,0 +1,24 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package watchaggregator

import (
"os"
"strings"

"github.com/syncthing/syncthing/lib/logger"
)

var facilityName = "watchaggregator"

var (
l = logger.DefaultLogger.NewFacility(facilityName, "Filesystem event watcher")
)

func init() {
l.SetDebug(facilityName, strings.Contains(os.Getenv("STTRACE"), facilityName) || os.Getenv("STTRACE") == "all")
}
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "STDISCOSRV" "1" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "STDISCOSRV" "1" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
stdiscosrv \- Syncthing Discovery Server
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "STRELAYSRV" "1" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "STRELAYSRV" "1" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
strelaysrv \- Syncthing Relay Server
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-BEP" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-BEP" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-bep \- Block Exchange Protocol v1
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-CONFIG" "5" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-CONFIG" "5" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-config \- Syncthing Configuration
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-DEVICE-IDS" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-DEVICE-IDS" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-device-ids \- Understanding Device IDs
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-EVENT-API" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-EVENT-API" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-event-api \- Event API
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-FAQ" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-FAQ" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-faq \- Frequently Asked Questions
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-GLOBALDISCO" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-GLOBALDISCO" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-globaldisco \- Global Discovery Protocol v3
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-LOCALDISCO" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-LOCALDISCO" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-localdisco \- Local Discovery Protocol v4
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-NETWORKING" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-NETWORKING" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-networking \- Firewall Setup
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-RELAY" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-RELAY" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-relay \- Relay Protocol v1
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-REST-API" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-REST-API" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-rest-api \- REST API
|
||||
.
|
||||
@@ -111,6 +111,7 @@ Returns the current configuration.
|
||||
}
|
||||
],
|
||||
"rescanIntervalS": 60,
|
||||
"longRescanIntervalS": 3600,
|
||||
"ignorePerms": false,
|
||||
"autoNormalize": true,
|
||||
"minDiskFreePct": 1,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-SECURITY" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-SECURITY" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-security \- Security Principles
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-STIGNORE" "5" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-STIGNORE" "5" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-stignore \- Prevent files from being synchronized to other nodes
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-VERSIONING" "7" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-VERSIONING" "7" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-versioning \- Keep automatic backups of deleted files by other nodes
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING" "1" "September 04, 2017" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING" "1" "September 08, 2017" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing \- Syncthing
|
||||
.
|
||||
|
||||
56 vendor/github.com/AudriusButkevicius/kcp-go/blacklist.go (generated, vendored, new file)
@@ -0,0 +1,56 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
BlacklistDuration time.Duration
|
||||
blacklist = blacklistMap{
|
||||
entries: make(map[sessionKey]time.Time),
|
||||
}
|
||||
)
|
||||
|
||||
// a global map for blacklisting conversations
|
||||
type blacklistMap struct {
|
||||
entries map[sessionKey]time.Time
|
||||
reapAt time.Time
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
func (m *blacklistMap) add(address string, conv uint32) {
|
||||
if BlacklistDuration == 0 {
|
||||
return
|
||||
}
|
||||
m.mut.Lock()
|
||||
timeout := time.Now().Add(BlacklistDuration)
|
||||
m.entries[sessionKey{
|
||||
addr: address,
|
||||
convID: conv,
|
||||
}] = timeout
|
||||
m.reap()
|
||||
m.mut.Unlock()
|
||||
}
|
||||
|
||||
func (m *blacklistMap) has(address string, conv uint32) bool {
|
||||
if BlacklistDuration == 0 {
|
||||
return false
|
||||
}
|
||||
m.mut.Lock()
|
||||
t, ok := m.entries[sessionKey{
|
||||
addr: address,
|
||||
convID: conv,
|
||||
}]
|
||||
m.mut.Unlock()
|
||||
return ok && t.After(time.Now())
|
||||
}
|
||||
|
||||
func (m *blacklistMap) reap() {
|
||||
now := time.Now()
|
||||
for k, t := range m.entries {
|
||||
if t.Before(now) {
|
||||
delete(m.entries, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
23 vendor/github.com/AudriusButkevicius/kcp-go/crypt.go (generated, vendored)
@@ -7,6 +7,7 @@ import (
|
||||
"crypto/sha1"
|
||||
|
||||
"github.com/templexxx/xor"
|
||||
"github.com/tjfoc/gmsm/sm4"
|
||||
|
||||
"golang.org/x/crypto/blowfish"
|
||||
"golang.org/x/crypto/cast5"
|
||||
@@ -55,6 +56,28 @@ func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
|
||||
copy(dst[:8], src[:8])
|
||||
}
|
||||
|
||||
type sm4BlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewSM4BlockCrypt https://github.com/tjfoc/gmsm/tree/master/sm4
|
||||
func NewSM4BlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(sm4BlockCrypt)
|
||||
block, err := sm4.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, sm4.BlockSize)
|
||||
c.decbuf = make([]byte, 2*sm4.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type twofishBlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
|
||||
8 vendor/github.com/AudriusButkevicius/kcp-go/fec.go (generated, vendored)
@@ -4,7 +4,7 @@ import (
|
||||
"encoding/binary"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/klauspost/reedsolomon"
|
||||
"github.com/templexxx/reedsolomon"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -52,7 +52,7 @@ func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
|
||||
fec.dataShards = dataShards
|
||||
fec.parityShards = parityShards
|
||||
fec.shardSize = dataShards + parityShards
|
||||
enc, err := reedsolomon.New(dataShards, parityShards, reedsolomon.WithMaxGoroutines(1))
|
||||
enc, err := reedsolomon.New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
@@ -157,7 +157,7 @@ func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
|
||||
xorBytes(shards[k][dlen:], shards[k][dlen:], shards[k][dlen:])
|
||||
}
|
||||
}
|
||||
if err := dec.codec.Reconstruct(shards); err == nil {
|
||||
if err := dec.codec.ReconstructData(shards); err == nil {
|
||||
for k := range shards[:dec.dataShards] {
|
||||
if !shardsflag[k] {
|
||||
recovered = append(recovered, shards[k])
|
||||
@@ -226,7 +226,7 @@ func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
|
||||
fec.headerOffset = offset
|
||||
fec.payloadOffset = fec.headerOffset + fecHeaderSize
|
||||
|
||||
enc, err := reedsolomon.New(dataShards, parityShards, reedsolomon.WithMaxGoroutines(1))
|
||||
enc, err := reedsolomon.New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
45 vendor/github.com/AudriusButkevicius/kcp-go/sess.go (generated, vendored)
@@ -3,7 +3,6 @@ package kcp
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"net"
|
||||
@@ -152,6 +151,7 @@ func newUDPSession(conv uint32, dataShards, parityShards int, l *Listener, conn
|
||||
}
|
||||
})
|
||||
sess.kcp.SetMtu(IKCP_MTU_DEF - sess.headerSize)
|
||||
blacklist.add(remote.String(), conv)
|
||||
|
||||
// add current session to the global updater,
|
||||
// which periodically calls sess.update()
|
||||
@@ -306,8 +306,10 @@ func (s *UDPSession) Close() error {
|
||||
// remove this session from updater & listener(if necessary)
|
||||
updater.removeSession(s)
|
||||
if s.l != nil { // notify listener
|
||||
key := fmt.Sprintf("%s/%d", s.remote.String(), s.kcp.conv)
|
||||
s.l.closeSession(key)
|
||||
s.l.closeSession(sessionKey{
|
||||
addr: s.remote.String(),
|
||||
convID: s.kcp.conv,
|
||||
})
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
@@ -660,6 +662,11 @@ func (s *UDPSession) readLoop() {
|
||||
}
|
||||
|
||||
type (
|
||||
sessionKey struct {
|
||||
addr string
|
||||
convID uint32
|
||||
}
|
||||
|
||||
// Listener defines a server listening for connections
|
||||
Listener struct {
|
||||
block BlockCrypt // block encryption
|
||||
@@ -668,12 +675,12 @@ type (
|
||||
fecDecoder *fecDecoder // FEC mock initialization
|
||||
conn net.PacketConn // the underlying packet connection
|
||||
|
||||
sessions map[string]*UDPSession // all sessions accepted by this Listener
|
||||
chAccepts chan *UDPSession // Listen() backlog
|
||||
chSessionClosed chan string // session close queue
|
||||
headerSize int // the overall header size added before KCP frame
|
||||
die chan struct{} // notify the listener has closed
|
||||
rd atomic.Value // read deadline for Accept()
|
||||
sessions map[sessionKey]*UDPSession // all sessions accepted by this Listener
|
||||
chAccepts chan *UDPSession // Listen() backlog
|
||||
chSessionClosed chan sessionKey // session close queue
|
||||
headerSize int // the overall header size added before KCP frame
|
||||
die chan struct{} // notify the listener has closed
|
||||
rd atomic.Value // read deadline for Accept()
|
||||
wd atomic.Value
|
||||
}
|
||||
|
||||
@@ -687,7 +694,7 @@ type (
|
||||
// monitor incoming data for all connections of server
|
||||
func (l *Listener) monitor() {
|
||||
// cache last session
|
||||
var lastKey string
|
||||
var lastKey sessionKey
|
||||
var lastSession *UDPSession
|
||||
|
||||
chPacket := make(chan inPacket, qlen)
|
||||
@@ -728,8 +735,10 @@ func (l *Listener) monitor() {
|
||||
}
|
||||
|
||||
if convValid {
|
||||
addr := from.String()
|
||||
key := fmt.Sprintf("%s/%d", addr, conv)
|
||||
key := sessionKey{
|
||||
addr: from.String(),
|
||||
convID: conv,
|
||||
}
|
||||
var s *UDPSession
|
||||
var ok bool
|
||||
|
||||
@@ -739,11 +748,11 @@ func (l *Listener) monitor() {
|
||||
s, ok = lastSession, true
|
||||
} else if s, ok = l.sessions[key]; ok {
|
||||
lastSession = s
|
||||
lastKey = addr
|
||||
lastKey = key
|
||||
}
|
||||
|
||||
if !ok { // new session
|
||||
if len(l.chAccepts) < cap(l.chAccepts) && len(l.sessions) < 4096 { // do not let new session overwhelm accept queue and connection count
|
||||
if !blacklist.has(from.String(), conv) && len(l.chAccepts) < cap(l.chAccepts) && len(l.sessions) < 4096 { // do not let new session overwhelm accept queue and connection count
|
||||
s := newUDPSession(conv, l.dataShards, l.parityShards, l, l.conn, from, l.block)
|
||||
s.kcpInput(data)
|
||||
l.sessions[key] = s
|
||||
@@ -758,7 +767,7 @@ func (l *Listener) monitor() {
|
||||
xmitBuf.Put(raw)
|
||||
case key := <-l.chSessionClosed:
|
||||
if key == lastKey {
|
||||
lastKey = ""
|
||||
lastKey = sessionKey{}
|
||||
}
|
||||
delete(l.sessions, key)
|
||||
case <-l.die:
|
||||
@@ -856,7 +865,7 @@ func (l *Listener) Close() error {
|
||||
}
|
||||
|
||||
// closeSession notify the listener that a session has closed
|
||||
func (l *Listener) closeSession(key string) bool {
|
||||
func (l *Listener) closeSession(key sessionKey) bool {
|
||||
select {
|
||||
case l.chSessionClosed <- key:
|
||||
return true
|
||||
@@ -890,9 +899,9 @@ func ListenWithOptions(laddr string, block BlockCrypt, dataShards, parityShards
|
||||
func ServeConn(block BlockCrypt, dataShards, parityShards int, conn net.PacketConn) (*Listener, error) {
|
||||
l := new(Listener)
|
||||
l.conn = conn
|
||||
l.sessions = make(map[string]*UDPSession)
|
||||
l.sessions = make(map[sessionKey]*UDPSession)
|
||||
l.chAccepts = make(chan *UDPSession, acceptBacklog)
|
||||
l.chSessionClosed = make(chan string)
|
||||
l.chSessionClosed = make(chan sessionKey)
|
||||
l.die = make(chan struct{})
|
||||
l.dataShards = dataShards
|
||||
l.parityShards = parityShards
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.