Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-18 18:58:51 -05:00)
Compare commits
67 Commits
| Author | SHA1 | Date |
|---|---|---|
| | c953cdc375 | |
| | c20c17e3c6 | |
| | 1a1e35d998 | |
| | 8d2a31e38e | |
| | fe377a166a | |
| | 5770d80bf9 | |
| | 7a16dbd31d | |
| | 926c88cfc4 | |
| | 29d010ec0e | |
| | 920274bce4 | |
| | 2ebd6ad77f | |
| | 79dd6918f2 | |
| | 79f7f50c4d | |
| | 987718baf8 | |
| | 4fb9c143ac | |
| | ec62888539 | |
| | 8c34a76f7a | |
| | 6809d38cde | |
| | 69ae4aa024 | |
| | 8e8b867fba | |
| | 0a118d2979 | |
| | 8daaa5d0d2 | |
| | eb14f85a57 | |
| | c69c3c7c36 | |
| | 54911d44c5 | |
| | c765f7be8d | |
| | 44bdaf3ac2 | |
| | fc16e49cb0 | |
| | f5a310ad64 | |
| | 01e50eb3fa | |
| | bab7c8ebbf | |
| | 0725e3af38 | |
| | dd7bb6c4b8 | |
| | d41c131364 | |
| | 47f22ff3e5 | |
| | 744c2e82b5 | |
| | ead7281c20 | |
| | aa3ef49dd7 | |
| | 5c067661f4 | |
| | 226da976dc | |
| | ba17cc0a11 | |
| | 9e0afb7d8a | |
| | 9e7d50bc76 | |
| | d7d5687faa | |
| | 21eb098dd2 | |
| | 2f770f8bfb | |
| | f09698d845 | |
| | 3fbcb024a7 | |
| | b8c1c0e048 | |
| | 2d47242d54 | |
| | 66a7829eee | |
| | 9c67bd2550 | |
| | f67c5a2fd6 | |
| | 263402f80a | |
| | 920a83ec7a | |
| | 3c2ac3522c | |
| | 9fdaa637a8 | |
| | 81a9d7f2b9 | |
| | d8d3f05164 | |
| | 653be136ee | |
| | 398c356f22 | |
| | 542b76f687 | |
| | abb8a1914a | |
| | 163d335078 | |
| | 0582836820 | |
| | bb15776ae6 | |
| | dde9d4c9eb | |

`.gitattributes` (vendored, 1 changed line)

```diff
@@ -6,4 +6,3 @@ vendor/** -text=auto

# Diffs on these files are meaningless
*.svg -diff
*.pb.go -diff
```

`AUTHORS` (6 changed lines)

```diff
@@ -6,7 +6,8 @@
# The NICKS list is auto generated from this file.

Aaron Bieber (qbit) <qbit@deftly.net>
Adam Piggott (simplypeachy) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com>
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
@@ -48,6 +49,7 @@ Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
Frank Isemann (fti7) <frank@isemann.name>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jake Peterson (acogdev) <jake@acogdev.com>
@@ -62,6 +64,7 @@ Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
Kurt Fitzner (Kudalufi) <kurt@va1der.ca>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Leo Arias (elopio) <yo@elopio.net>
@@ -71,6 +74,7 @@ Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
```

`NICKS` (10 changed lines)

```diff
@@ -4,6 +4,8 @@
0x010C <antoine.lamielle@0x010c.fr>
0x010C <gh@0x010c.fr>
acogdev <jake@acogdev.com>
adelq <aqalieh95@gmail.com>
adelq <adelq@users.noreply.github.com>
alessandro.g89 <alessandro.g89@gmail.com>
alex2108 <register-github@alex-graf.de>
andersonvom <andersonvom@gmail.com>
@@ -63,6 +65,7 @@ kozec <kozec@kozec.com>
kralo <max.schulze@online.de>
kralo <kralo@users.noreply.github.com>
krozycki <rozycki.karol@gmail.com>
Kudalufi <kurt@va1der.ca>
kwhite17 <kevinwhite1710@gmail.com>
letiemble <laurent.etiemble@gmail.com>
letiemble <laurent.etiemble@monobjc.net>
@@ -76,6 +79,7 @@ mateon1 <matin1111@wp.pl>
mogwa1 <devriesb@gmail.com>
moshen <moshen.colin@gmail.com>
Moter8 <moter8@gmail.com>
mpx <mark@kyne.com.au>
mvdan <mvdan@mvdan.cc>
norgeous <daniel@harte.me>
norgeous <daniel@danielharte.co.uk>
@@ -89,6 +93,9 @@ philips <brandon@ifup.org>
piobpl <piotrb10@gmail.com>
plouj <ploujj@gmail.com>
pluby <phill.luby@newredo.com>
ProactiveServices <aD@simplypeachy.co.uk>
ProactiveServices <simplypeachy@users.noreply.github.com>
ProactiveServices <ProactiveServices@users.noreply.github.com>
pyfisch <pyfisch@gmail.com>
qbit <qbit@deftly.net>
ralder <ralder@yandex.ru>
@@ -99,8 +106,7 @@ rumpelsepp <rumpelsepp@sevenbyte.org>
scienmind <scintertech@cryptolab.net>
sciurius <jvromans@squirrel.nl>
seehuhn <voss@seehuhn.de>
simplypeachy <aD@simplypeachy.co.uk>
simplypeachy <simplypeachy@users.noreply.github.com>
Smiley73 <heiko@zuerker.org>
snnd <dw@risu.io>
Stefan-Code <stefan.github@gmail.com>
Stefan-Code <Stefan.github@gmail.com>
```

`build.go` (137 changed lines)

```diff
@@ -27,7 +27,6 @@ import (
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"text/template"
	"time"
)
@@ -154,6 +153,40 @@ var targets = map[string]target{
	},
}

var (
	// fast linters complete in a fraction of a second and might as well be
	// run always as part of the build
	fastLinters = []string{
		"deadcode",
		"golint",
		"ineffassign",
		"vet",
	}

	// slow linters take several seconds and are run only as part of the
	// "metalint" command.
	slowLinters = []string{
		"gosimple",
		"staticcheck",
		"structcheck",
		"unused",
		"varcheck",
	}

	// Which parts of the tree to lint
	lintDirs = []string{".", "./lib/...", "./cmd/..."}

	// Messages to ignore
	lintExcludes = []string{
		".pb.go",
		"should have comment",
		"protocol.Vector composite literal uses unkeyed fields",
		"cli.Requires composite literal uses unkeyed fields",
		"Use DialContext instead",   // Go 1.7
		"os.SEEK_SET is deprecated", // Go 1.7
	}
)

func init() {
	// The "syncthing" target includes a few more files found in the "etc"
	// and "extra" dirs.
@@ -203,8 +236,6 @@ func main() {
	// which is what you want for maximum error checking during development.
	if flag.NArg() == 0 {
		runCommand("install", targets["all"])
		runCommand("vet", target{})
		runCommand("lint", target{})
	} else {
		// with any command given but not a target, the target is
		// "syncthing". So "go run build.go install" is "go run build.go install
@@ -242,6 +273,7 @@ func runCommand(cmd string, target target) {
			tags = []string{"noupgrade"}
		}
		install(target, tags)
		metalint(fastLinters, lintDirs)

	case "build":
		var tags []string
@@ -249,6 +281,7 @@ func runCommand(cmd string, target target) {
			tags = []string{"noupgrade"}
		}
		build(target, tags)
		metalint(fastLinters, lintDirs)

	case "test":
		test("./lib/...", "./cmd/...")
@@ -284,25 +317,14 @@ func runCommand(cmd string, target target) {
		clean()

	case "vet":
		vet("build.go")
		vet("cmd", "lib")
		metalint(fastLinters, lintDirs)

	case "lint":
		lint(".")
		lint("./cmd/...")
		lint("./lib/...")
		metalint(fastLinters, lintDirs)

	case "metalint":
		if isGometalinterInstalled() {
			dirs := []string{".", "./cmd/...", "./lib/..."}
			ok := gometalinter("deadcode", dirs, "test/util.go")
			ok = gometalinter("structcheck", dirs) && ok
			ok = gometalinter("varcheck", dirs) && ok
			ok = gometalinter("ineffassign", dirs) && ok
			if !ok {
				os.Exit(1)
			}
		}
		metalint(fastLinters, lintDirs)
		metalint(slowLinters, lintDirs)

	case "version":
		fmt.Println(getVersion())
@@ -364,16 +386,22 @@ func setup() {
		"github.com/FiloSottile/gvt",
		"github.com/golang/lint/golint",
		"github.com/gordonklaus/ineffassign",
		"github.com/mdempsky/unconvert",
		"github.com/mitchellh/go-wordwrap",
		"github.com/opennota/check/cmd/...",
		"github.com/tsenart/deadcode",
		"golang.org/x/net/html",
		"golang.org/x/tools/cmd/cover",
		"honnef.co/go/simple/cmd/gosimple",
		"honnef.co/go/staticcheck/cmd/staticcheck",
		"honnef.co/go/unused/cmd/unused",
	}
	for _, pkg := range packages {
		fmt.Println(pkg)
		runPrint("go", "get", "-u", pkg)
	}

	runPrint("go", "install", "-v", "./vendor/github.com/gogo/protobuf/protoc-gen-gogofast")
}

func test(pkgs ...string) {
@@ -1003,48 +1031,6 @@ func zipFile(out string, files []archiveFile) {
	}
}

func vet(dirs ...string) {
	params := []string{"tool", "vet", "-all"}
	params = append(params, dirs...)
	bs, err := runError("go", params...)

	if len(bs) > 0 {
		log.Printf("%s", bs)
	}

	if err != nil {
		if exitStatus(err) == 3 {
			// Exit code 3, the "vet" tool is not installed
			return
		}

		// A genuine error exit from the vet tool.
		log.Fatal(err)
	}
}

func lint(pkg string) {
	bs, err := runError("golint", pkg)
	if err != nil {
		log.Println(`- No golint, not linting. Try "go get -u github.com/golang/lint/golint".`)
		return
	}

	analCommentPolicy := regexp.MustCompile(`exported (function|method|const|type|var) [^\s]+ should have comment`)
	for _, line := range strings.Split(string(bs), "\n") {
		if line == "" {
			continue
		}
		if analCommentPolicy.MatchString(line) {
			continue
		}
		if strings.Contains(line, ".pb.go:") {
			continue
		}
		log.Println(line)
	}
}

func macosCodesign(file string) {
	if pass := os.Getenv("CODESIGN_KEYCHAIN_PASS"); pass != "" {
		bs, err := runError("security", "unlock-keychain", "-p", pass)
@@ -1064,14 +1050,16 @@ func macosCodesign(file string) {
	}
}

func exitStatus(err error) int {
	if err, ok := err.(*exec.ExitError); ok {
		if ws, ok := err.ProcessState.Sys().(syscall.WaitStatus); ok {
			return ws.ExitStatus()
func metalint(linters []string, dirs []string) {
	ok := true
	if isGometalinterInstalled() {
		if !gometalinter(linters, dirs, lintExcludes...) {
			ok = false
		}
	}

	return -1
	if !ok {
		log.Fatal("Build succeeded, but there were lint warnings")
	}
}

func isGometalinterInstalled() bool {
@@ -1082,10 +1070,12 @@ func isGometalinterInstalled() bool {
	return true
}

func gometalinter(linter string, dirs []string, excludes ...string) bool {
	params := []string{"--disable-all"}
	params = append(params, fmt.Sprintf("--deadline=%ds", 60))
	params = append(params, "--enable="+linter)
func gometalinter(linters []string, dirs []string, excludes ...string) bool {
	params := []string{"--disable-all", "--concurrency=2", "--deadline=300s"}

	for _, linter := range linters {
		params = append(params, "--enable="+linter)
	}

	for _, exclude := range excludes {
		params = append(params, "--exclude="+exclude)
@@ -1098,14 +1088,19 @@ func gometalinter(linter string, dirs []string, excludes ...string) bool {
	bs, _ := runError("gometalinter", params...)

	nerr := 0
	lines := make(map[string]struct{})
	for _, line := range strings.Split(string(bs), "\n") {
		if line == "" {
			continue
		}
		if strings.Contains(line, ".pb.go:") {
		if _, ok := lines[line]; ok {
			continue
		}
		log.Println(line)
		if strings.Contains(line, "executable file not found") {
			log.Println(` - Try "go run build.go setup" to install missing tools`)
		}
		lines[line] = struct{}{}
		nerr++
	}
```

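Taken together, the build.go changes boil down to one pattern: assemble a single gometalinter invocation from the enable and exclude lists, run it over the chosen directories, and surface any output as warnings. A standalone sketch of that pattern is below; it is not the build script itself, the linter and exclude values are simply the ones from the diff, and gometalinter is assumed to be on the PATH.

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	linters := []string{"deadcode", "golint", "ineffassign", "vet"}
	excludes := []string{".pb.go", "should have comment"}
	dirs := []string{".", "./lib/...", "./cmd/..."}

	// Same flag layout as the gometalinter() helper in the diff above.
	params := []string{"--disable-all", "--concurrency=2", "--deadline=300s"}
	for _, l := range linters {
		params = append(params, "--enable="+l)
	}
	for _, e := range excludes {
		params = append(params, "--exclude="+e)
	}
	params = append(params, dirs...)

	// gometalinter exits non-zero when it finds problems, so the output is
	// what matters; a missing binary also shows up as err with no output.
	out, err := exec.Command("gometalinter", params...).CombinedOutput()
	if err != nil && len(out) == 0 {
		log.Fatal(err)
	}
	for _, line := range strings.Split(string(out), "\n") {
		if line != "" {
			fmt.Println(line)
		}
	}
}
```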
`cmd/stcli/LICENSE` (new file, 19 lines)

```
Copyright (C) 2014 Audrius Butkevičius

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

- The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```

`cmd/stcli/README.md` (new file, 10 lines)

```markdown
syncthing-cli
=============

[](http://build.syncthing.net/job/syncthing-cli/lastBuild/)

A CLI that talks to the Syncthing REST interface.

`go get github.com/syncthing/syncthing-cli`

Or download the [latest build](http://build.syncthing.net/job/syncthing-cli/lastSuccessfulBuild/artifact/).
```

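For context, the REST interface the CLI wraps is plain HTTP plus an `X-API-Key` header, which is what `client.go` below implements. A minimal sketch of one such call, assuming a local instance on 127.0.0.1:8384 and an API key exported as `STAPIKEY` (both placeholders to adjust for your setup):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// Assumed values: a local Syncthing instance and an API key taken
	// from the GUI settings, passed here via an environment variable.
	endpoint := "http://127.0.0.1:8384"
	apikey := os.Getenv("STAPIKEY")

	req, err := http.NewRequest("GET", endpoint+"/rest/system/status", nil)
	if err != nil {
		panic(err)
	}
	// Authentication is a single header; the stcli client sets the same
	// header on every request when an API key is configured.
	req.Header.Set("X-API-Key", apikey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```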
`cmd/stcli/client.go` (new file, 115 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"bytes"
	"crypto/tls"
	"net/http"
	"strings"

	"github.com/AudriusButkevicius/cli"
)

type APIClient struct {
	httpClient http.Client
	endpoint   string
	apikey     string
	username   string
	password   string
	id         string
	csrf       string
}

var instance *APIClient

func getClient(c *cli.Context) *APIClient {
	if instance != nil {
		return instance
	}
	endpoint := c.GlobalString("endpoint")
	if !strings.HasPrefix(endpoint, "http") {
		endpoint = "http://" + endpoint
	}
	httpClient := http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: c.GlobalBool("insecure"),
			},
		},
	}
	client := APIClient{
		httpClient: httpClient,
		endpoint:   endpoint,
		apikey:     c.GlobalString("apikey"),
		username:   c.GlobalString("username"),
		password:   c.GlobalString("password"),
	}

	if client.apikey == "" {
		request, err := http.NewRequest("GET", client.endpoint, nil)
		die(err)
		response := client.handleRequest(request)
		client.id = response.Header.Get("X-Syncthing-ID")
		if client.id == "" {
			die("Failed to get device ID")
		}
		for _, item := range response.Cookies() {
			if item.Name == "CSRF-Token-"+client.id[:5] {
				client.csrf = item.Value
				goto csrffound
			}
		}
		die("Failed to get CSRF token")
	csrffound:
	}
	instance = &client
	return &client
}

func (client *APIClient) handleRequest(request *http.Request) *http.Response {
	if client.apikey != "" {
		request.Header.Set("X-API-Key", client.apikey)
	}
	if client.username != "" || client.password != "" {
		request.SetBasicAuth(client.username, client.password)
	}
	if client.csrf != "" {
		request.Header.Set("X-CSRF-Token-"+client.id[:5], client.csrf)
	}

	response, err := client.httpClient.Do(request)
	die(err)

	if response.StatusCode == 404 {
		die("Invalid endpoint or API call")
	} else if response.StatusCode == 401 {
		die("Invalid username or password")
	} else if response.StatusCode == 403 {
		if client.apikey == "" {
			die("Invalid CSRF token")
		}
		die("Invalid API key")
	} else if response.StatusCode != 200 {
		body := strings.TrimSpace(string(responseToBArray(response)))
		if body != "" {
			die(body)
		}
		die("Unknown HTTP status returned: " + response.Status)
	}
	return response
}

func httpGet(c *cli.Context, url string) *http.Response {
	client := getClient(c)
	request, err := http.NewRequest("GET", client.endpoint+"/rest/"+url, nil)
	die(err)
	return client.handleRequest(request)
}

func httpPost(c *cli.Context, url string, body string) *http.Response {
	client := getClient(c)
	request, err := http.NewRequest("POST", client.endpoint+"/rest/"+url, bytes.NewBufferString(body))
	die(err)
	return client.handleRequest(request)
}
```

`cmd/stcli/cmd_devices.go` (new file, 188 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"fmt"
	"strings"

	"github.com/AudriusButkevicius/cli"
	"github.com/syncthing/syncthing/lib/config"
)

func init() {
	cliCommands = append(cliCommands, cli.Command{
		Name:     "devices",
		HideHelp: true,
		Usage:    "Device command group",
		Subcommands: []cli.Command{
			{
				Name:     "list",
				Usage:    "List registered devices",
				Requires: &cli.Requires{},
				Action:   devicesList,
			},
			{
				Name:     "add",
				Usage:    "Add a new device",
				Requires: &cli.Requires{"device id", "device name?"},
				Action:   devicesAdd,
			},
			{
				Name:     "remove",
				Usage:    "Remove an existing device",
				Requires: &cli.Requires{"device id"},
				Action:   devicesRemove,
			},
			{
				Name:     "get",
				Usage:    "Get a property of a device",
				Requires: &cli.Requires{"device id", "property"},
				Action:   devicesGet,
			},
			{
				Name:     "set",
				Usage:    "Set a property of a device",
				Requires: &cli.Requires{"device id", "property", "value..."},
				Action:   devicesSet,
			},
		},
	})
}

func devicesList(c *cli.Context) {
	cfg := getConfig(c)
	first := true
	writer := newTableWriter()
	for _, device := range cfg.Devices {
		if !first {
			fmt.Fprintln(writer)
		}
		fmt.Fprintln(writer, "ID:\t", device.DeviceID, "\t")
		fmt.Fprintln(writer, "Name:\t", device.Name, "\t(name)")
		fmt.Fprintln(writer, "Address:\t", strings.Join(device.Addresses, " "), "\t(address)")
		fmt.Fprintln(writer, "Compression:\t", device.Compression, "\t(compression)")
		fmt.Fprintln(writer, "Certificate name:\t", device.CertName, "\t(certname)")
		fmt.Fprintln(writer, "Introducer:\t", device.Introducer, "\t(introducer)")
		first = false
	}
	writer.Flush()
}

func devicesAdd(c *cli.Context) {
	nid := c.Args()[0]
	id := parseDeviceID(nid)

	newDevice := config.DeviceConfiguration{
		DeviceID:  id,
		Name:      nid,
		Addresses: []string{"dynamic"},
	}

	if len(c.Args()) > 1 {
		newDevice.Name = c.Args()[1]
	}

	if len(c.Args()) > 2 {
		addresses := c.Args()[2:]
		for _, item := range addresses {
			if item == "dynamic" {
				continue
			}
			validAddress(item)
		}
		newDevice.Addresses = addresses
	}

	cfg := getConfig(c)
	for _, device := range cfg.Devices {
		if device.DeviceID == id {
			die("Device " + nid + " already exists")
		}
	}
	cfg.Devices = append(cfg.Devices, newDevice)
	setConfig(c, cfg)
}

func devicesRemove(c *cli.Context) {
	nid := c.Args()[0]
	id := parseDeviceID(nid)
	if nid == getMyID(c) {
		die("Cannot remove yourself")
	}
	cfg := getConfig(c)
	for i, device := range cfg.Devices {
		if device.DeviceID == id {
			last := len(cfg.Devices) - 1
			cfg.Devices[i] = cfg.Devices[last]
			cfg.Devices = cfg.Devices[:last]
			setConfig(c, cfg)
			return
		}
	}
	die("Device " + nid + " not found")
}

func devicesGet(c *cli.Context) {
	nid := c.Args()[0]
	id := parseDeviceID(nid)
	arg := c.Args()[1]
	cfg := getConfig(c)
	for _, device := range cfg.Devices {
		if device.DeviceID != id {
			continue
		}
		switch strings.ToLower(arg) {
		case "name":
			fmt.Println(device.Name)
		case "address":
			fmt.Println(strings.Join(device.Addresses, "\n"))
		case "compression":
			fmt.Println(device.Compression.String())
		case "certname":
			fmt.Println(device.CertName)
		case "introducer":
			fmt.Println(device.Introducer)
		default:
			die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
		}
		return
	}
	die("Device " + nid + " not found")
}

func devicesSet(c *cli.Context) {
	nid := c.Args()[0]
	id := parseDeviceID(nid)
	arg := c.Args()[1]
	config := getConfig(c)
	for i, device := range config.Devices {
		if device.DeviceID != id {
			continue
		}
		switch strings.ToLower(arg) {
		case "name":
			config.Devices[i].Name = strings.Join(c.Args()[2:], " ")
		case "address":
			for _, item := range c.Args()[2:] {
				if item == "dynamic" {
					continue
				}
				validAddress(item)
			}
			config.Devices[i].Addresses = c.Args()[2:]
		case "compression":
			err := config.Devices[i].Compression.UnmarshalText([]byte(c.Args()[2]))
			die(err)
		case "certname":
			config.Devices[i].CertName = strings.Join(c.Args()[2:], " ")
		case "introducer":
			config.Devices[i].Introducer = parseBool(c.Args()[2])
		default:
			die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
		}
		setConfig(c, config)
		return
	}
	die("Device " + nid + " not found")
}
```

`cmd/stcli/cmd_errors.go` (new file, 67 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/AudriusButkevicius/cli"
)

func init() {
	cliCommands = append(cliCommands, cli.Command{
		Name:     "errors",
		HideHelp: true,
		Usage:    "Error command group",
		Subcommands: []cli.Command{
			{
				Name:     "show",
				Usage:    "Show pending errors",
				Requires: &cli.Requires{},
				Action:   errorsShow,
			},
			{
				Name:     "push",
				Usage:    "Push an error to active clients",
				Requires: &cli.Requires{"error message..."},
				Action:   errorsPush,
			},
			{
				Name:     "clear",
				Usage:    "Clear pending errors",
				Requires: &cli.Requires{},
				Action:   wrappedHTTPPost("system/error/clear"),
			},
		},
	})
}

func errorsShow(c *cli.Context) {
	response := httpGet(c, "system/error")
	var data map[string][]map[string]interface{}
	json.Unmarshal(responseToBArray(response), &data)
	writer := newTableWriter()
	for _, item := range data["errors"] {
		time := item["time"].(string)[:19]
		time = strings.Replace(time, "T", " ", 1)
		err := item["error"].(string)
		err = strings.TrimSpace(err)
		fmt.Fprintln(writer, time+":\t"+err)
	}
	writer.Flush()
}

func errorsPush(c *cli.Context) {
	err := strings.Join(c.Args(), " ")
	response := httpPost(c, "system/error", strings.TrimSpace(err))
	if response.StatusCode != 200 {
		err = fmt.Sprint("Failed to push error\nStatus code: ", response.StatusCode)
		body := string(responseToBArray(response))
		if body != "" {
			err += "\nBody: " + body
		}
		die(err)
	}
}
```

`cmd/stcli/cmd_folders.go` (new file, 351 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"fmt"
	"path/filepath"
	"strings"

	"github.com/AudriusButkevicius/cli"
	"github.com/syncthing/syncthing/lib/config"
)

func init() {
	cliCommands = append(cliCommands, cli.Command{
		Name:     "folders",
		HideHelp: true,
		Usage:    "Folder command group",
		Subcommands: []cli.Command{
			{
				Name:     "list",
				Usage:    "List available folders",
				Requires: &cli.Requires{},
				Action:   foldersList,
			},
			{
				Name:     "add",
				Usage:    "Add a new folder",
				Requires: &cli.Requires{"folder id", "directory"},
				Action:   foldersAdd,
			},
			{
				Name:     "remove",
				Usage:    "Remove an existing folder",
				Requires: &cli.Requires{"folder id"},
				Action:   foldersRemove,
			},
			{
				Name:     "override",
				Usage:    "Override changes from other nodes for a master folder",
				Requires: &cli.Requires{"folder id"},
				Action:   foldersOverride,
			},
			{
				Name:     "get",
				Usage:    "Get a property of a folder",
				Requires: &cli.Requires{"folder id", "property"},
				Action:   foldersGet,
			},
			{
				Name:     "set",
				Usage:    "Set a property of a folder",
				Requires: &cli.Requires{"folder id", "property", "value..."},
				Action:   foldersSet,
			},
			{
				Name:     "unset",
				Usage:    "Unset a property of a folder",
				Requires: &cli.Requires{"folder id", "property"},
				Action:   foldersUnset,
			},
			{
				Name:     "devices",
				Usage:    "Folder devices command group",
				HideHelp: true,
				Subcommands: []cli.Command{
					{
						Name:     "list",
						Usage:    "List of devices which the folder is shared with",
						Requires: &cli.Requires{"folder id"},
						Action:   foldersDevicesList,
					},
					{
						Name:     "add",
						Usage:    "Share a folder with a device",
						Requires: &cli.Requires{"folder id", "device id"},
						Action:   foldersDevicesAdd,
					},
					{
						Name:     "remove",
						Usage:    "Unshare a folder with a device",
						Requires: &cli.Requires{"folder id", "device id"},
						Action:   foldersDevicesRemove,
					},
					{
						Name:     "clear",
						Usage:    "Unshare a folder with all devices",
						Requires: &cli.Requires{"folder id"},
						Action:   foldersDevicesClear,
					},
				},
			},
		},
	})
}

func foldersList(c *cli.Context) {
	cfg := getConfig(c)
	first := true
	writer := newTableWriter()
	for _, folder := range cfg.Folders {
		if !first {
			fmt.Fprintln(writer)
		}
		fmt.Fprintln(writer, "ID:\t", folder.ID, "\t")
		fmt.Fprintln(writer, "Path:\t", folder.RawPath, "\t(directory)")
		fmt.Fprintln(writer, "Folder type:\t", folder.Type, "\t(type)")
		fmt.Fprintln(writer, "Ignore permissions:\t", folder.IgnorePerms, "\t(permissions)")
		fmt.Fprintln(writer, "Rescan interval in seconds:\t", folder.RescanIntervalS, "\t(rescan)")

		if folder.Versioning.Type != "" {
			fmt.Fprintln(writer, "Versioning:\t", folder.Versioning.Type, "\t(versioning)")
			for key, value := range folder.Versioning.Params {
				fmt.Fprintf(writer, "Versioning %s:\t %s \t(versioning-%s)\n", key, value, key)
			}
		}
		first = false
	}
	writer.Flush()
}

func foldersAdd(c *cli.Context) {
	cfg := getConfig(c)
	abs, err := filepath.Abs(c.Args()[1])
	die(err)
	folder := config.FolderConfiguration{
		ID:      c.Args()[0],
		RawPath: filepath.Clean(abs),
	}
	cfg.Folders = append(cfg.Folders, folder)
	setConfig(c, cfg)
}

func foldersRemove(c *cli.Context) {
	cfg := getConfig(c)
	rid := c.Args()[0]
	for i, folder := range cfg.Folders {
		if folder.ID == rid {
			last := len(cfg.Folders) - 1
			cfg.Folders[i] = cfg.Folders[last]
			cfg.Folders = cfg.Folders[:last]
			setConfig(c, cfg)
			return
		}
	}
	die("Folder " + rid + " not found")
}

func foldersOverride(c *cli.Context) {
	cfg := getConfig(c)
	rid := c.Args()[0]
	for _, folder := range cfg.Folders {
		if folder.ID == rid && folder.Type == config.FolderTypeSendOnly {
			response := httpPost(c, "db/override", "")
			if response.StatusCode != 200 {
				err := fmt.Sprint("Failed to override changes\nStatus code: ", response.StatusCode)
				body := string(responseToBArray(response))
				if body != "" {
					err += "\nBody: " + body
				}
				die(err)
			}
			return
		}
	}
	die("Folder " + rid + " not found or folder not master")
}

func foldersGet(c *cli.Context) {
	cfg := getConfig(c)
	rid := c.Args()[0]
	arg := strings.ToLower(c.Args()[1])
	for _, folder := range cfg.Folders {
		if folder.ID != rid {
			continue
		}
		if strings.HasPrefix(arg, "versioning-") {
			arg = arg[11:]
			value, ok := folder.Versioning.Params[arg]
			if ok {
				fmt.Println(value)
				return
			}
			die("Versioning property " + c.Args()[1][11:] + " not found")
		}
		switch arg {
		case "directory":
			fmt.Println(folder.RawPath)
		case "type":
			fmt.Println(folder.Type)
		case "permissions":
			fmt.Println(folder.IgnorePerms)
		case "rescan":
			fmt.Println(folder.RescanIntervalS)
		case "versioning":
			if folder.Versioning.Type != "" {
				fmt.Println(folder.Versioning.Type)
			}
		default:
			die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, type, permissions, versioning, versioning-<key>")
		}
		return
	}
	die("Folder " + rid + " not found")
}

func foldersSet(c *cli.Context) {
	rid := c.Args()[0]
	arg := strings.ToLower(c.Args()[1])
	val := strings.Join(c.Args()[2:], " ")
	cfg := getConfig(c)
	for i, folder := range cfg.Folders {
		if folder.ID != rid {
			continue
		}
		if strings.HasPrefix(arg, "versioning-") {
			cfg.Folders[i].Versioning.Params[arg[11:]] = val
			setConfig(c, cfg)
			return
		}
		switch arg {
		case "directory":
			cfg.Folders[i].RawPath = val
		case "type":
			var t config.FolderType
			if err := t.UnmarshalText([]byte(val)); err != nil {
				die("Invalid folder type: " + err.Error())
			}
			cfg.Folders[i].Type = t
		case "permissions":
			cfg.Folders[i].IgnorePerms = parseBool(val)
		case "rescan":
			cfg.Folders[i].RescanIntervalS = parseInt(val)
		case "versioning":
			cfg.Folders[i].Versioning.Type = val
		default:
			die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, master, permissions, versioning, versioning-<key>")
		}
		setConfig(c, cfg)
		return
	}
	die("Folder " + rid + " not found")
}

func foldersUnset(c *cli.Context) {
	rid := c.Args()[0]
	arg := strings.ToLower(c.Args()[1])
	cfg := getConfig(c)
	for i, folder := range cfg.Folders {
		if folder.ID != rid {
			continue
		}
		if strings.HasPrefix(arg, "versioning-") {
			arg = arg[11:]
			if _, ok := folder.Versioning.Params[arg]; ok {
				delete(cfg.Folders[i].Versioning.Params, arg)
				setConfig(c, cfg)
				return
			}
			die("Versioning property " + c.Args()[1][11:] + " not found")
		}
		switch arg {
		case "versioning":
			cfg.Folders[i].Versioning.Type = ""
			cfg.Folders[i].Versioning.Params = make(map[string]string)
		default:
			die("Invalid property: " + c.Args()[1] + "\nAvailable properties: versioning, versioning-<key>")
		}
		setConfig(c, cfg)
		return
	}
	die("Folder " + rid + " not found")
}

func foldersDevicesList(c *cli.Context) {
	rid := c.Args()[0]
	cfg := getConfig(c)
	for _, folder := range cfg.Folders {
		if folder.ID != rid {
			continue
		}
		for _, device := range folder.Devices {
			fmt.Println(device.DeviceID)
		}
		return
	}
	die("Folder " + rid + " not found")
}

func foldersDevicesAdd(c *cli.Context) {
	rid := c.Args()[0]
	nid := parseDeviceID(c.Args()[1])
	cfg := getConfig(c)
	for i, folder := range cfg.Folders {
		if folder.ID != rid {
			continue
		}
		for _, device := range folder.Devices {
			if device.DeviceID == nid {
				die("Device " + c.Args()[1] + " is already part of this folder")
			}
		}
		for _, device := range cfg.Devices {
			if device.DeviceID == nid {
				cfg.Folders[i].Devices = append(folder.Devices, config.FolderDeviceConfiguration{
					DeviceID: device.DeviceID,
				})
				setConfig(c, cfg)
				return
			}
		}
		die("Device " + c.Args()[1] + " not found in device list")
	}
	die("Folder " + rid + " not found")
}

func foldersDevicesRemove(c *cli.Context) {
	rid := c.Args()[0]
	nid := parseDeviceID(c.Args()[1])
	cfg := getConfig(c)
	for ri, folder := range cfg.Folders {
		if folder.ID != rid {
			continue
		}
		for ni, device := range folder.Devices {
			if device.DeviceID == nid {
				last := len(folder.Devices) - 1
				cfg.Folders[ri].Devices[ni] = folder.Devices[last]
				cfg.Folders[ri].Devices = cfg.Folders[ri].Devices[:last]
				setConfig(c, cfg)
				return
			}
		}
		die("Device " + c.Args()[1] + " not found")
	}
	die("Folder " + rid + " not found")
}

func foldersDevicesClear(c *cli.Context) {
	rid := c.Args()[0]
	cfg := getConfig(c)
	for i, folder := range cfg.Folders {
		if folder.ID != rid {
			continue
		}
		cfg.Folders[i].Devices = []config.FolderDeviceConfiguration{}
		setConfig(c, cfg)
		return
	}
	die("Folder " + rid + " not found")
}
```

`cmd/stcli/cmd_general.go` (new file, 78 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"encoding/json"
	"fmt"

	"github.com/AudriusButkevicius/cli"
)

func init() {
	cliCommands = append(cliCommands, []cli.Command{
		{
			Name:     "id",
			Usage:    "Get ID of the Syncthing client",
			Requires: &cli.Requires{},
			Action:   generalID,
		},
		{
			Name:     "status",
			Usage:    "Configuration status, whether or not a restart is required for changes to take effect",
			Requires: &cli.Requires{},
			Action:   generalStatus,
		},
		{
			Name:     "restart",
			Usage:    "Restart syncthing",
			Requires: &cli.Requires{},
			Action:   wrappedHTTPPost("system/restart"),
		},
		{
			Name:     "shutdown",
			Usage:    "Shutdown syncthing",
			Requires: &cli.Requires{},
			Action:   wrappedHTTPPost("system/shutdown"),
		},
		{
			Name:     "reset",
			Usage:    "Reset syncthing deleting all folders and devices",
			Requires: &cli.Requires{},
			Action:   wrappedHTTPPost("system/reset"),
		},
		{
			Name:     "upgrade",
			Usage:    "Upgrade syncthing (if a newer version is available)",
			Requires: &cli.Requires{},
			Action:   wrappedHTTPPost("system/upgrade"),
		},
		{
			Name:     "version",
			Usage:    "Syncthing client version",
			Requires: &cli.Requires{},
			Action:   generalVersion,
		},
	}...)
}

func generalID(c *cli.Context) {
	fmt.Println(getMyID(c))
}

func generalStatus(c *cli.Context) {
	response := httpGet(c, "system/config/insync")
	var status struct{ ConfigInSync bool }
	json.Unmarshal(responseToBArray(response), &status)
	if !status.ConfigInSync {
		die("Config out of sync")
	}
	fmt.Println("Config in sync")
}

func generalVersion(c *cli.Context) {
	response := httpGet(c, "system/version")
	version := make(map[string]interface{})
	json.Unmarshal(responseToBArray(response), &version)
	prettyPrintJSON(version)
}
```

`cmd/stcli/cmd_gui.go` (new file, 127 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"fmt"
	"strings"

	"github.com/AudriusButkevicius/cli"
)

func init() {
	cliCommands = append(cliCommands, cli.Command{
		Name:     "gui",
		HideHelp: true,
		Usage:    "GUI command group",
		Subcommands: []cli.Command{
			{
				Name:     "dump",
				Usage:    "Show all GUI configuration settings",
				Requires: &cli.Requires{},
				Action:   guiDump,
			},
			{
				Name:     "get",
				Usage:    "Get a GUI configuration setting",
				Requires: &cli.Requires{"setting"},
				Action:   guiGet,
			},
			{
				Name:     "set",
				Usage:    "Set a GUI configuration setting",
				Requires: &cli.Requires{"setting", "value"},
				Action:   guiSet,
			},
			{
				Name:     "unset",
				Usage:    "Unset a GUI configuration setting",
				Requires: &cli.Requires{"setting"},
				Action:   guiUnset,
			},
		},
	})
}

func guiDump(c *cli.Context) {
	cfg := getConfig(c).GUI
	writer := newTableWriter()
	fmt.Fprintln(writer, "Enabled:\t", cfg.Enabled, "\t(enabled)")
	fmt.Fprintln(writer, "Use HTTPS:\t", cfg.UseTLS(), "\t(tls)")
	fmt.Fprintln(writer, "Listen Addresses:\t", cfg.Address(), "\t(address)")
	if cfg.User != "" {
		fmt.Fprintln(writer, "Authentication User:\t", cfg.User, "\t(username)")
		fmt.Fprintln(writer, "Authentication Password:\t", cfg.Password, "\t(password)")
	}
	if cfg.APIKey != "" {
		fmt.Fprintln(writer, "API Key:\t", cfg.APIKey, "\t(apikey)")
	}
	writer.Flush()
}

func guiGet(c *cli.Context) {
	cfg := getConfig(c).GUI
	arg := c.Args()[0]
	switch strings.ToLower(arg) {
	case "enabled":
		fmt.Println(cfg.Enabled)
	case "tls":
		fmt.Println(cfg.UseTLS())
	case "address":
		fmt.Println(cfg.Address())
	case "user":
		if cfg.User != "" {
			fmt.Println(cfg.User)
		}
	case "password":
		if cfg.User != "" {
			fmt.Println(cfg.Password)
		}
	case "apikey":
		if cfg.APIKey != "" {
			fmt.Println(cfg.APIKey)
		}
	default:
		die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
	}
}

func guiSet(c *cli.Context) {
	cfg := getConfig(c)
	arg := c.Args()[0]
	val := c.Args()[1]
	switch strings.ToLower(arg) {
	case "enabled":
		cfg.GUI.Enabled = parseBool(val)
	case "tls":
		cfg.GUI.RawUseTLS = parseBool(val)
	case "address":
		validAddress(val)
		cfg.GUI.RawAddress = val
	case "user":
		cfg.GUI.User = val
	case "password":
		cfg.GUI.Password = val
	case "apikey":
		cfg.GUI.APIKey = val
	default:
		die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
	}
	setConfig(c, cfg)
}

func guiUnset(c *cli.Context) {
	cfg := getConfig(c)
	arg := c.Args()[0]
	switch strings.ToLower(arg) {
	case "user":
		cfg.GUI.User = ""
	case "password":
		cfg.GUI.Password = ""
	case "apikey":
		cfg.GUI.APIKey = ""
	default:
		die("Invalid setting: " + arg + "\nAvailable settings: user, password, apikey")
	}
	setConfig(c, cfg)
}
```

`cmd/stcli/cmd_options.go` (new file, 173 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"fmt"
	"strings"

	"github.com/AudriusButkevicius/cli"
)

func init() {
	cliCommands = append(cliCommands, cli.Command{
		Name:     "options",
		HideHelp: true,
		Usage:    "Options command group",
		Subcommands: []cli.Command{
			{
				Name:     "dump",
				Usage:    "Show all Syncthing option settings",
				Requires: &cli.Requires{},
				Action:   optionsDump,
			},
			{
				Name:     "get",
				Usage:    "Get a Syncthing option setting",
				Requires: &cli.Requires{"setting"},
				Action:   optionsGet,
			},
			{
				Name:     "set",
				Usage:    "Set a Syncthing option setting",
				Requires: &cli.Requires{"setting", "value..."},
				Action:   optionsSet,
			},
		},
	})
}

func optionsDump(c *cli.Context) {
	cfg := getConfig(c).Options
	writer := newTableWriter()

	fmt.Fprintln(writer, "Sync protocol listen addresses:\t", strings.Join(cfg.ListenAddresses, " "), "\t(addresses)")
	fmt.Fprintln(writer, "Global discovery enabled:\t", cfg.GlobalAnnEnabled, "\t(globalannenabled)")
	fmt.Fprintln(writer, "Global discovery servers:\t", strings.Join(cfg.GlobalAnnServers, " "), "\t(globalannserver)")

	fmt.Fprintln(writer, "Local discovery enabled:\t", cfg.LocalAnnEnabled, "\t(localannenabled)")
	fmt.Fprintln(writer, "Local discovery port:\t", cfg.LocalAnnPort, "\t(localannport)")

	fmt.Fprintln(writer, "Outgoing rate limit in KiB/s:\t", cfg.MaxSendKbps, "\t(maxsend)")
	fmt.Fprintln(writer, "Incoming rate limit in KiB/s:\t", cfg.MaxRecvKbps, "\t(maxrecv)")
	fmt.Fprintln(writer, "Reconnect interval in seconds:\t", cfg.ReconnectIntervalS, "\t(reconnect)")
	fmt.Fprintln(writer, "Start browser:\t", cfg.StartBrowser, "\t(browser)")
	fmt.Fprintln(writer, "Enable UPnP:\t", cfg.NATEnabled, "\t(nat)")
	fmt.Fprintln(writer, "UPnP Lease in minutes:\t", cfg.NATLeaseM, "\t(natlease)")
	fmt.Fprintln(writer, "UPnP Renewal period in minutes:\t", cfg.NATRenewalM, "\t(natrenew)")
	fmt.Fprintln(writer, "Restart on Wake Up:\t", cfg.RestartOnWakeup, "\t(wake)")

	reporting := "unrecognized value"
	switch cfg.URAccepted {
	case -1:
		reporting = "false"
	case 0:
		reporting = "undecided/false"
	case 1:
		reporting = "true"
	}
	fmt.Fprintln(writer, "Anonymous usage reporting:\t", reporting, "\t(reporting)")

	writer.Flush()
}

func optionsGet(c *cli.Context) {
	cfg := getConfig(c).Options
	arg := c.Args()[0]
	switch strings.ToLower(arg) {
	case "address":
		fmt.Println(strings.Join(cfg.ListenAddresses, "\n"))
	case "globalannenabled":
		fmt.Println(cfg.GlobalAnnEnabled)
	case "globalannservers":
		fmt.Println(strings.Join(cfg.GlobalAnnServers, "\n"))
	case "localannenabled":
		fmt.Println(cfg.LocalAnnEnabled)
	case "localannport":
		fmt.Println(cfg.LocalAnnPort)
	case "maxsend":
		fmt.Println(cfg.MaxSendKbps)
	case "maxrecv":
		fmt.Println(cfg.MaxRecvKbps)
	case "reconnect":
		fmt.Println(cfg.ReconnectIntervalS)
	case "browser":
		fmt.Println(cfg.StartBrowser)
	case "nat":
		fmt.Println(cfg.NATEnabled)
	case "natlease":
		fmt.Println(cfg.NATLeaseM)
	case "natrenew":
		fmt.Println(cfg.NATRenewalM)
	case "reporting":
		switch cfg.URAccepted {
		case -1:
			fmt.Println("false")
		case 0:
			fmt.Println("undecided/false")
		case 1:
			fmt.Println("true")
		default:
			fmt.Println("unknown")
		}
	case "wake":
		fmt.Println(cfg.RestartOnWakeup)
	default:
		die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
	}
}

func optionsSet(c *cli.Context) {
	config := getConfig(c)
	arg := c.Args()[0]
	val := c.Args()[1]
	switch strings.ToLower(arg) {
	case "address":
		for _, item := range c.Args().Tail() {
			validAddress(item)
		}
		config.Options.ListenAddresses = c.Args().Tail()
	case "globalannenabled":
		config.Options.GlobalAnnEnabled = parseBool(val)
	case "globalannserver":
		for _, item := range c.Args().Tail() {
			validAddress(item)
		}
		config.Options.GlobalAnnServers = c.Args().Tail()
	case "localannenabled":
		config.Options.LocalAnnEnabled = parseBool(val)
	case "localannport":
		config.Options.LocalAnnPort = parsePort(val)
	case "maxsend":
		config.Options.MaxSendKbps = parseUint(val)
	case "maxrecv":
		config.Options.MaxRecvKbps = parseUint(val)
	case "reconnect":
		config.Options.ReconnectIntervalS = parseUint(val)
	case "browser":
		config.Options.StartBrowser = parseBool(val)
	case "nat":
		config.Options.NATEnabled = parseBool(val)
	case "natlease":
		config.Options.NATLeaseM = parseUint(val)
	case "natrenew":
		config.Options.NATRenewalM = parseUint(val)
	case "reporting":
		switch strings.ToLower(val) {
		case "u", "undecided", "unset":
			config.Options.URAccepted = 0
		default:
			boolvalue := parseBool(val)
			if boolvalue {
				config.Options.URAccepted = 1
			} else {
				config.Options.URAccepted = -1
			}
		}
	case "wake":
		config.Options.RestartOnWakeup = parseBool(val)
	default:
		die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
	}
	setConfig(c, config)
}
```

`cmd/stcli/cmd_report.go` (new file, 72 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"encoding/json"
	"fmt"

	"github.com/AudriusButkevicius/cli"
)

func init() {
	cliCommands = append(cliCommands, cli.Command{
		Name:     "report",
		HideHelp: true,
		Usage:    "Reporting command group",
		Subcommands: []cli.Command{
			{
				Name:     "system",
				Usage:    "Report system state",
				Requires: &cli.Requires{},
				Action:   reportSystem,
			},
			{
				Name:     "connections",
				Usage:    "Report about connections to other devices",
				Requires: &cli.Requires{},
				Action:   reportConnections,
			},
			{
				Name:     "usage",
				Usage:    "Usage report",
				Requires: &cli.Requires{},
				Action:   reportUsage,
			},
		},
	})
}

func reportSystem(c *cli.Context) {
	response := httpGet(c, "system/status")
	data := make(map[string]interface{})
	json.Unmarshal(responseToBArray(response), &data)
	prettyPrintJSON(data)
}

func reportConnections(c *cli.Context) {
	response := httpGet(c, "system/connections")
	data := make(map[string]map[string]interface{})
	json.Unmarshal(responseToBArray(response), &data)
	var overall map[string]interface{}
	for key, value := range data {
		if key == "total" {
			overall = value
			continue
		}
		value["Device ID"] = key
		prettyPrintJSON(value)
		fmt.Println()
	}
	if overall != nil {
		fmt.Println("=== Overall statistics ===")
		prettyPrintJSON(overall)
	}
}

func reportUsage(c *cli.Context) {
	response := httpGet(c, "svc/report")
	report := make(map[string]interface{})
	json.Unmarshal(responseToBArray(response), &report)
	prettyPrintJSON(report)
}
```

`cmd/stcli/labels.go` (new file, 31 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

var jsonAttributeLabels = map[string]string{
	"folderMaxMiB":   "Largest folder size in MiB",
	"folderMaxFiles": "Largest folder file count",
	"longVersion":    "Long version",
	"totMiB":         "Total size in MiB",
	"totFiles":       "Total files",
	"uniqueID":       "Unique ID",
	"numFolders":     "Folder count",
	"numDevices":     "Device count",
	"memoryUsageMiB": "Memory usage in MiB",
	"memorySize":     "Total memory in MiB",
	"sha256Perf":     "SHA256 Benchmark",
	"At":             "Last contacted",
	"Completion":     "Percent complete",
	"InBytesTotal":   "Total bytes received",
	"OutBytesTotal":  "Total bytes sent",
	"ClientVersion":  "Client version",
	"alloc":          "Memory allocated in bytes",
	"sys":            "Memory using in bytes",
	"cpuPercent":     "CPU load in percent",
	"extAnnounceOK":  "External announcments working",
	"goroutines":     "Number of Go routines",
	"myID":           "Client ID",
	"tilde":          "Tilde expands to",
	"arch":           "Architecture",
	"os":             "OS",
}
```

`cmd/stcli/main.go` (new file, 63 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"sort"

	"github.com/AudriusButkevicius/cli"
)

type ByAlphabet []cli.Command

func (a ByAlphabet) Len() int           { return len(a) }
func (a ByAlphabet) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByAlphabet) Less(i, j int) bool { return a[i].Name < a[j].Name }

var cliCommands []cli.Command

func main() {
	app := cli.NewApp()
	app.Name = "syncthing-cli"
	app.Author = "Audrius Butkevičius"
	app.Email = "audrius.butkevicius@gmail.com"
	app.Usage = "Syncthing command line interface"
	app.Version = "0.1"
	app.HideHelp = true

	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "endpoint, e",
			Value:  "http://127.0.0.1:8384",
			Usage:  "End point to connect to",
			EnvVar: "STENDPOINT",
		},
		cli.StringFlag{
			Name:   "apikey, k",
			Value:  "",
			Usage:  "API Key",
			EnvVar: "STAPIKEY",
		},
		cli.StringFlag{
			Name:   "username, u",
			Value:  "",
			Usage:  "Username",
			EnvVar: "STUSERNAME",
		},
		cli.StringFlag{
			Name:   "password, p",
			Value:  "",
			Usage:  "Password",
			EnvVar: "STPASSWORD",
		},
		cli.BoolFlag{
			Name:   "insecure, i",
			Usage:  "Do not verify SSL certificate",
			EnvVar: "STINSECURE",
		},
	}

	sort.Sort(ByAlphabet(cliCommands))
	app.Commands = cliCommands
	app.RunAndExitOnError()
}
```

`cmd/stcli/utils.go` (new file, 165 lines)

```go
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"text/tabwriter"
	"unicode"

	"github.com/AudriusButkevicius/cli"
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/protocol"
)

func responseToBArray(response *http.Response) []byte {
	defer response.Body.Close()
	bytes, err := ioutil.ReadAll(response.Body)
	if err != nil {
		die(err)
	}
	return bytes
}

func die(vals ...interface{}) {
	if len(vals) > 1 || vals[0] != nil {
		os.Stderr.WriteString(fmt.Sprintln(vals...))
		os.Exit(1)
	}
}

func wrappedHTTPPost(url string) func(c *cli.Context) {
	return func(c *cli.Context) {
		httpPost(c, url, "")
	}
}

func prettyPrintJSON(json map[string]interface{}) {
	writer := newTableWriter()
	remap := make(map[string]interface{})
	for k, v := range json {
		key, ok := jsonAttributeLabels[k]
		if !ok {
			key = firstUpper(k)
		}
		remap[key] = v
	}

	jsonKeys := make([]string, 0, len(remap))
	for key := range remap {
		jsonKeys = append(jsonKeys, key)
	}
	sort.Strings(jsonKeys)
	for _, k := range jsonKeys {
		var value string
		rvalue := remap[k]
		switch rvalue.(type) {
		case int, int16, int32, int64, uint, uint16, uint32, uint64, float32, float64:
			value = fmt.Sprintf("%.0f", rvalue)
		default:
			value = fmt.Sprint(rvalue)
		}
		if value == "" {
			continue
		}
		fmt.Fprintln(writer, k+":\t"+value)
	}
	writer.Flush()
}

func firstUpper(str string) string {
	for i, v := range str {
		return string(unicode.ToUpper(v)) + str[i+1:]
	}
	return ""
}

func newTableWriter() *tabwriter.Writer {
	writer := new(tabwriter.Writer)
	writer.Init(os.Stdout, 0, 8, 0, '\t', 0)
	return writer
}

func getMyID(c *cli.Context) string {
	response := httpGet(c, "system/status")
	data := make(map[string]interface{})
	json.Unmarshal(responseToBArray(response), &data)
	return data["myID"].(string)
}

func getConfig(c *cli.Context) config.Configuration {
	response := httpGet(c, "system/config")
	config := config.Configuration{}
	json.Unmarshal(responseToBArray(response), &config)
	return config
}

func setConfig(c *cli.Context, cfg config.Configuration) {
	body, err := json.Marshal(cfg)
	die(err)
	response := httpPost(c, "system/config", string(body))
	if response.StatusCode != 200 {
		die("Unexpected status code", response.StatusCode)
	}
}

func parseBool(input string) bool {
	val, err := strconv.ParseBool(input)
	if err != nil {
		die(input + " is not a valid value for a boolean")
	}
	return val
}

func parseInt(input string) int {
	val, err := strconv.ParseInt(input, 0, 64)
	if err != nil {
		die(input + " is not a valid value for an integer")
	}
	return int(val)
}

func parseUint(input string) int {
	val, err := strconv.ParseUint(input, 0, 64)
	if err != nil {
		die(input + " is not a valid value for an unsigned integer")
	}
	return int(val)
}

func parsePort(input string) int {
	port := parseUint(input)
	if port < 1 || port > 65535 {
		die(input + " is not a valid port\nExpected value between 1 and 65535")
	}
	return port
}

func validAddress(input string) {
	tokens := strings.Split(input, ":")
	if len(tokens) != 2 {
		die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
	}
	matched, err := regexp.MatchString("^[a-zA-Z0-9]+([-a-zA-Z0-9.]+[-a-zA-Z0-9]+)?$", tokens[0])
	die(err)
	if !matched {
		die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
	}
	parsePort(tokens[1])
}

func parseDeviceID(input string) protocol.DeviceID {
	device, err := protocol.DeviceIDFromString(input)
	if err != nil {
		die(input + " is not a valid device id")
	}
	return device
}
```

@@ -38,3 +38,7 @@ In all cases, the appropriate tables and indexes will be created at first
startup. If it doesn't exit with an error, you're fine.

See `stdiscosrv -help` for other options.

##### Third-party attribution

[cznic/lldb](https://github.com/cznic/lldb), Copyright (C) 2014 The lldb Authors.
@@ -19,9 +19,9 @@ import (
	"time"

	"github.com/golang/groupcache/lru"
	"github.com/juju/ratelimit"
	"github.com/syncthing/syncthing/lib/protocol"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

type querysrv struct {
@@ -373,14 +373,14 @@ func (s *querysrv) limit(remote net.IP) bool {

	bkt, ok := s.limiter.Get(key)
	if ok {
		bkt := bkt.(*ratelimit.Bucket)
		if bkt.TakeAvailable(1) != 1 {
		bkt := bkt.(*rate.Limiter)
		if !bkt.Allow() {
			// Rate limit exceeded; ignore packet
			return true
		}
	} else {
		// One packet per ten seconds average rate, burst ten packets
		s.limiter.Add(key, ratelimit.NewBucket(10*time.Second/time.Duration(limitAvg), int64(limitBurst)))
		// limitAvg is in packets per ten seconds.
		s.limiter.Add(key, rate.NewLimiter(rate.Limit(limitAvg)/10, limitBurst))
	}

	return false
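The conversion above hinges on the fact that golang.org/x/time/rate expresses its limit in events per second, so a budget stated as "N packets per ten seconds" becomes `rate.Limit(N)/10`. Below is a minimal, self-contained sketch of that pattern; the packet counts and the loop are illustrative values only, not taken from the discovery server.

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	const limitAvg, limitBurst = 5, 10 // 5 packets per ten seconds, burst of 10

	// rate.Limit is events per second, so divide the per-ten-seconds figure by 10.
	lim := rate.NewLimiter(rate.Limit(limitAvg)/10, limitBurst)

	allowed, dropped := 0, 0
	for i := 0; i < 25; i++ {
		if lim.Allow() {
			allowed++ // within the average rate or the burst allowance
		} else {
			dropped++ // rate limit exceeded; a real server would ignore the packet
		}
	}
	fmt.Printf("allowed %d, dropped %d\n", allowed, dropped)
}
```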
@@ -66,7 +66,7 @@ func generateFiles(dir string, files, maxexp int, srcname string) error {
}

func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
	src := io.LimitReader(&inifiteReader{fd}, int64(s))
	src := io.LimitReader(&inifiteReader{fd}, s)
	dst, err := os.Create(p1)
	if err != nil {
		return err
@@ -85,12 +85,7 @@ func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
	_ = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))

	t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
	err = os.Chtimes(p1, t, t)
	if err != nil {
		return err
	}

	return nil
	return os.Chtimes(p1, t, t)
}

func randomName() string {
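The generator above feeds `io.LimitReader` from a reader that endlessly re-reads its source to produce files of an arbitrary size. A small standalone sketch of the same idea follows; the `loopReader` type, the file name, and the sizes are illustrative and not part of the Syncthing code.

```go
package main

import (
	"bytes"
	"io"
	"os"
)

// loopReader re-reads the underlying ReadSeeker from the start whenever it
// hits EOF, yielding an endless stream of the same bytes.
type loopReader struct {
	src io.ReadSeeker
}

func (r *loopReader) Read(p []byte) (int, error) {
	n, err := r.src.Read(p)
	if err == io.EOF {
		if _, serr := r.src.Seek(0, io.SeekStart); serr != nil {
			return n, serr
		}
		if n == 0 {
			return r.src.Read(p)
		}
		return n, nil
	}
	return n, err
}

// writeFile copies exactly size bytes of looped source data into path.
func writeFile(path string, src io.ReadSeeker, size int64) error {
	dst, err := os.Create(path)
	if err != nil {
		return err
	}
	defer dst.Close()
	_, err = io.Copy(dst, io.LimitReader(&loopReader{src}, size))
	return err
}

func main() {
	src := bytes.NewReader(bytes.Repeat([]byte("syncthing "), 100)) // ~1 KiB of source data
	if err := writeFile("example.bin", src, 4096); err != nil {
		panic(err)
	}
}
```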
@@ -13,3 +13,9 @@ If you still want to run it, you can run `go get github.com/syncthing/relaypools
from the build server.

See `relaypoolsrv -help` for configuration options.

##### Third-party attributions

[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).

[lib/pq](https://github.com/lib/pq), Copyright (C) 2011-2013 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany.
@@ -23,14 +23,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/juju/ratelimit"
|
||||
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
type location struct {
|
||||
@@ -65,12 +63,12 @@ var (
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst int64 = 10
|
||||
getLimitAvg = 1
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst int64 = 2
|
||||
postLimitAvg = 1
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst = 10
|
||||
getLimitAvg = 1
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst = 2
|
||||
postLimitAvg = 1
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
@@ -99,10 +97,10 @@ func main() {
|
||||
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
|
||||
flag.IntVar(&getLRUSize, "get-limit-cache", getLRUSize, "Get request limiter cache size")
|
||||
flag.IntVar(&getLimitAvg, "get-limit-avg", 2, "Allowed average get request rate, per 10 s")
|
||||
flag.Int64Var(&getLimitBurst, "get-limit-burst", getLimitBurst, "Allowed burst get requests")
|
||||
flag.IntVar(&getLimitBurst, "get-limit-burst", getLimitBurst, "Allowed burst get requests")
|
||||
flag.IntVar(&postLRUSize, "post-limit-cache", postLRUSize, "Post request limiter cache size")
|
||||
flag.IntVar(&postLimitAvg, "post-limit-avg", 2, "Allowed average post request rate, per minute")
|
||||
flag.Int64Var(&postLimitBurst, "post-limit-burst", postLimitBurst, "Allowed burst post requests")
|
||||
flag.IntVar(&postLimitBurst, "post-limit-burst", postLimitBurst, "Allowed burst post requests")
|
||||
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
|
||||
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
|
||||
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
|
||||
@@ -249,13 +247,13 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, int64(getLimitBurst)) {
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, getLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, int64(postLimitBurst)) {
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, postLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
@@ -446,7 +444,7 @@ func evict(relay relay) func() {
|
||||
}
|
||||
}
|
||||
|
||||
func limit(addr string, cache *lru.Cache, lock sync.RWMutex, rate time.Duration, burst int64) bool {
|
||||
func limit(addr string, cache *lru.Cache, lock sync.RWMutex, intv time.Duration, burst int) bool {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return false
|
||||
@@ -456,14 +454,14 @@ func limit(addr string, cache *lru.Cache, lock sync.RWMutex, rate time.Duration,
|
||||
bkt, ok := cache.Get(host)
|
||||
lock.RUnlock()
|
||||
if ok {
|
||||
bkt := bkt.(*ratelimit.Bucket)
|
||||
if bkt.TakeAvailable(1) != 1 {
|
||||
bkt := bkt.(*rate.Limiter)
|
||||
if !bkt.Allow() {
|
||||
// Rate limit
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
lock.Lock()
|
||||
cache.Add(host, ratelimit.NewBucket(rate, burst))
|
||||
cache.Add(host, rate.NewLimiter(rate.Every(intv), burst))
|
||||
lock.Unlock()
|
||||
}
|
||||
return false
|
||||
|
||||
@@ -12,17 +12,15 @@ from the build server.
:exclamation:Warnings:exclamation: - Read or regret
-----

By default, all relay servers will join the default public relay pool, which means that the relay server will be available for public use, and **will consume your bandwidth** helping others to connect.
By default, all relay servers will join to the default public relay pool, which means that the relay server will be available for public use, and **will consume your bandwidth** helping others to connect.

If you wish to disable this behaviour, please specify `-pools=""` argument.
If you wish to disable this behaviour, please specify the `-pools=""` argument.

Please note that `strelaysrv` is only usable by `syncthing` **version v0.12 and onwards**.

To run `strelaysrv` you need to have port 22067 available to the internet, which means you might need to allow it through your firewall if you **have a public IP, or set up port forwarding** (22067 to 22067) if you are behind a router.
To run `strelaysrv` you need to have port 22067 available to the internet, which means you might need to port forward it and/or allow it through your firewall.

Furthermore, **by default strelaysrv will also expose a /status HTTP endpoint on port 22070**, which is used by the pool servers to peek at metrics of the strelaysrv, such as the current transfer rates, how many clients are connected, etc. If you wish this information to be available, similarly you might want to allow it through your firewall, or port-forward it (22070 to 22070) on your NAT device.
This is **not mandatory** for the strelaysrv to function, and is used only to gather metrics and present them in the overview page of the pool server, displaying stats about the specific relay.
Furthermore, by default `strelaysrv` will also expose a /status HTTP endpoint on port 22070, which is used by the pool servers to read metrics of the `strelaysrv`, such as the current transfer rates, how many clients are connected, etc. If you wish this information to be available you may need to port forward and allow it through your firewall. This is not mandatory for the `strelaysrv` to function, and is used only to gather metrics and present them in the overview page of the pool server.
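Since the /status endpoint described above is plain HTTP returning JSON, it can be polled with any HTTP client. A minimal Go sketch, assuming a relay running locally with the default status port 22070 and making no assumptions about the specific field names in the response:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumes a relay running locally with the default status listener (:22070).
	resp, err := http.Get("http://localhost:22070/status")
	if err != nil {
		fmt.Println("status endpoint not reachable:", err)
		return
	}
	defer resp.Body.Close()

	// The endpoint returns a JSON object; decode it generically and print
	// the top-level keys and values.
	var status map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		fmt.Println("unexpected response:", err)
		return
	}
	for k, v := range status {
		fmt.Printf("%s: %v\n", k, v)
	}
}
```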
At the point of writing the endpoint output looks as follows:

@@ -66,7 +64,7 @@ If you wish to disable the /status endpoint, provide `-status-srv=""` as one of

Running for public use
----
Make sure you have a public IP with port 22067 open, or make sure you have port-forwarding (22067 to 22067) if you are behind a router.
Make sure you have a public IP with port 22067 open, or have forwarded port 22067 if you are behind a NAT.

Run the `strelaysrv` with no arguments (or `-debug` if you want more output), and that should be enough for the server to join the public relay pool.
You should see a message saying:
@@ -79,37 +77,37 @@ See `strelaysrv -help` for other options, such as rate limits, timeout intervals
Running for private use
-----

Once you've started the `strelaysrv`, it will generate a key pair and print an URI:
Once you've started the `strelaysrv`, it will generate a key pair and print a URI:
```bash
relay://:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070
```

This URI contains partial address of the relay server, as well as it's options which in the future may be taken into account when choosing the best suitable relay out of multiple available.
This URI contains a partial address of the relay server, as well as its options which in the future may be taken into account when choosing the most suitable relay.

Because `-listen` option was not used, the `strelaysrv` does not know it's external IP, therefore you should replace the host part of the URI with your public IP address on which the `strelaysrv` will be available:
Because the `-listen` option was not used `strelaysrv` does not know its external IP, therefore you should replace the host part of the URI with your public IP address on which the `strelaysrv` will be available:

```bash
relay://123.123.123.123:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070
relay://192.0.2.1:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070
```

If you do not care about certificate pinning (improved security) or do not care about passing verbose settings to the clients, you can shorten the URL to just the host part:

```bash
relay://123.123.123.123:22067
relay://192.0.2.1:22067
```

This URI can then be used in `syncthing` as one of the relay servers.
This URI can then be used in `syncthing` clients as one of the relay servers by adding the URI to the "Sync Protocol Listen Address" field, under Actions and Settings.

See `strelaysrv -help` for other options, such as rate limits, timeout intervals, etc.

Other items available in this repo
----
##### testutil
A test utility which can be used to test connectivity of a relay server.
You need to generate two x509 key pairs (key.pem and cert.pem), one for the client, another one for the server, in separate directories.
A test utility which can be used to test the connectivity of a relay server.
You need to generate two x509 key pairs (key.pem and cert.pem), one for the client and one for the server, in separate directories.
Afterwards, start the client:
```bash
./testutil -relay="relay://uri.of.relay" -keys=certs/client/ -join
./testutil -relay="relay://192.0.2.1:22067" -keys=certs/client/ -join
```

This prints out the client ID:
@@ -120,7 +118,7 @@ This prints out the client ID:
In the other terminal run the following:

```bash
./testutil -relay="relay://uri.of.relay" -keys=certs/server/ -connect=BG2C5ZA-W7XPFDO-LH222Z6-65F3HJX-ADFTGRT-3SBFIGM-KV26O2Q-E5RMRQ2
./testutil -relay="relay://192.0.2.1:22067" -keys=certs/server/ -connect=BG2C5ZA-W7XPFDO-LH222Z6-65F3HJX-ADFTGRT-3SBFIGM-KV26O2Q-E5RMRQ2
```

Which should then give you an interactive prompt, where you can type things in one terminal, and they get relayed to the other terminal.
@@ -137,5 +135,3 @@ Relay related libraries used by this repo
Only used by the testutil.

[Available here](https://github.com/syncthing/syncthing/tree/master/lib/relay/client)
@@ -20,10 +20,10 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/nat"
|
||||
@@ -54,7 +54,6 @@ func init() {
|
||||
var (
|
||||
listen string
|
||||
debug bool
|
||||
proto string
|
||||
|
||||
sessionAddress []byte
|
||||
sessionPort uint16
|
||||
@@ -69,8 +68,8 @@ var (
|
||||
globalLimitBps int
|
||||
overLimit int32
|
||||
descriptorLimit int64
|
||||
sessionLimiter *ratelimit.Bucket
|
||||
globalLimiter *ratelimit.Bucket
|
||||
sessionLimiter *rate.Limiter
|
||||
globalLimiter *rate.Limiter
|
||||
|
||||
statusAddr string
|
||||
poolAddrs string
|
||||
@@ -216,10 +215,10 @@ func main() {
|
||||
}
|
||||
|
||||
if sessionLimitBps > 0 {
|
||||
sessionLimiter = ratelimit.NewBucketWithRate(float64(sessionLimitBps), int64(2*sessionLimitBps))
|
||||
sessionLimiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
|
||||
}
|
||||
if globalLimitBps > 0 {
|
||||
globalLimiter = ratelimit.NewBucketWithRate(float64(globalLimitBps), int64(2*globalLimitBps))
|
||||
globalLimiter = rate.NewLimiter(rate.Limit(globalLimitBps), 2*globalLimitBps)
|
||||
}
|
||||
|
||||
if statusAddr != "" {
|
||||
|
||||
@@ -7,15 +7,16 @@ import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -26,7 +27,7 @@ var (
|
||||
bytesProxied int64
|
||||
)
|
||||
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *ratelimit.Bucket) *session {
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
|
||||
serverkey := make([]byte, 32)
|
||||
_, err := rand.Read(serverkey)
|
||||
if err != nil {
|
||||
@@ -108,7 +109,7 @@ type session struct {
|
||||
clientkey []byte
|
||||
clientid syncthingprotocol.DeviceID
|
||||
|
||||
rateLimit func(bytes int64)
|
||||
rateLimit func(bytes int)
|
||||
|
||||
connsChan chan net.Conn
|
||||
conns []net.Conn
|
||||
@@ -216,7 +217,7 @@ done:
|
||||
func (s *session) GetClientInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.serverid[:],
|
||||
Key: []byte(s.clientkey),
|
||||
Key: s.clientkey,
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: false,
|
||||
@@ -226,7 +227,7 @@ func (s *session) GetClientInvitationMessage() protocol.SessionInvitation {
|
||||
func (s *session) GetServerInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.clientid[:],
|
||||
Key: []byte(s.serverkey),
|
||||
Key: s.serverkey,
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: true,
|
||||
@@ -268,7 +269,7 @@ func (s *session) proxy(c1, c2 net.Conn) error {
		}

		if s.rateLimit != nil {
			s.rateLimit(int64(n))
			s.rateLimit(n)
		}

		c2.SetWriteDeadline(time.Now().Add(networkTimeout))
@@ -283,7 +284,7 @@ func (s *session) String() string {
	return fmt.Sprintf("<%s/%s>", hex.EncodeToString(s.clientkey)[:5], hex.EncodeToString(s.serverkey)[:5])
}

func makeRateLimitFunc(sessionRateLimit, globalRateLimit *ratelimit.Bucket) func(int64) {
func makeRateLimitFunc(sessionRateLimit, globalRateLimit *rate.Limiter) func(int) {
	// This may be a case of super duper premature optimization... We build an
	// optimized function to do the rate limiting here based on what we need
	// to do and then use it in the loop.
@@ -298,29 +299,55 @@ func makeRateLimitFunc(sessionRateLimit, globalRateLimit *ratelimit.Bucket) func

	if sessionRateLimit == nil {
		// We only have a global limiter
		return func(bytes int64) {
			globalRateLimit.Wait(bytes)
		return func(bytes int) {
			take(bytes, globalRateLimit)
		}
	}

	if globalRateLimit == nil {
		// We only have a session limiter
		return func(bytes int64) {
			sessionRateLimit.Wait(bytes)
		return func(bytes int) {
			take(bytes, sessionRateLimit)
		}
	}

	// We have both. Queue the bytes on both the global and session specific
	// rate limiters. Wait for both in parallell, so that the actual send
	// happens when both conditions are satisfied. In practice this just means
	// wait the longer of the two times.
	return func(bytes int64) {
		t0 := sessionRateLimit.Take(bytes)
		t1 := globalRateLimit.Take(bytes)
		if t0 > t1 {
			time.Sleep(t0)
		} else {
			time.Sleep(t1)
		}
	// rate limiters.
	return func(bytes int) {
		take(bytes, sessionRateLimit, globalRateLimit)
	}
}

// take is a utility function to consume tokens from a set of rate.Limiters.
// Tokens are consumed in parallel on all limiters, respecting their
// individual burst sizes.
func take(tokens int, ls ...*rate.Limiter) {
	// minBurst is the smallest burst size supported by all limiters.
	minBurst := int(math.MaxInt32)
	for _, l := range ls {
		if burst := l.Burst(); burst < minBurst {
			minBurst = burst
		}
	}

	for tokens > 0 {
		// chunk is how many tokens we can consume at a time
		chunk := tokens
		if chunk > minBurst {
			chunk = minBurst
		}

		// maxDelay is the longest delay mandated by any of the limiters for
		// the chosen chunk size.
		var maxDelay time.Duration
		for _, l := range ls {
			res := l.ReserveN(time.Now(), chunk)
			if del := res.Delay(); del > maxDelay {
				maxDelay = del
			}
		}

		time.Sleep(maxDelay)
		tokens -= chunk
	}
}
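The `take` helper above boils down to reserving the same chunk on every limiter and sleeping for the longest mandated delay. A standalone sketch of that reserve-and-sleep pattern with two limiters follows; the byte rates and chunk size are made-up example values, not Syncthing defaults.

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// waitBoth blocks until both limiters can supply n tokens, sleeping the
// longer of the two mandated delays (n must not exceed either burst).
func waitBoth(n int, session, global *rate.Limiter) {
	d := session.ReserveN(time.Now(), n).Delay()
	if d2 := global.ReserveN(time.Now(), n).Delay(); d2 > d {
		d = d2
	}
	time.Sleep(d)
}

func main() {
	// Example limits: 50 KB/s per session, 100 KB/s globally, with bursts
	// equal to twice the rate, mirroring the 2*limitBps pattern above.
	session := rate.NewLimiter(rate.Limit(50000), 100000)
	global := rate.NewLimiter(rate.Limit(100000), 200000)

	start := time.Now()
	for i := 0; i < 10; i++ {
		waitBoth(25000, session, global) // pretend we just proxied a 25 KB chunk
	}
	// With these numbers the session limiter dominates: 250 KB total minus its
	// 100 KB initial burst is about 3 s of waiting at 50 KB/s.
	fmt.Printf("proxied 250 KB in %v\n", time.Since(start))
}
```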
@@ -42,7 +42,7 @@ func getStatus(w http.ResponseWriter, r *http.Request) {
	status["goMaxProcs"] = runtime.GOMAXPROCS(-1)
	status["goNumRoutine"] = runtime.NumGoroutine()
	status["kbps10s1m5m15m30m60m"] = []int64{
		rc.rate(10/10) * 8 / 1000,
		rc.rate(1) * 8 / 1000, // each interval is 10s
		rc.rate(60/10) * 8 / 1000,
		rc.rate(5*60/10) * 8 / 1000,
		rc.rate(15*60/10) * 8 / 1000,
@@ -82,8 +82,6 @@ type modelIntf interface {
|
||||
Availability(folder, file string, version protocol.Vector, block protocol.BlockInfo) []model.Availability
|
||||
GetIgnores(folder string) ([]string, []string, error)
|
||||
SetIgnores(folder string, content []string) error
|
||||
PauseDevice(device protocol.DeviceID)
|
||||
ResumeDevice(device protocol.DeviceID)
|
||||
DelayScan(folder string, next time.Duration)
|
||||
ScanFolder(folder string) error
|
||||
ScanFolders() map[string]error
|
||||
@@ -105,6 +103,7 @@ type configIntf interface {
|
||||
Subscribe(c config.Committer)
|
||||
Folders() map[string]config.FolderConfiguration
|
||||
Devices() map[protocol.DeviceID]config.DeviceConfiguration
|
||||
SetDevice(config.DeviceConfiguration) error
|
||||
Save() error
|
||||
ListenAddresses() []string
|
||||
RequiresRestart() bool
|
||||
@@ -258,21 +257,21 @@ func (s *apiService) Serve() {
|
||||
|
||||
// The POST handlers
|
||||
postRestMux := http.NewServeMux()
|
||||
postRestMux.HandleFunc("/rest/db/prio", s.postDBPrio) // folder file [perpage] [page]
|
||||
postRestMux.HandleFunc("/rest/db/ignores", s.postDBIgnores) // folder
|
||||
postRestMux.HandleFunc("/rest/db/override", s.postDBOverride) // folder
|
||||
postRestMux.HandleFunc("/rest/db/scan", s.postDBScan) // folder [sub...] [delay]
|
||||
postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig) // <body>
|
||||
postRestMux.HandleFunc("/rest/system/error", s.postSystemError) // <body>
|
||||
postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
|
||||
postRestMux.HandleFunc("/rest/system/ping", s.restPing) // -
|
||||
postRestMux.HandleFunc("/rest/system/reset", s.postSystemReset) // [folder]
|
||||
postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart) // -
|
||||
postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown) // -
|
||||
postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // -
|
||||
postRestMux.HandleFunc("/rest/system/pause", s.postSystemPause) // device
|
||||
postRestMux.HandleFunc("/rest/system/resume", s.postSystemResume) // device
|
||||
postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]
|
||||
postRestMux.HandleFunc("/rest/db/prio", s.postDBPrio) // folder file [perpage] [page]
|
||||
postRestMux.HandleFunc("/rest/db/ignores", s.postDBIgnores) // folder
|
||||
postRestMux.HandleFunc("/rest/db/override", s.postDBOverride) // folder
|
||||
postRestMux.HandleFunc("/rest/db/scan", s.postDBScan) // folder [sub...] [delay]
|
||||
postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig) // <body>
|
||||
postRestMux.HandleFunc("/rest/system/error", s.postSystemError) // <body>
|
||||
postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
|
||||
postRestMux.HandleFunc("/rest/system/ping", s.restPing) // -
|
||||
postRestMux.HandleFunc("/rest/system/reset", s.postSystemReset) // [folder]
|
||||
postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart) // -
|
||||
postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown) // -
|
||||
postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // -
|
||||
postRestMux.HandleFunc("/rest/system/pause", s.makeDevicePauseHandler(true)) // device
|
||||
postRestMux.HandleFunc("/rest/system/resume", s.makeDevicePauseHandler(false)) // device
|
||||
postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]
|
||||
|
||||
// Debug endpoints, not for general use
|
||||
debugMux := http.NewServeMux()
|
||||
@@ -327,8 +326,10 @@ func (s *apiService) Serve() {
|
||||
handler = debugMiddleware(handler)
|
||||
|
||||
srv := http.Server{
|
||||
Handler: handler,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
Handler: handler,
|
||||
// ReadTimeout must be longer than SyncthingController $scope.refresh
|
||||
// interval to avoid HTTP keepalive/GUI refresh race.
|
||||
ReadTimeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
s.fss = newFolderSummaryService(s.cfg, s.model)
|
||||
@@ -381,10 +382,8 @@ func (s *apiService) String() string {
|
||||
}
|
||||
|
||||
func (s *apiService) VerifyConfiguration(from, to config.Configuration) error {
|
||||
if _, err := net.ResolveTCPAddr("tcp", to.GUI.Address()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
_, err := net.ResolveTCPAddr("tcp", to.GUI.Address())
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *apiService) CommitConfiguration(from, to config.Configuration) bool {
|
||||
@@ -1105,30 +1104,27 @@ func (s *apiService) postSystemUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiService) postSystemPause(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var deviceStr = qs.Get("device")
|
||||
func (s *apiService) makeDevicePauseHandler(paused bool) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var deviceStr = qs.Get("device")
|
||||
|
||||
device, err := protocol.DeviceIDFromString(deviceStr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
device, err := protocol.DeviceIDFromString(deviceStr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
cfg, ok := s.cfg.Devices()[device]
|
||||
if !ok {
|
||||
http.Error(w, "not found", http.StatusNotFound)
|
||||
}
|
||||
|
||||
cfg.Paused = paused
|
||||
if err := s.cfg.SetDevice(cfg); err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
}
|
||||
}
|
||||
|
||||
s.model.PauseDevice(device)
|
||||
}
|
||||
|
||||
func (s *apiService) postSystemResume(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var deviceStr = qs.Get("device")
|
||||
|
||||
device, err := protocol.DeviceIDFromString(deviceStr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
s.model.ResumeDevice(device)
|
||||
}
|
||||
|
||||
func (s *apiService) postDBScan(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
@@ -140,11 +141,11 @@ show time only (2).
|
||||
Development Settings
|
||||
--------------------
|
||||
|
||||
The following environment variables modify syncthing's behavior in ways that
|
||||
The following environment variables modify Syncthing's behavior in ways that
|
||||
are mostly useful for developers. Use with care.
|
||||
|
||||
STNODEFAULTFOLDER Don't create a default folder when starting for the first
|
||||
time. This variable will be ignored anytime after the first
|
||||
time. This variable will be ignored anytime after the first
|
||||
run.
|
||||
|
||||
STGUIASSETS Directory to load GUI assets from. Overrides compiled in
|
||||
@@ -153,8 +154,8 @@ are mostly useful for developers. Use with care.
|
||||
STTRACE A comma separated string of facilities to trace. The valid
|
||||
facility strings listed below.
|
||||
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
|
||||
profiler with HTTP access.
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start
|
||||
the profiler with HTTP access.
|
||||
|
||||
STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.
|
||||
|
||||
@@ -167,6 +168,20 @@ are mostly useful for developers. Use with care.
|
||||
STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
|
||||
supported on Windows.
|
||||
|
||||
STDEADLOCK Used for debugging internal deadlocks. Use only under
|
||||
direction of a developer.
|
||||
|
||||
STDEADLOCKTIMEOUT Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STDEADLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STNORESTART Equivalent to the -no-restart argument. Disable the
|
||||
Syncthing monitor process which handles restarts for some
|
||||
configuration changes, upgrades, crashes and also log file
|
||||
writing (stdout is still written).
|
||||
|
||||
STNOUPGRADE Disable automatic upgrades.
|
||||
|
||||
STHASHING Select the SHA256 hashing package to use. Possible values
|
||||
@@ -179,7 +194,7 @@ are mostly useful for developers. Use with care.
|
||||
|
||||
GOGC Percentage of heap growth at which to trigger GC. Default is
|
||||
100. Lower numbers keep peak memory usage down, at the price
|
||||
of CPU usage (ie. performance).
|
||||
of CPU usage (i.e. performance).
|
||||
|
||||
|
||||
Debugging Facilities
|
||||
@@ -199,7 +214,8 @@ var (
|
||||
|
||||
type RuntimeOptions struct {
|
||||
confDir string
|
||||
reset bool
|
||||
resetDatabase bool
|
||||
resetDeltaIdxs bool
|
||||
showVersion bool
|
||||
showPaths bool
|
||||
doUpgrade bool
|
||||
@@ -210,8 +226,10 @@ type RuntimeOptions struct {
|
||||
hideConsole bool
|
||||
logFile string
|
||||
auditEnabled bool
|
||||
auditFile string
|
||||
verbose bool
|
||||
paused bool
|
||||
unpaused bool
|
||||
guiAddress string
|
||||
guiAPIKey string
|
||||
generateDir string
|
||||
@@ -258,8 +276,9 @@ func parseCommandLineOptions() RuntimeOptions {
|
||||
flag.IntVar(&options.logFlags, "logflags", options.logFlags, "Select information in log line prefix (see below)")
|
||||
flag.BoolVar(&options.noBrowser, "no-browser", false, "Do not start browser")
|
||||
flag.BoolVar(&options.browserOnly, "browser-only", false, "Open GUI in browser")
|
||||
flag.BoolVar(&options.noRestart, "no-restart", options.noRestart, "Do not restart; just exit")
|
||||
flag.BoolVar(&options.reset, "reset", false, "Reset the database")
|
||||
flag.BoolVar(&options.noRestart, "no-restart", options.noRestart, "Disable monitor process, managed restarts and log file writing")
|
||||
flag.BoolVar(&options.resetDatabase, "reset-database", false, "Reset the database, forcing a full rescan and resync")
|
||||
flag.BoolVar(&options.resetDeltaIdxs, "reset-deltas", false, "Reset delta index IDs, forcing a full index exchange")
|
||||
flag.BoolVar(&options.doUpgrade, "upgrade", false, "Perform upgrade")
|
||||
flag.BoolVar(&options.doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
|
||||
flag.BoolVar(&options.showVersion, "version", false, "Show version")
|
||||
@@ -267,8 +286,10 @@ func parseCommandLineOptions() RuntimeOptions {
|
||||
flag.StringVar(&options.upgradeTo, "upgrade-to", options.upgradeTo, "Force upgrade directly from specified URL")
|
||||
flag.BoolVar(&options.auditEnabled, "audit", false, "Write events to audit file")
|
||||
flag.BoolVar(&options.verbose, "verbose", false, "Print verbose log output")
|
||||
flag.BoolVar(&options.paused, "paused", false, "Start with all devices paused")
|
||||
flag.BoolVar(&options.paused, "paused", false, "Start with all devices and folders paused")
|
||||
flag.BoolVar(&options.unpaused, "unpaused", false, "Start with all devices and folders unpaused")
|
||||
flag.StringVar(&options.logFile, "logfile", options.logFile, "Log file name (use \"-\" for stdout)")
|
||||
flag.StringVar(&options.auditFile, "auditfile", options.auditFile, "Specify audit file (use \"-\" for stdout, \"--\" for stderr)")
|
||||
if runtime.GOOS == "windows" {
|
||||
// Allow user to hide the console window
|
||||
flag.BoolVar(&options.hideConsole, "no-console", false, "Hide console window")
|
||||
@@ -367,7 +388,7 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
if options.reset {
|
||||
if options.resetDatabase {
|
||||
resetDB()
|
||||
return
|
||||
}
|
||||
@@ -541,7 +562,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
l.SetPrefix("[start] ")
|
||||
|
||||
if runtimeOptions.auditEnabled {
|
||||
startAuditing(mainService)
|
||||
startAuditing(mainService, runtimeOptions.auditFile)
|
||||
}
|
||||
|
||||
if runtimeOptions.verbose {
|
||||
@@ -554,8 +575,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
// Event subscription for the API; must start early to catch the early
|
||||
// events. The LocalChangeDetected event might overwhelm the event
|
||||
// receiver in some situations so we will not subscribe to it here.
|
||||
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected), 1000)
|
||||
diskSub := events.NewBufferedSubscription(events.Default.Subscribe(events.LocalChangeDetected), 1000)
|
||||
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected&^events.RemoteChangeDetected), 1000)
|
||||
diskSub := events.NewBufferedSubscription(events.Default.Subscribe(events.LocalChangeDetected|events.RemoteChangeDetected), 1000)
|
||||
|
||||
if len(os.Getenv("GOMAXPROCS")) == 0 {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
@@ -620,6 +641,10 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
InsecureSkipVerify: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: []uint16{
|
||||
0xCCA8, // TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, Go 1.8
|
||||
0xCCA9, // TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, Go 1.8
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
@@ -629,9 +654,6 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
},
|
||||
}
|
||||
|
||||
// If the read or write rate should be limited, set up a rate limiter for it.
|
||||
// This will be used on connections created in the connect and listen routines.
|
||||
|
||||
opts := cfg.Options()
|
||||
|
||||
if !opts.SymlinksEnabled {
|
||||
@@ -663,6 +685,11 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
|
||||
}
|
||||
|
||||
if runtimeOptions.resetDeltaIdxs {
|
||||
l.Infoln("Reinitializing delta index IDs")
|
||||
ldb.DropDeltaIndexIDs()
|
||||
}
|
||||
|
||||
protectedFiles := []string{
|
||||
locations[locDatabase],
|
||||
locations[locConfigFile],
|
||||
@@ -697,14 +724,17 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
m.StartDeadlockDetector(20 * time.Minute)
|
||||
}
|
||||
|
||||
if runtimeOptions.paused {
|
||||
for device := range cfg.Devices() {
|
||||
m.PauseDevice(device)
|
||||
}
|
||||
if runtimeOptions.unpaused {
|
||||
setPauseState(cfg, false)
|
||||
} else if runtimeOptions.paused {
|
||||
setPauseState(cfg, true)
|
||||
}
|
||||
|
||||
// Add and start folders
|
||||
for _, folderCfg := range cfg.Folders() {
|
||||
if folderCfg.Paused {
|
||||
continue
|
||||
}
|
||||
m.AddFolder(folderCfg)
|
||||
m.StartFolder(folderCfg.ID)
|
||||
}
|
||||
@@ -915,11 +945,31 @@ func copyFile(src, dst string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func startAuditing(mainService *suture.Supervisor) {
|
||||
auditFile := timestampedLoc(locAuditLog)
|
||||
fd, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
l.Fatalln("Audit:", err)
|
||||
func startAuditing(mainService *suture.Supervisor, auditFile string) {
|
||||
|
||||
var fd io.Writer
|
||||
var err error
|
||||
var auditDest string
|
||||
var auditFlags int
|
||||
|
||||
if auditFile == "-" {
|
||||
fd = os.Stdout
|
||||
auditDest = "stdout"
|
||||
} else if auditFile == "--" {
|
||||
fd = os.Stderr
|
||||
auditDest = "stderr"
|
||||
} else {
|
||||
if auditFile == "" {
|
||||
auditFile = timestampedLoc(locAuditLog)
|
||||
auditFlags = os.O_WRONLY | os.O_CREATE | os.O_EXCL
|
||||
} else {
|
||||
auditFlags = os.O_WRONLY | os.O_CREATE | os.O_APPEND
|
||||
}
|
||||
fd, err = os.OpenFile(auditFile, auditFlags, 0600)
|
||||
if err != nil {
|
||||
l.Fatalln("Audit:", err)
|
||||
}
|
||||
auditDest = auditFile
|
||||
}
|
||||
|
||||
auditService := newAuditService(fd)
|
||||
@@ -929,7 +979,7 @@ func startAuditing(mainService *suture.Supervisor) {
|
||||
// ensure we capture all events from the start.
|
||||
auditService.WaitForStart()
|
||||
|
||||
l.Infoln("Audit log in", auditFile)
|
||||
l.Infoln("Audit log in", auditDest)
|
||||
}
|
||||
|
||||
func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, diskSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
|
||||
@@ -1066,7 +1116,7 @@ func getFreePort(host string, ports ...int) (int, error) {
|
||||
}
|
||||
|
||||
func standbyMonitor() {
|
||||
restartDelay := time.Duration(60 * time.Second)
|
||||
restartDelay := 60 * time.Second
|
||||
now := time.Now()
|
||||
for {
|
||||
time.Sleep(10 * time.Second)
|
||||
@@ -1199,3 +1249,16 @@ func showPaths() {
|
||||
fmt.Printf("GUI override directory:\n\t%s\n\n", locations[locGUIAssets])
|
||||
fmt.Printf("Default sync folder directory:\n\t%s\n\n", locations[locDefFolder])
|
||||
}
|
||||
|
||||
func setPauseState(cfg *config.Wrapper, paused bool) {
|
||||
raw := cfg.RawCopy()
|
||||
for i := range raw.Devices {
|
||||
raw.Devices[i].Paused = paused
|
||||
}
|
||||
for i := range raw.Folders {
|
||||
raw.Folders[i].Paused = paused
|
||||
}
|
||||
if err := cfg.Replace(raw); err != nil {
|
||||
l.Fatalln("Cannot adjust paused state:", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +45,10 @@ func (c *mockedConfig) Devices() map[protocol.DeviceID]config.DeviceConfiguratio
	return nil
}

func (c *mockedConfig) SetDevice(config.DeviceConfiguration) error {
	return nil
}

func (c *mockedConfig) Save() error {
	return nil
}
@@ -202,8 +202,24 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
			}

			l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
			l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
			l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
			if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
				l.Warnln(`
*********************************************************************************
* Crash due to corrupt database.                                                *
*                                                                               *
* This crash usually occurs due to one of the following reasons:                *
* - Syncthing being stopped abruptly (killed/loss of power)                     *
* - Bad hardware (memory/disk issues)                                           *
* - Software that affects disk writes (SSD caching software and similar)        *
*                                                                               *
* Please see the following URL for instructions on how to recover:              *
* https://docs.syncthing.net/users/faq.html#my-syncthing-database-is-corrupt    *
*********************************************************************************
`)
			} else {
				l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
				l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
			}

			stdoutMut.Lock()
			for _, line := range stdoutFirstLines {
@@ -134,7 +134,7 @@ func reportData(cfg configIntf, m modelIntf) map[string]interface{} {
	for _, cfg := range cfg.Folders() {
		rescanIntvs = append(rescanIntvs, cfg.RescanIntervalS)

		if cfg.Type == config.FolderTypeReadOnly {
		if cfg.Type == config.FolderTypeSendOnly {
			folderUses["readonly"]++
		}
		if cfg.IgnorePerms {
@@ -94,9 +94,12 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
|
||||
case events.LocalChangeDetected:
|
||||
data := ev.Data.(map[string]string)
|
||||
// Local change detected in folder "foo": modified file /Users/jb/whatever
|
||||
return fmt.Sprintf("Local change detected in folder %q: %s %s %s", data["folder"], data["action"], data["type"], data["path"])
|
||||
|
||||
case events.RemoteChangeDetected:
|
||||
data := ev.Data.(map[string]string)
|
||||
return fmt.Sprintf("Remote change detected in folder %q: %s %s %s", data["folder"], data["action"], data["type"], data["path"])
|
||||
|
||||
case events.RemoteIndexUpdated:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Device %v sent an index update for %q with %d items", data["device"], data["folder"], data["items"])
|
||||
@@ -151,7 +154,7 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
if total > 0 {
|
||||
pct = 100 * current / total
|
||||
}
|
||||
return fmt.Sprintf("Scanning folder %q, %d%% done (%.01f MB/s)", folder, pct, rate)
|
||||
return fmt.Sprintf("Scanning folder %q, %d%% done (%.01f MiB/s)", folder, pct, rate)
|
||||
|
||||
case events.DevicePaused:
|
||||
data := ev.Data.(map[string]string)
|
||||
@@ -163,6 +166,18 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
device := data["device"]
|
||||
return fmt.Sprintf("Device %v was resumed", device)
|
||||
|
||||
case events.FolderPaused:
|
||||
data := ev.Data.(map[string]string)
|
||||
id := data["id"]
|
||||
label := data["label"]
|
||||
return fmt.Sprintf("Folder %v (%v) was paused", id, label)
|
||||
|
||||
case events.FolderResumed:
|
||||
data := ev.Data.(map[string]string)
|
||||
id := data["id"]
|
||||
label := data["label"]
|
||||
return fmt.Sprintf("Folder %v (%v) was resumed", id, label)
|
||||
|
||||
case events.ListenAddressesChanged:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
address := data["address"]
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
"Command": "Comando",
|
||||
"Comment, when used at the start of a line": "Comentar, quant s'utilitza al principi d'una línia",
|
||||
"Compression": "Compresió",
|
||||
"Configured": "Configured",
|
||||
"Configured": "Configurat",
|
||||
"Connection Error": "Error de connexió",
|
||||
"Connection Type": "Tipus de connexió",
|
||||
"Copied from elsewhere": "Copiat de qualsevol lloc",
|
||||
@@ -45,15 +45,15 @@
|
||||
"Device Name": "Nom del dispositiu",
|
||||
"Devices": "Dispositius",
|
||||
"Disconnected": "Desconnectat",
|
||||
"Discovered": "Discovered",
|
||||
"Discovered": "Descobert",
|
||||
"Discovery": "Descobriment",
|
||||
"Documentation": "Documentació",
|
||||
"Download Rate": "Velocitat de descàrrega",
|
||||
"Downloaded": "Descarregat",
|
||||
"Downloading": "Descarregant",
|
||||
"Edit": "Editar",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Editar Dispositiu",
|
||||
"Edit Folder": "Editar Carpeta",
|
||||
"Editing": "Editant",
|
||||
"Enable NAT traversal": "Permetre NAT transversal",
|
||||
"Enable Relaying": "Permetre Transmissions",
|
||||
@@ -97,7 +97,7 @@
|
||||
"Last Scan": "Últim escaneig",
|
||||
"Last seen": "Vist per última vegada",
|
||||
"Later": "Més tard",
|
||||
"Latest Change": "Latest Change",
|
||||
"Latest Change": "Últim Canvi",
|
||||
"Listeners": "Escoltants",
|
||||
"Local Discovery": "Descobriment local",
|
||||
"Local State": "Estat local",
|
||||
@@ -138,7 +138,7 @@
|
||||
"Quick guide to supported patterns": "Guía ràpida de patrons suportats",
|
||||
"RAM Utilization": "Utilització de la RAM",
|
||||
"Random": "Aleatori",
|
||||
"Reduced by ignore patterns": "Reduced by ignore patterns",
|
||||
"Reduced by ignore patterns": "Reduït ignorant patrons",
|
||||
"Release Notes": "Notes de la versió",
|
||||
"Remote Devices": "Dispositius Remots",
|
||||
"Remove": "Eliminar",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Rastrejant",
|
||||
"Select the devices to share this folder with.": "Selecciona els dispositius amb els que compartir aquesta carpeta.",
|
||||
"Select the folders to share with this device.": "Selecciona les carpetes per a compartir amb aquest dispositiu.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "Enviar i Rebre",
|
||||
"Send Only": "Enviar Solament",
|
||||
"Settings": "Ajustos",
|
||||
"Share": "Compartir",
|
||||
"Share Folder": "Compartir carpeta",
|
||||
@@ -236,8 +236,8 @@
|
||||
"Yes": "Sí",
|
||||
"You must keep at least one version.": "Es deu mantindre al menys una versió.",
|
||||
"days": "dies",
|
||||
"directories": "directories",
|
||||
"files": "files",
|
||||
"directories": "directoris",
|
||||
"files": "arxius",
|
||||
"full documentation": "Documentació completa",
|
||||
"items": "Elements",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} vol compartit la carpeta \"{{folder}}\".",
|
||||
|
||||
@@ -7,9 +7,9 @@
|
||||
"Actions": "Aktionen",
|
||||
"Add": "Hinzufügen",
|
||||
"Add Device": "Gerät hinzufügen",
|
||||
"Add Folder": "Verzeichnis hinzufügen",
|
||||
"Add Remote Device": "Fern-Gerät hinzufügen",
|
||||
"Add new folder?": "Neues Verzeichnis hinzufügen?",
|
||||
"Add Folder": "Ordner hinzufügen",
|
||||
"Add Remote Device": "Gerät hinzufügen",
|
||||
"Add new folder?": "Neuen Ordner hinzufügen?",
|
||||
"Address": "Adresse",
|
||||
"Addresses": "Adressen",
|
||||
"Advanced": "Erweitert",
|
||||
@@ -18,7 +18,7 @@
|
||||
"All Data": "Alle Daten",
|
||||
"Allow Anonymous Usage Reporting?": "Übertragung von anonymen Nutzungsberichten erlauben?",
|
||||
"Alphabetic": "Alphabetisch",
|
||||
"An external command handles the versioning. It has to remove the file from the synced folder.": "Ein externer Programmaufruf handhabt die Versionierung. Es muss die Datei aus dem zu synchronisierendem Verzeichnis entfernen.",
|
||||
"An external command handles the versioning. It has to remove the file from the synced folder.": "Ein externer Programmaufruf handhabt die Versionierung. Es muss die Datei aus dem zu synchronisierendem Ordner entfernen.",
|
||||
"Anonymous Usage Reporting": "Anonymer Nutzungsbericht",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Alle Geräte, die beim Verteiler eingetragen sind, werden auch bei diesem Gerät eingetragen",
|
||||
"Automatic upgrades": "Automatische Updates aktivieren",
|
||||
@@ -52,8 +52,8 @@
|
||||
"Downloaded": "Heruntergeladen",
|
||||
"Downloading": "Lädt herunter",
|
||||
"Edit": "Bearbeiten",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Gerät bearbeiten",
|
||||
"Edit Folder": "Ordner bearbeiten",
|
||||
"Editing": "Bearbeitet",
|
||||
"Enable NAT traversal": "NAT-Durchdringung aktivieren",
|
||||
"Enable Relaying": "Weiterleitung aktivieren",
|
||||
@@ -65,15 +65,15 @@
|
||||
"File Pull Order": "Dateiübertragungsreihenfolge",
|
||||
"File Versioning": "Dateiversionierung",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Dateizugriffsrechte beim Suchen nach Veränderungen ignorieren. Bei FAT-Dateisystemen zu verwenden.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Wenn Syncthing Dateien ersetzt oder löscht, werden sie in das .stversions Verzeichnis verschoben.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, datiert in das Verzeichnis .stversions verschoben.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Wenn Syncthing Dateien ersetzt oder löscht, werden sie in den Ordner .stversions verschoben.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, datiert in den Ordner .stversions verschoben.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Dateien sind auf diesem Gerät schreibgeschützt. Auf diesem Gerät durchgeführte Veränderungen werden aber auf den Rest des Verbunds übertragen.",
|
||||
"Folder": "Verzeichnis",
|
||||
"Folder ID": "Verzeichniskennung",
|
||||
"Folder Label": "Verzeichnisbezeichnung",
|
||||
"Folder Path": "Verzeichnispfad",
|
||||
"Folder Type": "Verzeichnistyp",
|
||||
"Folders": "Verzeichnisse",
|
||||
"Folder": "Ordner",
|
||||
"Folder ID": "Ordnerkennung",
|
||||
"Folder Label": "Ordnerbezeichnung",
|
||||
"Folder Path": "Ordnerpfad",
|
||||
"Folder Type": "Ordnertyp",
|
||||
"Folders": "Ordner",
|
||||
"GUI": "GUI",
|
||||
"GUI Authentication Password": "Passwort für Zugang zur Benutzeroberfläche",
|
||||
"GUI Authentication User": "Nutzername für Zugang zur Benutzeroberfläche",
|
||||
@@ -88,7 +88,7 @@
|
||||
"Ignore Patterns": "Ignoriermuster",
|
||||
"Ignore Permissions": "Berechtigungen ignorieren",
|
||||
"Incoming Rate Limit (KiB/s)": "Limit Datenrate (eingehend) (KB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Eine falsche Konfiguration kann den Verzeichnisinhalt beschädigen und Syncthing in einen unausführbaren Zustand versetzen.",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Eine falsche Konfiguration kann den Ordnerinhalt beschädigen und Syncthing in einen unausführbaren Zustand versetzen.",
|
||||
"Introducer": "Verteilergerät",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Umkehrung der angegebenen Bedingung (z.B. schließe nicht aus)",
|
||||
"Keep Versions": "Versionen erhalten",
|
||||
@@ -108,10 +108,10 @@
|
||||
"Metadata Only": "Nur Metadaten",
|
||||
"Minimum Free Disk Space": "Minimal freier Festplattenspeicher",
|
||||
"Move to top of queue": "An den Anfang der Warteschlange setzen",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Verschachteltes Maskenzeichen (wird für verschachtelte Verzeichnisse verwendet)",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Verschachteltes Maskenzeichen (wird für verschachtelte Ordner verwendet)",
|
||||
"Never": "Nie",
|
||||
"New Device": "Neues Gerät",
|
||||
"New Folder": "Neues Verzeichnis",
|
||||
"New Folder": "Neuer Ordner",
|
||||
"Newest First": "Neueste zuerst",
|
||||
"No": "Nein",
|
||||
"No File Versioning": "Keine Dateiversionierung",
|
||||
@@ -120,14 +120,14 @@
|
||||
"OK": "OK",
|
||||
"Off": "Aus",
|
||||
"Oldest First": "Älteste zuerst",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "Optionale beschreibende Bezeichnung des Verzeichnisses. Kann auf jedem Gerät unterschiedlich sein.",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "Optionale beschreibende Bezeichnung des Ordners. Kann auf jedem Gerät unterschiedlich sein.",
|
||||
"Options": "Optionen",
|
||||
"Out of Sync": "Nicht synchronisiert",
|
||||
"Out of Sync Items": "Nicht synchronisierte Objekte",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limit Datenrate (ausgehend) (KB/s)",
|
||||
"Override Changes": "Änderungen überschreiben",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Verzeichnis auf dem lokalen Gerät. Verzeichnis wird erzeugt, wenn es nicht existiert. Das Tilden-Zeichen (~) kann als Abkürzung benutzt werden für",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Pfad in dem alte Dateiversionen gespeichert werden sollen (ohne Angabe wird das Verzeichnis .stversions im Verzeichnis verwendet).",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Ordner auf dem lokalen Gerät. Ordner wird erzeugt, wenn er nicht existiert. Das Tilden-Zeichen (~) kann als Abkürzung benutzt werden für",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Pfad in dem alte Dateiversionen gespeichert werden sollen (ohne Angabe wird der Ordner .stversions im Ordner verwendet).",
|
||||
"Pause": "Pause",
|
||||
"Paused": "Pausiert",
|
||||
"Please consult the release notes before performing a major upgrade.": "Bitte lesen Sie die Veröffentlichungsnotizen bevor Sie eine neue Hauptversion installieren.",
|
||||
@@ -142,7 +142,7 @@
|
||||
"Release Notes": "Veröffentlichungsnotizen",
|
||||
"Remote Devices": "Fern-Geräte",
|
||||
"Remove": "Entfernen",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Erforderlicher Bezeichner für das Verzeichnis. Muss auf allen Verbund-Geräten gleich sein.",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Erforderlicher Bezeichner für den Ordner. Muss auf allen Verbund-Geräten gleich sein.",
|
||||
"Rescan": "Neu scannen",
|
||||
"Rescan All": "Alle neu scannen",
|
||||
"Rescan Interval": "Scanintervall",
|
||||
@@ -154,25 +154,25 @@
|
||||
"Save": "Speichern",
|
||||
"Scan Time Remaining": "Zeit für Scan verbleibend",
|
||||
"Scanning": "Scannen",
|
||||
"Select the devices to share this folder with.": "Wähle die Geräte aus, mit denen Du dieses Verzeichnis teilen willst.",
|
||||
"Select the folders to share with this device.": "Wähle die Verzeichnisse aus, die du mit diesem Gerät teilen möchtest",
|
||||
"Select the devices to share this folder with.": "Wähle die Geräte aus, mit denen Du diesen Ordner teilen willst.",
|
||||
"Select the folders to share with this device.": "Wähle die Ordner aus, die Du mit diesem Gerät teilen möchtest",
|
||||
"Send & Receive": "Senden & empfangen",
|
||||
"Send Only": "Nur senden",
|
||||
"Settings": "Einstellungen",
|
||||
"Share": "Teilen",
|
||||
"Share Folder": "Teile Verzeichnis",
|
||||
"Share Folders With Device": "Teile Verzeichnisse mit diesem Gerät",
|
||||
"Share Folder": "Ordner teilen",
|
||||
"Share Folders With Device": "Ordner mit diesem Gerät teilen",
|
||||
"Share With Devices": "Teile mit diesen Geräten",
|
||||
"Share this folder?": "Dieses Verzeichnis teilen?",
|
||||
"Share this folder?": "Diesen Ordner teilen?",
|
||||
"Shared With": "Geteilt mit",
|
||||
"Show ID": "Kennung anzeigen",
|
||||
"Show ID": "Eigene Kennung",
|
||||
"Show QR": "Zeige QR Code",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Wird anstatt der Gerätekennung im Verbund-Status angezeigt. Wird als optionaler Standardname an andere Geräte bekannt gegeben.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Wird auf diesem Gerät als Gerätename angezeigt und an die anderen Geräte im Geräte-Verbund weitergegeben. Wenn kein Gerätename anegegeben wird, wird der Name des entfernten Gerätes genommen.",
|
||||
"Shutdown": "Herunterfahren",
|
||||
"Shutdown Complete": "Vollständig Heruntergefahren",
|
||||
"Simple File Versioning": "Einfache Dateiversionierung",
|
||||
"Single level wildcard (matches within a directory only)": "Einzelnes Maskenzeichen (wird für ein einzelnes Verzeichnis verwendet)",
|
||||
"Single level wildcard (matches within a directory only)": "Einzelnes Maskenzeichen (wird für einen einzelnen Ordner verwendet)",
|
||||
"Smallest First": "Kleinstes zuerst",
|
||||
"Source Code": "Quellcode",
|
||||
"Staggered File Versioning": "Stufenweise Dateiversionierung",
|
||||
@@ -193,12 +193,12 @@
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Die Konfiguration wurde gespeichert, aber noch nicht aktiviert. Syncthing muss neugestartet werden, um die neue Konfiguration zu übernehmen.",
|
||||
"The device ID cannot be blank.": "Die Gerätekennung darf nicht leer sein.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Die hier einzutragende Gerätekennung kann im Dialog \"Aktionen > Kennung anzeigen\" auf dem anderen Gerät gefunden werden. Leerzeichen und Bindestriche sind optional (werden ignoriert).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Nutzungsbericht wird täglich gesendet. Er wird verwendet, um Statistiken über verwendete Betriebssysteme, Verzeichnis-Größen und Programm-Versionen zu erstellen. Sollte der Bericht in Zukunft weitere Daten erfassen, wird dieses Fenster erneut angezeigt.",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Nutzungsbericht wird täglich gesendet. Er wird verwendet, um Statistiken über verwendete Betriebssysteme, Ordnergrößen und Programmversionen zu erstellen. Sollte der Bericht in Zukunft weitere Daten erfassen, wird dieses Fenster erneut angezeigt.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Die eingegebene Gerätekennung scheint nicht gültig zu sein. Es sollte eine 52 oder 56 stellige Zeichenkette aus Buchstaben und Nummern sein. Leerzeichen und Bindestriche sind optional.",
|
||||
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Der erste Kommandozeilenparameter ist der Verzeichnis-Pfad und der zweite Parameter ist der relative Pfad in diesem Verzeichnis.",
|
||||
"The folder ID cannot be blank.": "Die Verzeichniskennung darf nicht leer sein.",
|
||||
"The folder ID must be unique.": "Die Verzeichniskennung muss eindeutig sein.",
|
||||
"The folder path cannot be blank.": "Der Verzeichnispfad darf nicht leer sein.",
|
||||
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Der erste Kommandozeilenparameter ist der Ordnerpfad und der zweite Parameter ist der relative Pfad in diesem Ordner.",
|
||||
"The folder ID cannot be blank.": "Die Ordnerkennung darf nicht leer sein.",
|
||||
"The folder ID must be unique.": "Die Ordnerkennung muss eindeutig sein.",
|
||||
"The folder path cannot be blank.": "Der Ordnerpfad darf nicht leer sein.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Es wird in folgenden Abständen versioniert: In der ersten Stunde wird alle 30 Sekunden eine Version behalten, am ersten Tag eine jede Stunde, in den ersten 30 Tagen eine jeden Tag. Danach wird bis zum angegebenen Höchstalter eine Version pro Woche behalten.",
|
||||
"The following items could not be synchronized.": "Die folgenden Objekte konnten nicht synchronisiert werden.",
|
||||
"The maximum age must be a number and cannot be blank.": "Das Höchstalter muss angegeben werden und eine Zahl sein.",
|
||||
@@ -230,16 +230,16 @@
|
||||
"Version": "Version",
|
||||
"Versions Path": "Versionierungspfad",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Alte Dateiversionen werden automatisch gelöscht, wenn sie älter als das angegebene Höchstalter sind oder die angegebene Höchstzahl an Dateien erreicht ist.",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Warnung, dieser Pfad ist ein Unterverzeichnis des existierenden Verzeichnisses \"{{otherFolder}}\".",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Warnung, dieser Pfad ist ein Unterordner des existierenden Ordners \"{{otherFolder}}\".",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Beachte beim Hinzufügen eines neuen Gerätes, dass dieses Gerät auch auf den anderen Geräten hinzugefügt werden muss.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Beachte bitte beim Hinzufügen eines neuen Verzeichnisses, dass die Verzeichniskennung dazu verwendet wird, Verzeichnisse zwischen Geräten zu verbinden. Die Kennung muss also auf allen Geräten gleich sein, die Groß- und Kleinschreibung muss dabei beachtet werden.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Beachte bitte beim Hinzufügen eines neuen Ordners, dass die Ordnerkennung dazu verwendet wird, Ordner zwischen Geräten zu verbinden. Die Kennung muss also auf allen Geräten gleich sein, die Groß- und Kleinschreibung muss dabei beachtet werden.",
|
||||
"Yes": "Ja",
|
||||
"You must keep at least one version.": "Du musst mindestens eine Version behalten.",
|
||||
"days": "Tage",
|
||||
"directories": "Verzeichnisse",
|
||||
"directories": "Ordner",
|
||||
"files": "Dateien",
|
||||
"full documentation": "Komplette Dokumentation",
|
||||
"items": "Objekte",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} möchte das Verzeichnis \"{{folder}}\" teilen.",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} möchte das Verzeichnis \"{{folderlabel}}\" ({{folder}}) teilen."
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} möchte den Ordner \"{{folder}}\" teilen.",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} möchte den Ordner \"{{folderlabel}}\" ({{folder}}) teilen."
|
||||
}
|
||||
@@ -88,7 +88,7 @@
|
||||
"Ignore Patterns": "Πρότυπο για αγνόηση",
|
||||
"Ignore Permissions": "Αγνόησε τα δικαιώματα",
|
||||
"Incoming Rate Limit (KiB/s)": "Περιορισμός ταχύτητας λήψης (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Με μια εσφαλμένη ρύθμιση μπορεί να προκληθεί ζημιά στα περιεχόμενα των φακέλων και το Syncthing ενδέχεται να σταματήσει να λειτουργεί.",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Με μια εσφαλμένη ρύθμιση μπορεί να προκληθεί ζημιά στα περιεχόμενα των φακέλων και το Syncthing να σταματήσει να λειτουργεί.",
|
||||
"Introducer": "Βασικός κόμβος",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Αντιστροφή της δοσμένης συνθήκης (π.χ. να μην εξαιρείς) ",
|
||||
"Keep Versions": "Διατήρηση εκδόσεων",
|
||||
|
||||
@@ -52,6 +52,8 @@
|
||||
"Downloaded": "Downloaded",
|
||||
"Downloading": "Downloading",
|
||||
"Edit": "Edit",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Editing": "Editing",
|
||||
"Enable NAT traversal": "Enable NAT traversal",
|
||||
"Enable Relaying": "Enable Relaying",
|
||||
@@ -95,6 +97,7 @@
|
||||
"Last Scan": "Last Scan",
|
||||
"Last seen": "Last seen",
|
||||
"Later": "Later",
|
||||
"Latest Change": "Latest Change",
|
||||
"Listeners": "Listeners",
|
||||
"Local Discovery": "Local Discovery",
|
||||
"Local State": "Local State",
|
||||
@@ -153,6 +156,8 @@
|
||||
"Scanning": "Scanning",
|
||||
"Select the devices to share this folder with.": "Select the devices to share this folder with.",
|
||||
"Select the folders to share with this device.": "Select the folders to share with this device.",
|
||||
"Send \u0026 Receive": "Send \u0026 Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Settings": "Settings",
|
||||
"Share": "Share",
|
||||
"Share Folder": "Share Folder",
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
"Command": "Acción",
|
||||
"Comment, when used at the start of a line": "Comentar, cuando se usa al comienzo de una línea",
|
||||
"Compression": "Compresión",
|
||||
"Configured": "Configured",
|
||||
"Configured": "Configurado",
|
||||
"Connection Error": "Error de conexión",
|
||||
"Connection Type": "Tipo de conexión",
|
||||
"Copied from elsewhere": "Copiado de otro sitio",
|
||||
@@ -45,15 +45,15 @@
|
||||
"Device Name": "Nombre del Dispositivo",
|
||||
"Devices": "Dispositivos",
|
||||
"Disconnected": "Desconectado",
|
||||
"Discovered": "Discovered",
|
||||
"Discovered": "Descubierto",
|
||||
"Discovery": "Descubrimiento",
|
||||
"Documentation": "Documentación",
|
||||
"Download Rate": "Velocidad de descarga",
|
||||
"Downloaded": "Descargado",
|
||||
"Downloading": "Descargando",
|
||||
"Edit": "Editar",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Editar Dispositivo",
|
||||
"Edit Folder": "Editar Carpeta",
|
||||
"Editing": "Editando",
|
||||
"Enable NAT traversal": "Permitir NAT transversal",
|
||||
"Enable Relaying": "Habilitar Retransmisión",
|
||||
@@ -97,7 +97,7 @@
|
||||
"Last Scan": "Último escaneo",
|
||||
"Last seen": "Visto por última vez",
|
||||
"Later": "Más tarde",
|
||||
"Latest Change": "Latest Change",
|
||||
"Latest Change": "Último Cambio",
|
||||
"Listeners": "Oyentes",
|
||||
"Local Discovery": "Descubrimiento local",
|
||||
"Local State": "Estado local",
|
||||
@@ -138,7 +138,7 @@
|
||||
"Quick guide to supported patterns": "Guía rápida de patrones soportados",
|
||||
"RAM Utilization": "Uso de RAM",
|
||||
"Random": "Aleatorio",
|
||||
"Reduced by ignore patterns": "Reduced by ignore patterns",
|
||||
"Reduced by ignore patterns": "Reducido por patrones de ignorar",
|
||||
"Release Notes": "Notas de la versión",
|
||||
"Remote Devices": "Otros dispositivos",
|
||||
"Remove": "Eliminar",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Analizando",
|
||||
"Select the devices to share this folder with.": "Selecciona los dispositivos con los que compartir esta carpeta.",
|
||||
"Select the folders to share with this device.": "Selecciona las carpetas para compartir con este dispositivo.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "Enviar y Recibir",
|
||||
"Send Only": "Solo Enviar",
|
||||
"Settings": "Ajustes",
|
||||
"Share": "Compartir",
|
||||
"Share Folder": "Compartir carpeta",
|
||||
@@ -236,8 +236,8 @@
|
||||
"Yes": "Si",
|
||||
"You must keep at least one version.": "Debes mantener al menos una versión.",
|
||||
"days": "días",
|
||||
"directories": "directories",
|
||||
"files": "files",
|
||||
"directories": "directorios",
|
||||
"files": "archivos",
|
||||
"full documentation": "Documentación completa",
|
||||
"items": "Elementos",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} quiere compartir la carpeta \"{{folder}}\".",
|
||||
|
||||
@@ -52,8 +52,8 @@
|
||||
"Downloaded": "Ladattu",
|
||||
"Downloading": "Ladataan",
|
||||
"Edit": "Muokkaa",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Muokkaa laitetta",
|
||||
"Edit Folder": "Muokkaa kansiota",
|
||||
"Editing": "Muokkaus",
|
||||
"Enable NAT traversal": "Aktivoi osoitteenmuunnoksen kierto",
|
||||
"Enable Relaying": "Aktivoi yhteyden välitys",
|
||||
@@ -97,7 +97,7 @@
|
||||
"Last Scan": "Viimeisin skannaus",
|
||||
"Last seen": "Nähty viimeksi",
|
||||
"Later": "Myöhemmin",
|
||||
"Latest Change": "Latest Change",
|
||||
"Latest Change": "Viimeisin muutos",
|
||||
"Listeners": "Kuuntelijat",
|
||||
"Local Discovery": "Paikallinen etsintä",
|
||||
"Local State": "Paikallinen tila",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Skannataan",
|
||||
"Select the devices to share this folder with.": "Valitse laitteet, joiden kanssa tämä kansio jaetaan.",
|
||||
"Select the folders to share with this device.": "Valitse kansiot jaettavaksi tämän laitteen kanssa.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "Lähetä & vastaanota",
|
||||
"Send Only": "Vain lähetys",
|
||||
"Settings": "Asetukset",
|
||||
"Share": "Jaa",
|
||||
"Share Folder": "Jaa kansio",
|
||||
|
||||
@@ -49,8 +49,8 @@
|
||||
"Discovery": "Découverte",
|
||||
"Documentation": "Documentation",
|
||||
"Download Rate": "Débit de réception",
|
||||
"Downloaded": "Téléchargé",
|
||||
"Downloading": "Téléchargement",
|
||||
"Downloaded": "Reçu",
|
||||
"Downloading": "Réception",
|
||||
"Edit": "Modifier",
|
||||
"Edit Device": "Modifier l'appareil",
|
||||
"Edit Folder": "Modifier le partage",
|
||||
@@ -87,7 +87,7 @@
|
||||
"Ignore": "Ignorer",
|
||||
"Ignore Patterns": "Règles d'exclusion",
|
||||
"Ignore Permissions": "Ignorer les permissions",
|
||||
"Incoming Rate Limit (KiB/s)": "Limite du débit entrant (KiB/s)",
|
||||
"Incoming Rate Limit (KiB/s)": "Limite du débit de réception (Kio/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Une configuration incorrecte peut créer des dommages dans vos répertoires et mettre Syncthing hors-service.",
|
||||
"Introducer": "Appareil introducteur",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inverser la condition donnée (i.e. ne pas exclure)",
|
||||
@@ -124,7 +124,7 @@
|
||||
"Options": "Options",
|
||||
"Out of Sync": "Désynchronisé",
|
||||
"Out of Sync Items": "Éléments non synchronisés",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limite du débit sortant (KiB/s)",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limite du débit d'envoi (Kio/s)",
|
||||
"Override Changes": "Écraser les changements",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Chemin vers le répertoire dans l'appareil local. Il sera créé s'il n'existe pas. Vous pouvez entrer un chemin absolu (p.ex \"/home/moi/Sync/Exemple\") ou relatif à celui du programme (p.ex \"..\\Partages\\Exemple\" - utile pour installation portable). Le caractère tilde (~, ou ~+Espace sous Windows XP+Azerty) peut être utilisé comme raccourci vers",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Chemin où les versions doivent être conservées (laisser vide pour le chemin par défaut de .stversions dans le répertoire)",
|
||||
@@ -157,7 +157,7 @@
|
||||
"Select the devices to share this folder with.": "Synchroniser avec :",
|
||||
"Select the folders to share with this device.": "Sélectionner les partages auxquels participe cet appareil.",
|
||||
"Send & Receive": "Envoi & réception",
|
||||
"Send Only": "Envoi seulement",
|
||||
"Send Only": "Envoi (lecture seule)",
|
||||
"Settings": "Configuration",
|
||||
"Share": "Partager",
|
||||
"Share Folder": "Partager",
|
||||
|
||||
@@ -48,9 +48,9 @@
|
||||
"Discovered": "Découvert",
|
||||
"Discovery": "Découverte",
|
||||
"Documentation": "Documentation",
|
||||
"Download Rate": "Vitesse de réception",
|
||||
"Downloaded": "Téléchargé",
|
||||
"Downloading": "Téléchargement",
|
||||
"Download Rate": "Débit de réception",
|
||||
"Downloaded": "Reçu",
|
||||
"Downloading": "Réception",
|
||||
"Edit": "Modifier",
|
||||
"Edit Device": "Modifier l'appareil",
|
||||
"Edit Folder": "Modifier le partage",
|
||||
@@ -87,7 +87,7 @@
|
||||
"Ignore": "Ignorer",
|
||||
"Ignore Patterns": "Exclusions ...",
|
||||
"Ignore Permissions": "Ignorer les permissions",
|
||||
"Incoming Rate Limit (KiB/s)": "Limite du débit de réception (Ko/s)",
|
||||
"Incoming Rate Limit (KiB/s)": "Limite du débit de réception (Kio/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Une configuration incorrecte peut créer des dommages dans vos répertoires et mettre Syncthing hors-service.",
|
||||
"Introducer": "Appareil introducteur",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inverser la condition donnée (i.e. ne pas exclure)",
|
||||
@@ -124,7 +124,7 @@
|
||||
"Options": "Options",
|
||||
"Out of Sync": "Désynchronisé",
|
||||
"Out of Sync Items": "Éléments non synchronisés",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limite du débit d'émission (Ko/s)",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limite du débit d'envoi (Kio/s)",
|
||||
"Override Changes": "Écraser les changements",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Chemin vers le répertoire dans l'appareil local. Il sera créé s'il n'existe pas. Vous pouvez entrer un chemin absolu (p.ex \"/home/moi/Sync/Exemple\") ou relatif à celui du programme (p.ex \"..\\Partages\\Exemple\" - utile pour installation portable). Le caractère tilde (~, ou ~+Espace sous Windows XP+Azerty) peut être utilisé comme raccourci vers",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Chemin où les copies doivent être conservées (laisser vide pour le chemin par défaut de .stversions dans le répertoire)",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Analyse en cours",
|
||||
"Select the devices to share this folder with.": "Synchroniser avec :",
|
||||
"Select the folders to share with this device.": "Sélectionner les partages auxquels participe cet appareil.",
|
||||
"Send & Receive": "Envoyer & recevoir",
|
||||
"Send Only": "Envoyer seulement",
|
||||
"Send & Receive": "Envoi & réception",
|
||||
"Send Only": "Envoi (lecture seule)",
|
||||
"Settings": "Configuration",
|
||||
"Share": "Partager",
|
||||
"Share Folder": "Partager",
|
||||
@@ -224,7 +224,7 @@
|
||||
"Upgrade": "Mettre à jour",
|
||||
"Upgrade To {%version%}": "Mettre à jour vers {{version}}",
|
||||
"Upgrading": "Mise à jour de Syncthing",
|
||||
"Upload Rate": "Vitesse d'émission",
|
||||
"Upload Rate": "Débit d'envoi",
|
||||
"Uptime": "Durée de fonctionnement",
|
||||
"Use HTTPS for GUI": "Utiliser l'HTTPS pour le GUI",
|
||||
"Version": "Version",
|
||||
|
||||
@@ -52,8 +52,8 @@
|
||||
"Downloaded": "Ynladen",
|
||||
"Downloading": "Oan it ynladen",
|
||||
"Edit": "Bewurkje",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Apparaat Bewurkje",
|
||||
"Edit Folder": "Map Bewurkje",
|
||||
"Editing": "Bewurkjen",
|
||||
"Enable NAT traversal": "NAT-trochkruse ynskeakelje",
|
||||
"Enable Relaying": "Trochjaan tastean",
|
||||
@@ -97,7 +97,7 @@
|
||||
"Last Scan": "Lêst Skent",
|
||||
"Last seen": "Lêst sjoen",
|
||||
"Later": "Letter",
|
||||
"Latest Change": "Latest Change",
|
||||
"Latest Change": "Meast Resinte Feroarings",
|
||||
"Listeners": "Harkers",
|
||||
"Local Discovery": "Lokale ûntdekking",
|
||||
"Local State": "Lokale tastân",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Oan it skennen",
|
||||
"Select the devices to share this folder with.": "Sykje de apparaten út om dizze map mei te dielen.",
|
||||
"Select the folders to share with this device.": "Sykje de mappen út om mei dit apparaat te dielen.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "Stjoere & Untfange",
|
||||
"Send Only": "Allinnich Stjoere",
|
||||
"Settings": "Ynstellings",
|
||||
"Share": "Diele",
|
||||
"Share Folder": "Map diele",
|
||||
|
||||
@@ -52,8 +52,8 @@
|
||||
"Downloaded": "Scaricato",
|
||||
"Downloading": "Scaricamento in corso",
|
||||
"Edit": "Modifica",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Modifica Dispositivo",
|
||||
"Edit Folder": "Modifica Cartella",
|
||||
"Editing": "Modifica di",
|
||||
"Enable NAT traversal": "Abilita NAT traversal",
|
||||
"Enable Relaying": "Abilita Reindirizzamento",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Scansione in corso",
|
||||
"Select the devices to share this folder with.": "Seleziona i dispositivi con i quali condividere questa cartella.",
|
||||
"Select the folders to share with this device.": "Seleziona le cartelle da condividere con questo dispositivo.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "Invia & Ricevi",
|
||||
"Send Only": "Invia Soltanto",
|
||||
"Settings": "Impostazioni",
|
||||
"Share": "Condividi",
|
||||
"Share Folder": "Condividi la Cartella",
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"Add": "追加",
|
||||
"Add Device": "デバイスを追加",
|
||||
"Add Folder": "フォルダーを追加",
|
||||
"Add Remote Device": "他のデバイスを追加",
|
||||
"Add Remote Device": "接続先デバイスを追加",
|
||||
"Add new folder?": "新しいフォルダーとして追加しますか?",
|
||||
"Address": "アドレス",
|
||||
"Addresses": "アドレス",
|
||||
@@ -140,7 +140,7 @@
|
||||
"Random": "ランダム",
|
||||
"Reduced by ignore patterns": "無視パターン該当分を除く",
|
||||
"Release Notes": "リリースノート",
|
||||
"Remote Devices": "他のデバイス",
|
||||
"Remote Devices": "接続先デバイス",
|
||||
"Remove": "除去",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "フォルダーの識別子で、必須です。このフォルダーを共有する全てのデバイス上で同一でなくてはなりません。",
|
||||
"Rescan": "再スキャン",
|
||||
@@ -236,8 +236,8 @@
|
||||
"Yes": "はい",
|
||||
"You must keep at least one version.": "少なくとも一つのバージョンを保存してください。",
|
||||
"days": "日",
|
||||
"directories": "ディレクトリ",
|
||||
"files": "ファイル",
|
||||
"directories": "個のディレクトリ",
|
||||
"files": "個のファイル",
|
||||
"full documentation": "詳細なマニュアル",
|
||||
"items": "項目",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} がフォルダー \"{{folder}}\" を共有するよう求めています。",
|
||||
|
||||
@@ -52,8 +52,8 @@
|
||||
"Downloaded": "다운로드됨",
|
||||
"Downloading": "다운로드 중",
|
||||
"Edit": "편집",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "기기 편집",
|
||||
"Edit Folder": "폴더 편집",
|
||||
"Editing": "편집",
|
||||
"Enable NAT traversal": "NAT traversal 활성화",
|
||||
"Enable Relaying": "Relaying 활성화",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "탐색중",
|
||||
"Select the devices to share this folder with.": "이 폴더를 공유할 장치를 선택합니다.",
|
||||
"Select the folders to share with this device.": "이 장치와 공유할 폴더를 선택합니다.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "송신 & 수신",
|
||||
"Send Only": "송신 전용",
|
||||
"Settings": "설정",
|
||||
"Share": "공유",
|
||||
"Share Folder": "폴더 공유",
|
||||
|
||||
@@ -103,7 +103,7 @@
|
||||
"Local State": "Lokal tilstand",
|
||||
"Local State (Total)": "Lokal tilstand (Total)",
|
||||
"Major Upgrade": "Storoppgradering",
|
||||
"Master": "Styrende",
|
||||
"Master": "Beskyttet",
|
||||
"Maximum Age": "Maksimal levetid",
|
||||
"Metadata Only": "Kun metadata",
|
||||
"Minimum Free Disk Space": "Nødvendig ledig diskplass",
|
||||
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Verificando",
|
||||
"Select the devices to share this folder with.": "Selecione os dispositivos com os quais esta pasta será compartilhada.",
|
||||
"Select the folders to share with this device.": "Selecione as pastas a serem compartilhadas com este dispositivo.",
|
||||
"Send & Receive": "Enviar e receber",
|
||||
"Send Only": "Somente enviar",
|
||||
"Send & Receive": "Envia e recebe",
|
||||
"Send Only": "Somente envia",
|
||||
"Settings": "Configurações",
|
||||
"Share": "Compartilhar",
|
||||
"Share Folder": "Compartilhar pasta",
|
||||
|
||||
@@ -97,7 +97,7 @@
|
||||
"Last Scan": "Senaste skanning",
|
||||
"Last seen": "Senast sedd",
|
||||
"Later": "Senare",
|
||||
"Latest Change": "Senast ändrad",
|
||||
"Latest Change": "Senaste ändring",
|
||||
"Listeners": "Lyssnare",
|
||||
"Local Discovery": "Lokal annonsering",
|
||||
"Local State": "Lokal status",
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
"Command": "Komut",
|
||||
"Comment, when used at the start of a line": "Satır başında kullanıldığında açıklama özelliği taşır",
|
||||
"Compression": "Sıkıştırma",
|
||||
"Configured": "Configured",
|
||||
"Configured": "Yapılandırıldı",
|
||||
"Connection Error": "Bağlantı hatası",
|
||||
"Connection Type": "Bağlantı Türü",
|
||||
"Copied from elsewhere": "Başka bir yerden kopyalanmış",
|
||||
@@ -52,8 +52,8 @@
|
||||
"Downloaded": "İndirilmiş",
|
||||
"Downloading": "İndiriliyor",
|
||||
"Edit": "Düzenle",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Aygıtı Düzenle",
|
||||
"Edit Folder": "Klasörü Düzenle",
|
||||
"Editing": "Düzenleniyor",
|
||||
"Enable NAT traversal": "Enable NAT traversal",
|
||||
"Enable Relaying": "Enable Relaying",
|
||||
@@ -97,7 +97,7 @@
|
||||
"Last Scan": "Son Tarama",
|
||||
"Last seen": "Son görülme",
|
||||
"Later": "Sonra",
|
||||
"Latest Change": "Latest Change",
|
||||
"Latest Change": "Son Değişim",
|
||||
"Listeners": "Dinleyiciler",
|
||||
"Local Discovery": "Yerel Discovery",
|
||||
"Local State": "Yerel Durum",
|
||||
@@ -142,7 +142,7 @@
|
||||
"Release Notes": "Sürüm Notları",
|
||||
"Remote Devices": "Uzak Aygıtlar",
|
||||
"Remove": "Kaldır",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Required identifier for the folder. Must be the same on all cluster devices.",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Klasör için tanımlayıcı gereklidir. Tüm küme cihazlarda aynı olmalıdır.",
|
||||
"Rescan": "Tekrar Tara",
|
||||
"Rescan All": "Tümünü Tekrar Tara",
|
||||
"Rescan Interval": "Tarama Aralığı",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Taranıyor",
|
||||
"Select the devices to share this folder with.": "Bu klasörü paylaşacağın aygıtları seç.",
|
||||
"Select the folders to share with this device.": "Bu aygıtla paylaşılacak klasörleri seç.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "Gönder & Al",
|
||||
"Send Only": "Yalnızca Gönder",
|
||||
"Settings": "Ayarlar",
|
||||
"Share": "Paylaş",
|
||||
"Share Folder": "Paylaşım Klasörü",
|
||||
@@ -189,7 +189,7 @@
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing uygulaması çökmüş olabilir, veya internet bağlantınızda bir sorun var. Tekrar deniyor....",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing isteminizi işleme alırken bir sorunla karşılaştı. Lütfen sayfanızı yenileyin veya sorun devam ediyorsa Syncthing'i yeniden başlatın.",
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "Syncthing yönetici arayüzü parolasız olarak uzaktan erişime izin verilecek şekilde yapılandırıldı.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "The aggregated statistics are publicly available at the URL below.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "Toplanan istatistikler halka açık biçimde aşağıdaki adrestedir.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Yapılandırma kaydedildi ancak etkinleştirilmedi. Etkinleştirmek için Syncthing yeniden başlatılmalı.",
|
||||
"The device ID cannot be blank.": "Aygıt ID boş olamaz.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Buraya girilecek olan aygıt ID'si diğer aygıtlarda, \"Eylemler > ID Göster\" penceresinde bulunabilir. Boşluklar ve çizgiler isteğe bağlıdır (yoksayılmış).",
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
"Command": "Команда",
|
||||
"Comment, when used at the start of a line": "Коментар, якщо використовується на початку рядка",
|
||||
"Compression": "Стиснення",
|
||||
"Configured": "Configured",
|
||||
"Configured": "Налаштовано",
|
||||
"Connection Error": "Помилка з’єднання",
|
||||
"Connection Type": "Тип з*єднання",
|
||||
"Copied from elsewhere": "Скопійовано з іншого місця",
|
||||
@@ -45,15 +45,15 @@
|
||||
"Device Name": "Назва пристрою",
|
||||
"Devices": "Пристрої",
|
||||
"Disconnected": "З’єднання відсутнє",
|
||||
"Discovered": "Discovered",
|
||||
"Discovered": "Виявлено",
|
||||
"Discovery": "Сервери координації NAT",
|
||||
"Documentation": "Документація",
|
||||
"Download Rate": "Швидкість завантаження",
|
||||
"Downloaded": "Завантажено",
|
||||
"Downloading": "Завантаження",
|
||||
"Edit": "Редагувати",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Edit Device": "Налаштування пристрою",
|
||||
"Edit Folder": "Налаштування директорії",
|
||||
"Editing": "Редагування",
|
||||
"Enable NAT traversal": "Увімкнути NAT traversal",
|
||||
"Enable Relaying": "Увімкнути ретрансляцію (relaying)",
|
||||
@@ -97,7 +97,7 @@
|
||||
"Last Scan": "Останнє сканування",
|
||||
"Last seen": "З’являвся останній раз",
|
||||
"Later": "Пізніше",
|
||||
"Latest Change": "Latest Change",
|
||||
"Latest Change": "Найостанніша зміна",
|
||||
"Listeners": "Приймачі (TCP & Relay)",
|
||||
"Local Discovery": "Локальне виявлення (LAN)",
|
||||
"Local State": "Локальний статус",
|
||||
@@ -138,7 +138,7 @@
|
||||
"Quick guide to supported patterns": "Швидкий посібник по шаблонам, що підтримуються",
|
||||
"RAM Utilization": "Використання RAM",
|
||||
"Random": "Випадково",
|
||||
"Reduced by ignore patterns": "Reduced by ignore patterns",
|
||||
"Reduced by ignore patterns": "Зменшено шаблонами ігнорування",
|
||||
"Release Notes": "Примітки до випуску",
|
||||
"Remote Devices": "Віддалені пристрої",
|
||||
"Remove": "Видалити",
|
||||
@@ -156,8 +156,8 @@
|
||||
"Scanning": "Сканування",
|
||||
"Select the devices to share this folder with.": "Оберіть пристрої, які матимуть доступ до цієї директорії.",
|
||||
"Select the folders to share with this device.": "Оберіть директорії до яких матиме доступ цей пристрій.",
|
||||
"Send & Receive": "Send & Receive",
|
||||
"Send Only": "Send Only",
|
||||
"Send & Receive": "Відправити та отримати",
|
||||
"Send Only": "Лише відправити",
|
||||
"Settings": "Налаштування",
|
||||
"Share": "Розповсюдити ",
|
||||
"Share Folder": "Розповсюдити каталог",
|
||||
@@ -236,8 +236,8 @@
|
||||
"Yes": "Так",
|
||||
"You must keep at least one version.": "Ви повинні зберігати щонайменше одну версію.",
|
||||
"days": "днів",
|
||||
"directories": "directories",
|
||||
"files": "files",
|
||||
"directories": "директорії",
|
||||
"files": "файли",
|
||||
"full documentation": "повна документація",
|
||||
"items": "елементи",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} хоче поділитися директорією \"{{folder}}\".",
|
||||
|
||||
@@ -271,6 +271,7 @@
|
||||
<span class="fa fa-fw" ng-class="[folder.type == 'readonly' ? 'fa-lock' : 'fa-folder']"></span>
|
||||
</div>
|
||||
<div class="panel-status pull-right text-{{folderClass(folder)}}" ng-switch="folderStatus(folder)">
|
||||
<span ng-switch-when="paused"><span class="hidden-xs" translate>Paused</span><span class="visible-xs">◼</span></span>
|
||||
<span ng-switch-when="unknown"><span class="hidden-xs" translate>Unknown</span><span class="visible-xs">◼</span></span>
|
||||
<span ng-switch-when="unshared"><span class="hidden-xs" translate>Unshared</span><span class="visible-xs">◼</span></span>
|
||||
<span ng-switch-when="stopped"><span class="hidden-xs" translate>Stopped</span><span class="visible-xs">◼</span></span>
|
||||
@@ -307,11 +308,11 @@
|
||||
<span tooltip data-original-title="{{folder.path}}">{{folder.path}}</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr ng-if="model[folder.id].invalid || model[folder.id].error">
|
||||
<tr ng-if="!folder.paused && (model[folder.id].invalid || model[folder.id].error)">
|
||||
<th><span class="fa fa-fw fa-exclamation-triangle"></span> <span translate>Error</span></th>
|
||||
<td class="text-right">{{model[folder.id].invalid || model[folder.id].error}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr ng-if="!folder.paused">
|
||||
<th><span class="fa fa-fw fa-globe"></span> <span translate>Global State</span></th>
|
||||
<td class="text-right">
|
||||
<span tooltip data-original-title="{{model[folder.id].globalFiles | alwaysNumber}} {{'files' | translate}}, {{model[folder.id].globalDirectories | alwaysNumber}} {{'directories' | translate}}, ~{{model[folder.id].globalBytes | binary}}B">
|
||||
@@ -321,7 +322,7 @@
|
||||
</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr ng-if="!folder.paused">
|
||||
<th><span class="fa fa-fw fa-home"></span> <span translate>Local State</span></th>
|
||||
<td class="text-right">
|
||||
<span tooltip data-original-title="{{model[folder.id].localFiles | alwaysNumber}} {{'files' | translate}}, {{model[folder.id].localDirectories | alwaysNumber}} {{'directories' | translate}}, ~{{model[folder.id].localBytes | binary}}B">
|
||||
@@ -354,7 +355,7 @@
|
||||
<tr ng-if="folder.type != 'readwrite'">
|
||||
<th><span class="fa fa-fw fa-lock"></span> <span translate>Folder Type</span></th>
|
||||
<td class="text-right">
|
||||
<span ng-if="folder.type == 'readonly'" translate>Master</span>
|
||||
<span ng-if="folder.type == 'readonly'" translate>Send Only</span>
|
||||
<span ng-if="folder.type != 'readonly'">{{ folder.type.charAt(0).toUpperCase() + folder.type.slice(1) }}</span>
|
||||
</td>
|
||||
</tr>
|
||||
@@ -417,6 +418,12 @@
|
||||
<span class="fa fa-arrow-circle-up"></span> <span translate>Override Changes</span>
|
||||
</button>
|
||||
<span class="pull-right">
|
||||
<button ng-if="!folder.paused" type="button" class="btn btn-sm btn-default" ng-click="setFolderPause(folder.id, true)">
|
||||
<span class="fa fa-pause"></span> <span translate>Pause</span>
|
||||
</button>
|
||||
<button ng-if="folder.paused" type="button" class="btn btn-sm btn-default" ng-click="setFolderPause(folder.id, false)">
|
||||
<span class="fa fa-play"></span> <span translate>Resume</span>
|
||||
</button>
|
||||
<button type="button" class="btn btn-sm btn-default" ng-click="rescanFolder(folder.id)" ng-show="['idle', 'stopped', 'unshared'].indexOf(folderStatus(folder)) > -1">
|
||||
<span class="fa fa-refresh"></span> <span translate>Rescan</span>
|
||||
</button>
|
||||
@@ -579,6 +586,10 @@
|
||||
<th><span class="fa fa-fw fa-thumbs-o-up"></span> <span translate>Introducer</span></th>
|
||||
<td translate class="text-right">Yes</td>
|
||||
</tr>
|
||||
<tr ng-if="deviceCfg.introducedBy">
|
||||
<th><span class="fa fa-fw fa-meh-o"></span> <span translate>Introduced By</span></th>
|
||||
<td translate class="text-right">{{ deviceName(findDevice(deviceCfg.introducedBy)) || deviceCfg.introducedBy.substring(0, 5) }}</td>
|
||||
</tr>
|
||||
<tr ng-if="connections[deviceCfg.deviceID].clientVersion">
|
||||
<th><span class="fa fa-fw fa-tag"></span> <span translate>Version</span></th>
|
||||
<td class="text-right">{{connections[deviceCfg.deviceID].clientVersion}}</td>
|
||||
@@ -597,10 +608,10 @@
|
||||
</div>
|
||||
<div class="panel-footer">
|
||||
<span class="pull-right">
|
||||
<button ng-if="!connections[deviceCfg.deviceID].paused" type="button" class="btn btn-sm btn-default" ng-click="pauseDevice(deviceCfg.deviceID)">
|
||||
<button ng-if="!deviceCfg.paused" type="button" class="btn btn-sm btn-default" ng-click="setDevicePause(deviceCfg.deviceID, true)">
|
||||
<span class="fa fa-pause"></span> <span translate>Pause</span>
|
||||
</button>
|
||||
<button ng-if="connections[deviceCfg.deviceID].paused" type="button" class="btn btn-sm btn-default" ng-click="resumeDevice(deviceCfg.deviceID)">
|
||||
<button ng-if="deviceCfg.paused" type="button" class="btn btn-sm btn-default" ng-click="setDevicePause(deviceCfg.deviceID, false)">
|
||||
<span class="fa fa-play"></span> <span translate>Resume</span>
|
||||
</button>
|
||||
<button type="button" class="btn btn-sm btn-default" ng-click="editDevice(deviceCfg)">
|
||||
@@ -613,9 +624,14 @@
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<button type="button" class="btn btn-sm btn-default pull-right" ng-click="addDevice()">
|
||||
<span class="fa fa-plus"></span> <span translate>Add Remote Device</span>
|
||||
</button>
|
||||
<span class="pull-right">
|
||||
<button type="button" class="btn btn-sm btn-default" ng-click="globalChanges()">
|
||||
<span class="fa fa-fw fa-history"></span> <span translate>Global Changes</span>
|
||||
</button>
|
||||
<button type="button" class="btn btn-sm btn-default" ng-click="addDevice()">
|
||||
<span class="fa fa-plus"></span> <span translate>Add Remote Device</span>
|
||||
</button>
|
||||
</span>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -647,6 +663,7 @@
|
||||
<ng-include src="'syncthing/core/shutdownDialogView.html'"></ng-include>
|
||||
<ng-include src="'syncthing/device/idqrModalView.html'"></ng-include>
|
||||
<ng-include src="'syncthing/device/editDeviceModalView.html'"></ng-include>
|
||||
<ng-include src="'syncthing/device/globalChangesModalView.html'"></ng-include>
|
||||
<ng-include src="'syncthing/folder/editFolderModalView.html'"></ng-include>
|
||||
<ng-include src="'syncthing/folder/editIgnoresModalView.html'"></ng-include>
|
||||
<ng-include src="'syncthing/settings/settingsModalView.html'"></ng-include>
|
||||
|
||||
@@ -9,10 +9,10 @@
|
||||
</h1>
|
||||
<hr/>
|
||||
|
||||
<p translate>Copyright © 2014-2016 the following Contributors:</p>
|
||||
<p translate>Copyright © 2014-2017 the following Contributors:</p>
|
||||
<div class="row">
|
||||
<div class="col-md-12" id="contributor-list">
|
||||
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin White, Jr., Laurent Etiemble, Leo Arias, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Roman Zaynetdinov, Scott Klupfel, Simon Frei, Stefan Kuntz, Tim Abell, Tim Howes, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Xavier O., Yannic A.
|
||||
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Adel Qalieh, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Heiko Zuerker, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin White, Jr., Kurt Fitzner, Laurent Etiemble, Leo Arias, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mark Pulford, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Roman Zaynetdinov, Scott Klupfel, Simon Frei, Stefan Kuntz, Tim Abell, Tim Howes, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Xavier O., Yannic A.
|
||||
</div>
|
||||
</div>
|
||||
<hr/>
|
||||
@@ -20,16 +20,22 @@ Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male,
|
||||
<p translate>Syncthing includes the following software or portions thereof:</p>
|
||||
<ul class="list-unstyled two-columns">
|
||||
<li><a href="https://golang.org">The Go Programming Language</a>, Copyright © 2012 The Go Authors.</li>
|
||||
<li><a href="https://github.com/sasha-s/go-deadlock">sasha-s/go-deadlock</a>, Copyright © 2016 sasha-s</li>
|
||||
<li><a href="https://github.com/rcrowley/go-metrics">rcrowley/go-metrics</a>, Copyright © 2012 Richard Crowley.</li>
|
||||
<li><a href="https://github.com/minio/sha256-simd">minio/sha256-simd</a>, Copyright © 2016 Minio, Inc.</li>
|
||||
<li><a href="https://github.com/jackpal/gateway">jackpal/gateway</a>, Copyright © 2010 Jack Palevich.</li>
|
||||
<li><a href="https://github.com/gogo/protobuf">gogo/protobuf</a>, Copyright © 2013 The GoGo Authors.</li>
|
||||
<li><a href="https://github.com/gobwas/glob">gobwas/glob</a>, Copyright © 2016 Sergey Kamardin.</li>
|
||||
<li><a href="https://github.com/d4l3k/messagediff">d4l3k/messagediff</a>, Copyright © 2015 Tristan Rice.</li>
|
||||
<li><a href="https://github.com/bkaradzic/go-lz4">bkaradzic/go-lz4</a>, Copyright © 2011-2012 Branimir Karadzic, 2013 Damian Gryski.</li>
|
||||
<li><a href="https://github.com/kardianos/osext">kardianos/osext</a>, Copyright © 2012 Daniel Theophanes.</li>
|
||||
<li><a href="https://github.com/golang/snappy">golang/snappy</a>, Copyright © 2011 The Snappy-Go Authors.</li>
|
||||
<li><a href="https://github.com/juju/ratelimit">juju/ratelimit</a>, Copyright © 2015 Canonical Ltd.</li>
|
||||
<li><a href="https://github.com/thejerf/suture">thejerf/suture</a>, Copyright © 2014-2015 Barracuda Networks, Inc.</li>
|
||||
<li><a href="https://github.com/syndtr/goleveldb">syndtr/goleveldb</a>, Copyright © 2012, Suryandaru Triandana</li>
|
||||
<li><a href="https://github.com/syndtr/goleveldb">syndtr/goleveldb</a>, Copyright © 2012 Suryandaru Triandana.</li>
|
||||
<li><a href="https://github.com/vitrun/qart">vitrun/qart</a>, Copyright © The Go Authors.</li>
|
||||
<li><a href="https://angularjs.org/">AngularJS</a>, Copyright © 2010-2015 Google, Inc.</li>
|
||||
<li><a href="http://getbootstrap.com/">Bootstrap</a>, Copyright © 2011-2015 Twitter, Inc.</li>
|
||||
<li><a href="http://fontawesome.io/">Font Awesome</a>, Copyright © 2015 Dave Gandy</li>
|
||||
<li><a href="https://angularjs.org/">AngularJS</a>, Copyright © 2010-2016 Google, Inc.</li>
|
||||
<li><a href="http://getbootstrap.com/">Bootstrap</a>, Copyright © 2011-2016 Twitter, Inc.</li>
|
||||
<li>Font Awesome by Dave Gandy - <a href="http://fontawesome.io/">http://fontawesome.io</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
|
||||
@@ -77,6 +77,8 @@ angular.module('syncthing.core')
|
||||
STATE_CHANGED: 'StateChanged', // Emitted when a folder changes state
|
||||
FOLDER_ERRORS: 'FolderErrors', // Emitted when a folder has errors preventing a full sync
|
||||
FOLDER_SCAN_PROGRESS: 'FolderScanProgress', // Emitted every ScanProgressIntervalS seconds, indicating how far into the scan it is at.
|
||||
FOLDER_PAUSED: 'FolderPaused', // Emitted when a folder is paused
|
||||
FOLDER_RESUMED: 'FolderResumed', // Emitted when a folder is resumed
|
||||
|
||||
start: function() {
|
||||
$http.get(urlbase + '/events?limit=1')
|
||||
|
||||
@@ -51,6 +51,7 @@ angular.module('syncthing.core')
|
||||
$scope.failedPageSize = 10;
|
||||
$scope.scanProgress = {};
|
||||
$scope.themes = [];
|
||||
$scope.globalChangeEvents = {};
|
||||
|
||||
$scope.localStateTotal = {
|
||||
bytes: 0,
|
||||
@@ -91,6 +92,7 @@ angular.module('syncthing.core')
|
||||
refreshConnectionStats();
|
||||
refreshDeviceStats();
|
||||
refreshFolderStats();
|
||||
refreshGlobalChanges();
|
||||
refreshThemes();
|
||||
|
||||
$http.get(urlbase + '/system/version').success(function (data) {
|
||||
@@ -186,6 +188,7 @@ angular.module('syncthing.core')
|
||||
|
||||
$scope.$on(Events.LOCAL_INDEX_UPDATED, function (event, arg) {
|
||||
refreshFolderStats();
|
||||
refreshGlobalChanges();
|
||||
});
|
||||
|
||||
$scope.$on(Events.DEVICE_DISCONNECTED, function (event, arg) {
|
||||
@@ -233,14 +236,6 @@ angular.module('syncthing.core')
|
||||
$scope.deviceRejections[arg.data.device] = arg;
|
||||
});
|
||||
|
||||
$scope.$on(Events.DEVICE_PAUSED, function (event, arg) {
|
||||
$scope.connections[arg.data.device].paused = true;
|
||||
});
|
||||
|
||||
$scope.$on(Events.DEVICE_RESUMED, function (event, arg) {
|
||||
$scope.connections[arg.data.device].paused = false;
|
||||
});
|
||||
|
||||
$scope.$on(Events.FOLDER_REJECTED, function (event, arg) {
|
||||
$scope.folderRejections[arg.data.folder + "-" + arg.data.device] = arg;
|
||||
});
|
||||
@@ -629,6 +624,15 @@ angular.module('syncthing.core')
|
||||
}).error($scope.emitHTTPError);
|
||||
}, 2500);
|
||||
|
||||
var refreshGlobalChanges = debounce(function () {
|
||||
$http.get(urlbase + "/events/disk?limit=25").success(function (data) {
|
||||
data = data.reverse();
|
||||
$scope.globalChangeEvents = data;
|
||||
|
||||
console.log("refreshGlobalChanges", data);
|
||||
}).error($scope.emitHTTPError);
|
||||
}, 2500);
|
||||
|
||||
$scope.refresh = function () {
|
||||
refreshSystem();
|
||||
refreshDiscoveryCache();
|
||||
@@ -641,6 +645,10 @@ angular.module('syncthing.core')
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
if (folderCfg.paused) {
|
||||
return 'paused';
|
||||
}
|
||||
|
||||
// after restart syncthing process state may be empty
|
||||
if (!$scope.model[folderCfg.id].state) {
|
||||
return 'unknown';
|
||||
@@ -674,6 +682,9 @@ angular.module('syncthing.core')
|
||||
if (status === 'idle') {
|
||||
return 'success';
|
||||
}
|
||||
if (status == 'paused') {
|
||||
return 'default';
|
||||
}
|
||||
if (status === 'syncing' || status === 'scanning') {
|
||||
return 'primary';
|
||||
}
|
||||
@@ -790,7 +801,7 @@ angular.module('syncthing.core')
|
||||
return 'unknown';
|
||||
}
|
||||
|
||||
if ($scope.connections[deviceCfg.deviceID].paused) {
|
||||
if (deviceCfg.paused) {
|
||||
return 'paused';
|
||||
}
|
||||
|
||||
@@ -816,7 +827,7 @@ angular.module('syncthing.core')
|
||||
return 'info';
|
||||
}
|
||||
|
||||
if ($scope.connections[deviceCfg.deviceID].paused) {
|
||||
if (deviceCfg.paused) {
|
||||
return 'default';
|
||||
}
|
||||
|
||||
@@ -912,6 +923,16 @@ angular.module('syncthing.core')
|
||||
return '';
|
||||
};
|
||||
|
||||
$scope.friendlyNameFromShort = function (shortID) {
|
||||
var matches = $scope.devices.filter(function (n) {
|
||||
return n.deviceID.substr(0, 7) === shortID;
|
||||
});
|
||||
if (matches.length !== 1) {
|
||||
return shortID;
|
||||
}
|
||||
return matches[0].name;
|
||||
};
|
||||
|
||||
$scope.findDevice = function (deviceID) {
|
||||
var matches = $scope.devices.filter(function (n) {
|
||||
return n.deviceID === deviceID;
|
||||
@@ -943,12 +964,23 @@ angular.module('syncthing.core')
|
||||
return device.deviceID.substr(0, 6);
|
||||
};
|
||||
|
||||
$scope.pauseDevice = function (device) {
|
||||
$http.post(urlbase + "/system/pause?device=" + device);
|
||||
$scope.setDevicePause = function (device, pause) {
|
||||
$scope.devices.forEach(function (cfg) {
|
||||
if (cfg.deviceID == device) {
|
||||
cfg.paused = pause;
|
||||
}
|
||||
});
|
||||
$scope.config.devices = $scope.devices;
|
||||
$scope.saveConfig();
|
||||
};
|
||||
|
||||
$scope.resumeDevice = function (device) {
|
||||
$http.post(urlbase + "/system/resume?device=" + device);
|
||||
$scope.setFolderPause = function (folder, pause) {
|
||||
var cfg = $scope.folders[folder];
|
||||
if (cfg) {
|
||||
cfg.paused = pause;
|
||||
$scope.config.folders = folderList($scope.folders);
|
||||
$scope.saveConfig();
|
||||
}
|
||||
};
|
||||
|
||||
$scope.editSettings = function () {
|
||||
@@ -1268,7 +1300,11 @@ angular.module('syncthing.core')
|
||||
$scope.folderEditor = form;
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
$scope.globalChanges = function () {
|
||||
$('#globalChanges').modal();
|
||||
};
|
||||
|
||||
$scope.editFolder = function (folderCfg) {
|
||||
$scope.currentFolder = angular.copy(folderCfg);
|
||||
|
||||
gui/default/syncthing/device/globalChangesModalView.html (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
<style> th, td { padding: 6px; } </style>
<modal id="globalChanges" status="default" icon="{{'history'}}" heading="{{'Global Changes' | translate}}" large="yes" closeable="yes">
  <div class="modal-body">
    <table>
      <tr>
        <th translate>Device</th>
        <th translate>Action</th>
        <th translate>Type</th>
        <th translate>Path</th>
        <th translate>Time</th>
      </tr>
      <tr ng-repeat="changeEvent in globalChangeEvents">
        <td ng-if="changeEvent.data.modifiedBy">{{friendlyNameFromShort(changeEvent.data.modifiedBy)}}</td>
        <td ng-if="!changeEvent.data.modifiedBy"><span translate>Unknown</span></td>
        <td>{{changeEvent.data.action}}</td>
        <td>{{changeEvent.data.type}}</td>
        <td>{{changeEvent.data.path}}</td>
        <td>{{changeEvent.time | date:'medium'}}</td>
      </tr>
    </table>
  </div>
  <div class="modal-footer">
    <button type="button" class="btn btn-default btn-sm" data-dismiss="modal">
      <span class="fa fa-times"></span> <span translate>Close</span>
    </button>
  </div>
</modal>
|
||||
@@ -84,10 +84,10 @@
|
||||
<div class="col-md-6">
|
||||
<div class="form-group">
|
||||
<label translate>Folder Type</label>
|
||||
<a href="https://docs.syncthing.net/users/foldermaster.html" target="_blank"><span class="fa fa-book"></span> <span translate>Help</span></a>
|
||||
<a href="https://docs.syncthing.net/users/foldertypes.html" target="_blank"><span class="fa fa-book"></span> <span translate>Help</span></a>
|
||||
<select class="form-control" ng-model="currentFolder.type">
|
||||
<option value="readwrite" translate>Normal</option>
|
||||
<option value="readonly" translate>Master</option>
|
||||
<option value="readwrite" translate>Send & Receive</option>
|
||||
<option value="readonly" translate>Send Only</option>
|
||||
</select>
|
||||
<p ng-if="currentFolder.type == 'readonly'" translate class="help-block">Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.</p>
|
||||
</div>
|
||||
|
||||
@@ -55,3 +55,15 @@ go run build.go -goarch armhf snap
|
||||
go run build.go -goarch arm64 snap
|
||||
|
||||
mv *.snap "$WORKSPACE"
|
||||
|
||||
if [[ -d /usr/local/oldgo ]]; then
|
||||
echo
|
||||
echo Building with minimum supported Go version
|
||||
export GOROOT=/usr/local/oldgo
|
||||
export PATH="$GOROOT/bin:$PATH"
|
||||
go version
|
||||
echo
|
||||
|
||||
rm -rf "$GOPATH/pkg"
|
||||
go run build.go install all # only compile, don't run lints and stuff
|
||||
fi
|
||||
|
||||
@@ -42,6 +42,8 @@ func (validationError) String() string {
|
||||
}
|
||||
|
||||
func TestReplaceCommit(t *testing.T) {
|
||||
t.Skip("broken, fails randomly, #3834")
|
||||
|
||||
w := Wrap("/dev/null", Configuration{Version: 0})
|
||||
if w.RawCopy().Version != 0 {
|
||||
t.Fatal("Config incorrect")
|
||||
|
||||
@@ -407,9 +407,9 @@ func convertV13V14(cfg *Configuration) {
|
||||
|
||||
for i, fcfg := range cfg.Folders {
|
||||
if fcfg.DeprecatedReadOnly {
|
||||
cfg.Folders[i].Type = FolderTypeReadOnly
|
||||
cfg.Folders[i].Type = FolderTypeSendOnly
|
||||
} else {
|
||||
cfg.Folders[i].Type = FolderTypeReadWrite
|
||||
cfg.Folders[i].Type = FolderTypeSendReceive
|
||||
}
|
||||
cfg.Folders[i].DeprecatedReadOnly = false
|
||||
}
|
||||
|
||||
@@ -96,7 +96,7 @@ func TestDeviceConfig(t *testing.T) {
|
||||
ID: "test",
|
||||
RawPath: "testdata",
|
||||
Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
|
||||
Type: FolderTypeReadOnly,
|
||||
Type: FolderTypeSendOnly,
|
||||
RescanIntervalS: 600,
|
||||
Copiers: 0,
|
||||
Pullers: 0,
|
||||
@@ -108,6 +108,7 @@ func TestDeviceConfig(t *testing.T) {
|
||||
Versioning: VersioningConfiguration{
|
||||
Params: map[string]string{},
|
||||
},
|
||||
WeakHashThresholdPct: 25,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ type DeviceConfiguration struct {
|
||||
Introducer bool `xml:"introducer,attr" json:"introducer"`
|
||||
SkipIntroductionRemovals bool `xml:"skipIntroductionRemovals,attr" json:"skipIntroductionRemovals"`
|
||||
IntroducedBy protocol.DeviceID `xml:"introducedBy,attr" json:"introducedBy"`
|
||||
Paused bool `xml:"paused" json:"paused"`
|
||||
}
|
||||
|
||||
func NewDeviceConfiguration(id protocol.DeviceID, name string) DeviceConfiguration {
|
||||
|
||||
@@ -40,6 +40,8 @@ type FolderConfiguration struct {
|
||||
DisableSparseFiles bool `xml:"disableSparseFiles" json:"disableSparseFiles"`
|
||||
DisableTempIndexes bool `xml:"disableTempIndexes" json:"disableTempIndexes"`
|
||||
Fsync bool `xml:"fsync" json:"fsync"`
|
||||
Paused bool `xml:"paused" json:"paused"`
|
||||
WeakHashThresholdPct int `xml:"weakHashThresholdPct" json:"weakHashThresholdPct"` // Use weak hash if more than X percent of the file has changed. Set to -1 to always use weak hash.
|
||||
|
||||
cachedPath string
|
||||
|
||||
@@ -98,13 +100,13 @@ func (f *FolderConfiguration) CreateMarker() error {
|
||||
|
||||
func (f *FolderConfiguration) HasMarker() bool {
|
||||
_, err := os.Stat(filepath.Join(f.Path(), ".stfolder"))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (f FolderConfiguration) Description() string {
|
||||
if f.Label == "" {
|
||||
return f.ID
|
||||
}
|
||||
return fmt.Sprintf("%q (%s)", f.Label, f.ID)
|
||||
}
|
||||
|
||||
@@ -144,6 +146,10 @@ func (f *FolderConfiguration) prepare() {
|
||||
if f.Versioning.Params == nil {
|
||||
f.Versioning.Params = make(map[string]string)
|
||||
}
|
||||
|
||||
if f.WeakHashThresholdPct == 0 {
|
||||
f.WeakHashThresholdPct = 25
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FolderConfiguration) cleanedPath() string {
|
||||
|
||||
@@ -9,15 +9,15 @@ package config
 
 type FolderType int
 
 const (
-	FolderTypeReadWrite FolderType = iota // default is readwrite
-	FolderTypeReadOnly
+	FolderTypeSendReceive FolderType = iota // default is sendreceive
+	FolderTypeSendOnly
 )
 
 func (t FolderType) String() string {
 	switch t {
-	case FolderTypeReadWrite:
+	case FolderTypeSendReceive:
 		return "readwrite"
-	case FolderTypeReadOnly:
+	case FolderTypeSendOnly:
 		return "readonly"
 	default:
 		return "unknown"
@@ -30,12 +30,12 @@ func (t FolderType) MarshalText() ([]byte, error) {
 
 func (t *FolderType) UnmarshalText(bs []byte) error {
 	switch string(bs) {
-	case "readwrite":
-		*t = FolderTypeReadWrite
-	case "readonly":
-		*t = FolderTypeReadOnly
+	case "readwrite", "sendreceive":
+		*t = FolderTypeSendReceive
+	case "readonly", "sendonly":
+		*t = FolderTypeSendOnly
 	default:
-		*t = FolderTypeReadWrite
+		*t = FolderTypeSendReceive
 	}
 	return nil
 }
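Note that String() still reports the old names while UnmarshalText accepts both spellings, so configuration files written by older versions keep parsing. A trimmed-down, self-contained copy of the type for a quick round-trip check (illustrative only, not the package's actual test):

package main

import "fmt"

type FolderType int

const (
	FolderTypeSendReceive FolderType = iota
	FolderTypeSendOnly
)

func (t *FolderType) UnmarshalText(bs []byte) error {
	switch string(bs) {
	case "readwrite", "sendreceive":
		*t = FolderTypeSendReceive
	case "readonly", "sendonly":
		*t = FolderTypeSendOnly
	default:
		*t = FolderTypeSendReceive
	}
	return nil
}

func main() {
	for _, s := range []string{"readwrite", "sendreceive", "readonly", "sendonly", "bogus"} {
		var t FolderType
		_ = t.UnmarshalText([]byte(s))
		fmt.Printf("%-12s -> %d\n", s, t)
	}
}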
@@ -41,6 +41,7 @@ type OptionsConfiguration struct {
 	OverwriteRemoteDevNames bool     `xml:"overwriteRemoteDeviceNamesOnConnect" json:"overwriteRemoteDeviceNamesOnConnect" default:"false"`
 	TempIndexMinBlocks      int      `xml:"tempIndexMinBlocks" json:"tempIndexMinBlocks" default:"10"`
 	UnackedNotificationIDs  []string `xml:"unackedNotificationID" json:"unackedNotificationIDs"`
+	TrafficClass            int      `xml:"trafficClass" json:"trafficClass"`
 
 	DeprecatedUPnPEnabled bool `xml:"upnpEnabled,omitempty" json:"-"`
 	DeprecatedUPnPLeaseM  int  `xml:"upnpLeaseMinutes,omitempty" json:"-"`
@@ -323,6 +323,18 @@ func (w *Wrapper) Device(id protocol.DeviceID) (DeviceConfiguration, bool) {
 	return DeviceConfiguration{}, false
 }
 
+// Folder returns the configuration for the given folder and an "ok" bool.
+func (w *Wrapper) Folder(id string) (FolderConfiguration, bool) {
+	w.mut.Lock()
+	defer w.mut.Unlock()
+	for _, folder := range w.cfg.Folders {
+		if folder.ID == id {
+			return folder, true
+		}
+	}
+	return FolderConfiguration{}, false
+}
+
 // Save writes the configuration to disk, and generates a ConfigSaved event.
 func (w *Wrapper) Save() error {
 	fd, err := osutil.CreateAtomic(w.path)
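Wrapper.Folder follows the same comma-ok convention as Wrapper.Device. A hypothetical caller might look like this; the function and the folder ID are invented, while the config.Wrapper methods used are the ones shown above:

package example

import (
	"log"

	"github.com/syncthing/syncthing/lib/config"
)

// describeFolder prints a one-line summary of a configured folder, using the
// comma-ok form of Wrapper.Folder added in this change. The function itself
// is a made-up example, not part of Syncthing.
func describeFolder(cfg *config.Wrapper, id string) {
	folder, ok := cfg.Folder(id)
	if !ok {
		log.Printf("no folder %q configured", id)
		return
	}
	log.Printf("folder %s rescans every %d s", folder.Description(), folder.RescanIntervalS)
}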
lib/connections/connections_test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package connections

import "testing"
import "net/url"

func TestFixupPort(t *testing.T) {
	cases := [][2]string{
		{"tcp://1.2.3.4:5", "tcp://1.2.3.4:5"},
		{"tcp://1.2.3.4:", "tcp://1.2.3.4:22000"},
		{"tcp://1.2.3.4", "tcp://1.2.3.4:22000"},
	}

	for _, tc := range cases {
		u0, _ := url.Parse(tc[0])
		u1 := fixupPort(u0).String()
		if u1 != tc[1] {
			t.Errorf("fixupPort(%q) => %q, expected %q", tc[0], u1, tc[1])
		}
	}
}
@@ -1,10 +0,0 @@
-// Copyright (C) 2016 The Syncthing Authors.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// The existence of this file means we get 0% test coverage rather than no
-// test coverage at all. Remove when implementing an actual test.
-
-package connections
@@ -1,33 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package connections
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
type LimitedReader struct {
|
||||
reader io.Reader
|
||||
bucket *ratelimit.Bucket
|
||||
}
|
||||
|
||||
func NewReadLimiter(r io.Reader, b *ratelimit.Bucket) *LimitedReader {
|
||||
return &LimitedReader{
|
||||
reader: r,
|
||||
bucket: b,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *LimitedReader) Read(buf []byte) (int, error) {
|
||||
n, err := r.reader.Read(buf)
|
||||
if r.bucket != nil {
|
||||
r.bucket.Wait(int64(n))
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package connections
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
type LimitedWriter struct {
|
||||
writer io.Writer
|
||||
bucket *ratelimit.Bucket
|
||||
}
|
||||
|
||||
func NewWriteLimiter(w io.Writer, b *ratelimit.Bucket) *LimitedWriter {
|
||||
return &LimitedWriter{
|
||||
writer: w,
|
||||
bucket: b,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *LimitedWriter) Write(buf []byte) (int, error) {
|
||||
if w.bucket != nil {
|
||||
w.bucket.Wait(int64(len(buf)))
|
||||
}
|
||||
return w.writer.Write(buf)
|
||||
}
|
||||
lib/connections/limiter.go (new file, 164 lines)
@@ -0,0 +1,164 @@
// Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package connections

import (
	"fmt"
	"io"
	"sync/atomic"

	"github.com/syncthing/syncthing/lib/config"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

// limiter manages a read and write rate limit, reacting to config changes
// as appropriate.
type limiter struct {
	write     *rate.Limiter
	read      *rate.Limiter
	limitsLAN atomicBool
}

const limiterBurstSize = 4 * 128 << 10

func newLimiter(cfg *config.Wrapper) *limiter {
	l := &limiter{
		write: rate.NewLimiter(rate.Inf, limiterBurstSize),
		read:  rate.NewLimiter(rate.Inf, limiterBurstSize),
	}
	cfg.Subscribe(l)
	prev := config.Configuration{Options: config.OptionsConfiguration{MaxRecvKbps: -1, MaxSendKbps: -1}}
	l.CommitConfiguration(prev, cfg.RawCopy())
	return l
}

func (lim *limiter) newReadLimiter(r io.Reader, isLAN bool) io.Reader {
	return &limitedReader{reader: r, limiter: lim, isLAN: isLAN}
}

func (lim *limiter) newWriteLimiter(w io.Writer, isLAN bool) io.Writer {
	return &limitedWriter{writer: w, limiter: lim, isLAN: isLAN}
}

func (lim *limiter) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}

func (lim *limiter) CommitConfiguration(from, to config.Configuration) bool {
	if from.Options.MaxRecvKbps == to.Options.MaxRecvKbps &&
		from.Options.MaxSendKbps == to.Options.MaxSendKbps &&
		from.Options.LimitBandwidthInLan == to.Options.LimitBandwidthInLan {
		return true
	}

	// The rate variables are in KiB/s in the config (despite the camel casing
	// of the name). We multiply by 1024 to get bytes/s.

	if to.Options.MaxRecvKbps <= 0 {
		lim.read.SetLimit(rate.Inf)
	} else {
		lim.read.SetLimit(1024 * rate.Limit(to.Options.MaxRecvKbps))
	}

	if to.Options.MaxSendKbps < 0 {
		lim.write.SetLimit(rate.Inf)
	} else {
		lim.write.SetLimit(1024 * rate.Limit(to.Options.MaxSendKbps))
	}

	lim.limitsLAN.set(to.Options.LimitBandwidthInLan)

	sendLimitStr := "is unlimited"
	recvLimitStr := "is unlimited"
	if to.Options.MaxSendKbps > 0 {
		sendLimitStr = fmt.Sprintf("limit is %d KiB/s", to.Options.MaxSendKbps)
	}
	if to.Options.MaxRecvKbps > 0 {
		recvLimitStr = fmt.Sprintf("limit is %d KiB/s", to.Options.MaxRecvKbps)
	}
	l.Infof("Send rate %s, receive rate %s", sendLimitStr, recvLimitStr)

	if to.Options.LimitBandwidthInLan {
		l.Infoln("Rate limits apply to LAN connections")
	} else {
		l.Infoln("Rate limits do not apply to LAN connections")
	}

	return true
}

func (lim *limiter) String() string {
	// required by config.Committer interface
	return "connections.limiter"
}

// limitedReader is a rate limited io.Reader
type limitedReader struct {
	reader  io.Reader
	limiter *limiter
	isLAN   bool
}

func (r *limitedReader) Read(buf []byte) (int, error) {
	n, err := r.reader.Read(buf)
	if !r.isLAN || r.limiter.limitsLAN.get() {
		take(r.limiter.read, n)
	}
	return n, err
}

// limitedWriter is a rate limited io.Writer
type limitedWriter struct {
	writer  io.Writer
	limiter *limiter
	isLAN   bool
}

func (w *limitedWriter) Write(buf []byte) (int, error) {
	if !w.isLAN || w.limiter.limitsLAN.get() {
		take(w.limiter.write, len(buf))
	}
	return w.writer.Write(buf)
}

// take is a utility function to consume tokens from a rate.Limiter. No call
// to WaitN can be larger than the limiter burst size so we split it up into
// several calls when necessary.
func take(l *rate.Limiter, tokens int) {
	if tokens < limiterBurstSize {
		// This is the by far more common case so we get it out of the way
		// early.
		l.WaitN(context.TODO(), tokens)
		return
	}

	for tokens > 0 {
		// Consume limiterBurstSize tokens at a time until we're done.
		if tokens > limiterBurstSize {
			l.WaitN(context.TODO(), limiterBurstSize)
			tokens -= limiterBurstSize
		} else {
			l.WaitN(context.TODO(), tokens)
			tokens = 0
		}
	}
}

type atomicBool int32

func (b *atomicBool) set(v bool) {
	if v {
		atomic.StoreInt32((*int32)(b), 1)
	} else {
		atomic.StoreInt32((*int32)(b), 0)
	}
}

func (b *atomicBool) get() bool {
	return atomic.LoadInt32((*int32)(b)) != 0
}
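The burst splitting in take matters because rate.Limiter.WaitN returns an error (and waits for nothing) when n exceeds the limiter's burst, so a single large read or write has to be consumed in chunks. A standalone sketch of the same idea against golang.org/x/time/rate; the sizes are arbitrary and the helper name is made up:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

const burst = 64 << 10 // 64 KiB burst, chosen only for this demo

// waitFor consumes n tokens from lim in chunks of at most burst, since
// WaitN rejects requests larger than the limiter's burst size.
func waitFor(lim *rate.Limiter, n int) {
	for n > 0 {
		chunk := n
		if chunk > burst {
			chunk = burst
		}
		lim.WaitN(context.Background(), chunk)
		n -= chunk
	}
}

func main() {
	lim := rate.NewLimiter(128<<10, burst) // 128 KiB/s
	start := time.Now()
	waitFor(lim, 256<<10) // pushing 256 KiB through takes a noticeable wait
	fmt.Println("elapsed:", time.Since(start).Round(100*time.Millisecond))
}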
@@ -45,6 +45,11 @@ func (d *relayDialer) Dial(id protocol.DeviceID, uri *url.URL) (internalConn, er
|
||||
return internalConn{}, err
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, d.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
var tc *tls.Conn
|
||||
if inv.ServerSocket {
|
||||
tc = tls.Server(conn, d.tlsCfg)
|
||||
|
||||
@@ -29,6 +29,7 @@ type relayListener struct {
|
||||
onAddressesChangedNotifier
|
||||
|
||||
uri *url.URL
|
||||
cfg *config.Wrapper
|
||||
tlsCfg *tls.Config
|
||||
conns chan internalConn
|
||||
factory listenerFactory
|
||||
@@ -79,6 +80,11 @@ func (t *relayListener) Serve() {
|
||||
l.Infoln(err)
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, t.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
var tc *tls.Conn
|
||||
if inv.ServerSocket {
|
||||
tc = tls.Server(conn, t.tlsCfg)
|
||||
@@ -170,6 +176,7 @@ type relayListenerFactory struct{}
|
||||
func (f *relayListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
|
||||
return &relayListener{
|
||||
uri: uri,
|
||||
cfg: cfg,
|
||||
tlsCfg: tlsCfg,
|
||||
conns: conns,
|
||||
factory: f,
|
||||
|
||||
@@ -10,12 +10,10 @@ import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
@@ -29,6 +27,7 @@ import (
|
||||
_ "github.com/syncthing/syncthing/lib/upnp"
|
||||
|
||||
"github.com/thejerf/suture"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -37,10 +36,36 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
perDeviceWarningRate = 1.0 / (15 * 60) // Once per 15 minutes
|
||||
perDeviceWarningIntv = 15 * time.Minute
|
||||
tlsHandshakeTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// From go/src/crypto/tls/cipher_suites.go
|
||||
var tlsCipherSuiteNames = map[uint16]string{
|
||||
0x0005: "TLS_RSA_WITH_RC4_128_SHA",
|
||||
0x000a: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
0x002f: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
0x0035: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||
0x003c: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||
0x009c: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
0x009d: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
0xc007: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
0xc009: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
0xc00a: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
0xc011: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
0xc012: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
0xc013: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
0xc014: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
0xc023: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||
0xc027: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||
0xc02f: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
0xc02b: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
0xc030: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
0xc02c: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
0xcca8: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
0xcca9: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
}
|
||||
|
||||
// Service listens and dials all configured unconnected devices, via supported
|
||||
// dialers. Successful connections are handed to the model.
|
||||
type Service struct {
|
||||
@@ -54,8 +79,7 @@ type Service struct {
|
||||
bepProtocolName string
|
||||
tlsDefaultCommonName string
|
||||
lans []*net.IPNet
|
||||
writeRateLimit *ratelimit.Bucket
|
||||
readRateLimit *ratelimit.Bucket
|
||||
limiter *limiter
|
||||
natService *nat.Service
|
||||
natServiceToken *suture.ServiceToken
|
||||
|
||||
@@ -86,6 +110,7 @@ func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *
|
||||
bepProtocolName: bepProtocolName,
|
||||
tlsDefaultCommonName: tlsDefaultCommonName,
|
||||
lans: lans,
|
||||
limiter: newLimiter(cfg),
|
||||
natService: nat.NewService(myID, cfg),
|
||||
|
||||
listenersMut: sync.NewRWMutex(),
|
||||
@@ -109,17 +134,6 @@ func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *
|
||||
}
|
||||
cfg.Subscribe(service)
|
||||
|
||||
// The rate variables are in KiB/s in the UI (despite the camel casing
|
||||
// of the name). We multiply by 1024 here to get B/s.
|
||||
options := service.cfg.Options()
|
||||
if options.MaxSendKbps > 0 {
|
||||
service.writeRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxSendKbps), int64(5*1024*options.MaxSendKbps))
|
||||
}
|
||||
|
||||
if options.MaxRecvKbps > 0 {
|
||||
service.readRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxRecvKbps), int64(5*1024*options.MaxRecvKbps))
|
||||
}
|
||||
|
||||
// There are several moving parts here; one routine per listening address
|
||||
// (handled in configuration changing) to handle incoming connections,
|
||||
// one routine to periodically attempt outgoing connections, one routine to
|
||||
@@ -253,27 +267,18 @@ next:
 			continue next
 		}
 
-		// If rate limiting is set, and based on the address we should
-		// limit the connection, then we wrap it in a limiter.
-
-		limit := s.shouldLimit(c.RemoteAddr())
-
-		wr := io.Writer(c)
-		if limit && s.writeRateLimit != nil {
-			wr = NewWriteLimiter(c, s.writeRateLimit)
-		}
-
-		rd := io.Reader(c)
-		if limit && s.readRateLimit != nil {
-			rd = NewReadLimiter(c, s.readRateLimit)
-		}
+		// Wrap the connection in rate limiters. The limiter itself will
+		// keep up with config changes to the rate and whether or not LAN
+		// connections are limited.
+		isLAN := s.isLAN(c.RemoteAddr())
+		wr := s.limiter.newWriteLimiter(c, isLAN)
+		rd := s.limiter.newReadLimiter(c, isLAN)
 
 		name := fmt.Sprintf("%s-%s (%s)", c.LocalAddr(), c.RemoteAddr(), c.Type())
 		protoConn := protocol.NewConnection(remoteID, rd, wr, s.model, name, deviceCfg.Compression)
 		modelConn := completeConn{c, protoConn}
 
-		l.Infof("Established secure connection to %s at %s", remoteID, name)
-		l.Debugf("cipher suite: %04X in lan: %t", c.ConnectionState().CipherSuite, !limit)
+		l.Infof("Established secure connection to %s at %s (%s)", remoteID, name, tlsCipherSuiteNames[c.ConnectionState().CipherSuite])
 
 		s.model.AddConnection(modelConn, hello)
 		s.curConMut.Lock()
|
||||
continue
|
||||
}
|
||||
|
||||
paused := s.model.IsPaused(deviceID)
|
||||
if paused {
|
||||
if deviceCfg.Paused {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -410,21 +414,17 @@ func (s *Service) connect() {
 	}
 }
 
-func (s *Service) shouldLimit(addr net.Addr) bool {
-	if s.cfg.Options().LimitBandwidthInLan {
-		return true
-	}
-
+func (s *Service) isLAN(addr net.Addr) bool {
 	tcpaddr, ok := addr.(*net.TCPAddr)
 	if !ok {
-		return true
+		return false
 	}
 	for _, lan := range s.lans {
 		if lan.Contains(tcpaddr.IP) {
-			return false
+			return true
 		}
 	}
-	return !tcpaddr.IP.IsLoopback()
+	return tcpaddr.IP.IsLoopback()
 }
 
 func (s *Service) createListener(factory listenerFactory, uri *url.URL) bool {
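The renamed helper now answers "is this a LAN address?" directly: anything inside a configured local network, or a loopback address, counts as LAN. A self-contained sketch of the same classification with net.IPNet.Contains; the example network and addresses are made up:

package main

import (
	"fmt"
	"net"
)

// isLAN reports whether addr lies inside any of the given local networks or
// is a loopback address.
func isLAN(lans []*net.IPNet, addr net.Addr) bool {
	tcpaddr, ok := addr.(*net.TCPAddr)
	if !ok {
		return false
	}
	for _, lan := range lans {
		if lan.Contains(tcpaddr.IP) {
			return true
		}
	}
	return tcpaddr.IP.IsLoopback()
}

func main() {
	_, lan, _ := net.ParseCIDR("192.168.1.0/24")
	lans := []*net.IPNet{lan}
	for _, h := range []string{"192.168.1.7:22000", "203.0.113.9:22000", "127.0.0.1:22000"} {
		tcp, _ := net.ResolveTCPAddr("tcp", h)
		fmt.Println(h, "lan:", isLAN(lans, tcp))
	}
}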
@@ -620,7 +620,7 @@ func urlsToStrings(urls []*url.URL) []string {
 	return strings
 }
 
-var warningLimiters = make(map[protocol.DeviceID]*ratelimit.Bucket)
+var warningLimiters = make(map[protocol.DeviceID]*rate.Limiter)
 var warningLimitersMut = sync.NewMutex()
 
 func warningFor(dev protocol.DeviceID, msg string) {
@@ -628,10 +628,10 @@ func warningFor(dev protocol.DeviceID, msg string) {
 	defer warningLimitersMut.Unlock()
 	lim, ok := warningLimiters[dev]
 	if !ok {
-		lim = ratelimit.NewBucketWithRate(perDeviceWarningRate, 1)
+		lim = rate.NewLimiter(rate.Every(perDeviceWarningIntv), 1)
 		warningLimiters[dev] = lim
 	}
-	if lim.TakeAvailable(1) == 1 {
+	if lim.Allow() {
 		l.Warnln(msg)
 	}
 }
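rate.NewLimiter(rate.Every(perDeviceWarningIntv), 1) combined with Allow() reproduces the old once-per-fifteen-minutes behaviour without blocking. A minimal standalone demonstration, using a two-second interval so it finishes quickly; the values are arbitrary:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Allow one event per two seconds (stand-in for the 15 minute interval).
	lim := rate.NewLimiter(rate.Every(2*time.Second), 1)

	for i := 0; i < 5; i++ {
		if lim.Allow() {
			fmt.Println("warning emitted at iteration", i)
		} else {
			fmt.Println("warning suppressed at iteration", i)
		}
		time.Sleep(500 * time.Millisecond)
	}
}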
@@ -116,7 +116,6 @@ type Model interface {
|
||||
protocol.Model
|
||||
AddConnection(conn Connection, hello protocol.HelloResult)
|
||||
ConnectedTo(remoteID protocol.DeviceID) bool
|
||||
IsPaused(remoteID protocol.DeviceID) bool
|
||||
OnHello(protocol.DeviceID, net.Addr, protocol.HelloResult) error
|
||||
GetHello(protocol.DeviceID) protocol.HelloIntf
|
||||
}
|
||||
|
||||
@@ -39,6 +39,16 @@ func (d *tcpDialer) Dial(id protocol.DeviceID, uri *url.URL) (internalConn, erro
|
||||
return internalConn{}, err
|
||||
}
|
||||
|
||||
err = dialer.SetTCPOptions(conn)
|
||||
if err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, d.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
tc := tls.Client(conn, d.tlsCfg)
|
||||
err = tlsTimedHandshake(tc)
|
||||
if err != nil {
|
||||
|
||||
@@ -30,6 +30,7 @@ type tcpListener struct {
|
||||
onAddressesChangedNotifier
|
||||
|
||||
uri *url.URL
|
||||
cfg *config.Wrapper
|
||||
tlsCfg *tls.Config
|
||||
stop chan struct{}
|
||||
conns chan internalConn
|
||||
@@ -107,6 +108,11 @@ func (t *tcpListener) Serve() {
|
||||
l.Infoln(err)
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, t.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
tc := tls.Server(conn, t.tlsCfg)
|
||||
err = tlsTimedHandshake(tc)
|
||||
if err != nil {
|
||||
@@ -176,6 +182,7 @@ type tcpListenerFactory struct{}
|
||||
func (f *tcpListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
|
||||
return &tcpListener{
|
||||
uri: fixupPort(uri),
|
||||
cfg: cfg,
|
||||
tlsCfg: tlsCfg,
|
||||
conns: conns,
|
||||
natService: natService,
|
||||
@@ -192,7 +199,7 @@ func fixupPort(uri *url.URL) *url.URL {
 	copyURI := *uri
 
 	host, port, err := net.SplitHostPort(uri.Host)
-	if err != nil && strings.HasPrefix(err.Error(), "missing port") {
+	if err != nil && strings.Contains(err.Error(), "missing port") {
 		// addr is on the form "1.2.3.4"
 		copyURI.Host = net.JoinHostPort(uri.Host, "22000")
 	} else if err == nil && port == "" {
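The HasPrefix-to-Contains change is needed because current Go releases return the "missing port in address" message inside a net.AddrError whose Error() starts with "address …:", so a prefix match no longer fires. A quick standalone check (the exact error wording may vary between Go versions):

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	_, _, err := net.SplitHostPort("1.2.3.4")
	fmt.Println(err)
	// e.g. "address 1.2.3.4: missing port in address" — note the prefix,
	// which is why strings.Contains is used rather than strings.HasPrefix.
	fmt.Println(strings.HasPrefix(err.Error(), "missing port")) // false
	fmt.Println(strings.Contains(err.Error(), "missing port"))  // true

	// Supplying the default port once the host-only form is detected:
	fmt.Println(net.JoinHostPort("1.2.3.4", "22000")) // "1.2.3.4:22000"
}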
@@ -209,7 +209,7 @@ func blockKeyInto(o, hash []byte, folder uint32, file string) []byte {
|
||||
}
|
||||
o[0] = KeyTypeBlock
|
||||
binary.BigEndian.PutUint32(o[keyPrefixLen:], folder)
|
||||
copy(o[keyPrefixLen+keyFolderLen:], []byte(hash))
|
||||
copy(o[keyPrefixLen+keyFolderLen:], hash)
|
||||
copy(o[keyPrefixLen+keyFolderLen+keyHashLen:], []byte(file))
|
||||
return o
|
||||
}
|
||||
|
||||
@@ -58,10 +58,7 @@ func setup() (*Instance, *BlockFinder) {
|
||||
func dbEmpty(db *Instance) bool {
|
||||
iter := db.NewIterator(util.BytesPrefix([]byte{KeyTypeBlock}), nil)
|
||||
defer iter.Release()
|
||||
if iter.Next() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return !iter.Next()
|
||||
}
|
||||
|
||||
func TestBlockMapAddUpdateWipe(t *testing.T) {
|
||||
|
||||
@@ -4,7 +4,8 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this file,
 // You can obtain one at http://mozilla.org/MPL/2.0/.
 
-// +build ignore // this is a really tedious test for an old issue
+// this is a really tedious test for an old issue
+// +build ignore
 
 package db_test
 
@@ -526,9 +526,9 @@ func (db *Instance) ListFolders() []string {
|
||||
|
||||
folderExists := make(map[string]bool)
|
||||
for dbi.Next() {
|
||||
folder := string(db.globalKeyFolder(dbi.Key()))
|
||||
if !folderExists[folder] {
|
||||
folderExists[folder] = true
|
||||
folder, ok := db.globalKeyFolder(dbi.Key())
|
||||
if ok && !folderExists[string(folder)] {
|
||||
folderExists[string(folder)] = true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -558,8 +558,8 @@ func (db *Instance) dropFolder(folder []byte) {
|
||||
// Remove all items related to the given folder from the global bucket
|
||||
dbi = t.NewIterator(util.BytesPrefix([]byte{KeyTypeGlobal}), nil)
|
||||
for dbi.Next() {
|
||||
itemFolder := db.globalKeyFolder(dbi.Key())
|
||||
if bytes.Equal(folder, itemFolder) {
|
||||
itemFolder, ok := db.globalKeyFolder(dbi.Key())
|
||||
if ok && bytes.Equal(folder, itemFolder) {
|
||||
db.Delete(dbi.Key(), nil)
|
||||
}
|
||||
}
|
||||
@@ -635,7 +635,7 @@ func (db *Instance) deviceKeyInto(k []byte, folder, device, file []byte) []byte
|
||||
k[0] = KeyTypeDevice
|
||||
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
|
||||
binary.BigEndian.PutUint32(k[keyPrefixLen+keyFolderLen:], db.deviceIdx.ID(device))
|
||||
copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], []byte(file))
|
||||
copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], file)
|
||||
return k[:reqLen]
|
||||
}
|
||||
|
||||
@@ -670,7 +670,7 @@ func (db *Instance) globalKey(folder, file []byte) []byte {
|
||||
k := make([]byte, keyPrefixLen+keyFolderLen+len(file))
|
||||
k[0] = KeyTypeGlobal
|
||||
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
|
||||
copy(k[keyPrefixLen+keyFolderLen:], []byte(file))
|
||||
copy(k[keyPrefixLen+keyFolderLen:], file)
|
||||
return k
|
||||
}
|
||||
|
||||
@@ -680,12 +680,8 @@ func (db *Instance) globalKeyName(key []byte) []byte {
 }
 
 // globalKeyFolder returns the folder name from the key
-func (db *Instance) globalKeyFolder(key []byte) []byte {
-	folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
-	if !ok {
-		panic("bug: lookup of nonexistent folder ID")
-	}
-	return folder
+func (db *Instance) globalKeyFolder(key []byte) ([]byte, bool) {
+	return db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
 }
 
 func (db *Instance) getIndexID(device, folder []byte) protocol.IndexID {
@@ -45,7 +45,10 @@ func TestGlobalKey(t *testing.T) {
|
||||
|
||||
key := db.globalKey(fld, name)
|
||||
|
||||
fld2 := db.globalKeyFolder(key)
|
||||
fld2, ok := db.globalKeyFolder(key)
|
||||
if !ok {
|
||||
t.Error("should have been found")
|
||||
}
|
||||
if !bytes.Equal(fld2, fld) {
|
||||
t.Errorf("wrong folder %q != %q", fld2, fld)
|
||||
}
|
||||
@@ -53,4 +56,9 @@ func TestGlobalKey(t *testing.T) {
|
||||
if !bytes.Equal(name2, name) {
|
||||
t.Errorf("wrong name %q != %q", name2, name)
|
||||
}
|
||||
|
||||
_, ok = db.globalKeyFolder([]byte{1, 2, 3, 4, 5})
|
||||
if ok {
|
||||
t.Error("should not have been found")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../script/protofmt.go structs.proto
|
||||
//go:generate protoc -I ../../../../../ -I ../../../../gogo/protobuf/protobuf -I . --gogofast_out=. structs.proto
|
||||
//go:generate protoc -I ../../../../../ -I ../../vendor/ -I ../../vendor/github.com/gogo/protobuf/protobuf -I . --gogofast_out=. structs.proto
|
||||
|
||||
package db
|
||||
|
||||
|
||||
@@ -21,6 +21,8 @@ import math "math"
|
||||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
import protocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
|
||||
import github_com_syncthing_syncthing_lib_protocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -30,7 +32,9 @@ var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
const _ = proto.GoGoProtoPackageIsVersion1
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type FileVersion struct {
|
||||
Version protocol.Vector `protobuf:"bytes,1,opt,name=version" json:"version"`
|
||||
@@ -52,18 +56,19 @@ func (*VersionList) Descriptor() ([]byte, []int) { return fileDescriptorStructs,
|
||||
|
||||
// Must be the same as FileInfo but without the blocks field
|
||||
type FileInfoTruncated struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Type protocol.FileInfoType `protobuf:"varint,2,opt,name=type,proto3,enum=protocol.FileInfoType" json:"type,omitempty"`
|
||||
Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
|
||||
Permissions uint32 `protobuf:"varint,4,opt,name=permissions,proto3" json:"permissions,omitempty"`
|
||||
ModifiedS int64 `protobuf:"varint,5,opt,name=modified_s,json=modifiedS,proto3" json:"modified_s,omitempty"`
|
||||
ModifiedNs int32 `protobuf:"varint,11,opt,name=modified_ns,json=modifiedNs,proto3" json:"modified_ns,omitempty"`
|
||||
Deleted bool `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted,omitempty"`
|
||||
Invalid bool `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid,omitempty"`
|
||||
NoPermissions bool `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"no_permissions,omitempty"`
|
||||
Version protocol.Vector `protobuf:"bytes,9,opt,name=version" json:"version"`
|
||||
Sequence int64 `protobuf:"varint,10,opt,name=sequence,proto3" json:"sequence,omitempty"`
|
||||
SymlinkTarget string `protobuf:"bytes,17,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Type protocol.FileInfoType `protobuf:"varint,2,opt,name=type,proto3,enum=protocol.FileInfoType" json:"type,omitempty"`
|
||||
Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
|
||||
Permissions uint32 `protobuf:"varint,4,opt,name=permissions,proto3" json:"permissions,omitempty"`
|
||||
ModifiedS int64 `protobuf:"varint,5,opt,name=modified_s,json=modifiedS,proto3" json:"modified_s,omitempty"`
|
||||
ModifiedNs int32 `protobuf:"varint,11,opt,name=modified_ns,json=modifiedNs,proto3" json:"modified_ns,omitempty"`
|
||||
ModifiedBy github_com_syncthing_syncthing_lib_protocol.ShortID `protobuf:"varint,12,opt,name=modified_by,json=modifiedBy,proto3,customtype=github.com/syncthing/syncthing/lib/protocol.ShortID" json:"modified_by"`
|
||||
Deleted bool `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted,omitempty"`
|
||||
Invalid bool `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid,omitempty"`
|
||||
NoPermissions bool `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"no_permissions,omitempty"`
|
||||
Version protocol.Vector `protobuf:"bytes,9,opt,name=version" json:"version"`
|
||||
Sequence int64 `protobuf:"varint,10,opt,name=sequence,proto3" json:"sequence,omitempty"`
|
||||
SymlinkTarget string `protobuf:"bytes,17,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"`
|
||||
}
|
||||
|
||||
func (m *FileInfoTruncated) Reset() { *m = FileInfoTruncated{} }
|
||||
@@ -75,59 +80,59 @@ func init() {
|
||||
proto.RegisterType((*VersionList)(nil), "db.VersionList")
|
||||
proto.RegisterType((*FileInfoTruncated)(nil), "db.FileInfoTruncated")
|
||||
}
|
||||
func (m *FileVersion) Marshal() (data []byte, err error) {
|
||||
func (m *FileVersion) Marshal() (dAtA []byte, err error) {
|
||||
size := m.ProtoSize()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *FileVersion) MarshalTo(data []byte) (int, error) {
|
||||
func (m *FileVersion) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
data[i] = 0xa
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.Version.ProtoSize()))
|
||||
n1, err := m.Version.MarshalTo(data[i:])
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.Version.ProtoSize()))
|
||||
n1, err := m.Version.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
if len(m.Device) > 0 {
|
||||
data[i] = 0x12
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(len(m.Device)))
|
||||
i += copy(data[i:], m.Device)
|
||||
i = encodeVarintStructs(dAtA, i, uint64(len(m.Device)))
|
||||
i += copy(dAtA[i:], m.Device)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *VersionList) Marshal() (data []byte, err error) {
|
||||
func (m *VersionList) Marshal() (dAtA []byte, err error) {
|
||||
size := m.ProtoSize()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *VersionList) MarshalTo(data []byte) (int, error) {
|
||||
func (m *VersionList) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Versions) > 0 {
|
||||
for _, msg := range m.Versions {
|
||||
data[i] = 0xa
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(msg.ProtoSize()))
|
||||
n, err := msg.MarshalTo(data[i:])
|
||||
i = encodeVarintStructs(dAtA, i, uint64(msg.ProtoSize()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -137,131 +142,136 @@ func (m *VersionList) MarshalTo(data []byte) (int, error) {
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *FileInfoTruncated) Marshal() (data []byte, err error) {
|
||||
func (m *FileInfoTruncated) Marshal() (dAtA []byte, err error) {
|
||||
size := m.ProtoSize()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *FileInfoTruncated) MarshalTo(data []byte) (int, error) {
|
||||
func (m *FileInfoTruncated) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
data[i] = 0xa
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(len(m.Name)))
|
||||
i += copy(data[i:], m.Name)
|
||||
i = encodeVarintStructs(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if m.Type != 0 {
|
||||
data[i] = 0x10
|
||||
dAtA[i] = 0x10
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.Type))
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.Type))
|
||||
}
|
||||
if m.Size != 0 {
|
||||
data[i] = 0x18
|
||||
dAtA[i] = 0x18
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.Size))
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.Size))
|
||||
}
|
||||
if m.Permissions != 0 {
|
||||
data[i] = 0x20
|
||||
dAtA[i] = 0x20
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.Permissions))
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.Permissions))
|
||||
}
|
||||
if m.ModifiedS != 0 {
|
||||
data[i] = 0x28
|
||||
dAtA[i] = 0x28
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.ModifiedS))
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedS))
|
||||
}
|
||||
if m.Deleted {
|
||||
data[i] = 0x30
|
||||
dAtA[i] = 0x30
|
||||
i++
|
||||
if m.Deleted {
|
||||
data[i] = 1
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
data[i] = 0
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
if m.Invalid {
|
||||
data[i] = 0x38
|
||||
dAtA[i] = 0x38
|
||||
i++
|
||||
if m.Invalid {
|
||||
data[i] = 1
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
data[i] = 0
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
if m.NoPermissions {
|
||||
data[i] = 0x40
|
||||
dAtA[i] = 0x40
|
||||
i++
|
||||
if m.NoPermissions {
|
||||
data[i] = 1
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
data[i] = 0
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
data[i] = 0x4a
|
||||
dAtA[i] = 0x4a
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.Version.ProtoSize()))
|
||||
n2, err := m.Version.MarshalTo(data[i:])
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.Version.ProtoSize()))
|
||||
n2, err := m.Version.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n2
|
||||
if m.Sequence != 0 {
|
||||
data[i] = 0x50
|
||||
dAtA[i] = 0x50
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.Sequence))
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.Sequence))
|
||||
}
|
||||
if m.ModifiedNs != 0 {
|
||||
data[i] = 0x58
|
||||
dAtA[i] = 0x58
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.ModifiedNs))
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedNs))
|
||||
}
|
||||
if m.ModifiedBy != 0 {
|
||||
dAtA[i] = 0x60
|
||||
i++
|
||||
i = encodeVarintStructs(dAtA, i, uint64(m.ModifiedBy))
|
||||
}
|
||||
if len(m.SymlinkTarget) > 0 {
|
||||
data[i] = 0x8a
|
||||
dAtA[i] = 0x8a
|
||||
i++
|
||||
data[i] = 0x1
|
||||
dAtA[i] = 0x1
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(len(m.SymlinkTarget)))
|
||||
i += copy(data[i:], m.SymlinkTarget)
|
||||
i = encodeVarintStructs(dAtA, i, uint64(len(m.SymlinkTarget)))
|
||||
i += copy(dAtA[i:], m.SymlinkTarget)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Structs(data []byte, offset int, v uint64) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
data[offset+4] = uint8(v >> 32)
|
||||
data[offset+5] = uint8(v >> 40)
|
||||
data[offset+6] = uint8(v >> 48)
|
||||
data[offset+7] = uint8(v >> 56)
|
||||
func encodeFixed64Structs(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Structs(data []byte, offset int, v uint32) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
func encodeFixed32Structs(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintStructs(data []byte, offset int, v uint64) int {
|
||||
func encodeVarintStructs(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
data[offset] = uint8(v)
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *FileVersion) ProtoSize() (n int) {
|
||||
@@ -324,6 +334,9 @@ func (m *FileInfoTruncated) ProtoSize() (n int) {
|
||||
if m.ModifiedNs != 0 {
|
||||
n += 1 + sovStructs(uint64(m.ModifiedNs))
|
||||
}
|
||||
if m.ModifiedBy != 0 {
|
||||
n += 1 + sovStructs(uint64(m.ModifiedBy))
|
||||
}
|
||||
l = len(m.SymlinkTarget)
|
||||
if l > 0 {
|
||||
n += 2 + l + sovStructs(uint64(l))
|
||||
@@ -344,8 +357,8 @@ func sovStructs(x uint64) (n int) {
|
||||
func sozStructs(x uint64) (n int) {
|
||||
return sovStructs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *FileVersion) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
func (m *FileVersion) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
@@ -357,7 +370,7 @@ func (m *FileVersion) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -385,7 +398,7 @@ func (m *FileVersion) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -399,7 +412,7 @@ func (m *FileVersion) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.Version.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
||||
if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
@@ -415,7 +428,7 @@ func (m *FileVersion) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -429,14 +442,14 @@ func (m *FileVersion) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Device = append(m.Device[:0], data[iNdEx:postIndex]...)
|
||||
m.Device = append(m.Device[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Device == nil {
|
||||
m.Device = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipStructs(data[iNdEx:])
|
||||
skippy, err := skipStructs(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -455,8 +468,8 @@ func (m *FileVersion) Unmarshal(data []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *VersionList) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
func (m *VersionList) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
@@ -468,7 +481,7 @@ func (m *VersionList) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -496,7 +509,7 @@ func (m *VersionList) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -511,13 +524,13 @@ func (m *VersionList) Unmarshal(data []byte) error {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Versions = append(m.Versions, FileVersion{})
|
||||
if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
|
||||
if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipStructs(data[iNdEx:])
|
||||
skippy, err := skipStructs(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -536,8 +549,8 @@ func (m *VersionList) Unmarshal(data []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
func (m *FileInfoTruncated) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
@@ -549,7 +562,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -577,7 +590,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -592,7 +605,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = string(data[iNdEx:postIndex])
|
||||
m.Name = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
@@ -606,7 +619,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Type |= (protocol.FileInfoType(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -625,7 +638,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Size |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -644,7 +657,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Permissions |= (uint32(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -663,7 +676,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ModifiedS |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -682,7 +695,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -702,7 +715,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -722,7 +735,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -742,7 +755,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -756,7 +769,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.Version.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
||||
if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
@@ -772,7 +785,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Sequence |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -791,13 +804,32 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ModifiedNs |= (int32(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 12:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ModifiedBy", wireType)
|
||||
}
|
||||
m.ModifiedBy = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowStructs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ModifiedBy |= (github_com_syncthing_syncthing_lib_protocol.ShortID(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 17:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field SymlinkTarget", wireType)
|
||||
@@ -810,7 +842,7 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -825,11 +857,11 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.SymlinkTarget = string(data[iNdEx:postIndex])
|
||||
m.SymlinkTarget = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipStructs(data[iNdEx:])
|
||||
skippy, err := skipStructs(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -848,8 +880,8 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipStructs(data []byte) (n int, err error) {
|
||||
l := len(data)
|
||||
func skipStructs(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
@@ -860,7 +892,7 @@ func skipStructs(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -878,7 +910,7 @@ func skipStructs(data []byte) (n int, err error) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if data[iNdEx-1] < 0x80 {
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -895,7 +927,7 @@ func skipStructs(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -918,7 +950,7 @@ func skipStructs(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -929,7 +961,7 @@ func skipStructs(data []byte) (n int, err error) {
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipStructs(data[start:])
|
||||
next, err := skipStructs(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -953,34 +985,39 @@ var (
|
||||
ErrIntOverflowStructs = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("structs.proto", fileDescriptorStructs) }
|
||||
|
||||
var fileDescriptorStructs = []byte{
|
||||
// 442 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x51, 0x5d, 0x8b, 0xd3, 0x40,
|
||||
0x14, 0x6d, 0xb7, 0xd9, 0x7e, 0xdc, 0xd8, 0xd5, 0x1d, 0x64, 0x19, 0x0a, 0xb6, 0x4b, 0x41, 0x10,
|
||||
0xc1, 0x54, 0x57, 0x7c, 0xf1, 0x71, 0x1f, 0x16, 0x04, 0x11, 0x19, 0x97, 0xf5, 0xb1, 0x34, 0x99,
|
||||
0xdb, 0xec, 0x60, 0x32, 0x53, 0x33, 0x93, 0x42, 0xfd, 0x25, 0xbe, 0xb9, 0x3f, 0xa7, 0x8f, 0xfe,
|
||||
0x02, 0xd1, 0xfa, 0x47, 0x9c, 0xce, 0xa4, 0x31, 0x8f, 0xfb, 0x10, 0xb8, 0xe7, 0x9e, 0x73, 0xee,
|
||||
0x3d, 0x93, 0x0b, 0x43, 0x6d, 0x8a, 0x32, 0x31, 0x3a, 0x5a, 0x15, 0xca, 0x28, 0x72, 0xc4, 0xe3,
|
||||
0xd1, 0x8b, 0x54, 0x98, 0xdb, 0x32, 0x8e, 0x12, 0x95, 0xcf, 0x52, 0x95, 0xaa, 0x99, 0xa3, 0xe2,
|
||||
0x72, 0xe9, 0x90, 0x03, 0xae, 0xf2, 0x96, 0xd1, 0x9b, 0x86, 0x5c, 0x6f, 0x64, 0x62, 0x6e, 0x85,
|
||||
0x4c, 0x1b, 0x55, 0x26, 0x62, 0x3f, 0x21, 0x51, 0xd9, 0x2c, 0xc6, 0x95, 0xb7, 0x4d, 0x3f, 0x43,
|
||||
0x78, 0x25, 0x32, 0xbc, 0xc1, 0x42, 0x0b, 0x25, 0xc9, 0x4b, 0xe8, 0xad, 0x7d, 0x49, 0xdb, 0xe7,
|
||||
0xed, 0x67, 0xe1, 0xc5, 0xa3, 0xe8, 0x60, 0x8a, 0x6e, 0x30, 0x31, 0xaa, 0xb8, 0x0c, 0xb6, 0xbf,
|
||||
0x26, 0x2d, 0x76, 0x90, 0x91, 0x33, 0xe8, 0x72, 0x5c, 0x8b, 0x04, 0xe9, 0x91, 0x35, 0x3c, 0x60,
|
||||
0x15, 0x9a, 0x5e, 0x41, 0x58, 0x0d, 0x7d, 0x2f, 0xb4, 0x21, 0xaf, 0xa0, 0x5f, 0x39, 0xb4, 0x9d,
|
||||
0xdc, 0xb1, 0x93, 0x1f, 0x46, 0x3c, 0x8e, 0x1a, 0xbb, 0xab, 0xc1, 0xb5, 0xec, 0x6d, 0xf0, 0xfd,
|
||||
0x6e, 0xd2, 0x9a, 0xfe, 0xe8, 0xc0, 0xe9, 0x5e, 0xf5, 0x4e, 0x2e, 0xd5, 0x75, 0x51, 0xca, 0x64,
|
||||
0x61, 0x90, 0x13, 0x02, 0x81, 0x5c, 0xe4, 0xe8, 0x42, 0x0e, 0x98, 0xab, 0xc9, 0x73, 0x08, 0xcc,
|
||||
0x66, 0xe5, 0x73, 0x9c, 0x5c, 0x9c, 0xfd, 0x0f, 0x5e, 0xdb, 0x2d, 0xcb, 0x9c, 0x66, 0xef, 0xd7,
|
||||
0xe2, 0x1b, 0xd2, 0x8e, 0xd5, 0x76, 0x98, 0xab, 0xc9, 0x39, 0x84, 0x2b, 0x2c, 0x72, 0xa1, 0x7d,
|
||||
0xca, 0xc0, 0x52, 0x43, 0xd6, 0x6c, 0x91, 0x27, 0x00, 0xb9, 0xe2, 0x62, 0x29, 0x90, 0xcf, 0x35,
|
||||
0x3d, 0x76, 0xde, 0xc1, 0xa1, 0xf3, 0x89, 0x50, 0xe8, 0x71, 0xcc, 0xd0, 0xe6, 0xa3, 0x5d, 0xcb,
|
||||
0xf5, 0xd9, 0x01, 0xee, 0x19, 0x21, 0xd7, 0x8b, 0x4c, 0x70, 0xda, 0xf3, 0x4c, 0x05, 0xc9, 0x53,
|
||||
0x38, 0x91, 0x6a, 0xde, 0xdc, 0xdb, 0x77, 0x82, 0xa1, 0x54, 0x1f, 0x1b, 0x9b, 0x1b, 0x77, 0x19,
|
||||
0xdc, 0xef, 0x2e, 0x23, 0xe8, 0x6b, 0xfc, 0x5a, 0xa2, 0xb4, 0x97, 0x01, 0x97, 0xb4, 0xc6, 0x64,
|
||||
0x02, 0x61, 0xfd, 0x0e, 0xbb, 0x31, 0xb4, 0xf4, 0x31, 0xab, 0x9f, 0xf6, 0x41, 0xef, 0x53, 0xe9,
|
||||
0x4d, 0x9e, 0x09, 0xf9, 0x65, 0x6e, 0x16, 0x45, 0x8a, 0x86, 0x9e, 0xba, 0x1f, 0x3d, 0xac, 0xba,
|
||||
0xd7, 0xae, 0xe9, 0x2f, 0x74, 0xf9, 0x78, 0xfb, 0x67, 0xdc, 0xda, 0xee, 0xc6, 0xed, 0x9f, 0xf6,
|
||||
0xfb, 0xbd, 0x1b, 0xb7, 0xee, 0xfe, 0x8e, 0xdb, 0x71, 0xd7, 0xe5, 0x7b, 0xfd, 0x2f, 0x00, 0x00,
|
||||
0xff, 0xff, 0xb1, 0x2f, 0x12, 0xb6, 0xda, 0x02, 0x00, 0x00,
|
||||
// 483 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4f, 0x6b, 0xdb, 0x4e,
|
||||
0x10, 0xf5, 0xc6, 0x8a, 0xff, 0xac, 0xe2, 0xfc, 0x7e, 0x59, 0x4a, 0x58, 0x0c, 0x95, 0x85, 0xa1,
|
||||
0x20, 0x0a, 0x95, 0x5b, 0x87, 0x5e, 0xda, 0x9b, 0x29, 0x81, 0x40, 0x29, 0x45, 0x09, 0xe9, 0xa5,
|
||||
0x60, 0x2c, 0x69, 0x2c, 0x2f, 0x95, 0x76, 0x55, 0xed, 0xca, 0xa0, 0x7e, 0x92, 0x1e, 0xf3, 0x71,
|
||||
0x7c, 0xec, 0xb9, 0x87, 0xd0, 0xba, 0x9f, 0xa3, 0x50, 0xb4, 0x92, 0x15, 0x1d, 0xdb, 0xdb, 0xbc,
|
||||
0xd9, 0xf7, 0xe6, 0xbd, 0x61, 0x16, 0x8f, 0xa4, 0xca, 0xf2, 0x40, 0x49, 0x37, 0xcd, 0x84, 0x12,
|
||||
0xe4, 0x28, 0xf4, 0xc7, 0xcf, 0x22, 0xa6, 0x36, 0xb9, 0xef, 0x06, 0x22, 0x99, 0x45, 0x22, 0x12,
|
||||
0x33, 0xfd, 0xe4, 0xe7, 0x6b, 0x8d, 0x34, 0xd0, 0x55, 0x25, 0x19, 0xbf, 0x6c, 0xd1, 0x65, 0xc1,
|
||||
0x03, 0xb5, 0x61, 0x3c, 0x6a, 0x55, 0x31, 0xf3, 0xab, 0x09, 0x81, 0x88, 0x67, 0x3e, 0xa4, 0x95,
|
||||
0x6c, 0xfa, 0x01, 0x9b, 0x97, 0x2c, 0x86, 0x5b, 0xc8, 0x24, 0x13, 0x9c, 0x3c, 0xc7, 0xfd, 0x6d,
|
||||
0x55, 0x52, 0x64, 0x23, 0xc7, 0x9c, 0xff, 0xef, 0x1e, 0x44, 0xee, 0x2d, 0x04, 0x4a, 0x64, 0x0b,
|
||||
0x63, 0x77, 0x3f, 0xe9, 0x78, 0x07, 0x1a, 0x39, 0xc7, 0xbd, 0x10, 0xb6, 0x2c, 0x00, 0x7a, 0x64,
|
||||
0x23, 0xe7, 0xc4, 0xab, 0xd1, 0xf4, 0x12, 0x9b, 0xf5, 0xd0, 0xb7, 0x4c, 0x2a, 0xf2, 0x02, 0x0f,
|
||||
0x6a, 0x85, 0xa4, 0xc8, 0xee, 0x3a, 0xe6, 0xfc, 0x3f, 0x37, 0xf4, 0xdd, 0x96, 0x77, 0x3d, 0xb8,
|
||||
0xa1, 0xbd, 0x32, 0xbe, 0xde, 0x4d, 0x3a, 0xd3, 0xdf, 0x5d, 0x7c, 0x56, 0xb2, 0xae, 0xf8, 0x5a,
|
||||
0xdc, 0x64, 0x39, 0x0f, 0x56, 0x0a, 0x42, 0x42, 0xb0, 0xc1, 0x57, 0x09, 0xe8, 0x90, 0x43, 0x4f,
|
||||
0xd7, 0xe4, 0x29, 0x36, 0x54, 0x91, 0x56, 0x39, 0x4e, 0xe7, 0xe7, 0x0f, 0xc1, 0x1b, 0x79, 0x91,
|
||||
0x82, 0xa7, 0x39, 0xa5, 0x5e, 0xb2, 0x2f, 0x40, 0xbb, 0x36, 0x72, 0xba, 0x9e, 0xae, 0x89, 0x8d,
|
||||
0xcd, 0x14, 0xb2, 0x84, 0xc9, 0x2a, 0xa5, 0x61, 0x23, 0x67, 0xe4, 0xb5, 0x5b, 0xe4, 0x31, 0xc6,
|
||||
0x89, 0x08, 0xd9, 0x9a, 0x41, 0xb8, 0x94, 0xf4, 0x58, 0x6b, 0x87, 0x87, 0xce, 0x35, 0xa1, 0xb8,
|
||||
0x1f, 0x42, 0x0c, 0x0a, 0x42, 0xda, 0xb3, 0x91, 0x33, 0xf0, 0x0e, 0xb0, 0x7c, 0x61, 0x7c, 0xbb,
|
||||
0x8a, 0x59, 0x48, 0xfb, 0xd5, 0x4b, 0x0d, 0xc9, 0x13, 0x7c, 0xca, 0xc5, 0xb2, 0xed, 0x3b, 0xd0,
|
||||
0x84, 0x11, 0x17, 0xef, 0x5b, 0xce, 0xad, 0xbb, 0x0c, 0xff, 0xee, 0x2e, 0x63, 0x3c, 0x90, 0xf0,
|
||||
0x39, 0x07, 0x1e, 0x00, 0xc5, 0x3a, 0x69, 0x83, 0xc9, 0x04, 0x9b, 0xcd, 0x1e, 0x5c, 0x52, 0xd3,
|
||||
0x46, 0xce, 0xb1, 0xd7, 0xac, 0xf6, 0x4e, 0x92, 0x8f, 0x2d, 0x82, 0x5f, 0xd0, 0x13, 0x1b, 0x39,
|
||||
0xc6, 0xe2, 0x75, 0x69, 0xf0, 0xfd, 0x7e, 0x72, 0xf1, 0x0f, 0x3f, 0xcd, 0xbd, 0xde, 0x88, 0x4c,
|
||||
0x5d, 0xbd, 0x79, 0x98, 0xbe, 0x28, 0xca, 0x9d, 0x65, 0x91, 0xc4, 0x8c, 0x7f, 0x5a, 0xaa, 0x55,
|
||||
0x16, 0x81, 0xa2, 0x67, 0xfa, 0x8c, 0xa3, 0xba, 0x7b, 0xa3, 0x9b, 0xd5, 0xfd, 0x17, 0x8f, 0x76,
|
||||
0x3f, 0xad, 0xce, 0x6e, 0x6f, 0xa1, 0x6f, 0x7b, 0x0b, 0xfd, 0xd8, 0x5b, 0x9d, 0xbb, 0x5f, 0x16,
|
||||
0xf2, 0x7b, 0xda, 0xe0, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xcd, 0xe3, 0xfd, 0x38,
|
||||
0x03, 0x00, 0x00,
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ message FileInfoTruncated {
|
||||
uint32 permissions = 4;
|
||||
int64 modified_s = 5;
|
||||
int32 modified_ns = 11;
|
||||
uint64 modified_by = 12 [(gogoproto.customtype) = "github.com/syncthing/syncthing/lib/protocol.ShortID", (gogoproto.nullable) = false];
|
||||
bool deleted = 6;
|
||||
bool invalid = 7;
|
||||
bool no_permissions = 8;
|
||||
|
||||
@@ -10,6 +10,9 @@ import (
 	"fmt"
 	"net"
 	"time"
+
+	"golang.org/x/net/ipv4"
+	"golang.org/x/net/ipv6"
 )
 
 // Dial tries dialing via proxy if a proxy is configured, and falls back to
@@ -75,3 +78,22 @@ func SetTCPOptions(conn net.Conn) error {
 		return fmt.Errorf("unknown connection type %T", conn)
 	}
 }
+
+func SetTrafficClass(conn net.Conn, class int) error {
+	switch conn := conn.(type) {
+	case *net.TCPConn:
+		e1 := ipv4.NewConn(conn).SetTOS(class)
+		e2 := ipv6.NewConn(conn).SetTrafficClass(class)
+
+		if e1 != nil {
+			return e1
+		}
+		return e2
+
+	case dialerConn:
+		return SetTrafficClass(conn.Conn, class)
+
+	default:
+		return fmt.Errorf("unknown connection type %T", conn)
+	}
+}
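SetTrafficClass applies the configured value as the IPv4 ToS byte and the IPv6 traffic class, so whichever address family the socket actually uses gets marked. A hypothetical caller for a plain TCP connection; the address and the 0xb8 value (DSCP EF shifted into the ToS byte) are only examples:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
)

// setTrafficClass mirrors the helper above for a bare *net.TCPConn: it sets
// the ToS byte for IPv4 and the traffic class for IPv6, returning the IPv4
// error if any, otherwise the IPv6 error.
func setTrafficClass(conn *net.TCPConn, class int) error {
	e1 := ipv4.NewConn(conn).SetTOS(class)
	e2 := ipv6.NewConn(conn).SetTrafficClass(class)
	if e1 != nil {
		return e1
	}
	return e2
}

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// 0xb8 is just an example value; any 0-255 traffic class is accepted.
	if err := setTrafficClass(conn.(*net.TCPConn), 0xb8); err != nil {
		log.Println("could not set traffic class:", err)
	}
}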

@@ -5,7 +5,7 @@
// You can obtain one at http://mozilla.org/MPL/2.0/.

//go:generate go run ../../script/protofmt.go local.proto
//go:generate protoc -I ../../../../../ -I ../../../../gogo/protobuf/protobuf -I . --gogofast_out=. local.proto
//go:generate protoc -I ../../vendor/ -I ../../vendor/github.com/gogo/protobuf/protobuf -I . --gogofast_out=. local.proto

package discover

@@ -51,7 +51,7 @@ func NewLocal(id protocol.DeviceID, addr string, addrList AddressLister) (Finder
Supervisor: suture.NewSimple("local"),
myID: id,
addrList: addrList,
localBcastTick: time.Tick(BroadcastInterval),
localBcastTick: time.NewTicker(BroadcastInterval).C,
forcedBcastTick: make(chan time.Time),
localBcastStart: time.Now(),
cache: newCache(),
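The one-line change above replaces time.Tick with time.NewTicker(BroadcastInterval).C. Both expressions yield an equivalent receive channel, but time.Tick never exposes the ticker, so it can never be stopped or collected. The standalone sketch below (not from the codebase) contrasts the two forms.

package main

import "time"

func main() {
    // time.Tick: convenient, but the ticker behind this channel leaks for the
    // lifetime of the program because there is no way to call Stop on it.
    leaky := time.Tick(time.Second)

    // time.NewTicker: same channel semantics, and the ticker can be stopped
    // when the surrounding service shuts down.
    t := time.NewTicker(time.Second)
    defer t.Stop()

    for i := 0; i < 2; i++ {
        select {
        case <-leaky:
        case <-t.C:
        }
    }
}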

@@ -29,7 +29,9 @@ var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

type Announce struct {
ID github_com_syncthing_syncthing_lib_protocol.DeviceID `protobuf:"bytes,1,opt,name=id,proto3,customtype=github.com/syncthing/syncthing/lib/protocol.DeviceID" json:"id"`
@@ -45,77 +47,77 @@ func (*Announce) Descriptor() ([]byte, []int) { return fileDescriptorLocal, []in
|
||||
func init() {
|
||||
proto.RegisterType((*Announce)(nil), "discover.Announce")
|
||||
}
|
||||
func (m *Announce) Marshal() (data []byte, err error) {
|
||||
func (m *Announce) Marshal() (dAtA []byte, err error) {
|
||||
size := m.ProtoSize()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Announce) MarshalTo(data []byte) (int, error) {
|
||||
func (m *Announce) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
data[i] = 0xa
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintLocal(data, i, uint64(m.ID.ProtoSize()))
|
||||
n1, err := m.ID.MarshalTo(data[i:])
|
||||
i = encodeVarintLocal(dAtA, i, uint64(m.ID.ProtoSize()))
|
||||
n1, err := m.ID.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, s := range m.Addresses {
|
||||
data[i] = 0x12
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
data[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
data[i] = uint8(l)
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(data[i:], s)
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if m.InstanceID != 0 {
|
||||
data[i] = 0x18
|
||||
dAtA[i] = 0x18
|
||||
i++
|
||||
i = encodeVarintLocal(data, i, uint64(m.InstanceID))
|
||||
i = encodeVarintLocal(dAtA, i, uint64(m.InstanceID))
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Local(data []byte, offset int, v uint64) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
data[offset+4] = uint8(v >> 32)
|
||||
data[offset+5] = uint8(v >> 40)
|
||||
data[offset+6] = uint8(v >> 48)
|
||||
data[offset+7] = uint8(v >> 56)
|
||||
func encodeFixed64Local(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Local(data []byte, offset int, v uint32) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
func encodeFixed32Local(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintLocal(data []byte, offset int, v uint64) int {
|
||||
func encodeVarintLocal(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
data[offset] = uint8(v)
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *Announce) ProtoSize() (n int) {
|
||||
@@ -148,8 +150,8 @@ func sovLocal(x uint64) (n int) {
|
||||
func sozLocal(x uint64) (n int) {
|
||||
return sovLocal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *Announce) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
func (m *Announce) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
@@ -161,7 +163,7 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -189,7 +191,7 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -203,7 +205,7 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.ID.Unmarshal(data[iNdEx:postIndex]); err != nil {
|
||||
if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
@@ -219,7 +221,7 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -234,7 +236,7 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, string(data[iNdEx:postIndex]))
|
||||
m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
@@ -248,7 +250,7 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.InstanceID |= (int64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -257,7 +259,7 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipLocal(data[iNdEx:])
|
||||
skippy, err := skipLocal(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -276,8 +278,8 @@ func (m *Announce) Unmarshal(data []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipLocal(data []byte) (n int, err error) {
|
||||
l := len(data)
|
||||
func skipLocal(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
@@ -288,7 +290,7 @@ func skipLocal(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -306,7 +308,7 @@ func skipLocal(data []byte) (n int, err error) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if data[iNdEx-1] < 0x80 {
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -323,7 +325,7 @@ func skipLocal(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -346,7 +348,7 @@ func skipLocal(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@@ -357,7 +359,7 @@ func skipLocal(data []byte) (n int, err error) {
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipLocal(data[start:])
|
||||
next, err := skipLocal(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -381,21 +383,24 @@ var (
|
||||
ErrIntOverflowLocal = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("local.proto", fileDescriptorLocal) }
|
||||
|
||||
var fileDescriptorLocal = []byte{
|
||||
// 235 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0xc9, 0x4f, 0x4e,
|
||||
0xcc, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x48, 0xc9, 0x2c, 0x4e, 0xce, 0x2f, 0x4b,
|
||||
0x2d, 0x92, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf,
|
||||
0x4f, 0xcf, 0xd7, 0x07, 0x2b, 0x48, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x51,
|
||||
0x69, 0x2d, 0x23, 0x17, 0x87, 0x63, 0x5e, 0x5e, 0x7e, 0x69, 0x5e, 0x72, 0xaa, 0x50, 0x10, 0x17,
|
||||
0x53, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x8f, 0x93, 0xd3, 0x89, 0x7b, 0xf2, 0x0c, 0xb7,
|
||||
0xee, 0xc9, 0x9b, 0x20, 0x99, 0x57, 0x5c, 0x99, 0x97, 0x5c, 0x92, 0x91, 0x99, 0x97, 0x8e, 0xc4,
|
||||
0xca, 0xc9, 0x4c, 0x82, 0x58, 0x91, 0x9c, 0x9f, 0xa3, 0xe7, 0x92, 0x5a, 0x96, 0x99, 0x9c, 0xea,
|
||||
0xe9, 0xf2, 0xe8, 0x9e, 0x3c, 0x93, 0xa7, 0x4b, 0x10, 0xd0, 0x34, 0x21, 0x19, 0x2e, 0xce, 0xc4,
|
||||
0x94, 0x94, 0xa2, 0xd4, 0xe2, 0xe2, 0xd4, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0xce, 0x20, 0x84,
|
||||
0x80, 0x90, 0x3e, 0x17, 0x77, 0x66, 0x5e, 0x71, 0x49, 0x22, 0xd0, 0xf6, 0x78, 0xa0, 0xd5, 0xcc,
|
||||
0x40, 0xab, 0x99, 0x9d, 0xf8, 0x80, 0xda, 0xb9, 0x3c, 0xa1, 0xc2, 0x40, 0x63, 0xb8, 0x60, 0x4a,
|
||||
0x3c, 0x53, 0x9c, 0x44, 0x4e, 0x3c, 0x94, 0x63, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0x02, 0x10, 0x3f,
|
||||
0x78, 0x24, 0xc7, 0xb0, 0xe0, 0xb1, 0x1c, 0x63, 0x12, 0x1b, 0xd8, 0x05, 0xc6, 0x80, 0x00, 0x00,
|
||||
0x00, 0xff, 0xff, 0xa4, 0x46, 0x4f, 0x13, 0x14, 0x01, 0x00, 0x00,
|
||||
// 241 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0x4f, 0x4e, 0x84, 0x30,
|
||||
0x14, 0xc6, 0x29, 0x24, 0x66, 0xa6, 0x63, 0x5c, 0x10, 0x17, 0xc4, 0x98, 0x42, 0x5c, 0xb1, 0x11,
|
||||
0x16, 0x7a, 0x01, 0x09, 0x9b, 0x6e, 0xb9, 0x80, 0x81, 0xb6, 0x32, 0x2f, 0xc1, 0x3e, 0x43, 0x61,
|
||||
0x12, 0x6f, 0xe3, 0x05, 0xbc, 0x07, 0x4b, 0xd7, 0x2e, 0x1a, 0xad, 0x17, 0x31, 0xe9, 0x68, 0x86,
|
||||
0xdd, 0xf7, 0xfd, 0xf2, 0x7b, 0x7f, 0xe8, 0x6e, 0x40, 0xd1, 0x0e, 0xc5, 0xcb, 0x88, 0x13, 0xc6,
|
||||
0x1b, 0x09, 0x46, 0xe0, 0x41, 0x8d, 0x57, 0xb7, 0x3d, 0x4c, 0xfb, 0xb9, 0x2b, 0x04, 0x3e, 0x97,
|
||||
0x3d, 0xf6, 0x58, 0x7a, 0xa1, 0x9b, 0x9f, 0x7c, 0xf3, 0xc5, 0xa7, 0xe3, 0xe0, 0xcd, 0x3b, 0xa1,
|
||||
0x9b, 0x07, 0xad, 0x71, 0xd6, 0x42, 0xc5, 0x0d, 0x0d, 0x41, 0x26, 0x24, 0x23, 0xf9, 0x79, 0x55,
|
||||
0x2d, 0x36, 0x0d, 0x3e, 0x6d, 0x7a, 0xbf, 0xda, 0x67, 0x5e, 0xb5, 0x98, 0xf6, 0xa0, 0xfb, 0x55,
|
||||
0x1a, 0xa0, 0x3b, 0x9e, 0x10, 0x38, 0x14, 0xb5, 0x3a, 0x80, 0x50, 0xbc, 0x76, 0x36, 0x0d, 0x79,
|
||||
0xdd, 0x84, 0x20, 0xe3, 0x6b, 0xba, 0x6d, 0xa5, 0x1c, 0x95, 0x31, 0xca, 0x24, 0x61, 0x16, 0xe5,
|
||||
0xdb, 0xe6, 0x04, 0xe2, 0x92, 0xee, 0x40, 0x9b, 0xa9, 0xd5, 0x42, 0x3d, 0x82, 0x4c, 0xa2, 0x8c,
|
||||
0xe4, 0x51, 0x75, 0xe1, 0x6c, 0x4a, 0xf9, 0x1f, 0xe6, 0x75, 0x43, 0xff, 0x15, 0x2e, 0xab, 0xcb,
|
||||
0xe5, 0x9b, 0x05, 0x8b, 0x63, 0xe4, 0xc3, 0x31, 0xf2, 0xe5, 0x58, 0xf0, 0xf6, 0xc3, 0x48, 0x77,
|
||||
0xe6, 0x3f, 0xb8, 0xfb, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x46, 0x4f, 0x13, 0x14, 0x01, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ const (
|
||||
DevicePaused
|
||||
DeviceResumed
|
||||
LocalChangeDetected
|
||||
RemoteChangeDetected
|
||||
LocalIndexUpdated
|
||||
RemoteIndexUpdated
|
||||
ItemStarted
|
||||
@@ -42,6 +43,8 @@ const (
|
||||
FolderCompletion
|
||||
FolderErrors
|
||||
FolderScanProgress
|
||||
FolderPaused
|
||||
FolderResumed
|
||||
ListenAddressesChanged
|
||||
LoginAttempt
|
||||
|
||||
@@ -68,6 +71,8 @@ func (t EventType) String() string {
|
||||
return "DeviceRejected"
|
||||
case LocalChangeDetected:
|
||||
return "LocalChangeDetected"
|
||||
case RemoteChangeDetected:
|
||||
return "RemoteChangeDetected"
|
||||
case LocalIndexUpdated:
|
||||
return "LocalIndexUpdated"
|
||||
case RemoteIndexUpdated:
|
||||
@@ -98,6 +103,10 @@ func (t EventType) String() string {
|
||||
return "DeviceResumed"
|
||||
case FolderScanProgress:
|
||||
return "FolderScanProgress"
|
||||
case FolderPaused:
|
||||
return "FolderPaused"
|
||||
case FolderResumed:
|
||||
return "FolderResumed"
|
||||
case ListenAddressesChanged:
|
||||
return "ListenAddressesChanged"
|
||||
case LoginAttempt:
|
||||
|
||||
@@ -247,22 +247,23 @@ func BenchmarkBufferedSub(b *testing.B) {
|
||||
}
|
||||
|
||||
// Receive the events
|
||||
done := make(chan struct{})
|
||||
done := make(chan error)
|
||||
go func() {
|
||||
defer close(done)
|
||||
recv := 0
|
||||
var evs []Event
|
||||
for i := 0; i < b.N; {
|
||||
evs = bs.Since(recv, evs[:0])
|
||||
for _, ev := range evs {
|
||||
if ev.GlobalID != recv+1 {
|
||||
b.Fatal("skipped event", ev.GlobalID, recv)
|
||||
done <- fmt.Errorf("skipped event %v %v", ev.GlobalID, recv)
|
||||
return
|
||||
}
|
||||
recv = ev.GlobalID
|
||||
coord <- struct{}{}
|
||||
}
|
||||
i += len(evs)
|
||||
}
|
||||
done <- nil
|
||||
}()
|
||||
|
||||
// Send the events
|
||||
@@ -276,7 +277,9 @@ func BenchmarkBufferedSub(b *testing.B) {
|
||||
<-coord
|
||||
}
|
||||
|
||||
<-done
|
||||
if err := <-done; err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
b.ReportAllocs()
|
||||
}
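The benchmark edit above moves failure reporting out of the helper goroutine: testing's Fatal/FailNow must only be called from the goroutine running the test or benchmark, so the worker now hands any error back over a channel and the benchmark goroutine decides whether to fail. A stripped-down sketch of that pattern, with hypothetical names:

package example

import (
    "fmt"
    "testing"
)

// BenchmarkPattern shows the error-over-channel shape used above: the worker
// goroutine never calls b.Fatal itself, it reports problems to the benchmark
// goroutine, which is the only one allowed to fail the run.
func BenchmarkPattern(b *testing.B) {
    done := make(chan error, 1)
    go func() {
        prev := -1
        for i := 0; i < b.N; i++ {
            if i <= prev { // stand-in for the real "skipped event" check
                done <- fmt.Errorf("out of order: %d after %d", i, prev)
                return
            }
            prev = i
        }
        done <- nil
    }()
    if err := <-done; err != nil {
        b.Error(err)
    }
}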
@@ -15,7 +15,7 @@ func TestCache(t *testing.T) {
|
||||
c := newCache(nil)
|
||||
|
||||
res, ok := c.get("nonexistent")
|
||||
if res.IsIgnored() || res.IsDeletable() || ok != false {
|
||||
if res.IsIgnored() || res.IsDeletable() || ok {
|
||||
t.Errorf("res %v, ok %v for nonexistent item", res, ok)
|
||||
}
|
||||
|
||||
@@ -25,12 +25,12 @@ func TestCache(t *testing.T) {
|
||||
c.set("false", 0)
|
||||
|
||||
res, ok = c.get("true")
|
||||
if !res.IsIgnored() || !res.IsDeletable() || ok != true {
|
||||
if !res.IsIgnored() || !res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for true item", res, ok)
|
||||
}
|
||||
|
||||
res, ok = c.get("false")
|
||||
if res.IsIgnored() || res.IsDeletable() || ok != true {
|
||||
if res.IsIgnored() || res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for false item", res, ok)
|
||||
}
|
||||
|
||||
@@ -41,12 +41,12 @@ func TestCache(t *testing.T) {
|
||||
// Same values should exist
|
||||
|
||||
res, ok = c.get("true")
|
||||
if !res.IsIgnored() || !res.IsDeletable() || ok != true {
|
||||
if !res.IsIgnored() || !res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for true item", res, ok)
|
||||
}
|
||||
|
||||
res, ok = c.get("false")
|
||||
if res.IsIgnored() || res.IsDeletable() || ok != true {
|
||||
if res.IsIgnored() || res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for false item", res, ok)
|
||||
}
|
||||
|
||||
|
||||
@@ -93,12 +93,12 @@ type Model struct {
|
||||
folderStatRefs map[string]*stats.FolderStatisticsReference // folder -> statsRef
|
||||
fmut sync.RWMutex // protects the above
|
||||
|
||||
conn map[protocol.DeviceID]connections.Connection
|
||||
closed map[protocol.DeviceID]chan struct{}
|
||||
helloMessages map[protocol.DeviceID]protocol.HelloResult
|
||||
devicePaused map[protocol.DeviceID]bool
|
||||
deviceDownloads map[protocol.DeviceID]*deviceDownloadState
|
||||
pmut sync.RWMutex // protects the above
|
||||
conn map[protocol.DeviceID]connections.Connection
|
||||
closed map[protocol.DeviceID]chan struct{}
|
||||
helloMessages map[protocol.DeviceID]protocol.HelloResult
|
||||
deviceDownloads map[protocol.DeviceID]*deviceDownloadState
|
||||
remotePausedFolders map[protocol.DeviceID][]string // deviceID -> folders
|
||||
pmut sync.RWMutex // protects the above
|
||||
}
|
||||
|
||||
type folderFactory func(*Model, config.FolderConfiguration, versioner.Versioner, *fs.MtimeFS) service
|
||||
@@ -121,7 +121,6 @@ var (
|
||||
errDevicePaused = errors.New("device is paused")
|
||||
errDeviceIgnored = errors.New("device is ignored")
|
||||
errNotRelative = errors.New("not a relative path")
|
||||
errNotDir = errors.New("parent is not a directory")
|
||||
)
|
||||
|
||||
// NewModel creates and starts a new model. The model starts in read-only mode,
|
||||
@@ -134,33 +133,33 @@ func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName,
|
||||
l.Debugln(line)
|
||||
},
|
||||
}),
|
||||
cfg: cfg,
|
||||
db: ldb,
|
||||
finder: db.NewBlockFinder(ldb),
|
||||
progressEmitter: NewProgressEmitter(cfg),
|
||||
id: id,
|
||||
shortID: id.Short(),
|
||||
cacheIgnoredFiles: cfg.Options().CacheIgnoredFiles,
|
||||
protectedFiles: protectedFiles,
|
||||
deviceName: deviceName,
|
||||
clientName: clientName,
|
||||
clientVersion: clientVersion,
|
||||
folderCfgs: make(map[string]config.FolderConfiguration),
|
||||
folderFiles: make(map[string]*db.FileSet),
|
||||
folderDevices: make(folderDeviceSet),
|
||||
deviceFolders: make(map[protocol.DeviceID][]string),
|
||||
deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
|
||||
folderIgnores: make(map[string]*ignore.Matcher),
|
||||
folderRunners: make(map[string]service),
|
||||
folderRunnerTokens: make(map[string][]suture.ServiceToken),
|
||||
folderStatRefs: make(map[string]*stats.FolderStatisticsReference),
|
||||
conn: make(map[protocol.DeviceID]connections.Connection),
|
||||
closed: make(map[protocol.DeviceID]chan struct{}),
|
||||
helloMessages: make(map[protocol.DeviceID]protocol.HelloResult),
|
||||
devicePaused: make(map[protocol.DeviceID]bool),
|
||||
deviceDownloads: make(map[protocol.DeviceID]*deviceDownloadState),
|
||||
fmut: sync.NewRWMutex(),
|
||||
pmut: sync.NewRWMutex(),
|
||||
cfg: cfg,
|
||||
db: ldb,
|
||||
finder: db.NewBlockFinder(ldb),
|
||||
progressEmitter: NewProgressEmitter(cfg),
|
||||
id: id,
|
||||
shortID: id.Short(),
|
||||
cacheIgnoredFiles: cfg.Options().CacheIgnoredFiles,
|
||||
protectedFiles: protectedFiles,
|
||||
deviceName: deviceName,
|
||||
clientName: clientName,
|
||||
clientVersion: clientVersion,
|
||||
folderCfgs: make(map[string]config.FolderConfiguration),
|
||||
folderFiles: make(map[string]*db.FileSet),
|
||||
folderDevices: make(folderDeviceSet),
|
||||
deviceFolders: make(map[protocol.DeviceID][]string),
|
||||
deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
|
||||
folderIgnores: make(map[string]*ignore.Matcher),
|
||||
folderRunners: make(map[string]service),
|
||||
folderRunnerTokens: make(map[string][]suture.ServiceToken),
|
||||
folderStatRefs: make(map[string]*stats.FolderStatisticsReference),
|
||||
conn: make(map[protocol.DeviceID]connections.Connection),
|
||||
closed: make(map[protocol.DeviceID]chan struct{}),
|
||||
helloMessages: make(map[protocol.DeviceID]protocol.HelloResult),
|
||||
deviceDownloads: make(map[protocol.DeviceID]*deviceDownloadState),
|
||||
remotePausedFolders: make(map[protocol.DeviceID][]string),
|
||||
fmut: sync.NewRWMutex(),
|
||||
pmut: sync.NewRWMutex(),
|
||||
}
|
||||
if cfg.Options().ProgressUpdateIntervalS > -1 {
|
||||
go m.progressEmitter.Serve()
|
||||
@@ -183,10 +182,13 @@ func (m *Model) StartDeadlockDetector(timeout time.Duration) {
|
||||
// StartFolder constructs the folder service and starts it.
|
||||
func (m *Model) StartFolder(folder string) {
|
||||
m.fmut.Lock()
|
||||
m.pmut.Lock()
|
||||
folderType := m.startFolderLocked(folder)
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
m.pmut.Unlock()
|
||||
m.fmut.Unlock()
|
||||
|
||||
l.Infoln("Ready to synchronize", folder, fmt.Sprintf("(%s)", folderType))
|
||||
l.Infof("Ready to synchronize %s (%s)", folderCfg.Description(), folderType)
|
||||
}
|
||||
|
||||
func (m *Model) startFolderLocked(folder string) config.FolderType {
|
||||
@@ -217,6 +219,11 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
|
||||
}
|
||||
}
|
||||
|
||||
// Close connections to affected devices
|
||||
for _, id := range cfg.DeviceIDs() {
|
||||
m.closeLocked(id)
|
||||
}
|
||||
|
||||
v, ok := fs.Sequence(protocol.LocalDeviceID), true
|
||||
indexHasFiles := ok && v > 0
|
||||
if !indexHasFiles {
|
||||
@@ -265,7 +272,7 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
|
||||
}
|
||||
|
||||
func (m *Model) warnAboutOverwritingProtectedFiles(folder string) {
|
||||
if m.folderCfgs[folder].Type == config.FolderTypeReadOnly {
|
||||
if m.folderCfgs[folder].Type == config.FolderTypeSendOnly {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -322,6 +329,11 @@ func (m *Model) RemoveFolder(folder string) {
|
||||
m.fmut.Lock()
|
||||
m.pmut.Lock()
|
||||
|
||||
// Delete syncthing specific files
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
folderPath := folderCfg.Path()
|
||||
os.Remove(filepath.Join(folderPath, ".stfolder"))
|
||||
|
||||
m.tearDownFolderLocked(folder)
|
||||
// Remove it from the database
|
||||
db.DropFolder(m.db, folder)
|
||||
@@ -365,13 +377,16 @@ func (m *Model) RestartFolder(cfg config.FolderConfiguration) {
|
||||
m.pmut.Lock()
|
||||
|
||||
m.tearDownFolderLocked(cfg.ID)
|
||||
m.addFolderLocked(cfg)
|
||||
folderType := m.startFolderLocked(cfg.ID)
|
||||
if !cfg.Paused {
|
||||
m.addFolderLocked(cfg)
|
||||
folderType := m.startFolderLocked(cfg.ID)
|
||||
l.Infoln("Restarted folder", cfg.Description(), fmt.Sprintf("(%s)", folderType))
|
||||
} else {
|
||||
l.Infoln("Paused folder", cfg.Description())
|
||||
}
|
||||
|
||||
m.pmut.Unlock()
|
||||
m.fmut.Unlock()
|
||||
|
||||
l.Infoln("Restarted folder", cfg.ID, fmt.Sprintf("(%s)", folderType))
|
||||
}
|
||||
|
||||
type ConnectionInfo struct {
|
||||
@@ -404,7 +419,7 @@ func (m *Model) ConnectionStats() map[string]interface{} {
|
||||
res := make(map[string]interface{})
|
||||
devs := m.cfg.Devices()
|
||||
conns := make(map[string]ConnectionInfo, len(devs))
|
||||
for device := range devs {
|
||||
for device, deviceCfg := range devs {
|
||||
hello := m.helloMessages[device]
|
||||
versionString := hello.ClientVersion
|
||||
if hello.ClientName != "syncthing" {
|
||||
@@ -412,7 +427,7 @@ func (m *Model) ConnectionStats() map[string]interface{} {
|
||||
}
|
||||
ci := ConnectionInfo{
|
||||
ClientVersion: strings.TrimSpace(versionString),
|
||||
Paused: m.devicePaused[device],
|
||||
Paused: deviceCfg.Paused,
|
||||
}
|
||||
if conn, ok := m.conn[device]; ok {
|
||||
ci.Type = conn.Type()
|
||||
@@ -782,7 +797,17 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
|
||||
}
|
||||
|
||||
m.fmut.Lock()
|
||||
var paused []string
|
||||
for _, folder := range cm.Folders {
|
||||
if folder.Paused {
|
||||
paused = append(paused, folder.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
if cfg, ok := m.cfg.Folder(folder.ID); ok && cfg.Paused {
|
||||
continue
|
||||
}
|
||||
|
||||
if !m.folderSharedWithLocked(folder.ID, deviceID) {
|
||||
events.Default.Log(events.FolderRejected, map[string]string{
|
||||
"folder": folder.ID,
|
||||
@@ -870,6 +895,10 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
|
||||
go sendIndexes(conn, folder.ID, fs, m.folderIgnores[folder.ID], startSequence, dbLocation, dropSymlinks)
|
||||
}
|
||||
|
||||
m.pmut.Lock()
|
||||
m.remotePausedFolders[deviceID] = paused
|
||||
m.pmut.Unlock()
|
||||
|
||||
// This breaks if we send multiple CM messages during the same connection.
|
||||
if len(tempIndexFolders) > 0 {
|
||||
m.pmut.RLock()
|
||||
@@ -1057,6 +1086,7 @@ func (m *Model) Closed(conn protocol.Connection, err error) {
|
||||
delete(m.conn, device)
|
||||
delete(m.helloMessages, device)
|
||||
delete(m.deviceDownloads, device)
|
||||
delete(m.remotePausedFolders, device)
|
||||
closed := m.closed[device]
|
||||
delete(m.closed, device)
|
||||
m.pmut.Unlock()
|
||||
@@ -1072,9 +1102,13 @@ func (m *Model) Closed(conn protocol.Connection, err error) {
|
||||
// close will close the underlying connection for a given device
|
||||
func (m *Model) close(device protocol.DeviceID) {
|
||||
m.pmut.Lock()
|
||||
conn, ok := m.conn[device]
|
||||
m.closeLocked(device)
|
||||
m.pmut.Unlock()
|
||||
}
|
||||
|
||||
// closeLocked will close the underlying connection for a given device
|
||||
func (m *Model) closeLocked(device protocol.DeviceID) {
|
||||
conn, ok := m.conn[device]
|
||||
if !ok {
|
||||
// There is no connection to close
|
||||
return
|
||||
@@ -1124,8 +1158,8 @@ func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset
|
||||
return protocol.ErrNoSuchFile
|
||||
}
|
||||
|
||||
if !osutil.IsDir(folderPath, filepath.Dir(name)) {
|
||||
l.Debugf("%v REQ(in) for file not in dir: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
|
||||
if err := osutil.TraversesSymlink(folderPath, filepath.Dir(name)); err != nil {
|
||||
l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, len(buf))
|
||||
return protocol.ErrNoSuchFile
|
||||
}
|
||||
|
||||
@@ -1270,16 +1304,15 @@ func (m *Model) SetIgnores(folder string, content []string) error {
|
||||
// This allows us to extract some information from the Hello message
|
||||
// and add it to a list of known devices ahead of any checks.
|
||||
func (m *Model) OnHello(remoteID protocol.DeviceID, addr net.Addr, hello protocol.HelloResult) error {
|
||||
if m.IsPaused(remoteID) {
|
||||
return errDevicePaused
|
||||
}
|
||||
|
||||
if m.cfg.IgnoredDevice(remoteID) {
|
||||
return errDeviceIgnored
|
||||
}
|
||||
|
||||
if _, ok := m.cfg.Device(remoteID); ok {
|
||||
if cfg, ok := m.cfg.Device(remoteID); ok {
|
||||
// The device exists
|
||||
if cfg.Paused {
|
||||
return errDevicePaused
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1346,10 +1379,11 @@ func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloR
|
||||
l.Infof(`Device %s client is "%s %s" named "%s"`, deviceID, hello.ClientName, hello.ClientVersion, hello.DeviceName)
|
||||
|
||||
conn.Start()
|
||||
m.pmut.Unlock()
|
||||
|
||||
// Acquires fmut, so has to be done outside of pmut.
|
||||
cm := m.generateClusterConfig(deviceID)
|
||||
conn.ClusterConfig(cm)
|
||||
m.pmut.Unlock()
|
||||
|
||||
device, ok := m.cfg.Devices()[deviceID]
|
||||
if ok && (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) {
|
||||
@@ -1361,17 +1395,6 @@ func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloR
|
||||
m.deviceWasSeen(deviceID)
|
||||
}
|
||||
|
||||
func (m *Model) PauseDevice(device protocol.DeviceID) {
|
||||
m.pmut.Lock()
|
||||
m.devicePaused[device] = true
|
||||
conn, ok := m.conn[device]
|
||||
m.pmut.Unlock()
|
||||
if ok {
|
||||
closeRawConn(conn)
|
||||
}
|
||||
events.Default.Log(events.DevicePaused, map[string]string{"device": device.String()})
|
||||
}
|
||||
|
||||
func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, updates []protocol.FileDownloadProgressUpdate) {
|
||||
if !m.folderSharedWith(folder, device) {
|
||||
return
|
||||
@@ -1381,7 +1404,7 @@ func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, update
|
||||
cfg, ok := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
|
||||
if !ok || cfg.Type == config.FolderTypeReadOnly || cfg.DisableTempIndexes {
|
||||
if !ok || cfg.Type == config.FolderTypeSendOnly || cfg.DisableTempIndexes {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1397,20 +1420,6 @@ func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, update
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Model) ResumeDevice(device protocol.DeviceID) {
|
||||
m.pmut.Lock()
|
||||
m.devicePaused[device] = false
|
||||
m.pmut.Unlock()
|
||||
events.Default.Log(events.DeviceResumed, map[string]string{"device": device.String()})
|
||||
}
|
||||
|
||||
func (m *Model) IsPaused(device protocol.DeviceID) bool {
|
||||
m.pmut.Lock()
|
||||
paused := m.devicePaused[device]
|
||||
m.pmut.Unlock()
|
||||
return paused
|
||||
}
|
||||
|
||||
func (m *Model) deviceStatRef(deviceID protocol.DeviceID) *stats.DeviceStatisticsReference {
|
||||
m.fmut.Lock()
|
||||
defer m.fmut.Unlock()
|
||||
@@ -1563,12 +1572,18 @@ func (m *Model) updateLocalsFromScanning(folder string, fs []protocol.FileInfo)
|
||||
m.fmut.RLock()
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
// Fire the LocalChangeDetected event to notify listeners about local updates.
|
||||
m.localChangeDetected(folderCfg, fs)
|
||||
|
||||
m.diskChangeDetected(folderCfg, fs, events.LocalChangeDetected)
|
||||
}
|
||||
|
||||
func (m *Model) updateLocalsFromPulling(folder string, fs []protocol.FileInfo) {
|
||||
m.updateLocals(folder, fs)
|
||||
|
||||
m.fmut.RLock()
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
|
||||
m.diskChangeDetected(folderCfg, fs, events.RemoteChangeDetected)
|
||||
}
|
||||
|
||||
func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
|
||||
@@ -1594,7 +1609,7 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Model) localChangeDetected(folderCfg config.FolderConfiguration, files []protocol.FileInfo) {
|
||||
func (m *Model) diskChangeDetected(folderCfg config.FolderConfiguration, files []protocol.FileInfo, typeOfEvent events.EventType) {
|
||||
path := strings.Replace(folderCfg.Path(), `\\?\`, "", 1)
|
||||
|
||||
for _, file := range files {
|
||||
@@ -1623,12 +1638,14 @@ func (m *Model) localChangeDetected(folderCfg config.FolderConfiguration, files
|
||||
// for windows paths, strip unwanted chars from the front.
|
||||
path := filepath.Join(path, filepath.FromSlash(file.Name))
|
||||
|
||||
events.Default.Log(events.LocalChangeDetected, map[string]string{
|
||||
"folderID": folderCfg.ID,
|
||||
"label": folderCfg.Label,
|
||||
"action": action,
|
||||
"type": objType,
|
||||
"path": path,
|
||||
// Two different events can be fired here based on what EventType is passed into function
|
||||
events.Default.Log(typeOfEvent, map[string]string{
|
||||
"folderID": folderCfg.ID,
|
||||
"label": folderCfg.Label,
|
||||
"action": action,
|
||||
"type": objType,
|
||||
"path": path,
|
||||
"modifiedBy": file.ModifiedBy.String(),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1871,6 +1888,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error
|
||||
Size: f.Size,
|
||||
ModifiedS: f.ModifiedS,
|
||||
ModifiedNs: f.ModifiedNs,
|
||||
ModifiedBy: m.id.Short(),
|
||||
Permissions: f.Permissions,
|
||||
NoPermissions: f.NoPermissions,
|
||||
Invalid: true,
|
||||
@@ -1896,6 +1914,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error
|
||||
Size: 0,
|
||||
ModifiedS: f.ModifiedS,
|
||||
ModifiedNs: f.ModifiedNs,
|
||||
ModifiedBy: m.id.Short(),
|
||||
Deleted: true,
|
||||
Version: f.Version.Update(m.shortID),
|
||||
}
|
||||
@@ -1981,10 +2000,11 @@ func (m *Model) generateClusterConfig(device protocol.DeviceID) protocol.Cluster
|
||||
protocolFolder := protocol.Folder{
|
||||
ID: folder,
|
||||
Label: folderCfg.Label,
|
||||
ReadOnly: folderCfg.Type == config.FolderTypeReadOnly,
|
||||
ReadOnly: folderCfg.Type == config.FolderTypeSendOnly,
|
||||
IgnorePermissions: folderCfg.IgnorePerms,
|
||||
IgnoreDelete: folderCfg.IgnoreDelete,
|
||||
DisableTempIndexes: folderCfg.DisableTempIndexes,
|
||||
Paused: folderCfg.Paused,
|
||||
}
|
||||
|
||||
// Devices are sorted, so we always get the same order.
|
||||
@@ -2194,7 +2214,13 @@ func (m *Model) Availability(folder, file string, version protocol.Vector, block
|
||||
}
|
||||
|
||||
var availabilities []Availability
|
||||
next:
|
||||
for _, device := range fs.Availability(file) {
|
||||
for _, pausedFolder := range m.remotePausedFolders[device] {
|
||||
if pausedFolder == folder {
|
||||
continue next
|
||||
}
|
||||
}
|
||||
_, ok := m.conn[device]
|
||||
if ok {
|
||||
availabilities = append(availabilities, Availability{ID: device, FromTemporary: false})
|
||||
@@ -2352,16 +2378,6 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
l.Debugln(m, "adding folder", folderID)
|
||||
m.AddFolder(cfg)
|
||||
m.StartFolder(folderID)
|
||||
|
||||
// Drop connections to all devices that can now share the new
|
||||
// folder.
|
||||
m.pmut.Lock()
|
||||
for _, dev := range cfg.DeviceIDs() {
|
||||
if conn, ok := m.conn[dev]; ok {
|
||||
closeRawConn(conn)
|
||||
}
|
||||
}
|
||||
m.pmut.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2383,6 +2399,15 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
if !reflect.DeepEqual(fromCfgCopy, toCfgCopy) {
|
||||
m.RestartFolder(toCfg)
|
||||
}
|
||||
|
||||
// Emit the folder pause/resume event
|
||||
if fromCfg.Paused != toCfg.Paused {
|
||||
eventType := events.FolderResumed
|
||||
if toCfg.Paused {
|
||||
eventType = events.FolderPaused
|
||||
}
|
||||
events.Default.Log(eventType, map[string]string{"id": toCfg.ID, "label": toCfg.Label})
|
||||
}
|
||||
}
|
||||
|
||||
// Removing a device. We actually don't need to do anything.
|
||||
@@ -2392,6 +2417,24 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
// At some point model.Close() will get called for that device which will
|
||||
// clean residue device state that is not part of any folder.
|
||||
|
||||
// Pausing a device, unpausing is handled by the connection service.
|
||||
fromDevices := mapDeviceConfigs(from.Devices)
|
||||
toDevices := mapDeviceConfigs(to.Devices)
|
||||
for deviceID, toCfg := range toDevices {
|
||||
fromCfg, ok := fromDevices[deviceID]
|
||||
if !ok || fromCfg.Paused == toCfg.Paused {
|
||||
continue
|
||||
}
|
||||
|
||||
if toCfg.Paused {
|
||||
l.Infoln("Pausing", deviceID)
|
||||
m.close(deviceID)
|
||||
events.Default.Log(events.DevicePaused, map[string]string{"device": deviceID.String()})
|
||||
} else {
|
||||
events.Default.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
|
||||
}
|
||||
}
|
||||
|
||||
// Some options don't require restart as those components handle it fine
|
||||
// by themselves.
|
||||
from.Options.URAccepted = to.Options.URAccepted
|
||||
@@ -2399,6 +2442,9 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
from.Options.ListenAddresses = to.Options.ListenAddresses
|
||||
from.Options.RelaysEnabled = to.Options.RelaysEnabled
|
||||
from.Options.UnackedNotificationIDs = to.Options.UnackedNotificationIDs
|
||||
from.Options.MaxRecvKbps = to.Options.MaxRecvKbps
|
||||
from.Options.MaxSendKbps = to.Options.MaxSendKbps
|
||||
from.Options.LimitBandwidthInLan = to.Options.LimitBandwidthInLan
|
||||
// All of the other generic options require restart. Or at least they may;
|
||||
// removing this check requires going through those options carefully and
|
||||
// making sure there are individual services that handle them correctly.
|
||||
@@ -2433,6 +2479,16 @@ func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
|
||||
return m
|
||||
}
|
||||
|
||||
// mapDeviceConfigs returns a map of device ID to device configuration for the given
|
||||
// slice of device configurations.
|
||||
func mapDeviceConfigs(devices []config.DeviceConfiguration) map[protocol.DeviceID]config.DeviceConfiguration {
|
||||
m := make(map[protocol.DeviceID]config.DeviceConfiguration, len(devices))
|
||||
for _, dev := range devices {
|
||||
m[dev.DeviceID] = dev
|
||||
}
|
||||
return m
|
||||
}
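For illustration, a hedged sketch of the kind of comparison a helper like mapDeviceConfigs enables, mirroring the paused-device check in CommitConfiguration above. DeviceConfig and pausedChanged are stand-ins invented for this example, not the real config.DeviceConfiguration or any function in the diff.

package example

// DeviceConfig is a minimal stand-in for a device configuration entry.
type DeviceConfig struct {
    DeviceID string
    Paused   bool
}

// pausedChanged returns the IDs whose Paused flag differs between two maps of
// device configurations, as produced by a helper like mapDeviceConfigs.
func pausedChanged(from, to map[string]DeviceConfig) []string {
    var toggled []string
    for id, toCfg := range to {
        if fromCfg, ok := from[id]; ok && fromCfg.Paused != toCfg.Paused {
            toggled = append(toggled, id)
        }
    }
    return toggled
}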
|
||||
|
||||
func symlinkInvalid(folder string, fi db.FileIntf) bool {
|
||||
if !symlinks.Supported && fi.IsSymlink() && !fi.IsInvalid() && !fi.IsDeleted() {
|
||||
symlinkWarning.Do(func() {
|
||||
|
||||
@@ -1028,7 +1028,7 @@ func TestROScanRecovery(t *testing.T) {
|
||||
fcfg := config.FolderConfiguration{
|
||||
ID: "default",
|
||||
RawPath: "testdata/rotestfolder",
|
||||
Type: config.FolderTypeReadOnly,
|
||||
Type: config.FolderTypeSendOnly,
|
||||
RescanIntervalS: 1,
|
||||
}
|
||||
cfg := config.Wrap("/tmp/test", config.Configuration{
|
||||
@@ -1115,7 +1115,7 @@ func TestRWScanRecovery(t *testing.T) {
|
||||
fcfg := config.FolderConfiguration{
|
||||
ID: "default",
|
||||
RawPath: "testdata/rwtestfolder",
|
||||
Type: config.FolderTypeReadWrite,
|
||||
Type: config.FolderTypeSendReceive,
|
||||
RescanIntervalS: 1,
|
||||
}
|
||||
cfg := config.Wrap("/tmp/test", config.Configuration{
|
||||
@@ -1763,11 +1763,11 @@ func TestIssue3028(t *testing.T) {
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
|
||||
// Ugly hack for testing: reach into the model for the rwfolder and wait
|
||||
// Ugly hack for testing: reach into the model for the SendReceiveFolder and wait
|
||||
// for it to complete the initial scan. The risk is that it otherwise
|
||||
// runs during our modifications and screws up the test.
|
||||
m.fmut.RLock()
|
||||
folder := m.folderRunners["default"].(*rwFolder)
|
||||
folder := m.folderRunners["default"].(*sendReceiveFolder)
|
||||
m.fmut.RUnlock()
|
||||
<-folder.initialScanCompleted
|
||||
|
||||
@@ -1822,7 +1822,7 @@ func TestIssue3164(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fl := rwFolder{
|
||||
fl := sendReceiveFolder{
|
||||
dbUpdates: make(chan dbUpdateJob, 1),
|
||||
dir: "testdata",
|
||||
}
|
||||
@@ -2090,6 +2090,8 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIssue3496(t *testing.T) {
|
||||
t.Skip("This test deletes files that the other test depend on. Needs fixing.")
|
||||
|
||||
// It seems like lots of deleted files can cause negative completion
|
||||
// percentages. Lets make sure that doesn't happen. Also do some general
|
||||
// checks on the completion calculation stuff.
|
||||
@@ -2196,6 +2198,99 @@ func TestIssue3829(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoRequestsFromPausedDevices(t *testing.T) {
|
||||
t.Skip("broken, fails randomly, #3843")
|
||||
|
||||
dbi := db.OpenMemory()
|
||||
|
||||
fcfg := config.NewFolderConfiguration("default", "testdata")
|
||||
fcfg.Devices = []config.FolderDeviceConfiguration{
|
||||
{DeviceID: device1},
|
||||
{DeviceID: device2},
|
||||
}
|
||||
cfg := config.Configuration{
|
||||
Folders: []config.FolderConfiguration{fcfg},
|
||||
Devices: []config.DeviceConfiguration{
|
||||
config.NewDeviceConfiguration(device1, "device1"),
|
||||
config.NewDeviceConfiguration(device2, "device2"),
|
||||
},
|
||||
Options: config.OptionsConfiguration{
|
||||
// Don't remove temporaries directly on startup
|
||||
KeepTemporariesH: 1,
|
||||
},
|
||||
}
|
||||
|
||||
wcfg := config.Wrap("/tmp/test", cfg)
|
||||
|
||||
m := NewModel(wcfg, protocol.LocalDeviceID, "device", "syncthing", "dev", dbi, nil)
|
||||
m.AddFolder(fcfg)
|
||||
m.StartFolder(fcfg.ID)
|
||||
m.ServeBackground()
|
||||
|
||||
file := testDataExpected["foo"]
|
||||
files := m.folderFiles["default"]
|
||||
files.Update(device1, []protocol.FileInfo{file})
|
||||
files.Update(device2, []protocol.FileInfo{file})
|
||||
|
||||
avail := m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 0 {
|
||||
t.Errorf("should not be available, no connections")
|
||||
}
|
||||
|
||||
addFakeConn(m, device1)
|
||||
addFakeConn(m, device2)
|
||||
|
||||
// !!! This is not what I'd expect to happen, as we don't even know if the peer has the original index !!!
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 2 {
|
||||
t.Errorf("should have two available")
|
||||
}
|
||||
|
||||
cc := protocol.ClusterConfig{
|
||||
Folders: []protocol.Folder{
|
||||
{
|
||||
ID: "default",
|
||||
Devices: []protocol.Device{
|
||||
{ID: device1},
|
||||
{ID: device2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
m.ClusterConfig(device1, cc)
|
||||
m.ClusterConfig(device2, cc)
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 2 {
|
||||
t.Errorf("should have two available")
|
||||
}
|
||||
|
||||
m.Closed(&fakeConnection{id: device1}, errDeviceUnknown)
|
||||
m.Closed(&fakeConnection{id: device2}, errDeviceUnknown)
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 0 {
|
||||
t.Errorf("should have no available")
|
||||
}
|
||||
|
||||
// Test that remote paused folders are not used.
|
||||
|
||||
addFakeConn(m, device1)
|
||||
addFakeConn(m, device2)
|
||||
|
||||
m.ClusterConfig(device1, cc)
|
||||
ccp := cc
|
||||
ccp.Folders[0].Paused = true
|
||||
m.ClusterConfig(device1, ccp)
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 1 {
|
||||
t.Errorf("should have one available")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRootedJoinedPath(t *testing.T) {
|
||||
type testcase struct {
|
||||
root string
|
||||
|
||||
@@ -15,25 +15,27 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
folderFactories[config.FolderTypeReadOnly] = newROFolder
|
||||
folderFactories[config.FolderTypeSendOnly] = newSendOnlyFolder
|
||||
}
|
||||
|
||||
type roFolder struct {
|
||||
type sendOnlyFolder struct {
|
||||
folder
|
||||
config.FolderConfiguration
|
||||
}
|
||||
|
||||
func newROFolder(model *Model, cfg config.FolderConfiguration, _ versioner.Versioner, _ *fs.MtimeFS) service {
|
||||
return &roFolder{
|
||||
func newSendOnlyFolder(model *Model, cfg config.FolderConfiguration, _ versioner.Versioner, _ *fs.MtimeFS) service {
|
||||
return &sendOnlyFolder{
|
||||
folder: folder{
|
||||
stateTracker: newStateTracker(cfg.ID),
|
||||
scan: newFolderScanner(cfg),
|
||||
stop: make(chan struct{}),
|
||||
model: model,
|
||||
},
|
||||
FolderConfiguration: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *roFolder) Serve() {
|
||||
func (f *sendOnlyFolder) Serve() {
|
||||
l.Debugln(f, "starting")
|
||||
defer l.Debugln(f, "exiting")
|
||||
|
||||
@@ -49,7 +51,7 @@ func (f *roFolder) Serve() {
|
||||
|
||||
case <-f.scan.timer.C:
|
||||
if err := f.model.CheckFolderHealth(f.folderID); err != nil {
|
||||
l.Infoln("Skipping folder", f.folderID, "scan due to folder error:", err)
|
||||
l.Infoln("Skipping scan of", f.Description(), "due to folder error:", err)
|
||||
f.scan.Reschedule()
|
||||
continue
|
||||
}
|
||||
@@ -67,7 +69,7 @@ func (f *roFolder) Serve() {
|
||||
}
|
||||
|
||||
if !initialScanCompleted {
|
||||
l.Infoln("Completed initial scan (ro) of folder", f.folderID)
|
||||
l.Infoln("Completed initial scan (ro) of", f.Description())
|
||||
initialScanCompleted = true
|
||||
}
|
||||
|
||||
@@ -86,6 +88,6 @@ func (f *roFolder) Serve() {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *roFolder) String() string {
|
||||
return fmt.Sprintf("roFolder/%s@%p", f.folderID, f)
|
||||
func (f *sendOnlyFolder) String() string {
|
||||
return fmt.Sprintf("sendOnlyFolder/%s@%p", f.folderID, f)
|
||||
}
|
||||
|
||||
@@ -28,10 +28,11 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/symlinks"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/versioner"
|
||||
"github.com/syncthing/syncthing/lib/weakhash"
|
||||
)
|
||||
|
||||
func init() {
|
||||
folderFactories[config.FolderTypeReadWrite] = newRWFolder
|
||||
folderFactories[config.FolderTypeSendReceive] = newSendReceiveFolder
|
||||
}
|
||||
|
||||
// A pullBlockState is passed to the puller routine for each block that needs
|
||||
@@ -46,6 +47,7 @@ type pullBlockState struct {
|
||||
type copyBlocksState struct {
|
||||
*sharedPullerState
|
||||
blocks []protocol.BlockInfo
|
||||
have int
|
||||
}
|
||||
|
||||
// Which filemode bits to preserve
|
||||
@@ -77,24 +79,15 @@ type dbUpdateJob struct {
|
||||
jobType int
|
||||
}
|
||||
|
||||
type rwFolder struct {
|
||||
type sendReceiveFolder struct {
|
||||
folder
|
||||
config.FolderConfiguration
|
||||
|
||||
mtimeFS *fs.MtimeFS
|
||||
dir string
|
||||
versioner versioner.Versioner
|
||||
ignorePerms bool
|
||||
order config.PullOrder
|
||||
maxConflicts int
|
||||
sleep time.Duration
|
||||
pause time.Duration
|
||||
allowSparse bool
|
||||
checkFreeSpace bool
|
||||
ignoreDelete bool
|
||||
fsync bool
|
||||
|
||||
copiers int
|
||||
pullers int
|
||||
mtimeFS *fs.MtimeFS
|
||||
dir string
|
||||
versioner versioner.Versioner
|
||||
sleep time.Duration
|
||||
pause time.Duration
|
||||
|
||||
queue *jobQueue
|
||||
dbUpdates chan dbUpdateJob
|
||||
@@ -107,27 +100,19 @@ type rwFolder struct {
|
||||
initialScanCompleted chan (struct{}) // exposed for testing
|
||||
}
|
||||
|
||||
func newRWFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, mtimeFS *fs.MtimeFS) service {
|
||||
f := &rwFolder{
|
||||
func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, mtimeFS *fs.MtimeFS) service {
|
||||
f := &sendReceiveFolder{
|
||||
folder: folder{
|
||||
stateTracker: newStateTracker(cfg.ID),
|
||||
scan: newFolderScanner(cfg),
|
||||
stop: make(chan struct{}),
|
||||
model: model,
|
||||
},
|
||||
FolderConfiguration: cfg,
|
||||
|
||||
mtimeFS: mtimeFS,
|
||||
dir: cfg.Path(),
|
||||
versioner: ver,
|
||||
ignorePerms: cfg.IgnorePerms,
|
||||
copiers: cfg.Copiers,
|
||||
pullers: cfg.Pullers,
|
||||
order: cfg.Order,
|
||||
maxConflicts: cfg.MaxConflicts,
|
||||
allowSparse: !cfg.DisableSparseFiles,
|
||||
checkFreeSpace: cfg.MinDiskFreePct != 0,
|
||||
ignoreDelete: cfg.IgnoreDelete,
|
||||
fsync: cfg.Fsync,
|
||||
mtimeFS: mtimeFS,
|
||||
dir: cfg.Path(),
|
||||
versioner: ver,
|
||||
|
||||
queue: newJobQueue(),
|
||||
pullTimer: time.NewTimer(time.Second),
|
||||
@@ -138,42 +123,42 @@ func newRWFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Ver
|
||||
initialScanCompleted: make(chan struct{}),
|
||||
}
|
||||
|
||||
f.configureCopiersAndPullers(cfg)
|
||||
f.configureCopiersAndPullers()
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *rwFolder) configureCopiersAndPullers(cfg config.FolderConfiguration) {
|
||||
if f.copiers == 0 {
|
||||
f.copiers = defaultCopiers
|
||||
func (f *sendReceiveFolder) configureCopiersAndPullers() {
|
||||
if f.Copiers == 0 {
|
||||
f.Copiers = defaultCopiers
|
||||
}
|
||||
if f.pullers == 0 {
|
||||
f.pullers = defaultPullers
|
||||
if f.Pullers == 0 {
|
||||
f.Pullers = defaultPullers
|
||||
}
|
||||
|
||||
if cfg.PullerPauseS == 0 {
|
||||
if f.PullerPauseS == 0 {
|
||||
f.pause = defaultPullerPause
|
||||
} else {
|
||||
f.pause = time.Duration(cfg.PullerPauseS) * time.Second
|
||||
f.pause = time.Duration(f.PullerPauseS) * time.Second
|
||||
}
|
||||
|
||||
if cfg.PullerSleepS == 0 {
|
||||
if f.PullerSleepS == 0 {
|
||||
f.sleep = defaultPullerSleep
|
||||
} else {
|
||||
f.sleep = time.Duration(cfg.PullerSleepS) * time.Second
|
||||
f.sleep = time.Duration(f.PullerSleepS) * time.Second
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to check whether either the ignorePerm flag has been
|
||||
// set on the local host or the FlagNoPermBits has been set on the file/dir
|
||||
// which is being pulled.
|
||||
func (f *rwFolder) ignorePermissions(file protocol.FileInfo) bool {
|
||||
return f.ignorePerms || file.NoPermissions
|
||||
func (f *sendReceiveFolder) ignorePermissions(file protocol.FileInfo) bool {
|
||||
return f.IgnorePerms || file.NoPermissions
|
||||
}
|
||||
|
||||
// Serve will run scans and pulls. It will return when Stop()ed or on a
|
||||
// critical error.
|
||||
func (f *rwFolder) Serve() {
|
||||
func (f *sendReceiveFolder) Serve() {
|
||||
l.Debugln(f, "starting")
|
||||
defer l.Debugln(f, "exiting")
|
||||
|
||||
@@ -228,7 +213,7 @@ func (f *rwFolder) Serve() {
|
||||
}
|
||||
|
||||
if err := f.model.CheckFolderHealth(f.folderID); err != nil {
|
||||
l.Infoln("Skipping folder", f.folderID, "pull due to folder error:", err)
|
||||
l.Infoln("Skipping pull of", f.Description(), "due to folder error:", err)
|
||||
f.pullTimer.Reset(f.sleep)
|
||||
continue
|
||||
}
|
||||
@@ -301,7 +286,7 @@ func (f *rwFolder) Serve() {
|
||||
select {
|
||||
case <-f.initialScanCompleted:
|
||||
default:
|
||||
l.Infoln("Completed initial scan (rw) of folder", f.folderID)
|
||||
l.Infoln("Completed initial scan (rw) of", f.Description())
|
||||
close(f.initialScanCompleted)
|
||||
}
|
||||
|
||||
@@ -314,7 +299,7 @@ func (f *rwFolder) Serve() {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *rwFolder) IndexUpdated() {
|
||||
func (f *sendReceiveFolder) IndexUpdated() {
|
||||
select {
|
||||
case f.remoteIndex <- struct{}{}:
|
||||
default:
|
||||
@@ -325,15 +310,15 @@ func (f *rwFolder) IndexUpdated() {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *rwFolder) String() string {
|
||||
return fmt.Sprintf("rwFolder/%s@%p", f.folderID, f)
|
||||
func (f *sendReceiveFolder) String() string {
|
||||
return fmt.Sprintf("sendReceiveFolder/%s@%p", f.folderID, f)
|
||||
}
|
||||
|
||||
// pullerIteration runs a single puller iteration for the given folder and
|
||||
// returns the number items that should have been synced (even those that
|
||||
// might have failed). One puller iteration handles all files currently
|
||||
// flagged as needed in the folder.
|
||||
func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
pullChan := make(chan pullBlockState)
|
||||
copyChan := make(chan copyBlocksState)
|
||||
finisherChan := make(chan *sharedPullerState)
|
||||
@@ -343,7 +328,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
pullWg := sync.NewWaitGroup()
|
||||
doneWg := sync.NewWaitGroup()
|
||||
|
||||
l.Debugln(f, "c", f.copiers, "p", f.pullers)
|
||||
l.Debugln(f, "c", f.Copiers, "p", f.Pullers)
|
||||
|
||||
f.dbUpdates = make(chan dbUpdateJob)
|
||||
updateWg.Add(1)
|
||||
@@ -353,7 +338,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
updateWg.Done()
|
||||
}()
|
||||
|
||||
for i := 0; i < f.copiers; i++ {
|
||||
for i := 0; i < f.Copiers; i++ {
|
||||
copyWg.Add(1)
|
||||
go func() {
|
||||
// copierRoutine finishes when copyChan is closed
|
||||
@@ -362,7 +347,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < f.pullers; i++ {
|
||||
for i := 0; i < f.Pullers; i++ {
|
||||
pullWg.Add(1)
|
||||
go func() {
|
||||
// pullerRoutine finishes when pullChan is closed
|
||||
@@ -391,7 +376,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
// pile.
|
||||
|
||||
folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
|
||||
if shouldIgnore(intf, ignores, f.ignoreDelete, defTempNamer) {
|
||||
if shouldIgnore(intf, ignores, f.IgnoreDelete, defTempNamer) {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -446,8 +431,8 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
for _, fi := range processDirectly {
|
||||
// Verify that the thing we are handling lives inside a directory,
|
||||
// and not a symlink or empty space.
|
||||
if !osutil.IsDir(f.dir, filepath.Dir(fi.Name)) {
|
||||
f.newError(fi.Name, errNotDir)
|
||||
if err := osutil.TraversesSymlink(f.dir, filepath.Dir(fi.Name)); err != nil {
|
||||
f.newError(fi.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -488,7 +473,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
|
||||
// Now do the file queue. Reorder it according to configuration.
|
||||
|
||||
switch f.order {
|
||||
switch f.Order {
|
||||
case config.OrderRandom:
|
||||
f.queue.Shuffle()
|
||||
case config.OrderAlphabetic:
|
||||
@@ -510,7 +495,7 @@ nextFile:
|
||||
select {
|
||||
case <-f.stop:
|
||||
// Stop processing files if the puller has been told to stop.
|
||||
break
|
||||
break nextFile
|
||||
default:
|
||||
}
|
||||
|
||||
@@ -535,8 +520,8 @@ nextFile:
|
||||
|
||||
// Verify that the thing we are handling lives inside a directory,
|
||||
// and not a symlink or empty space.
|
||||
if !osutil.IsDir(f.dir, filepath.Dir(fi.Name)) {
|
||||
f.newError(fi.Name, errNotDir)
|
||||
if err := osutil.TraversesSymlink(f.dir, filepath.Dir(fi.Name)); err != nil {
|
||||
f.newError(fi.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -600,7 +585,7 @@ nextFile:
|
||||
}
|
||||
|
||||
// handleDir creates or updates the given directory
|
||||
func (f *rwFolder) handleDir(file protocol.FileInfo) {
|
func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -702,7 +687,7 @@ func (f *rwFolder) handleDir(file protocol.FileInfo) {
}

// handleSymlink creates or updates the given symlink
func (f *rwFolder) handleSymlink(file protocol.FileInfo) {
func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -776,7 +761,7 @@ func (f *rwFolder) handleSymlink(file protocol.FileInfo) {
}

// deleteDir attempts to delete the given directory
func (f *rwFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -834,7 +819,7 @@ func (f *rwFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
}

// deleteFile attempts to delete the given file
func (f *rwFolder) deleteFile(file protocol.FileInfo) {
func (f *sendReceiveFolder) deleteFile(file protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -892,7 +877,7 @@ func (f *rwFolder) deleteFile(file protocol.FileInfo) {

// renameFile attempts to rename an existing file to a destination
// and set the right attributes on it.
func (f *rwFolder) renameFile(source, target protocol.FileInfo) {
func (f *sendReceiveFolder) renameFile(source, target protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -1016,10 +1001,12 @@ func (f *rwFolder) renameFile(source, target protocol.FileInfo) {

// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)

if hasCurFile && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
have, need := scanner.BlockDiff(curFile.Blocks, file.Blocks)

if hasCurFile && len(need) == 0 {
// We are supposed to copy the entire file, and then fetch nothing. We
// are only updating metadata, so we don't actually *need* to make the
// copy.
@@ -1130,7 +1117,7 @@ func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
blocksSize = file.Size
}

if f.checkFreeSpace {
if f.MinDiskFreePct > 0 {
if free, err := osutil.DiskFreeBytes(f.dir); err == nil && free < blocksSize {
l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
f.newError(file.Name, errors.New("insufficient space"))
@@ -1165,22 +1152,23 @@ func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
ignorePerms: f.ignorePermissions(file),
version: curFile.Version,
mut: sync.NewRWMutex(),
sparse: f.allowSparse,
sparse: !f.DisableSparseFiles,
created: time.Now(),
}

l.Debugf("%v need file %s; copy %d, reused %v", f, file.Name, len(blocks), reused)
l.Debugf("%v need file %s; copy %d, reused %v", f, file.Name, len(blocks), len(reused))

cs := copyBlocksState{
sharedPullerState: &s,
blocks: blocks,
have: len(have),
}
copyChan <- cs
}

// shortcutFile sets file mode and modification time, when that's the only
// thing that has changed.
func (f *rwFolder) shortcutFile(file protocol.FileInfo) error {
func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo) error {
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
@@ -1207,7 +1195,7 @@ func (f *rwFolder) shortcutFile(file protocol.FileInfo) error {

// copierRoutine reads copierStates until the in channel closes and performs
// the relevant copies when possible, or passes it to the puller routine.
func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
buf := make([]byte, protocol.BlockSize)

for state := range in {
@@ -1231,8 +1219,28 @@ func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pull
}
f.model.fmut.RUnlock()

var weakHashFinder *weakhash.Finder
blocksPercentChanged := 0
if tot := len(state.file.Blocks); tot > 0 {
blocksPercentChanged = (tot - state.have) * 100 / tot
}

if blocksPercentChanged >= f.WeakHashThresholdPct {
hashesToFind := make([]uint32, 0, len(state.blocks))
for _, block := range state.blocks {
if block.WeakHash != 0 {
hashesToFind = append(hashesToFind, block.WeakHash)
}
}

weakHashFinder, err = weakhash.NewFinder(state.realName, protocol.BlockSize, hashesToFind)
if err != nil {
l.Debugln("weak hasher", err)
}
}

for _, block := range state.blocks {
if f.allowSparse && state.reused == 0 && block.IsEmpty() {
if !f.DisableSparseFiles && state.reused == 0 && block.IsEmpty() {
// The block is a block of all zeroes, and we are not reusing
// a temp file, so there is no need to do anything with it.
// If we were reusing a temp file and had this block to copy,
@@ -1245,45 +1253,70 @@ func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pull
}

buf = buf[:int(block.Size)]
found := f.model.finder.Iterate(folders, block.Hash, func(folder, file string, index int32) bool {
inFile, err := rootedJoinedPath(folderRoots[folder], file)
if err != nil {
return false
}
fd, err := os.Open(inFile)
if err != nil {
return false
}

_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
fd.Close()
if err != nil {
return false
}

hash, err := scanner.VerifyBuffer(buf, block)
if err != nil {
if hash != nil {
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
err = f.model.finder.Fix(folder, file, index, block.Hash, hash)
if err != nil {
l.Warnln("finder fix:", err)
}
} else {
l.Debugln("Finder failed to verify buffer", err)
}
return false
found, err := weakHashFinder.Iterate(block.WeakHash, buf, func(offset int64) bool {
if _, err := scanner.VerifyBuffer(buf, block); err != nil {
return true
}

_, err = dstFd.WriteAt(buf, block.Offset)
if err != nil {
state.fail("dst write", err)

}
if file == state.file.Name {
if offset == block.Offset {
state.copiedFromOrigin()
} else {
state.copiedFromOriginShifted()
}
return true

return false
})
if err != nil {
l.Debugln("weak hasher iter", err)
}

if !found {
found = f.model.finder.Iterate(folders, block.Hash, func(folder, file string, index int32) bool {
inFile, err := rootedJoinedPath(folderRoots[folder], file)
if err != nil {
return false
}
fd, err := os.Open(inFile)
if err != nil {
return false
}

_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
fd.Close()
if err != nil {
return false
}

hash, err := scanner.VerifyBuffer(buf, block)
if err != nil {
if hash != nil {
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
err = f.model.finder.Fix(folder, file, index, block.Hash, hash)
if err != nil {
l.Warnln("finder fix:", err)
}
} else {
l.Debugln("Finder failed to verify buffer", err)
}
return false
}

_, err = dstFd.WriteAt(buf, block.Offset)
if err != nil {
state.fail("dst write", err)
}
if file == state.file.Name {
state.copiedFromOrigin()
}
return true
})
}

if state.failed() != nil {
break
@@ -1300,11 +1333,12 @@ func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pull
state.copyDone(block)
}
}
weakHashFinder.Close()
out <- state.sharedPullerState
}
}
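The weak hash integration above leans on the lib/weakhash package: the copier builds a Finder over the existing file on disk, asks it for candidate offsets whose rolling hash matches a wanted block, verifies the candidate with the strong hash, and only then copies it. The sketch below uses only the calls visible in this diff (weakhash.NewFinder, Finder.Iterate, Finder.Close); the file name, the wantedWeakHashes input, and the print statements are illustrative assumptions rather than part of the change.

```go
// Sketch: locate regions of "old.bin" that can be reused when rebuilding a
// changed file, keyed by their weak (rolling) hashes. The NewFinder, Iterate
// and Close calls mirror the copier routine above; error handling and the
// strong-hash verification step are simplified placeholders.
package example

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/weakhash"
)

func reuseCandidates(wantedWeakHashes []uint32) error {
	finder, err := weakhash.NewFinder("old.bin", protocol.BlockSize, wantedWeakHashes)
	if err != nil {
		return err
	}
	defer finder.Close()

	buf := make([]byte, protocol.BlockSize)
	for _, wh := range wantedWeakHashes {
		// The copier above verifies buf against the block's strong hash inside
		// the callback, implying Iterate loads the matching region into buf.
		// Returning false from the callback stops scanning for this hash,
		// which is how the copier stops after the first verified match.
		found, err := finder.Iterate(wh, buf, func(offset int64) bool {
			// A real caller would verify buf here (scanner.VerifyBuffer in the
			// diff) and copy it into the destination on success.
			fmt.Printf("weak hash %08x matches at offset %d\n", wh, offset)
			return false
		})
		if err != nil {
			return err
		}
		if !found {
			fmt.Printf("no candidate for weak hash %08x\n", wh)
		}
	}
	return nil
}
```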
func (f *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
for state := range in {
if state.failed() != nil {
out <- state.sharedPullerState
@@ -1320,7 +1354,7 @@ func (f *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPul
continue
}

if f.allowSparse && state.reused == 0 && state.block.IsEmpty() {
if !f.DisableSparseFiles && state.reused == 0 && state.block.IsEmpty() {
// There is no need to request a block of all zeroes. Pretend we
// requested it and handled it correctly.
state.pullDone(state.block)
@@ -1377,7 +1411,7 @@ func (f *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPul
}
}

func (f *rwFolder) performFinish(state *sharedPullerState) error {
func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
// Set the correct permission bits on the new file
if !f.ignorePermissions(state.file) {
if err := os.Chmod(state.tempName, os.FileMode(state.file.Permissions&0777)); err != nil {
@@ -1439,7 +1473,7 @@ func (f *rwFolder) performFinish(state *sharedPullerState) error {
return nil
}

func (f *rwFolder) finisherRoutine(in <-chan *sharedPullerState) {
func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState) {
for state := range in {
if closed, err := state.finalClose(); closed {
l.Debugln(f, "closing", state.file.Name)
@@ -1470,17 +1504,17 @@ func (f *rwFolder) finisherRoutine(in <-chan *sharedPullerState) {
}

// Moves the given filename to the front of the job queue
func (f *rwFolder) BringToFront(filename string) {
func (f *sendReceiveFolder) BringToFront(filename string) {
f.queue.BringToFront(filename)
}

func (f *rwFolder) Jobs() ([]string, []string) {
func (f *sendReceiveFolder) Jobs() ([]string, []string) {
return f.queue.Jobs()
}

// dbUpdaterRoutine aggregates db updates and commits them in batches no
// larger than 1000 items, and no more delayed than 2 seconds.
func (f *rwFolder) dbUpdaterRoutine() {
func (f *sendReceiveFolder) dbUpdaterRoutine() {
const (
maxBatchSize = 1000
maxBatchTime = 2 * time.Second
@@ -1493,7 +1527,7 @@ func (f *rwFolder) dbUpdaterRoutine() {

var changedFiles []string
var changedDirs []string
if f.fsync {
if f.Fsync {
changedFiles = make([]string, 0, maxBatchSize)
changedDirs = make([]string, 0, maxBatchSize)
}
@@ -1518,7 +1552,7 @@ func (f *rwFolder) dbUpdaterRoutine() {

for _, job := range batch {
files = append(files, job.file)
if f.fsync {
if f.Fsync {
// collect changed files and dirs
switch job.jobType {
case dbUpdateHandleFile, dbUpdateShortcutFile:
@@ -1544,7 +1578,7 @@ func (f *rwFolder) dbUpdaterRoutine() {
lastFile = job.file
}

if f.fsync {
if f.Fsync {
// sync files and dirs to disk
syncFilesOnce(changedFiles, osutil.SyncFile)
changedFiles = changedFiles[:0]
@@ -1591,7 +1625,7 @@ loop:
}
}
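The dbUpdaterRoutine comment above describes a batch-or-timeout pattern: commit when the batch reaches 1000 items or when 2 seconds have passed, whichever happens first. A minimal generic sketch of that shape is below; the dbUpdateJob type, the input channel, and the commit callback are placeholders, not the model's actual internals.

```go
// Sketch of the "no more than 1000 items, no more than 2 seconds" batching
// policy described above. dbUpdateJob and commit are illustrative stand-ins.
package example

import "time"

type dbUpdateJob struct{ file string }

func batchUpdates(in <-chan dbUpdateJob, commit func([]dbUpdateJob)) {
	const (
		maxBatchSize = 1000
		maxBatchTime = 2 * time.Second
	)

	batch := make([]dbUpdateJob, 0, maxBatchSize)
	tick := time.NewTicker(maxBatchTime)
	defer tick.Stop()

	flush := func() {
		if len(batch) > 0 {
			commit(batch)
			batch = batch[:0]
		}
	}

	for {
		select {
		case job, ok := <-in:
			if !ok {
				flush() // commit whatever is left, then exit
				return
			}
			batch = append(batch, job)
			if len(batch) == maxBatchSize {
				flush() // size limit reached
			}
		case <-tick.C:
			flush() // time limit reached
		}
	}
}
```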
func (f *rwFolder) inConflict(current, replacement protocol.Vector) bool {
func (f *sendReceiveFolder) inConflict(current, replacement protocol.Vector) bool {
if current.Concurrent(replacement) {
// Obvious case
return true
@@ -1617,7 +1651,7 @@ func removeAvailability(availabilities []Availability, availability Availability
return availabilities
}

func (f *rwFolder) moveForConflict(name string) error {
func (f *sendReceiveFolder) moveForConflict(name string) error {
if strings.Contains(filepath.Base(name), ".sync-conflict-") {
l.Infoln("Conflict for", name, "which is already a conflict copy; not copying again.")
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
@@ -1626,7 +1660,7 @@ func (f *rwFolder) moveForConflict(name string) error {
return nil
}

if f.maxConflicts == 0 {
if f.MaxConflicts == 0 {
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
return err
}
@@ -1644,11 +1678,11 @@ func (f *rwFolder) moveForConflict(name string) error {
// matter, go ahead as if the move succeeded.
err = nil
}
if f.maxConflicts > -1 {
if f.MaxConflicts > -1 {
matches, gerr := osutil.Glob(withoutExt + ".sync-conflict-????????-??????" + ext)
if gerr == nil && len(matches) > f.maxConflicts {
if gerr == nil && len(matches) > f.MaxConflicts {
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
for _, match := range matches[f.maxConflicts:] {
for _, match := range matches[f.MaxConflicts:] {
gerr = os.Remove(match)
if gerr != nil {
l.Debugln(f, "removing extra conflict", gerr)
@@ -1661,7 +1695,7 @@ func (f *rwFolder) moveForConflict(name string) error {
return err
}
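The glob in moveForConflict above, ".sync-conflict-????????-??????", matches conflict copies tagged with an eight digit date and a six digit time. A minimal sketch of building such a name, with the timestamp layout inferred from that pattern rather than quoted from the change:

```go
// Sketch: derive a conflict-copy name of the form
// "report.sync-conflict-20170215-093045.txt" from an original path, matching
// the ????????-?????? glob used above. The exact timestamp layout is an
// inference from that pattern, not part of this diff.
package example

import (
	"path/filepath"
	"strings"
	"time"
)

func conflictName(name string, now time.Time) string {
	ext := filepath.Ext(name)
	withoutExt := strings.TrimSuffix(name, ext)
	return withoutExt + ".sync-conflict-" + now.Format("20060102-150405") + ext
}
```

For "notes.txt" at 2017-02-15 09:30:45 this would yield "notes.sync-conflict-20170215-093045.txt", which the MaxConflicts pruning above would then count against the configured limit.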
func (f *rwFolder) newError(path string, err error) {
func (f *sendReceiveFolder) newError(path string, err error) {
f.errorsMut.Lock()
defer f.errorsMut.Unlock()

@@ -1675,13 +1709,13 @@ func (f *rwFolder) newError(path string, err error) {
f.errors[path] = err.Error()
}

func (f *rwFolder) clearErrors() {
func (f *sendReceiveFolder) clearErrors() {
f.errorsMut.Lock()
f.errors = make(map[string]string)
f.errorsMut.Unlock()
}

func (f *rwFolder) currentErrors() []fileError {
func (f *sendReceiveFolder) currentErrors() []fileError {
f.errorsMut.Lock()
errors := make([]fileError, 0, len(f.errors))
for path, err := range f.errors {

@@ -7,12 +7,15 @@
package model

import (
"crypto/rand"
"io"
"os"
"path/filepath"
"testing"
"time"

"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/sync"
@@ -66,12 +69,14 @@ func setUpModel(file protocol.FileInfo) *Model {
return model
}

func setUpRwFolder(model *Model) rwFolder {
return rwFolder{
func setUpSendReceiveFolder(model *Model) sendReceiveFolder {
return sendReceiveFolder{
folder: folder{
stateTracker: newStateTracker("default"),
model: model,
},

mtimeFS: fs.NewMtimeFS(db.NewNamespacedKV(model.db, "mtime")),
dir: "testdata",
queue: newJobQueue(),
errors: make(map[string]string),
@@ -95,7 +100,7 @@ func TestHandleFile(t *testing.T) {
requiredFile.Blocks = blocks[1:]

m := setUpModel(existingFile)
f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState, 1)

f.handleFile(requiredFile, copyChan, nil)
@@ -136,7 +141,7 @@ func TestHandleFileWithTemp(t *testing.T) {
requiredFile.Blocks = blocks[1:]

m := setUpModel(existingFile)
f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState, 1)

f.handleFile(requiredFile, copyChan, nil)
@@ -184,7 +189,7 @@ func TestCopierFinder(t *testing.T) {
requiredFile.Name = "file2"

m := setUpModel(existingFile)
f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, 4)
finisherChan := make(chan *sharedPullerState, 1)
@@ -199,7 +204,7 @@ func TestCopierFinder(t *testing.T) {

select {
case <-pullChan:
t.Fatal("Finisher channel has data to be read")
t.Fatal("Pull channel has data to be read")
case <-finisherChan:
t.Fatal("Finisher channel has data to be read")
default:
@@ -240,6 +245,133 @@ func TestCopierFinder(t *testing.T) {
os.Remove(tempFile)
}

func TestWeakHash(t *testing.T) {
tempFile := filepath.Join("testdata", defTempNamer.TempName("weakhash"))
var shift int64 = 10
var size int64 = 1 << 20
expectBlocks := int(size / protocol.BlockSize)
expectPulls := int(shift / protocol.BlockSize)
if shift > 0 {
expectPulls++
}

cleanup := func() {
for _, path := range []string{tempFile, "testdata/weakhash"} {
os.Remove(path)
}
}

cleanup()
defer cleanup()

f, err := os.Create("testdata/weakhash")
if err != nil {
t.Error(err)
}
defer f.Close()
_, err = io.CopyN(f, rand.Reader, size)
if err != nil {
t.Error(err)
}
info, err := f.Stat()
if err != nil {
t.Error(err)
}

// Create two files, second file has `shifted` bytes random prefix, yet
// both are of the same length, for example:
// File 1: abcdefgh
// File 2: xyabcdef
f.Seek(0, os.SEEK_SET)
existing, err := scanner.Blocks(f, protocol.BlockSize, size, nil)
if err != nil {
t.Error(err)
}

f.Seek(0, os.SEEK_SET)
remainder := io.LimitReader(f, size-shift)
prefix := io.LimitReader(rand.Reader, shift)
nf := io.MultiReader(prefix, remainder)
desired, err := scanner.Blocks(nf, protocol.BlockSize, size, nil)
if err != nil {
t.Error(err)
}

existingFile := protocol.FileInfo{
Name: "weakhash",
Blocks: existing,
Size: size,
ModifiedS: info.ModTime().Unix(),
ModifiedNs: int32(info.ModTime().Nanosecond()),
}
desiredFile := protocol.FileInfo{
Name: "weakhash",
Size: size,
Blocks: desired,
ModifiedS: info.ModTime().Unix() + 1,
}

// Setup the model/pull environment
m := setUpModel(existingFile)
fo := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, expectBlocks)
finisherChan := make(chan *sharedPullerState, 1)

// Run a single fetcher routine
go fo.copierRoutine(copyChan, pullChan, finisherChan)

// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
fo.WeakHashThresholdPct = 101
fo.handleFile(desiredFile, copyChan, finisherChan)

var pulls []pullBlockState
for len(pulls) < expectBlocks {
select {
case pull := <-pullChan:
pulls = append(pulls, pull)
case <-time.After(10 * time.Second):
t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
}
}
finish := <-finisherChan

select {
case <-pullChan:
t.Fatal("Pull channel has data to be read")
case <-finisherChan:
t.Fatal("Finisher channel has data to be read")
default:
}

finish.fd.Close()
if err := os.Remove(tempFile); err != nil && !os.IsNotExist(err) {
t.Error(err)
}

// Test 2 - using weak hash, expectPulls blocks pulled.
fo.WeakHashThresholdPct = -1
fo.handleFile(desiredFile, copyChan, finisherChan)

pulls = pulls[:0]
for len(pulls) < expectPulls {
select {
case pull := <-pullChan:
pulls = append(pulls, pull)
case <-time.After(10 * time.Second):
t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
}
}

finish = <-finisherChan
finish.fd.Close()

expectShifted := expectBlocks - expectPulls
if finish.copyOriginShifted != expectShifted {
t.Errorf("did not copy %d shifted", expectShifted)
}
}
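To make the expectations in TestWeakHash concrete, assuming protocol.BlockSize was the fixed 128 KiB value in use at the time of this change, the worked constants below show how expectBlocks, expectPulls, and the shifted-copy count relate. This is a reader's aid, not part of the test.

```go
// Worked numbers for TestWeakHash, assuming protocol.BlockSize == 128 << 10.
package example

const (
	blockSize    = 128 << 10           // assumed protocol.BlockSize
	size         = 1 << 20             // 1 MiB test file
	shift        = 10                  // bytes of random prefix in the second file
	expectBlocks = size / blockSize    // 8 blocks in total
	expectPulls  = shift/blockSize + 1 // 0 whole blocks displaced, +1 partial = 1 pull
)

// expectShifted = expectBlocks - expectPulls = 7: seven blocks should be found
// via the weak hash at shifted offsets instead of being downloaded again.
```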
// Test that updating a file removes it's old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
iterFn := func(folder, file string, index int32) bool {
@@ -293,7 +425,7 @@ func TestLastResortPulling(t *testing.T) {
return true
}

f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)

copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, 1)
@@ -331,7 +463,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
m.AddFolder(defaultFolderConfig)

f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)

// queue.Done should be called by the finisher routine
f.queue.Push("filex", 0, time.Time{})
@@ -404,7 +536,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
m.AddFolder(defaultFolderConfig)

f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)

// queue.Done should be called by the finisher routine
f.queue.Push("filex", 0, time.Time{})

@@ -31,30 +31,32 @@ type sharedPullerState struct {
created time.Time

// Mutable, must be locked for access
err error // The first error we hit
fd *os.File // The fd of the temp file
copyTotal int // Total number of copy actions for the whole job
pullTotal int // Total number of pull actions for the whole job
copyOrigin int // Number of blocks copied from the original file
copyNeeded int // Number of copy actions still pending
pullNeeded int // Number of block pulls still pending
updated time.Time // Time when any of the counters above were last updated
closed bool // True if the file has been finalClosed.
available []int32 // Indexes of the blocks that are available in the temporary file
availableUpdated time.Time // Time when list of available blocks was last updated
mut sync.RWMutex // Protects the above
err error // The first error we hit
fd *os.File // The fd of the temp file
copyTotal int // Total number of copy actions for the whole job
pullTotal int // Total number of pull actions for the whole job
copyOrigin int // Number of blocks copied from the original file
copyOriginShifted int // Number of blocks copied from the original file but shifted
copyNeeded int // Number of copy actions still pending
pullNeeded int // Number of block pulls still pending
updated time.Time // Time when any of the counters above were last updated
closed bool // True if the file has been finalClosed.
available []int32 // Indexes of the blocks that are available in the temporary file
availableUpdated time.Time // Time when list of available blocks was last updated
mut sync.RWMutex // Protects the above
}

// A momentary state representing the progress of the puller
type pullerProgress struct {
Total int `json:"total"`
Reused int `json:"reused"`
CopiedFromOrigin int `json:"copiedFromOrigin"`
CopiedFromElsewhere int `json:"copiedFromElsewhere"`
Pulled int `json:"pulled"`
Pulling int `json:"pulling"`
BytesDone int64 `json:"bytesDone"`
BytesTotal int64 `json:"bytesTotal"`
Total int `json:"total"`
Reused int `json:"reused"`
CopiedFromOrigin int `json:"copiedFromOrigin"`
CopiedFromOriginShifted int `json:"copiedFromOriginShifted"`
CopiedFromElsewhere int `json:"copiedFromElsewhere"`
Pulled int `json:"pulled"`
Pulling int `json:"pulling"`
BytesDone int64 `json:"bytesDone"`
BytesTotal int64 `json:"bytesTotal"`
}

// A lockedWriterAt synchronizes WriteAt calls with an external mutex.
@@ -241,6 +243,14 @@ func (s *sharedPullerState) copiedFromOrigin() {
s.mut.Unlock()
}

func (s *sharedPullerState) copiedFromOriginShifted() {
s.mut.Lock()
s.copyOrigin++
s.copyOriginShifted++
s.updated = time.Now()
s.mut.Unlock()
}

func (s *sharedPullerState) pullStarted() {
s.mut.Lock()
s.copyTotal--

@@ -37,6 +37,7 @@ func (d *deadlockDetector) Watch(name string, mut sync.Locker) {

go func() {
mut.Lock()
_ = 1 // empty critical section
mut.Unlock()
ok <- true
}()

@@ -1,49 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package osutil

import (
"os"
"path/filepath"
"strings"
)

// IsDir returns true if base and every path component of name up to and
// including filepath.Join(base, name) is a directory (and not a symlink or
// similar). Base and name must both be clean and name must be relative to
// base.
func IsDir(base, name string) bool {
path := base
info, err := Lstat(path)
if err != nil {
return false
}
if !info.IsDir() {
return false
}

if name == "." {
// The result of calling IsDir("some/where", filepath.Dir("foo"))
return true
}

parts := strings.Split(name, string(os.PathSeparator))
for _, part := range parts {
path = filepath.Join(path, part)
info, err := Lstat(path)
if err != nil {
return false
}
if info.Mode()&os.ModeSymlink != 0 {
return false
}
if !info.IsDir() {
return false
}
}
return true
}
@@ -22,10 +22,7 @@ func SyncFile(path string) error {
}
defer fd.Close()
// MacOS and Windows do not flush the disk cache
if err := fd.Sync(); err != nil {
return err
}
return nil
return fd.Sync()
}

func SyncDir(path string) error {

76 lib/osutil/traversessymlink.go Normal file
@@ -0,0 +1,76 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package osutil

import (
"fmt"
"os"
"path/filepath"
"strings"
)

// TraversesSymlinkError is an error indicating symlink traversal
type TraversesSymlinkError struct {
path string
}

func (e TraversesSymlinkError) Error() string {
return fmt.Sprintf("traverses symlink: %s", e.path)
}

// NotADirectoryError is an error indicating an expected path is not a directory
type NotADirectoryError struct {
path string
}

func (e NotADirectoryError) Error() string {
return fmt.Sprintf("not a directory: %s", e.path)
}

// TraversesSymlink returns an error if base and any path component of name up to and
// including filepath.Join(base, name) traverses a symlink.
// Base and name must both be clean and name must be relative to base.
func TraversesSymlink(base, name string) error {
path := base
info, err := Lstat(path)
if err != nil {
return err
}
if !info.IsDir() {
return &NotADirectoryError{
path: base,
}
}

if name == "." {
// The result of calling TraversesSymlink("some/where", filepath.Dir("foo"))
return nil
}

parts := strings.Split(name, string(os.PathSeparator))
for _, part := range parts {
path = filepath.Join(path, part)
info, err := Lstat(path)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
if info.Mode()&os.ModeSymlink != 0 {
return &TraversesSymlinkError{
path: strings.TrimPrefix(path, base),
}
}
if !info.IsDir() {
return &NotADirectoryError{
path: strings.TrimPrefix(path, base),
}
}
}
return nil
}
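A short usage sketch for the new helper: callers pass a clean base directory and a name relative to it, and treat a nil return as safe to operate on. The error handling below follows the two error types defined in this file; the example paths are assumptions.

```go
// Sketch: refuse to touch "a/l/c" under "testdata" if any component of the
// relative path crosses a symlink. Paths here are examples only.
package example

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/osutil"
)

func checkPath() {
	switch err := osutil.TraversesSymlink("testdata", "a/l/c").(type) {
	case nil:
		fmt.Println("path is safe: no symlink traversal")
	case *osutil.TraversesSymlinkError:
		fmt.Println("refusing to follow a symlink:", err)
	case *osutil.NotADirectoryError:
		fmt.Println("a path component is not a directory:", err)
	default:
		fmt.Println("lstat failed:", err)
	}
}
```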
@@ -14,7 +14,7 @@ import (
"github.com/syncthing/syncthing/lib/symlinks"
)

func TestIsDir(t *testing.T) {
func TestTraversesSymlink(t *testing.T) {
if !symlinks.Supported {
t.Skip("pointless test")
return
@@ -35,40 +35,42 @@ func TestIsDir(t *testing.T) {
}

cases := []struct {
name string
isDir bool
name string
traverses bool
}{
// Exist
{".", true},
{"a", true},
{"a/b", true},
{"a/b/c", true},
{".", false},
{"a", false},
{"a/b", false},
{"a/b/c", false},
// Don't exist
{"x", false},
{"a/x", false},
{"a/b/x", false},
{"a/x/c", false},
// Symlink or behind symlink
{"a/l", false},
{"a/l/c", false},
{"a/l", true},
{"a/l/c", true},
// Non-existing behind a symlink
{"a/l/x", true},
}

for _, tc := range cases {
if res := osutil.IsDir("testdata", tc.name); res != tc.isDir {
t.Errorf("IsDir(%q) = %v, should be %v", tc.name, res, tc.isDir)
if res := osutil.TraversesSymlink("testdata", tc.name); tc.traverses == (res == nil) {
t.Errorf("TraversesSymlink(%q) = %v, should be %v", tc.name, res, tc.traverses)
}
}
}

var isDirResult bool
var traversesSymlinkResult error

func BenchmarkIsDir(b *testing.B) {
func BenchmarkTraversesSymlink(b *testing.B) {
os.RemoveAll("testdata")
defer os.RemoveAll("testdata")
os.MkdirAll("testdata/a/b/c", 0755)

for i := 0; i < b.N; i++ {
isDirResult = osutil.IsDir("testdata", "a/b/c")
traversesSymlinkResult = osutil.TraversesSymlink("testdata", "a/b/c")
}

b.ReportAllocs()
Some files were not shown because too many files have changed in this diff