mirror of
https://github.com/syncthing/syncthing.git
synced 2026-01-18 10:48:54 -05:00
Compare commits
43 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
54911d44c5 | ||
|
|
c765f7be8d | ||
|
|
44bdaf3ac2 | ||
|
|
fc16e49cb0 | ||
|
|
f5a310ad64 | ||
|
|
01e50eb3fa | ||
|
|
bab7c8ebbf | ||
|
|
0725e3af38 | ||
|
|
dd7bb6c4b8 | ||
|
|
d41c131364 | ||
|
|
47f22ff3e5 | ||
|
|
744c2e82b5 | ||
|
|
ead7281c20 | ||
|
|
aa3ef49dd7 | ||
|
|
5c067661f4 | ||
|
|
226da976dc | ||
|
|
ba17cc0a11 | ||
|
|
9e0afb7d8a | ||
|
|
9e7d50bc76 | ||
|
|
d7d5687faa | ||
|
|
21eb098dd2 | ||
|
|
2f770f8bfb | ||
|
|
f09698d845 | ||
|
|
3fbcb024a7 | ||
|
|
b8c1c0e048 | ||
|
|
2d47242d54 | ||
|
|
66a7829eee | ||
|
|
9c67bd2550 | ||
|
|
f67c5a2fd6 | ||
|
|
263402f80a | ||
|
|
920a83ec7a | ||
|
|
3c2ac3522c | ||
|
|
9fdaa637a8 | ||
|
|
81a9d7f2b9 | ||
|
|
d8d3f05164 | ||
|
|
653be136ee | ||
|
|
398c356f22 | ||
|
|
542b76f687 | ||
|
|
abb8a1914a | ||
|
|
163d335078 | ||
|
|
0582836820 | ||
|
|
bb15776ae6 | ||
|
|
dde9d4c9eb |
1
AUTHORS
1
AUTHORS
@@ -48,6 +48,7 @@ Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
|
||||
Frank Isemann (fti7) <frank@isemann.name>
|
||||
Gilli Sigurdsson (gillisig) <gilli@vx.is>
|
||||
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
|
||||
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
|
||||
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
|
||||
Jake Peterson (acogdev) <jake@acogdev.com>
|
||||
|
||||
1
NICKS
1
NICKS
@@ -101,6 +101,7 @@ sciurius <jvromans@squirrel.nl>
|
||||
seehuhn <voss@seehuhn.de>
|
||||
simplypeachy <aD@simplypeachy.co.uk>
|
||||
simplypeachy <simplypeachy@users.noreply.github.com>
|
||||
Smiley73 <heiko@zuerker.org>
|
||||
snnd <dw@risu.io>
|
||||
Stefan-Code <stefan.github@gmail.com>
|
||||
Stefan-Code <Stefan.github@gmail.com>
|
||||
|
||||
135
build.go
135
build.go
@@ -27,7 +27,6 @@ import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"text/template"
|
||||
"time"
|
||||
)
|
||||
@@ -154,6 +153,40 @@ var targets = map[string]target{
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
// fast linters complete in a fraction of a second and might as well be
|
||||
// run always as part of the build
|
||||
fastLinters = []string{
|
||||
"deadcode",
|
||||
"golint",
|
||||
"ineffassign",
|
||||
"vet",
|
||||
}
|
||||
|
||||
// slow linters take several seconds and are run only as part of the
|
||||
// "metalint" command.
|
||||
slowLinters = []string{
|
||||
"gosimple",
|
||||
"staticcheck",
|
||||
"structcheck",
|
||||
"unused",
|
||||
"varcheck",
|
||||
}
|
||||
|
||||
// Which parts of the tree to lint
|
||||
lintDirs = []string{".", "./lib/...", "./cmd/..."}
|
||||
|
||||
// Messages to ignore
|
||||
lintExcludes = []string{
|
||||
".pb.go",
|
||||
"should have comment",
|
||||
"protocol.Vector composite literal uses unkeyed fields",
|
||||
"cli.Requires composite literal uses unkeyed fields",
|
||||
"Use DialContext instead", // Go 1.7
|
||||
"os.SEEK_SET is deprecated", // Go 1.7
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
// The "syncthing" target includes a few more files found in the "etc"
|
||||
// and "extra" dirs.
|
||||
@@ -203,8 +236,6 @@ func main() {
|
||||
// which is what you want for maximum error checking during development.
|
||||
if flag.NArg() == 0 {
|
||||
runCommand("install", targets["all"])
|
||||
runCommand("vet", target{})
|
||||
runCommand("lint", target{})
|
||||
} else {
|
||||
// with any command given but not a target, the target is
|
||||
// "syncthing". So "go run build.go install" is "go run build.go install
|
||||
@@ -242,6 +273,7 @@ func runCommand(cmd string, target target) {
|
||||
tags = []string{"noupgrade"}
|
||||
}
|
||||
install(target, tags)
|
||||
metalint(fastLinters, lintDirs)
|
||||
|
||||
case "build":
|
||||
var tags []string
|
||||
@@ -249,6 +281,7 @@ func runCommand(cmd string, target target) {
|
||||
tags = []string{"noupgrade"}
|
||||
}
|
||||
build(target, tags)
|
||||
metalint(fastLinters, lintDirs)
|
||||
|
||||
case "test":
|
||||
test("./lib/...", "./cmd/...")
|
||||
@@ -284,25 +317,14 @@ func runCommand(cmd string, target target) {
|
||||
clean()
|
||||
|
||||
case "vet":
|
||||
vet("build.go")
|
||||
vet("cmd", "lib")
|
||||
metalint(fastLinters, lintDirs)
|
||||
|
||||
case "lint":
|
||||
lint(".")
|
||||
lint("./cmd/...")
|
||||
lint("./lib/...")
|
||||
metalint(fastLinters, lintDirs)
|
||||
|
||||
case "metalint":
|
||||
if isGometalinterInstalled() {
|
||||
dirs := []string{".", "./cmd/...", "./lib/..."}
|
||||
ok := gometalinter("deadcode", dirs, "test/util.go")
|
||||
ok = gometalinter("structcheck", dirs) && ok
|
||||
ok = gometalinter("varcheck", dirs) && ok
|
||||
ok = gometalinter("ineffassign", dirs) && ok
|
||||
if !ok {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
metalint(fastLinters, lintDirs)
|
||||
metalint(slowLinters, lintDirs)
|
||||
|
||||
case "version":
|
||||
fmt.Println(getVersion())
|
||||
@@ -364,11 +386,15 @@ func setup() {
|
||||
"github.com/FiloSottile/gvt",
|
||||
"github.com/golang/lint/golint",
|
||||
"github.com/gordonklaus/ineffassign",
|
||||
"github.com/mdempsky/unconvert",
|
||||
"github.com/mitchellh/go-wordwrap",
|
||||
"github.com/opennota/check/cmd/...",
|
||||
"github.com/tsenart/deadcode",
|
||||
"golang.org/x/net/html",
|
||||
"golang.org/x/tools/cmd/cover",
|
||||
"honnef.co/go/simple/cmd/gosimple",
|
||||
"honnef.co/go/staticcheck/cmd/staticcheck",
|
||||
"honnef.co/go/unused/cmd/unused",
|
||||
}
|
||||
for _, pkg := range packages {
|
||||
fmt.Println(pkg)
|
||||
@@ -1003,48 +1029,6 @@ func zipFile(out string, files []archiveFile) {
|
||||
}
|
||||
}
|
||||
|
||||
func vet(dirs ...string) {
|
||||
params := []string{"tool", "vet", "-all"}
|
||||
params = append(params, dirs...)
|
||||
bs, err := runError("go", params...)
|
||||
|
||||
if len(bs) > 0 {
|
||||
log.Printf("%s", bs)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if exitStatus(err) == 3 {
|
||||
// Exit code 3, the "vet" tool is not installed
|
||||
return
|
||||
}
|
||||
|
||||
// A genuine error exit from the vet tool.
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func lint(pkg string) {
|
||||
bs, err := runError("golint", pkg)
|
||||
if err != nil {
|
||||
log.Println(`- No golint, not linting. Try "go get -u github.com/golang/lint/golint".`)
|
||||
return
|
||||
}
|
||||
|
||||
analCommentPolicy := regexp.MustCompile(`exported (function|method|const|type|var) [^\s]+ should have comment`)
|
||||
for _, line := range strings.Split(string(bs), "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
if analCommentPolicy.MatchString(line) {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(line, ".pb.go:") {
|
||||
continue
|
||||
}
|
||||
log.Println(line)
|
||||
}
|
||||
}
|
||||
|
||||
func macosCodesign(file string) {
|
||||
if pass := os.Getenv("CODESIGN_KEYCHAIN_PASS"); pass != "" {
|
||||
bs, err := runError("security", "unlock-keychain", "-p", pass)
|
||||
@@ -1064,14 +1048,16 @@ func macosCodesign(file string) {
|
||||
}
|
||||
}
|
||||
|
||||
func exitStatus(err error) int {
|
||||
if err, ok := err.(*exec.ExitError); ok {
|
||||
if ws, ok := err.ProcessState.Sys().(syscall.WaitStatus); ok {
|
||||
return ws.ExitStatus()
|
||||
func metalint(linters []string, dirs []string) {
|
||||
ok := true
|
||||
if isGometalinterInstalled() {
|
||||
if !gometalinter(linters, dirs, lintExcludes...) {
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
if !ok {
|
||||
log.Fatal("Build succeeded, but there were lint warnings")
|
||||
}
|
||||
}
|
||||
|
||||
func isGometalinterInstalled() bool {
|
||||
@@ -1082,10 +1068,12 @@ func isGometalinterInstalled() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func gometalinter(linter string, dirs []string, excludes ...string) bool {
|
||||
params := []string{"--disable-all"}
|
||||
params = append(params, fmt.Sprintf("--deadline=%ds", 60))
|
||||
params = append(params, "--enable="+linter)
|
||||
func gometalinter(linters []string, dirs []string, excludes ...string) bool {
|
||||
params := []string{"--disable-all", "--concurrency=2", "--deadline=300s"}
|
||||
|
||||
for _, linter := range linters {
|
||||
params = append(params, "--enable="+linter)
|
||||
}
|
||||
|
||||
for _, exclude := range excludes {
|
||||
params = append(params, "--exclude="+exclude)
|
||||
@@ -1098,14 +1086,19 @@ func gometalinter(linter string, dirs []string, excludes ...string) bool {
|
||||
bs, _ := runError("gometalinter", params...)
|
||||
|
||||
nerr := 0
|
||||
lines := make(map[string]struct{})
|
||||
for _, line := range strings.Split(string(bs), "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(line, ".pb.go:") {
|
||||
if _, ok := lines[line]; ok {
|
||||
continue
|
||||
}
|
||||
log.Println(line)
|
||||
if strings.Contains(line, "executable file not found") {
|
||||
log.Println(` - Try "go run build.go setup" to install missing tools`)
|
||||
}
|
||||
lines[line] = struct{}{}
|
||||
nerr++
|
||||
}
|
||||
|
||||
|
||||
19
cmd/stcli/LICENSE
Normal file
19
cmd/stcli/LICENSE
Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
10
cmd/stcli/README.md
Normal file
10
cmd/stcli/README.md
Normal file
@@ -0,0 +1,10 @@
|
||||
syncthing-cli
|
||||
=============
|
||||
|
||||
[](http://build.syncthing.net/job/syncthing-cli/lastBuild/)
|
||||
|
||||
A CLI that talks to the Syncthing REST interface.
|
||||
|
||||
`go get github.com/syncthing/syncthing-cli`
|
||||
|
||||
Or download the [latest build](http://build.syncthing.net/job/syncthing-cli/lastSuccessfulBuild/artifact/).
|
||||
115
cmd/stcli/client.go
Normal file
115
cmd/stcli/client.go
Normal file
@@ -0,0 +1,115 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
type APIClient struct {
|
||||
httpClient http.Client
|
||||
endpoint string
|
||||
apikey string
|
||||
username string
|
||||
password string
|
||||
id string
|
||||
csrf string
|
||||
}
|
||||
|
||||
var instance *APIClient
|
||||
|
||||
func getClient(c *cli.Context) *APIClient {
|
||||
if instance != nil {
|
||||
return instance
|
||||
}
|
||||
endpoint := c.GlobalString("endpoint")
|
||||
if !strings.HasPrefix(endpoint, "http") {
|
||||
endpoint = "http://" + endpoint
|
||||
}
|
||||
httpClient := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: c.GlobalBool("insecure"),
|
||||
},
|
||||
},
|
||||
}
|
||||
client := APIClient{
|
||||
httpClient: httpClient,
|
||||
endpoint: endpoint,
|
||||
apikey: c.GlobalString("apikey"),
|
||||
username: c.GlobalString("username"),
|
||||
password: c.GlobalString("password"),
|
||||
}
|
||||
|
||||
if client.apikey == "" {
|
||||
request, err := http.NewRequest("GET", client.endpoint, nil)
|
||||
die(err)
|
||||
response := client.handleRequest(request)
|
||||
client.id = response.Header.Get("X-Syncthing-ID")
|
||||
if client.id == "" {
|
||||
die("Failed to get device ID")
|
||||
}
|
||||
for _, item := range response.Cookies() {
|
||||
if item.Name == "CSRF-Token-"+client.id[:5] {
|
||||
client.csrf = item.Value
|
||||
goto csrffound
|
||||
}
|
||||
}
|
||||
die("Failed to get CSRF token")
|
||||
csrffound:
|
||||
}
|
||||
instance = &client
|
||||
return &client
|
||||
}
|
||||
|
||||
func (client *APIClient) handleRequest(request *http.Request) *http.Response {
|
||||
if client.apikey != "" {
|
||||
request.Header.Set("X-API-Key", client.apikey)
|
||||
}
|
||||
if client.username != "" || client.password != "" {
|
||||
request.SetBasicAuth(client.username, client.password)
|
||||
}
|
||||
if client.csrf != "" {
|
||||
request.Header.Set("X-CSRF-Token-"+client.id[:5], client.csrf)
|
||||
}
|
||||
|
||||
response, err := client.httpClient.Do(request)
|
||||
die(err)
|
||||
|
||||
if response.StatusCode == 404 {
|
||||
die("Invalid endpoint or API call")
|
||||
} else if response.StatusCode == 401 {
|
||||
die("Invalid username or password")
|
||||
} else if response.StatusCode == 403 {
|
||||
if client.apikey == "" {
|
||||
die("Invalid CSRF token")
|
||||
}
|
||||
die("Invalid API key")
|
||||
} else if response.StatusCode != 200 {
|
||||
body := strings.TrimSpace(string(responseToBArray(response)))
|
||||
if body != "" {
|
||||
die(body)
|
||||
}
|
||||
die("Unknown HTTP status returned: " + response.Status)
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
func httpGet(c *cli.Context, url string) *http.Response {
|
||||
client := getClient(c)
|
||||
request, err := http.NewRequest("GET", client.endpoint+"/rest/"+url, nil)
|
||||
die(err)
|
||||
return client.handleRequest(request)
|
||||
}
|
||||
|
||||
func httpPost(c *cli.Context, url string, body string) *http.Response {
|
||||
client := getClient(c)
|
||||
request, err := http.NewRequest("POST", client.endpoint+"/rest/"+url, bytes.NewBufferString(body))
|
||||
die(err)
|
||||
return client.handleRequest(request)
|
||||
}
|
||||
188
cmd/stcli/cmd_devices.go
Normal file
188
cmd/stcli/cmd_devices.go
Normal file
@@ -0,0 +1,188 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "devices",
|
||||
HideHelp: true,
|
||||
Usage: "Device command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List registered devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: devicesList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Add a new device",
|
||||
Requires: &cli.Requires{"device id", "device name?"},
|
||||
Action: devicesAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Remove an existing device",
|
||||
Requires: &cli.Requires{"device id"},
|
||||
Action: devicesRemove,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a property of a device",
|
||||
Requires: &cli.Requires{"device id", "property"},
|
||||
Action: devicesGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a property of a device",
|
||||
Requires: &cli.Requires{"device id", "property", "value..."},
|
||||
Action: devicesSet,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func devicesList(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
first := true
|
||||
writer := newTableWriter()
|
||||
for _, device := range cfg.Devices {
|
||||
if !first {
|
||||
fmt.Fprintln(writer)
|
||||
}
|
||||
fmt.Fprintln(writer, "ID:\t", device.DeviceID, "\t")
|
||||
fmt.Fprintln(writer, "Name:\t", device.Name, "\t(name)")
|
||||
fmt.Fprintln(writer, "Address:\t", strings.Join(device.Addresses, " "), "\t(address)")
|
||||
fmt.Fprintln(writer, "Compression:\t", device.Compression, "\t(compression)")
|
||||
fmt.Fprintln(writer, "Certificate name:\t", device.CertName, "\t(certname)")
|
||||
fmt.Fprintln(writer, "Introducer:\t", device.Introducer, "\t(introducer)")
|
||||
first = false
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func devicesAdd(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
|
||||
newDevice := config.DeviceConfiguration{
|
||||
DeviceID: id,
|
||||
Name: nid,
|
||||
Addresses: []string{"dynamic"},
|
||||
}
|
||||
|
||||
if len(c.Args()) > 1 {
|
||||
newDevice.Name = c.Args()[1]
|
||||
}
|
||||
|
||||
if len(c.Args()) > 2 {
|
||||
addresses := c.Args()[2:]
|
||||
for _, item := range addresses {
|
||||
if item == "dynamic" {
|
||||
continue
|
||||
}
|
||||
validAddress(item)
|
||||
}
|
||||
newDevice.Addresses = addresses
|
||||
}
|
||||
|
||||
cfg := getConfig(c)
|
||||
for _, device := range cfg.Devices {
|
||||
if device.DeviceID == id {
|
||||
die("Device " + nid + " already exists")
|
||||
}
|
||||
}
|
||||
cfg.Devices = append(cfg.Devices, newDevice)
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func devicesRemove(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
if nid == getMyID(c) {
|
||||
die("Cannot remove yourself")
|
||||
}
|
||||
cfg := getConfig(c)
|
||||
for i, device := range cfg.Devices {
|
||||
if device.DeviceID == id {
|
||||
last := len(cfg.Devices) - 1
|
||||
cfg.Devices[i] = cfg.Devices[last]
|
||||
cfg.Devices = cfg.Devices[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + nid + " not found")
|
||||
}
|
||||
|
||||
func devicesGet(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
arg := c.Args()[1]
|
||||
cfg := getConfig(c)
|
||||
for _, device := range cfg.Devices {
|
||||
if device.DeviceID != id {
|
||||
continue
|
||||
}
|
||||
switch strings.ToLower(arg) {
|
||||
case "name":
|
||||
fmt.Println(device.Name)
|
||||
case "address":
|
||||
fmt.Println(strings.Join(device.Addresses, "\n"))
|
||||
case "compression":
|
||||
fmt.Println(device.Compression.String())
|
||||
case "certname":
|
||||
fmt.Println(device.CertName)
|
||||
case "introducer":
|
||||
fmt.Println(device.Introducer)
|
||||
default:
|
||||
die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Device " + nid + " not found")
|
||||
}
|
||||
|
||||
func devicesSet(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
arg := c.Args()[1]
|
||||
config := getConfig(c)
|
||||
for i, device := range config.Devices {
|
||||
if device.DeviceID != id {
|
||||
continue
|
||||
}
|
||||
switch strings.ToLower(arg) {
|
||||
case "name":
|
||||
config.Devices[i].Name = strings.Join(c.Args()[2:], " ")
|
||||
case "address":
|
||||
for _, item := range c.Args()[2:] {
|
||||
if item == "dynamic" {
|
||||
continue
|
||||
}
|
||||
validAddress(item)
|
||||
}
|
||||
config.Devices[i].Addresses = c.Args()[2:]
|
||||
case "compression":
|
||||
err := config.Devices[i].Compression.UnmarshalText([]byte(c.Args()[2]))
|
||||
die(err)
|
||||
case "certname":
|
||||
config.Devices[i].CertName = strings.Join(c.Args()[2:], " ")
|
||||
case "introducer":
|
||||
config.Devices[i].Introducer = parseBool(c.Args()[2])
|
||||
default:
|
||||
die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
|
||||
}
|
||||
setConfig(c, config)
|
||||
return
|
||||
}
|
||||
die("Device " + nid + " not found")
|
||||
}
|
||||
67
cmd/stcli/cmd_errors.go
Normal file
67
cmd/stcli/cmd_errors.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "errors",
|
||||
HideHelp: true,
|
||||
Usage: "Error command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "Show pending errors",
|
||||
Requires: &cli.Requires{},
|
||||
Action: errorsShow,
|
||||
},
|
||||
{
|
||||
Name: "push",
|
||||
Usage: "Push an error to active clients",
|
||||
Requires: &cli.Requires{"error message..."},
|
||||
Action: errorsPush,
|
||||
},
|
||||
{
|
||||
Name: "clear",
|
||||
Usage: "Clear pending errors",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/error/clear"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func errorsShow(c *cli.Context) {
|
||||
response := httpGet(c, "system/error")
|
||||
var data map[string][]map[string]interface{}
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
writer := newTableWriter()
|
||||
for _, item := range data["errors"] {
|
||||
time := item["time"].(string)[:19]
|
||||
time = strings.Replace(time, "T", " ", 1)
|
||||
err := item["error"].(string)
|
||||
err = strings.TrimSpace(err)
|
||||
fmt.Fprintln(writer, time+":\t"+err)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func errorsPush(c *cli.Context) {
|
||||
err := strings.Join(c.Args(), " ")
|
||||
response := httpPost(c, "system/error", strings.TrimSpace(err))
|
||||
if response.StatusCode != 200 {
|
||||
err = fmt.Sprint("Failed to push error\nStatus code: ", response.StatusCode)
|
||||
body := string(responseToBArray(response))
|
||||
if body != "" {
|
||||
err += "\nBody: " + body
|
||||
}
|
||||
die(err)
|
||||
}
|
||||
}
|
||||
351
cmd/stcli/cmd_folders.go
Normal file
351
cmd/stcli/cmd_folders.go
Normal file
@@ -0,0 +1,351 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "folders",
|
||||
HideHelp: true,
|
||||
Usage: "Folder command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List available folders",
|
||||
Requires: &cli.Requires{},
|
||||
Action: foldersList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Add a new folder",
|
||||
Requires: &cli.Requires{"folder id", "directory"},
|
||||
Action: foldersAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Remove an existing folder",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersRemove,
|
||||
},
|
||||
{
|
||||
Name: "override",
|
||||
Usage: "Override changes from other nodes for a master folder",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersOverride,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property"},
|
||||
Action: foldersGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property", "value..."},
|
||||
Action: foldersSet,
|
||||
},
|
||||
{
|
||||
Name: "unset",
|
||||
Usage: "Unset a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property"},
|
||||
Action: foldersUnset,
|
||||
},
|
||||
{
|
||||
Name: "devices",
|
||||
Usage: "Folder devices command group",
|
||||
HideHelp: true,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List of devices which the folder is shared with",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersDevicesList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Share a folder with a device",
|
||||
Requires: &cli.Requires{"folder id", "device id"},
|
||||
Action: foldersDevicesAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Unshare a folder with a device",
|
||||
Requires: &cli.Requires{"folder id", "device id"},
|
||||
Action: foldersDevicesRemove,
|
||||
},
|
||||
{
|
||||
Name: "clear",
|
||||
Usage: "Unshare a folder with all devices",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersDevicesClear,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func foldersList(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
first := true
|
||||
writer := newTableWriter()
|
||||
for _, folder := range cfg.Folders {
|
||||
if !first {
|
||||
fmt.Fprintln(writer)
|
||||
}
|
||||
fmt.Fprintln(writer, "ID:\t", folder.ID, "\t")
|
||||
fmt.Fprintln(writer, "Path:\t", folder.RawPath, "\t(directory)")
|
||||
fmt.Fprintln(writer, "Folder type:\t", folder.Type, "\t(type)")
|
||||
fmt.Fprintln(writer, "Ignore permissions:\t", folder.IgnorePerms, "\t(permissions)")
|
||||
fmt.Fprintln(writer, "Rescan interval in seconds:\t", folder.RescanIntervalS, "\t(rescan)")
|
||||
|
||||
if folder.Versioning.Type != "" {
|
||||
fmt.Fprintln(writer, "Versioning:\t", folder.Versioning.Type, "\t(versioning)")
|
||||
for key, value := range folder.Versioning.Params {
|
||||
fmt.Fprintf(writer, "Versioning %s:\t %s \t(versioning-%s)\n", key, value, key)
|
||||
}
|
||||
}
|
||||
first = false
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func foldersAdd(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
abs, err := filepath.Abs(c.Args()[1])
|
||||
die(err)
|
||||
folder := config.FolderConfiguration{
|
||||
ID: c.Args()[0],
|
||||
RawPath: filepath.Clean(abs),
|
||||
}
|
||||
cfg.Folders = append(cfg.Folders, folder)
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func foldersRemove(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID == rid {
|
||||
last := len(cfg.Folders) - 1
|
||||
cfg.Folders[i] = cfg.Folders[last]
|
||||
cfg.Folders = cfg.Folders[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersOverride(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID == rid && folder.Type == config.FolderTypeSendOnly {
|
||||
response := httpPost(c, "db/override", "")
|
||||
if response.StatusCode != 200 {
|
||||
err := fmt.Sprint("Failed to override changes\nStatus code: ", response.StatusCode)
|
||||
body := string(responseToBArray(response))
|
||||
if body != "" {
|
||||
err += "\nBody: " + body
|
||||
}
|
||||
die(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Folder " + rid + " not found or folder not master")
|
||||
}
|
||||
|
||||
func foldersGet(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
arg = arg[11:]
|
||||
value, ok := folder.Versioning.Params[arg]
|
||||
if ok {
|
||||
fmt.Println(value)
|
||||
return
|
||||
}
|
||||
die("Versioning property " + c.Args()[1][11:] + " not found")
|
||||
}
|
||||
switch arg {
|
||||
case "directory":
|
||||
fmt.Println(folder.RawPath)
|
||||
case "type":
|
||||
fmt.Println(folder.Type)
|
||||
case "permissions":
|
||||
fmt.Println(folder.IgnorePerms)
|
||||
case "rescan":
|
||||
fmt.Println(folder.RescanIntervalS)
|
||||
case "versioning":
|
||||
if folder.Versioning.Type != "" {
|
||||
fmt.Println(folder.Versioning.Type)
|
||||
}
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, type, permissions, versioning, versioning-<key>")
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersSet(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
val := strings.Join(c.Args()[2:], " ")
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
cfg.Folders[i].Versioning.Params[arg[11:]] = val
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
switch arg {
|
||||
case "directory":
|
||||
cfg.Folders[i].RawPath = val
|
||||
case "type":
|
||||
var t config.FolderType
|
||||
if err := t.UnmarshalText([]byte(val)); err != nil {
|
||||
die("Invalid folder type: " + err.Error())
|
||||
}
|
||||
cfg.Folders[i].Type = t
|
||||
case "permissions":
|
||||
cfg.Folders[i].IgnorePerms = parseBool(val)
|
||||
case "rescan":
|
||||
cfg.Folders[i].RescanIntervalS = parseInt(val)
|
||||
case "versioning":
|
||||
cfg.Folders[i].Versioning.Type = val
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, master, permissions, versioning, versioning-<key>")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersUnset(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
arg = arg[11:]
|
||||
if _, ok := folder.Versioning.Params[arg]; ok {
|
||||
delete(cfg.Folders[i].Versioning.Params, arg)
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Versioning property " + c.Args()[1][11:] + " not found")
|
||||
}
|
||||
switch arg {
|
||||
case "versioning":
|
||||
cfg.Folders[i].Versioning.Type = ""
|
||||
cfg.Folders[i].Versioning.Params = make(map[string]string)
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: versioning, versioning-<key>")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesList(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
cfg := getConfig(c)
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for _, device := range folder.Devices {
|
||||
fmt.Println(device.DeviceID)
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesAdd(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
nid := parseDeviceID(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for _, device := range folder.Devices {
|
||||
if device.DeviceID == nid {
|
||||
die("Device " + c.Args()[1] + " is already part of this folder")
|
||||
}
|
||||
}
|
||||
for _, device := range cfg.Devices {
|
||||
if device.DeviceID == nid {
|
||||
cfg.Folders[i].Devices = append(folder.Devices, config.FolderDeviceConfiguration{
|
||||
DeviceID: device.DeviceID,
|
||||
})
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + c.Args()[1] + " not found in device list")
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesRemove(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
nid := parseDeviceID(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for ri, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for ni, device := range folder.Devices {
|
||||
if device.DeviceID == nid {
|
||||
last := len(folder.Devices) - 1
|
||||
cfg.Folders[ri].Devices[ni] = folder.Devices[last]
|
||||
cfg.Folders[ri].Devices = cfg.Folders[ri].Devices[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + c.Args()[1] + " not found")
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesClear(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
cfg.Folders[i].Devices = []config.FolderDeviceConfiguration{}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
78
cmd/stcli/cmd_general.go
Normal file
78
cmd/stcli/cmd_general.go
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, []cli.Command{
|
||||
{
|
||||
Name: "id",
|
||||
Usage: "Get ID of the Syncthing client",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalID,
|
||||
},
|
||||
{
|
||||
Name: "status",
|
||||
Usage: "Configuration status, whether or not a restart is required for changes to take effect",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalStatus,
|
||||
},
|
||||
{
|
||||
Name: "restart",
|
||||
Usage: "Restart syncthing",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/restart"),
|
||||
},
|
||||
{
|
||||
Name: "shutdown",
|
||||
Usage: "Shutdown syncthing",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/shutdown"),
|
||||
},
|
||||
{
|
||||
Name: "reset",
|
||||
Usage: "Reset syncthing deleting all folders and devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/reset"),
|
||||
},
|
||||
{
|
||||
Name: "upgrade",
|
||||
Usage: "Upgrade syncthing (if a newer version is available)",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/upgrade"),
|
||||
},
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Syncthing client version",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalVersion,
|
||||
},
|
||||
}...)
|
||||
}
|
||||
|
||||
func generalID(c *cli.Context) {
|
||||
fmt.Println(getMyID(c))
|
||||
}
|
||||
|
||||
func generalStatus(c *cli.Context) {
|
||||
response := httpGet(c, "system/config/insync")
|
||||
var status struct{ ConfigInSync bool }
|
||||
json.Unmarshal(responseToBArray(response), &status)
|
||||
if !status.ConfigInSync {
|
||||
die("Config out of sync")
|
||||
}
|
||||
fmt.Println("Config in sync")
|
||||
}
|
||||
|
||||
func generalVersion(c *cli.Context) {
|
||||
response := httpGet(c, "system/version")
|
||||
version := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &version)
|
||||
prettyPrintJSON(version)
|
||||
}
|
||||
127
cmd/stcli/cmd_gui.go
Normal file
127
cmd/stcli/cmd_gui.go
Normal file
@@ -0,0 +1,127 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "gui",
|
||||
HideHelp: true,
|
||||
Usage: "GUI command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "Show all GUI configuration settings",
|
||||
Requires: &cli.Requires{},
|
||||
Action: guiDump,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: guiGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting", "value"},
|
||||
Action: guiSet,
|
||||
},
|
||||
{
|
||||
Name: "unset",
|
||||
Usage: "Unset a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: guiUnset,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func guiDump(c *cli.Context) {
|
||||
cfg := getConfig(c).GUI
|
||||
writer := newTableWriter()
|
||||
fmt.Fprintln(writer, "Enabled:\t", cfg.Enabled, "\t(enabled)")
|
||||
fmt.Fprintln(writer, "Use HTTPS:\t", cfg.UseTLS(), "\t(tls)")
|
||||
fmt.Fprintln(writer, "Listen Addresses:\t", cfg.Address(), "\t(address)")
|
||||
if cfg.User != "" {
|
||||
fmt.Fprintln(writer, "Authentication User:\t", cfg.User, "\t(username)")
|
||||
fmt.Fprintln(writer, "Authentication Password:\t", cfg.Password, "\t(password)")
|
||||
}
|
||||
if cfg.APIKey != "" {
|
||||
fmt.Fprintln(writer, "API Key:\t", cfg.APIKey, "\t(apikey)")
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func guiGet(c *cli.Context) {
|
||||
cfg := getConfig(c).GUI
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "enabled":
|
||||
fmt.Println(cfg.Enabled)
|
||||
case "tls":
|
||||
fmt.Println(cfg.UseTLS())
|
||||
case "address":
|
||||
fmt.Println(cfg.Address())
|
||||
case "user":
|
||||
if cfg.User != "" {
|
||||
fmt.Println(cfg.User)
|
||||
}
|
||||
case "password":
|
||||
if cfg.User != "" {
|
||||
fmt.Println(cfg.Password)
|
||||
}
|
||||
case "apikey":
|
||||
if cfg.APIKey != "" {
|
||||
fmt.Println(cfg.APIKey)
|
||||
}
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
|
||||
}
|
||||
}
|
||||
|
||||
func guiSet(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
val := c.Args()[1]
|
||||
switch strings.ToLower(arg) {
|
||||
case "enabled":
|
||||
cfg.GUI.Enabled = parseBool(val)
|
||||
case "tls":
|
||||
cfg.GUI.RawUseTLS = parseBool(val)
|
||||
case "address":
|
||||
validAddress(val)
|
||||
cfg.GUI.RawAddress = val
|
||||
case "user":
|
||||
cfg.GUI.User = val
|
||||
case "password":
|
||||
cfg.GUI.Password = val
|
||||
case "apikey":
|
||||
cfg.GUI.APIKey = val
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func guiUnset(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "user":
|
||||
cfg.GUI.User = ""
|
||||
case "password":
|
||||
cfg.GUI.Password = ""
|
||||
case "apikey":
|
||||
cfg.GUI.APIKey = ""
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: user, password, apikey")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
173
cmd/stcli/cmd_options.go
Normal file
173
cmd/stcli/cmd_options.go
Normal file
@@ -0,0 +1,173 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "options",
|
||||
HideHelp: true,
|
||||
Usage: "Options command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "Show all Syncthing option settings",
|
||||
Requires: &cli.Requires{},
|
||||
Action: optionsDump,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a Syncthing option setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: optionsGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a Syncthing option setting",
|
||||
Requires: &cli.Requires{"setting", "value..."},
|
||||
Action: optionsSet,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func optionsDump(c *cli.Context) {
|
||||
cfg := getConfig(c).Options
|
||||
writer := newTableWriter()
|
||||
|
||||
fmt.Fprintln(writer, "Sync protocol listen addresses:\t", strings.Join(cfg.ListenAddresses, " "), "\t(addresses)")
|
||||
fmt.Fprintln(writer, "Global discovery enabled:\t", cfg.GlobalAnnEnabled, "\t(globalannenabled)")
|
||||
fmt.Fprintln(writer, "Global discovery servers:\t", strings.Join(cfg.GlobalAnnServers, " "), "\t(globalannserver)")
|
||||
|
||||
fmt.Fprintln(writer, "Local discovery enabled:\t", cfg.LocalAnnEnabled, "\t(localannenabled)")
|
||||
fmt.Fprintln(writer, "Local discovery port:\t", cfg.LocalAnnPort, "\t(localannport)")
|
||||
|
||||
fmt.Fprintln(writer, "Outgoing rate limit in KiB/s:\t", cfg.MaxSendKbps, "\t(maxsend)")
|
||||
fmt.Fprintln(writer, "Incoming rate limit in KiB/s:\t", cfg.MaxRecvKbps, "\t(maxrecv)")
|
||||
fmt.Fprintln(writer, "Reconnect interval in seconds:\t", cfg.ReconnectIntervalS, "\t(reconnect)")
|
||||
fmt.Fprintln(writer, "Start browser:\t", cfg.StartBrowser, "\t(browser)")
|
||||
fmt.Fprintln(writer, "Enable UPnP:\t", cfg.NATEnabled, "\t(nat)")
|
||||
fmt.Fprintln(writer, "UPnP Lease in minutes:\t", cfg.NATLeaseM, "\t(natlease)")
|
||||
fmt.Fprintln(writer, "UPnP Renewal period in minutes:\t", cfg.NATRenewalM, "\t(natrenew)")
|
||||
fmt.Fprintln(writer, "Restart on Wake Up:\t", cfg.RestartOnWakeup, "\t(wake)")
|
||||
|
||||
reporting := "unrecognized value"
|
||||
switch cfg.URAccepted {
|
||||
case -1:
|
||||
reporting = "false"
|
||||
case 0:
|
||||
reporting = "undecided/false"
|
||||
case 1:
|
||||
reporting = "true"
|
||||
}
|
||||
fmt.Fprintln(writer, "Anonymous usage reporting:\t", reporting, "\t(reporting)")
|
||||
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func optionsGet(c *cli.Context) {
|
||||
cfg := getConfig(c).Options
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "address":
|
||||
fmt.Println(strings.Join(cfg.ListenAddresses, "\n"))
|
||||
case "globalannenabled":
|
||||
fmt.Println(cfg.GlobalAnnEnabled)
|
||||
case "globalannservers":
|
||||
fmt.Println(strings.Join(cfg.GlobalAnnServers, "\n"))
|
||||
case "localannenabled":
|
||||
fmt.Println(cfg.LocalAnnEnabled)
|
||||
case "localannport":
|
||||
fmt.Println(cfg.LocalAnnPort)
|
||||
case "maxsend":
|
||||
fmt.Println(cfg.MaxSendKbps)
|
||||
case "maxrecv":
|
||||
fmt.Println(cfg.MaxRecvKbps)
|
||||
case "reconnect":
|
||||
fmt.Println(cfg.ReconnectIntervalS)
|
||||
case "browser":
|
||||
fmt.Println(cfg.StartBrowser)
|
||||
case "nat":
|
||||
fmt.Println(cfg.NATEnabled)
|
||||
case "natlease":
|
||||
fmt.Println(cfg.NATLeaseM)
|
||||
case "natrenew":
|
||||
fmt.Println(cfg.NATRenewalM)
|
||||
case "reporting":
|
||||
switch cfg.URAccepted {
|
||||
case -1:
|
||||
fmt.Println("false")
|
||||
case 0:
|
||||
fmt.Println("undecided/false")
|
||||
case 1:
|
||||
fmt.Println("true")
|
||||
default:
|
||||
fmt.Println("unknown")
|
||||
}
|
||||
case "wake":
|
||||
fmt.Println(cfg.RestartOnWakeup)
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
|
||||
}
|
||||
}
|
||||
|
||||
func optionsSet(c *cli.Context) {
|
||||
config := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
val := c.Args()[1]
|
||||
switch strings.ToLower(arg) {
|
||||
case "address":
|
||||
for _, item := range c.Args().Tail() {
|
||||
validAddress(item)
|
||||
}
|
||||
config.Options.ListenAddresses = c.Args().Tail()
|
||||
case "globalannenabled":
|
||||
config.Options.GlobalAnnEnabled = parseBool(val)
|
||||
case "globalannserver":
|
||||
for _, item := range c.Args().Tail() {
|
||||
validAddress(item)
|
||||
}
|
||||
config.Options.GlobalAnnServers = c.Args().Tail()
|
||||
case "localannenabled":
|
||||
config.Options.LocalAnnEnabled = parseBool(val)
|
||||
case "localannport":
|
||||
config.Options.LocalAnnPort = parsePort(val)
|
||||
case "maxsend":
|
||||
config.Options.MaxSendKbps = parseUint(val)
|
||||
case "maxrecv":
|
||||
config.Options.MaxRecvKbps = parseUint(val)
|
||||
case "reconnect":
|
||||
config.Options.ReconnectIntervalS = parseUint(val)
|
||||
case "browser":
|
||||
config.Options.StartBrowser = parseBool(val)
|
||||
case "nat":
|
||||
config.Options.NATEnabled = parseBool(val)
|
||||
case "natlease":
|
||||
config.Options.NATLeaseM = parseUint(val)
|
||||
case "natrenew":
|
||||
config.Options.NATRenewalM = parseUint(val)
|
||||
case "reporting":
|
||||
switch strings.ToLower(val) {
|
||||
case "u", "undecided", "unset":
|
||||
config.Options.URAccepted = 0
|
||||
default:
|
||||
boolvalue := parseBool(val)
|
||||
if boolvalue {
|
||||
config.Options.URAccepted = 1
|
||||
} else {
|
||||
config.Options.URAccepted = -1
|
||||
}
|
||||
}
|
||||
case "wake":
|
||||
config.Options.RestartOnWakeup = parseBool(val)
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
|
||||
}
|
||||
setConfig(c, config)
|
||||
}
|
||||
72
cmd/stcli/cmd_report.go
Normal file
72
cmd/stcli/cmd_report.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "report",
|
||||
HideHelp: true,
|
||||
Usage: "Reporting command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "system",
|
||||
Usage: "Report system state",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportSystem,
|
||||
},
|
||||
{
|
||||
Name: "connections",
|
||||
Usage: "Report about connections to other devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportConnections,
|
||||
},
|
||||
{
|
||||
Name: "usage",
|
||||
Usage: "Usage report",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportUsage,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func reportSystem(c *cli.Context) {
|
||||
response := httpGet(c, "system/status")
|
||||
data := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
prettyPrintJSON(data)
|
||||
}
|
||||
|
||||
func reportConnections(c *cli.Context) {
|
||||
response := httpGet(c, "system/connections")
|
||||
data := make(map[string]map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
var overall map[string]interface{}
|
||||
for key, value := range data {
|
||||
if key == "total" {
|
||||
overall = value
|
||||
continue
|
||||
}
|
||||
value["Device ID"] = key
|
||||
prettyPrintJSON(value)
|
||||
fmt.Println()
|
||||
}
|
||||
if overall != nil {
|
||||
fmt.Println("=== Overall statistics ===")
|
||||
prettyPrintJSON(overall)
|
||||
}
|
||||
}
|
||||
|
||||
func reportUsage(c *cli.Context) {
|
||||
response := httpGet(c, "svc/report")
|
||||
report := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &report)
|
||||
prettyPrintJSON(report)
|
||||
}
|
||||
31
cmd/stcli/labels.go
Normal file
31
cmd/stcli/labels.go
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
var jsonAttributeLabels = map[string]string{
|
||||
"folderMaxMiB": "Largest folder size in MiB",
|
||||
"folderMaxFiles": "Largest folder file count",
|
||||
"longVersion": "Long version",
|
||||
"totMiB": "Total size in MiB",
|
||||
"totFiles": "Total files",
|
||||
"uniqueID": "Unique ID",
|
||||
"numFolders": "Folder count",
|
||||
"numDevices": "Device count",
|
||||
"memoryUsageMiB": "Memory usage in MiB",
|
||||
"memorySize": "Total memory in MiB",
|
||||
"sha256Perf": "SHA256 Benchmark",
|
||||
"At": "Last contacted",
|
||||
"Completion": "Percent complete",
|
||||
"InBytesTotal": "Total bytes received",
|
||||
"OutBytesTotal": "Total bytes sent",
|
||||
"ClientVersion": "Client version",
|
||||
"alloc": "Memory allocated in bytes",
|
||||
"sys": "Memory using in bytes",
|
||||
"cpuPercent": "CPU load in percent",
|
||||
"extAnnounceOK": "External announcments working",
|
||||
"goroutines": "Number of Go routines",
|
||||
"myID": "Client ID",
|
||||
"tilde": "Tilde expands to",
|
||||
"arch": "Architecture",
|
||||
"os": "OS",
|
||||
}
|
||||
63
cmd/stcli/main.go
Normal file
63
cmd/stcli/main.go
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
type ByAlphabet []cli.Command
|
||||
|
||||
func (a ByAlphabet) Len() int { return len(a) }
|
||||
func (a ByAlphabet) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByAlphabet) Less(i, j int) bool { return a[i].Name < a[j].Name }
|
||||
|
||||
var cliCommands []cli.Command
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "syncthing-cli"
|
||||
app.Author = "Audrius Butkevičius"
|
||||
app.Email = "audrius.butkevicius@gmail.com"
|
||||
app.Usage = "Syncthing command line interface"
|
||||
app.Version = "0.1"
|
||||
app.HideHelp = true
|
||||
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "endpoint, e",
|
||||
Value: "http://127.0.0.1:8384",
|
||||
Usage: "End point to connect to",
|
||||
EnvVar: "STENDPOINT",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "apikey, k",
|
||||
Value: "",
|
||||
Usage: "API Key",
|
||||
EnvVar: "STAPIKEY",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "username, u",
|
||||
Value: "",
|
||||
Usage: "Username",
|
||||
EnvVar: "STUSERNAME",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "password, p",
|
||||
Value: "",
|
||||
Usage: "Password",
|
||||
EnvVar: "STPASSWORD",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "insecure, i",
|
||||
Usage: "Do not verify SSL certificate",
|
||||
EnvVar: "STINSECURE",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Sort(ByAlphabet(cliCommands))
|
||||
app.Commands = cliCommands
|
||||
app.RunAndExitOnError()
|
||||
}
|
||||
165
cmd/stcli/utils.go
Normal file
165
cmd/stcli/utils.go
Normal file
@@ -0,0 +1,165 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"unicode"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func responseToBArray(response *http.Response) []byte {
|
||||
defer response.Body.Close()
|
||||
bytes, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
die(err)
|
||||
}
|
||||
return bytes
|
||||
}
|
||||
|
||||
func die(vals ...interface{}) {
|
||||
if len(vals) > 1 || vals[0] != nil {
|
||||
os.Stderr.WriteString(fmt.Sprintln(vals...))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func wrappedHTTPPost(url string) func(c *cli.Context) {
|
||||
return func(c *cli.Context) {
|
||||
httpPost(c, url, "")
|
||||
}
|
||||
}
|
||||
|
||||
func prettyPrintJSON(json map[string]interface{}) {
|
||||
writer := newTableWriter()
|
||||
remap := make(map[string]interface{})
|
||||
for k, v := range json {
|
||||
key, ok := jsonAttributeLabels[k]
|
||||
if !ok {
|
||||
key = firstUpper(k)
|
||||
}
|
||||
remap[key] = v
|
||||
}
|
||||
|
||||
jsonKeys := make([]string, 0, len(remap))
|
||||
for key := range remap {
|
||||
jsonKeys = append(jsonKeys, key)
|
||||
}
|
||||
sort.Strings(jsonKeys)
|
||||
for _, k := range jsonKeys {
|
||||
var value string
|
||||
rvalue := remap[k]
|
||||
switch rvalue.(type) {
|
||||
case int, int16, int32, int64, uint, uint16, uint32, uint64, float32, float64:
|
||||
value = fmt.Sprintf("%.0f", rvalue)
|
||||
default:
|
||||
value = fmt.Sprint(rvalue)
|
||||
}
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintln(writer, k+":\t"+value)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func firstUpper(str string) string {
|
||||
for i, v := range str {
|
||||
return string(unicode.ToUpper(v)) + str[i+1:]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func newTableWriter() *tabwriter.Writer {
|
||||
writer := new(tabwriter.Writer)
|
||||
writer.Init(os.Stdout, 0, 8, 0, '\t', 0)
|
||||
return writer
|
||||
}
|
||||
|
||||
func getMyID(c *cli.Context) string {
|
||||
response := httpGet(c, "system/status")
|
||||
data := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
return data["myID"].(string)
|
||||
}
|
||||
|
||||
func getConfig(c *cli.Context) config.Configuration {
|
||||
response := httpGet(c, "system/config")
|
||||
config := config.Configuration{}
|
||||
json.Unmarshal(responseToBArray(response), &config)
|
||||
return config
|
||||
}
|
||||
|
||||
func setConfig(c *cli.Context, cfg config.Configuration) {
|
||||
body, err := json.Marshal(cfg)
|
||||
die(err)
|
||||
response := httpPost(c, "system/config", string(body))
|
||||
if response.StatusCode != 200 {
|
||||
die("Unexpected status code", response.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func parseBool(input string) bool {
|
||||
val, err := strconv.ParseBool(input)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for a boolean")
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func parseInt(input string) int {
|
||||
val, err := strconv.ParseInt(input, 0, 64)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for an integer")
|
||||
}
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func parseUint(input string) int {
|
||||
val, err := strconv.ParseUint(input, 0, 64)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for an unsigned integer")
|
||||
}
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func parsePort(input string) int {
|
||||
port := parseUint(input)
|
||||
if port < 1 || port > 65535 {
|
||||
die(input + " is not a valid port\nExpected value between 1 and 65535")
|
||||
}
|
||||
return port
|
||||
}
|
||||
|
||||
func validAddress(input string) {
|
||||
tokens := strings.Split(input, ":")
|
||||
if len(tokens) != 2 {
|
||||
die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
|
||||
}
|
||||
matched, err := regexp.MatchString("^[a-zA-Z0-9]+([-a-zA-Z0-9.]+[-a-zA-Z0-9]+)?$", tokens[0])
|
||||
die(err)
|
||||
if !matched {
|
||||
die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
|
||||
}
|
||||
parsePort(tokens[1])
|
||||
}
|
||||
|
||||
func parseDeviceID(input string) protocol.DeviceID {
|
||||
device, err := protocol.DeviceIDFromString(input)
|
||||
if err != nil {
|
||||
die(input + " is not a valid device id")
|
||||
}
|
||||
return device
|
||||
}
|
||||
@@ -66,7 +66,7 @@ func generateFiles(dir string, files, maxexp int, srcname string) error {
|
||||
}
|
||||
|
||||
func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
|
||||
src := io.LimitReader(&inifiteReader{fd}, int64(s))
|
||||
src := io.LimitReader(&inifiteReader{fd}, s)
|
||||
dst, err := os.Create(p1)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -85,12 +85,7 @@ func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
|
||||
_ = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
|
||||
|
||||
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
|
||||
err = os.Chtimes(p1, t, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return os.Chtimes(p1, t, t)
|
||||
}
|
||||
|
||||
func randomName() string {
|
||||
|
||||
@@ -249,13 +249,13 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, int64(getLimitBurst)) {
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, getLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, int64(postLimitBurst)) {
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, postLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -54,7 +54,6 @@ func init() {
|
||||
var (
|
||||
listen string
|
||||
debug bool
|
||||
proto string
|
||||
|
||||
sessionAddress []byte
|
||||
sessionPort uint16
|
||||
|
||||
@@ -216,7 +216,7 @@ done:
|
||||
func (s *session) GetClientInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.serverid[:],
|
||||
Key: []byte(s.clientkey),
|
||||
Key: s.clientkey,
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: false,
|
||||
@@ -226,7 +226,7 @@ func (s *session) GetClientInvitationMessage() protocol.SessionInvitation {
|
||||
func (s *session) GetServerInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.clientid[:],
|
||||
Key: []byte(s.serverkey),
|
||||
Key: s.serverkey,
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: true,
|
||||
|
||||
@@ -42,7 +42,7 @@ func getStatus(w http.ResponseWriter, r *http.Request) {
    status["goMaxProcs"] = runtime.GOMAXPROCS(-1)
    status["goNumRoutine"] = runtime.NumGoroutine()
    status["kbps10s1m5m15m30m60m"] = []int64{
        rc.rate(10/10) * 8 / 1000,
        rc.rate(1) * 8 / 1000, // each interval is 10s
        rc.rate(60/10) * 8 / 1000,
        rc.rate(5*60/10) * 8 / 1000,
        rc.rate(15*60/10) * 8 / 1000,

@@ -82,8 +82,6 @@ type modelIntf interface {
    Availability(folder, file string, version protocol.Vector, block protocol.BlockInfo) []model.Availability
    GetIgnores(folder string) ([]string, []string, error)
    SetIgnores(folder string, content []string) error
    PauseDevice(device protocol.DeviceID)
    ResumeDevice(device protocol.DeviceID)
    DelayScan(folder string, next time.Duration)
    ScanFolder(folder string) error
    ScanFolders() map[string]error
@@ -105,6 +103,7 @@ type configIntf interface {
    Subscribe(c config.Committer)
    Folders() map[string]config.FolderConfiguration
    Devices() map[protocol.DeviceID]config.DeviceConfiguration
    SetDevice(config.DeviceConfiguration) error
    Save() error
    ListenAddresses() []string
    RequiresRestart() bool
@@ -258,21 +257,21 @@ func (s *apiService) Serve() {

    // The POST handlers
    postRestMux := http.NewServeMux()
    postRestMux.HandleFunc("/rest/db/prio", s.postDBPrio) // folder file [perpage] [page]
    postRestMux.HandleFunc("/rest/db/ignores", s.postDBIgnores) // folder
    postRestMux.HandleFunc("/rest/db/override", s.postDBOverride) // folder
    postRestMux.HandleFunc("/rest/db/scan", s.postDBScan) // folder [sub...] [delay]
    postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig) // <body>
    postRestMux.HandleFunc("/rest/system/error", s.postSystemError) // <body>
    postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
    postRestMux.HandleFunc("/rest/system/ping", s.restPing) // -
    postRestMux.HandleFunc("/rest/system/reset", s.postSystemReset) // [folder]
    postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart) // -
    postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown) // -
    postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // -
    postRestMux.HandleFunc("/rest/system/pause", s.postSystemPause) // device
    postRestMux.HandleFunc("/rest/system/resume", s.postSystemResume) // device
    postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]
    postRestMux.HandleFunc("/rest/db/prio", s.postDBPrio) // folder file [perpage] [page]
    postRestMux.HandleFunc("/rest/db/ignores", s.postDBIgnores) // folder
    postRestMux.HandleFunc("/rest/db/override", s.postDBOverride) // folder
    postRestMux.HandleFunc("/rest/db/scan", s.postDBScan) // folder [sub...] [delay]
    postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig) // <body>
    postRestMux.HandleFunc("/rest/system/error", s.postSystemError) // <body>
    postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
    postRestMux.HandleFunc("/rest/system/ping", s.restPing) // -
    postRestMux.HandleFunc("/rest/system/reset", s.postSystemReset) // [folder]
    postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart) // -
    postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown) // -
    postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // -
    postRestMux.HandleFunc("/rest/system/pause", s.makeDevicePauseHandler(true)) // device
    postRestMux.HandleFunc("/rest/system/resume", s.makeDevicePauseHandler(false)) // device
    postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]

    // Debug endpoints, not for general use
    debugMux := http.NewServeMux()
@@ -381,10 +380,8 @@ func (s *apiService) String() string {
}

func (s *apiService) VerifyConfiguration(from, to config.Configuration) error {
    if _, err := net.ResolveTCPAddr("tcp", to.GUI.Address()); err != nil {
        return err
    }
    return nil
    _, err := net.ResolveTCPAddr("tcp", to.GUI.Address())
    return err
}

func (s *apiService) CommitConfiguration(from, to config.Configuration) bool {
@@ -1105,30 +1102,27 @@ func (s *apiService) postSystemUpgrade(w http.ResponseWriter, r *http.Request) {
    }
}

func (s *apiService) postSystemPause(w http.ResponseWriter, r *http.Request) {
    var qs = r.URL.Query()
    var deviceStr = qs.Get("device")
func (s *apiService) makeDevicePauseHandler(paused bool) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        var qs = r.URL.Query()
        var deviceStr = qs.Get("device")

    device, err := protocol.DeviceIDFromString(deviceStr)
    if err != nil {
        http.Error(w, err.Error(), 500)
        return
        device, err := protocol.DeviceIDFromString(deviceStr)
        if err != nil {
            http.Error(w, err.Error(), 500)
            return
        }

        cfg, ok := s.cfg.Devices()[device]
        if !ok {
            http.Error(w, "not found", http.StatusNotFound)
        }

        cfg.Paused = paused
        if err := s.cfg.SetDevice(cfg); err != nil {
            http.Error(w, err.Error(), 500)
        }
    }

    s.model.PauseDevice(device)
}

func (s *apiService) postSystemResume(w http.ResponseWriter, r *http.Request) {
    var qs = r.URL.Query()
    var deviceStr = qs.Get("device")

    device, err := protocol.DeviceIDFromString(deviceStr)
    if err != nil {
        http.Error(w, err.Error(), 500)
        return
    }

    s.model.ResumeDevice(device)
}

func (s *apiService) postDBScan(w http.ResponseWriter, r *http.Request) {

@@ -199,7 +199,8 @@ var (

type RuntimeOptions struct {
    confDir string
    reset bool
    resetDatabase bool
    resetDeltaIdxs bool
    showVersion bool
    showPaths bool
    doUpgrade bool
@@ -212,6 +213,7 @@ type RuntimeOptions struct {
    auditEnabled bool
    verbose bool
    paused bool
    unpaused bool
    guiAddress string
    guiAPIKey string
    generateDir string
@@ -259,7 +261,8 @@ func parseCommandLineOptions() RuntimeOptions {
    flag.BoolVar(&options.noBrowser, "no-browser", false, "Do not start browser")
    flag.BoolVar(&options.browserOnly, "browser-only", false, "Open GUI in browser")
    flag.BoolVar(&options.noRestart, "no-restart", options.noRestart, "Do not restart; just exit")
    flag.BoolVar(&options.reset, "reset", false, "Reset the database")
    flag.BoolVar(&options.resetDatabase, "reset-database", false, "Reset the database, forcing a full rescan and resync")
    flag.BoolVar(&options.resetDeltaIdxs, "reset-deltas", false, "Reset delta index IDs, forcing a full index exchange")
    flag.BoolVar(&options.doUpgrade, "upgrade", false, "Perform upgrade")
    flag.BoolVar(&options.doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
    flag.BoolVar(&options.showVersion, "version", false, "Show version")
@@ -267,7 +270,8 @@ func parseCommandLineOptions() RuntimeOptions {
    flag.StringVar(&options.upgradeTo, "upgrade-to", options.upgradeTo, "Force upgrade directly from specified URL")
    flag.BoolVar(&options.auditEnabled, "audit", false, "Write events to audit file")
    flag.BoolVar(&options.verbose, "verbose", false, "Print verbose log output")
    flag.BoolVar(&options.paused, "paused", false, "Start with all devices paused")
    flag.BoolVar(&options.paused, "paused", false, "Start with all devices and folders paused")
    flag.BoolVar(&options.unpaused, "unpaused", false, "Start with all devices and folders unpaused")
    flag.StringVar(&options.logFile, "logfile", options.logFile, "Log file name (use \"-\" for stdout)")
    if runtime.GOOS == "windows" {
        // Allow user to hide the console window
@@ -367,7 +371,7 @@ func main() {
        return
    }

    if options.reset {
    if options.resetDatabase {
        resetDB()
        return
    }
@@ -554,8 +558,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
    // Event subscription for the API; must start early to catch the early
    // events. The LocalChangeDetected event might overwhelm the event
    // receiver in some situations so we will not subscribe to it here.
    apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected), 1000)
    diskSub := events.NewBufferedSubscription(events.Default.Subscribe(events.LocalChangeDetected), 1000)
    apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected&^events.RemoteChangeDetected), 1000)
    diskSub := events.NewBufferedSubscription(events.Default.Subscribe(events.LocalChangeDetected|events.RemoteChangeDetected), 1000)

    if len(os.Getenv("GOMAXPROCS")) == 0 {
        runtime.GOMAXPROCS(runtime.NumCPU())
@@ -620,6 +624,10 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
    InsecureSkipVerify: true,
    MinVersion: tls.VersionTLS12,
    CipherSuites: []uint16{
        0xCCA8, // TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, Go 1.8
        0xCCA9, // TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, Go 1.8
        tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
        tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
        tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
@@ -663,6 +671,11 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
        l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
    }

    if runtimeOptions.resetDeltaIdxs {
        l.Infoln("Reinitializing delta index IDs")
        ldb.DropDeltaIndexIDs()
    }

    protectedFiles := []string{
        locations[locDatabase],
        locations[locConfigFile],
@@ -697,14 +710,17 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
        m.StartDeadlockDetector(20 * time.Minute)
    }

    if runtimeOptions.paused {
        for device := range cfg.Devices() {
            m.PauseDevice(device)
        }
    if runtimeOptions.unpaused {
        setPauseState(cfg, false)
    } else if runtimeOptions.paused {
        setPauseState(cfg, true)
    }

    // Add and start folders
    for _, folderCfg := range cfg.Folders() {
        if folderCfg.Paused {
            continue
        }
        m.AddFolder(folderCfg)
        m.StartFolder(folderCfg.ID)
    }
@@ -1066,7 +1082,7 @@ func getFreePort(host string, ports ...int) (int, error) {
}

func standbyMonitor() {
    restartDelay := time.Duration(60 * time.Second)
    restartDelay := 60 * time.Second
    now := time.Now()
    for {
        time.Sleep(10 * time.Second)
@@ -1199,3 +1215,16 @@ func showPaths() {
    fmt.Printf("GUI override directory:\n\t%s\n\n", locations[locGUIAssets])
    fmt.Printf("Default sync folder directory:\n\t%s\n\n", locations[locDefFolder])
}

func setPauseState(cfg *config.Wrapper, paused bool) {
    raw := cfg.RawCopy()
    for i := range raw.Devices {
        raw.Devices[i].Paused = paused
    }
    for i := range raw.Folders {
        raw.Folders[i].Paused = paused
    }
    if err := cfg.Replace(raw); err != nil {
        l.Fatalln("Cannot adjust paused state:", err)
    }
}

@@ -45,6 +45,10 @@ func (c *mockedConfig) Devices() map[protocol.DeviceID]config.DeviceConfiguratio
    return nil
}

func (c *mockedConfig) SetDevice(config.DeviceConfiguration) error {
    return nil
}

func (c *mockedConfig) Save() error {
    return nil
}

@@ -202,8 +202,24 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
        }

        l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
        l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
        l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
        if strings.Contains(line, "leveldb") && strings.Contains(line, "corrupt") {
            l.Warnln(`
*********************************************************************************
* Crash due to corrupt database. *
* *
* This crash usually occurs due to one of the following reasons: *
* - Syncthing being stopped abruptly (killed/loss of power) *
* - Bad hardware (memory/disk issues) *
* - Software that affects disk writes (SSD caching software and simillar) *
* *
* Please see the following URL for instructions on how to recover: *
* https://docs.syncthing.net/users/faq.html#my-syncthing-database-is-corrupt *
*********************************************************************************
`)
        } else {
            l.Warnln("Please check for existing issues with similar panic message at https://github.com/syncthing/syncthing/issues/")
            l.Warnln("If no issue with similar panic message exists, please create a new issue with the panic log attached")
        }

        stdoutMut.Lock()
        for _, line := range stdoutFirstLines {

@@ -134,7 +134,7 @@ func reportData(cfg configIntf, m modelIntf) map[string]interface{} {
    for _, cfg := range cfg.Folders() {
        rescanIntvs = append(rescanIntvs, cfg.RescanIntervalS)

        if cfg.Type == config.FolderTypeReadOnly {
        if cfg.Type == config.FolderTypeSendOnly {
            folderUses["readonly"]++
        }
        if cfg.IgnorePerms {

@@ -94,9 +94,12 @@ func (s *verboseService) formatEvent(ev events.Event) string {

    case events.LocalChangeDetected:
        data := ev.Data.(map[string]string)
        // Local change detected in folder "foo": modified file /Users/jb/whatever
        return fmt.Sprintf("Local change detected in folder %q: %s %s %s", data["folder"], data["action"], data["type"], data["path"])

    case events.RemoteChangeDetected:
        data := ev.Data.(map[string]string)
        return fmt.Sprintf("Remote change detected in folder %q: %s %s %s", data["folder"], data["action"], data["type"], data["path"])

    case events.RemoteIndexUpdated:
        data := ev.Data.(map[string]interface{})
        return fmt.Sprintf("Device %v sent an index update for %q with %d items", data["device"], data["folder"], data["items"])
@@ -163,6 +166,18 @@ func (s *verboseService) formatEvent(ev events.Event) string {
        device := data["device"]
        return fmt.Sprintf("Device %v was resumed", device)

    case events.FolderPaused:
        data := ev.Data.(map[string]string)
        id := data["id"]
        label := data["label"]
        return fmt.Sprintf("Folder %v (%v) was paused", id, label)

    case events.FolderResumed:
        data := ev.Data.(map[string]string)
        id := data["id"]
        label := data["label"]
        return fmt.Sprintf("Folder %v (%v) was resumed", id, label)

    case events.ListenAddressesChanged:
        data := ev.Data.(map[string]interface{})
        address := data["address"]

@@ -7,9 +7,9 @@
"Actions": "Aktionen",
"Add": "Hinzufügen",
"Add Device": "Gerät hinzufügen",
"Add Folder": "Verzeichnis hinzufügen",
"Add Folder": "Ordner hinzufügen",
"Add Remote Device": "Fern-Gerät hinzufügen",
"Add new folder?": "Neues Verzeichnis hinzufügen?",
"Add new folder?": "Neuen Ordner hinzufügen?",
"Address": "Adresse",
"Addresses": "Adressen",
"Advanced": "Erweitert",
@@ -18,7 +18,7 @@
"All Data": "Alle Daten",
"Allow Anonymous Usage Reporting?": "Übertragung von anonymen Nutzungsberichten erlauben?",
"Alphabetic": "Alphabetisch",
"An external command handles the versioning. It has to remove the file from the synced folder.": "Ein externer Programmaufruf handhabt die Versionierung. Es muss die Datei aus dem zu synchronisierendem Verzeichnis entfernen.",
"An external command handles the versioning. It has to remove the file from the synced folder.": "Ein externer Programmaufruf handhabt die Versionierung. Es muss die Datei aus dem zu synchronisierendem Ordner entfernen.",
"Anonymous Usage Reporting": "Anonymer Nutzungsbericht",
"Any devices configured on an introducer device will be added to this device as well.": "Alle Geräte, die beim Verteiler eingetragen sind, werden auch bei diesem Gerät eingetragen",
"Automatic upgrades": "Automatische Updates aktivieren",
@@ -52,8 +52,8 @@
"Downloaded": "Heruntergeladen",
"Downloading": "Lädt herunter",
"Edit": "Bearbeiten",
"Edit Device": "Edit Device",
"Edit Folder": "Edit Folder",
"Edit Device": "Gerät bearbeiten",
"Edit Folder": "Ordner bearbeiten",
"Editing": "Bearbeitet",
"Enable NAT traversal": "NAT-Durchdringung aktivieren",
"Enable Relaying": "Weiterleitung aktivieren",
@@ -65,15 +65,15 @@
"File Pull Order": "Dateiübertragungsreihenfolge",
"File Versioning": "Dateiversionierung",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Dateizugriffsrechte beim Suchen nach Veränderungen ignorieren. Bei FAT-Dateisystemen zu verwenden.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Wenn Syncthing Dateien ersetzt oder löscht, werden sie in das .stversions Verzeichnis verschoben.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, datiert in das Verzeichnis .stversions verschoben.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Wenn Syncthing Dateien ersetzt oder löscht, werden sie in den Ordner .stversions verschoben.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, datiert in den Ordner .stversions verschoben.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Dateien sind auf diesem Gerät schreibgeschützt. Auf diesem Gerät durchgeführte Veränderungen werden aber auf den Rest des Verbunds übertragen.",
"Folder": "Verzeichnis",
"Folder ID": "Verzeichniskennung",
"Folder Label": "Verzeichnisbezeichnung",
"Folder Path": "Verzeichnispfad",
"Folder Type": "Verzeichnistyp",
"Folders": "Verzeichnisse",
"Folder": "Ordner",
"Folder ID": "Ordnerkennung",
"Folder Label": "Ordnerbezeichnung",
"Folder Path": "Ordnerpfad",
"Folder Type": "Ordnertyp",
"Folders": "Ordner",
"GUI": "GUI",
"GUI Authentication Password": "Passwort für Zugang zur Benutzeroberfläche",
"GUI Authentication User": "Nutzername für Zugang zur Benutzeroberfläche",
@@ -88,7 +88,7 @@
"Ignore Patterns": "Ignoriermuster",
"Ignore Permissions": "Berechtigungen ignorieren",
"Incoming Rate Limit (KiB/s)": "Limit Datenrate (eingehend) (KB/s)",
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Eine falsche Konfiguration kann den Verzeichnisinhalt beschädigen und Syncthing in einen unausführbaren Zustand versetzen.",
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Eine falsche Konfiguration kann den Ordnerinhalt beschädigen und Syncthing in einen unausführbaren Zustand versetzen.",
"Introducer": "Verteilergerät",
"Inversion of the given condition (i.e. do not exclude)": "Umkehrung der angegebenen Bedingung (z.B. schließe nicht aus)",
"Keep Versions": "Versionen erhalten",
@@ -108,10 +108,10 @@
"Metadata Only": "Nur Metadaten",
"Minimum Free Disk Space": "Minimal freier Festplattenspeicher",
"Move to top of queue": "An den Anfang der Warteschlange setzen",
"Multi level wildcard (matches multiple directory levels)": "Verschachteltes Maskenzeichen (wird für verschachtelte Verzeichnisse verwendet)",
"Multi level wildcard (matches multiple directory levels)": "Verschachteltes Maskenzeichen (wird für verschachtelte Ordner verwendet)",
"Never": "Nie",
"New Device": "Neues Gerät",
"New Folder": "Neues Verzeichnis",
"New Folder": "Neuer Ordner",
"Newest First": "Neueste zuerst",
"No": "Nein",
"No File Versioning": "Keine Dateiversionierung",
@@ -120,14 +120,14 @@
"OK": "OK",
"Off": "Aus",
"Oldest First": "Älteste zuerst",
"Optional descriptive label for the folder. Can be different on each device.": "Optionale beschreibende Bezeichnung des Verzeichnisses. Kann auf jedem Gerät unterschiedlich sein.",
"Optional descriptive label for the folder. Can be different on each device.": "Optionale beschreibende Bezeichnung des Ordners. Kann auf jedem Gerät unterschiedlich sein.",
"Options": "Optionen",
"Out of Sync": "Nicht synchronisiert",
"Out of Sync Items": "Nicht synchronisierte Objekte",
"Outgoing Rate Limit (KiB/s)": "Limit Datenrate (ausgehend) (KB/s)",
"Override Changes": "Änderungen überschreiben",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Verzeichnis auf dem lokalen Gerät. Verzeichnis wird erzeugt, wenn es nicht existiert. Das Tilden-Zeichen (~) kann als Abkürzung benutzt werden für",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Pfad in dem alte Dateiversionen gespeichert werden sollen (ohne Angabe wird das Verzeichnis .stversions im Verzeichnis verwendet).",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Ordner auf dem lokalen Gerät. Ordner wird erzeugt, wenn er nicht existiert. Das Tilden-Zeichen (~) kann als Abkürzung benutzt werden für",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Pfad in dem alte Dateiversionen gespeichert werden sollen (ohne Angabe wird der Ordner .stversions im Ordner verwendet).",
"Pause": "Pause",
"Paused": "Pausiert",
"Please consult the release notes before performing a major upgrade.": "Bitte lesen Sie die Veröffentlichungsnotizen bevor Sie eine neue Hauptversion installieren.",
@@ -142,7 +142,7 @@
"Release Notes": "Veröffentlichungsnotizen",
"Remote Devices": "Fern-Geräte",
"Remove": "Entfernen",
"Required identifier for the folder. Must be the same on all cluster devices.": "Erforderlicher Bezeichner für das Verzeichnis. Muss auf allen Verbund-Geräten gleich sein.",
"Required identifier for the folder. Must be the same on all cluster devices.": "Erforderlicher Bezeichner für den Ordner. Muss auf allen Verbund-Geräten gleich sein.",
"Rescan": "Neu scannen",
"Rescan All": "Alle neu scannen",
"Rescan Interval": "Scanintervall",
@@ -154,16 +154,16 @@
"Save": "Speichern",
"Scan Time Remaining": "Zeit für Scan verbleibend",
"Scanning": "Scannen",
"Select the devices to share this folder with.": "Wähle die Geräte aus, mit denen Du dieses Verzeichnis teilen willst.",
"Select the folders to share with this device.": "Wähle die Verzeichnisse aus, die du mit diesem Gerät teilen möchtest",
"Select the devices to share this folder with.": "Wähle die Geräte aus, mit denen Du diesen Ordner teilen willst.",
"Select the folders to share with this device.": "Wähle die Ordner aus, die Du mit diesem Gerät teilen möchtest",
"Send & Receive": "Senden & empfangen",
"Send Only": "Nur senden",
"Settings": "Einstellungen",
"Share": "Teilen",
"Share Folder": "Teile Verzeichnis",
"Share Folders With Device": "Teile Verzeichnisse mit diesem Gerät",
"Share Folder": "Ordner teilen",
"Share Folders With Device": "Ordner mit diesem Gerät teilen",
"Share With Devices": "Teile mit diesen Geräten",
"Share this folder?": "Dieses Verzeichnis teilen?",
"Share this folder?": "Diesen Ordner teilen?",
"Shared With": "Geteilt mit",
"Show ID": "Kennung anzeigen",
"Show QR": "Zeige QR Code",
@@ -172,7 +172,7 @@
"Shutdown": "Herunterfahren",
"Shutdown Complete": "Vollständig Heruntergefahren",
"Simple File Versioning": "Einfache Dateiversionierung",
"Single level wildcard (matches within a directory only)": "Einzelnes Maskenzeichen (wird für ein einzelnes Verzeichnis verwendet)",
"Single level wildcard (matches within a directory only)": "Einzelnes Maskenzeichen (wird für einen einzelnen Ordner verwendet)",
"Smallest First": "Kleinstes zuerst",
"Source Code": "Quellcode",
"Staggered File Versioning": "Stufenweise Dateiversionierung",
@@ -193,12 +193,12 @@
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Die Konfiguration wurde gespeichert, aber noch nicht aktiviert. Syncthing muss neugestartet werden, um die neue Konfiguration zu übernehmen.",
"The device ID cannot be blank.": "Die Gerätekennung darf nicht leer sein.",
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Die hier einzutragende Gerätekennung kann im Dialog \"Aktionen > Kennung anzeigen\" auf dem anderen Gerät gefunden werden. Leerzeichen und Bindestriche sind optional (werden ignoriert).",
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Nutzungsbericht wird täglich gesendet. Er wird verwendet, um Statistiken über verwendete Betriebssysteme, Verzeichnis-Größen und Programm-Versionen zu erstellen. Sollte der Bericht in Zukunft weitere Daten erfassen, wird dieses Fenster erneut angezeigt.",
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Nutzungsbericht wird täglich gesendet. Er wird verwendet, um Statistiken über verwendete Betriebssysteme, Ordnergrößen und Programmversionen zu erstellen. Sollte der Bericht in Zukunft weitere Daten erfassen, wird dieses Fenster erneut angezeigt.",
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Die eingegebene Gerätekennung scheint nicht gültig zu sein. Es sollte eine 52 oder 56 stellige Zeichenkette aus Buchstaben und Nummern sein. Leerzeichen und Bindestriche sind optional.",
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Der erste Kommandozeilenparameter ist der Verzeichnis-Pfad und der zweite Parameter ist der relative Pfad in diesem Verzeichnis.",
"The folder ID cannot be blank.": "Die Verzeichniskennung darf nicht leer sein.",
"The folder ID must be unique.": "Die Verzeichniskennung muss eindeutig sein.",
"The folder path cannot be blank.": "Der Verzeichnispfad darf nicht leer sein.",
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Der erste Kommandozeilenparameter ist der Ordnerpfad und der zweite Parameter ist der relative Pfad in diesem Ordner.",
"The folder ID cannot be blank.": "Die Ordnerkennung darf nicht leer sein.",
"The folder ID must be unique.": "Die Ordnerkennung muss eindeutig sein.",
"The folder path cannot be blank.": "Der Ordnerpfad darf nicht leer sein.",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Es wird in folgenden Abständen versioniert: In der ersten Stunde wird alle 30 Sekunden eine Version behalten, am ersten Tag eine jede Stunde, in den ersten 30 Tagen eine jeden Tag. Danach wird bis zum angegebenen Höchstalter eine Version pro Woche behalten.",
"The following items could not be synchronized.": "Die folgenden Objekte konnten nicht synchronisiert werden.",
"The maximum age must be a number and cannot be blank.": "Das Höchstalter muss angegeben werden und eine Zahl sein.",
@@ -230,16 +230,16 @@
"Version": "Version",
"Versions Path": "Versionierungspfad",
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Alte Dateiversionen werden automatisch gelöscht, wenn sie älter als das angegebene Höchstalter sind oder die angegebene Höchstzahl an Dateien erreicht ist.",
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Warnung, dieser Pfad ist ein Unterverzeichnis des existierenden Verzeichnisses \"{{otherFolder}}\".",
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Warnung, dieser Pfad ist ein Unterordner des existierenden Ordners \"{{otherFolder}}\".",
"When adding a new device, keep in mind that this device must be added on the other side too.": "Beachte beim Hinzufügen eines neuen Gerätes, dass dieses Gerät auch auf den anderen Geräten hinzugefügt werden muss.",
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Beachte bitte beim Hinzufügen eines neuen Verzeichnisses, dass die Verzeichniskennung dazu verwendet wird, Verzeichnisse zwischen Geräten zu verbinden. Die Kennung muss also auf allen Geräten gleich sein, die Groß- und Kleinschreibung muss dabei beachtet werden.",
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Beachte bitte beim Hinzufügen eines neuen Ordners, dass die Ordnerkennung dazu verwendet wird, Ordner zwischen Geräten zu verbinden. Die Kennung muss also auf allen Geräten gleich sein, die Groß- und Kleinschreibung muss dabei beachtet werden.",
"Yes": "Ja",
"You must keep at least one version.": "Du musst mindestens eine Version behalten.",
"days": "Tage",
"directories": "Verzeichnisse",
"directories": "Ordner",
"files": "Dateien",
"full documentation": "Komplette Dokumentation",
"items": "Objekte",
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} möchte das Verzeichnis \"{{folder}}\" teilen.",
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} möchte das Verzeichnis \"{{folderlabel}}\" ({{folder}}) teilen."
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} möchte den Ordner \"{{folder}}\" teilen.",
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} möchte den Ordner \"{{folderlabel}}\" ({{folder}}) teilen."
}
@@ -52,6 +52,8 @@
"Downloaded": "Downloaded",
"Downloading": "Downloading",
"Edit": "Edit",
"Edit Device": "Edit Device",
"Edit Folder": "Edit Folder",
"Editing": "Editing",
"Enable NAT traversal": "Enable NAT traversal",
"Enable Relaying": "Enable Relaying",
@@ -95,6 +97,7 @@
"Last Scan": "Last Scan",
"Last seen": "Last seen",
"Later": "Later",
"Latest Change": "Latest Change",
"Listeners": "Listeners",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
@@ -153,6 +156,8 @@
"Scanning": "Scanning",
"Select the devices to share this folder with.": "Select the devices to share this folder with.",
"Select the folders to share with this device.": "Select the folders to share with this device.",
"Send \u0026 Receive": "Send \u0026 Receive",
"Send Only": "Send Only",
"Settings": "Settings",
"Share": "Share",
"Share Folder": "Share Folder",

@@ -97,7 +97,7 @@
"Last Scan": "Senaste skanning",
"Last seen": "Senast sedd",
"Later": "Senare",
"Latest Change": "Senast ändrad",
"Latest Change": "Senaste ändring",
"Listeners": "Lyssnare",
"Local Discovery": "Lokal annonsering",
"Local State": "Lokal status",

@@ -31,7 +31,7 @@
"Command": "Komut",
"Comment, when used at the start of a line": "Satır başında kullanıldığında açıklama özelliği taşır",
"Compression": "Sıkıştırma",
"Configured": "Configured",
"Configured": "Yapılandırıldı",
"Connection Error": "Bağlantı hatası",
"Connection Type": "Bağlantı Türü",
"Copied from elsewhere": "Başka bir yerden kopyalanmış",
@@ -52,8 +52,8 @@
"Downloaded": "İndirilmiş",
"Downloading": "İndiriliyor",
"Edit": "Düzenle",
"Edit Device": "Edit Device",
"Edit Folder": "Edit Folder",
"Edit Device": "Aygıtı Düzenle",
"Edit Folder": "Klasörü Düzenle",
"Editing": "Düzenleniyor",
"Enable NAT traversal": "Enable NAT traversal",
"Enable Relaying": "Enable Relaying",
@@ -97,7 +97,7 @@
"Last Scan": "Son Tarama",
"Last seen": "Son görülme",
"Later": "Sonra",
"Latest Change": "Latest Change",
"Latest Change": "Son Değişim",
"Listeners": "Dinleyiciler",
"Local Discovery": "Yerel Discovery",
"Local State": "Yerel Durum",
@@ -142,7 +142,7 @@
"Release Notes": "Sürüm Notları",
"Remote Devices": "Uzak Aygıtlar",
"Remove": "Kaldır",
"Required identifier for the folder. Must be the same on all cluster devices.": "Required identifier for the folder. Must be the same on all cluster devices.",
"Required identifier for the folder. Must be the same on all cluster devices.": "Klasör için tanımlayıcı gereklidir. Tüm küme cihazlarda aynı olmalıdır.",
"Rescan": "Tekrar Tara",
"Rescan All": "Tümünü Tekrar Tara",
"Rescan Interval": "Tarama Aralığı",
@@ -156,8 +156,8 @@
"Scanning": "Taranıyor",
"Select the devices to share this folder with.": "Bu klasörü paylaşacağın aygıtları seç.",
"Select the folders to share with this device.": "Bu aygıtla paylaşılacak klasörleri seç.",
"Send & Receive": "Send & Receive",
"Send Only": "Send Only",
"Send & Receive": "Gönder & Al",
"Send Only": "Yalnızca Gönder",
"Settings": "Ayarlar",
"Share": "Paylaş",
"Share Folder": "Paylaşım Klasörü",
@@ -189,7 +189,7 @@
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing uygulaması çökmüş olabilir, veya internet bağlantınızda bir sorun var. Tekrar deniyor....",
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing isteminizi işleme alırken bir sorunla karşılaştı. Lütfen sayfanızı yenileyin veya sorun devam ediyorsa Syncthing'i yeniden başlatın.",
"The Syncthing admin interface is configured to allow remote access without a password.": "Syncthing yönetici arayüzü parolasız olarak uzaktan erişime izin verilecek şekilde yapılandırıldı.",
"The aggregated statistics are publicly available at the URL below.": "The aggregated statistics are publicly available at the URL below.",
"The aggregated statistics are publicly available at the URL below.": "Toplanan istatistikler halka açık biçimde aşağıdaki adrestedir.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Yapılandırma kaydedildi ancak etkinleştirilmedi. Etkinleştirmek için Syncthing yeniden başlatılmalı.",
"The device ID cannot be blank.": "Aygıt ID boş olamaz.",
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Buraya girilecek olan aygıt ID'si diğer aygıtlarda, \"Eylemler > ID Göster\" penceresinde bulunabilir. Boşluklar ve çizgiler isteğe bağlıdır (yoksayılmış).",

@@ -271,6 +271,7 @@
<span class="fa fa-fw" ng-class="[folder.type == 'readonly' ? 'fa-lock' : 'fa-folder']"></span>
</div>
<div class="panel-status pull-right text-{{folderClass(folder)}}" ng-switch="folderStatus(folder)">
<span ng-switch-when="paused"><span class="hidden-xs" translate>Paused</span><span class="visible-xs">◼</span></span>
<span ng-switch-when="unknown"><span class="hidden-xs" translate>Unknown</span><span class="visible-xs">◼</span></span>
<span ng-switch-when="unshared"><span class="hidden-xs" translate>Unshared</span><span class="visible-xs">◼</span></span>
<span ng-switch-when="stopped"><span class="hidden-xs" translate>Stopped</span><span class="visible-xs">◼</span></span>
@@ -307,11 +308,11 @@
<span tooltip data-original-title="{{folder.path}}">{{folder.path}}</span>
</td>
</tr>
<tr ng-if="model[folder.id].invalid || model[folder.id].error">
<tr ng-if="!folder.paused && (model[folder.id].invalid || model[folder.id].error)">
<th><span class="fa fa-fw fa-exclamation-triangle"></span> <span translate>Error</span></th>
<td class="text-right">{{model[folder.id].invalid || model[folder.id].error}}</td>
</tr>
<tr>
<tr ng-if="!folder.paused">
<th><span class="fa fa-fw fa-globe"></span> <span translate>Global State</span></th>
<td class="text-right">
<span tooltip data-original-title="{{model[folder.id].globalFiles | alwaysNumber}} {{'files' | translate}}, {{model[folder.id].globalDirectories | alwaysNumber}} {{'directories' | translate}}, ~{{model[folder.id].globalBytes | binary}}B">
@@ -321,7 +322,7 @@
</span>
</td>
</tr>
<tr>
<tr ng-if="!folder.paused">
<th><span class="fa fa-fw fa-home"></span> <span translate>Local State</span></th>
<td class="text-right">
<span tooltip data-original-title="{{model[folder.id].localFiles | alwaysNumber}} {{'files' | translate}}, {{model[folder.id].localDirectories | alwaysNumber}} {{'directories' | translate}}, ~{{model[folder.id].localBytes | binary}}B">
@@ -354,7 +355,7 @@
<tr ng-if="folder.type != 'readwrite'">
<th><span class="fa fa-fw fa-lock"></span> <span translate>Folder Type</span></th>
<td class="text-right">
<span ng-if="folder.type == 'readonly'" translate>Master</span>
<span ng-if="folder.type == 'readonly'" translate>Send Only</span>
<span ng-if="folder.type != 'readonly'">{{ folder.type.charAt(0).toUpperCase() + folder.type.slice(1) }}</span>
</td>
</tr>
@@ -417,6 +418,12 @@
<span class="fa fa-arrow-circle-up"></span> <span translate>Override Changes</span>
</button>
<span class="pull-right">
<button ng-if="!folder.paused" type="button" class="btn btn-sm btn-default" ng-click="setFolderPause(folder.id, true)">
<span class="fa fa-pause"></span> <span translate>Pause</span>
</button>
<button ng-if="folder.paused" type="button" class="btn btn-sm btn-default" ng-click="setFolderPause(folder.id, false)">
<span class="fa fa-play"></span> <span translate>Resume</span>
</button>
<button type="button" class="btn btn-sm btn-default" ng-click="rescanFolder(folder.id)" ng-show="['idle', 'stopped', 'unshared'].indexOf(folderStatus(folder)) > -1">
<span class="fa fa-refresh"></span> <span translate>Rescan</span>
</button>
@@ -579,6 +586,10 @@
<th><span class="fa fa-fw fa-thumbs-o-up"></span> <span translate>Introducer</span></th>
<td translate class="text-right">Yes</td>
</tr>
<tr ng-if="deviceCfg.introducedBy">
<th><span class="fa fa-fw fa-meh-o"></span> <span translate>Introduced By</span></th>
<td translate class="text-right">{{ deviceName(findDevice(deviceCfg.introducedBy)) || deviceCfg.introducedBy.substring(0, 5) }}</td>
</tr>
<tr ng-if="connections[deviceCfg.deviceID].clientVersion">
<th><span class="fa fa-fw fa-tag"></span> <span translate>Version</span></th>
<td class="text-right">{{connections[deviceCfg.deviceID].clientVersion}}</td>
@@ -597,10 +608,10 @@
</div>
<div class="panel-footer">
<span class="pull-right">
<button ng-if="!connections[deviceCfg.deviceID].paused" type="button" class="btn btn-sm btn-default" ng-click="pauseDevice(deviceCfg.deviceID)">
<button ng-if="!deviceCfg.paused" type="button" class="btn btn-sm btn-default" ng-click="setDevicePause(deviceCfg.deviceID, true)">
<span class="fa fa-pause"></span> <span translate>Pause</span>
</button>
<button ng-if="connections[deviceCfg.deviceID].paused" type="button" class="btn btn-sm btn-default" ng-click="resumeDevice(deviceCfg.deviceID)">
<button ng-if="deviceCfg.paused" type="button" class="btn btn-sm btn-default" ng-click="setDevicePause(deviceCfg.deviceID, false)">
<span class="fa fa-play"></span> <span translate>Resume</span>
</button>
<button type="button" class="btn btn-sm btn-default" ng-click="editDevice(deviceCfg)">
@@ -613,9 +624,14 @@
</div>
</div>
<div class="form-group">
<button type="button" class="btn btn-sm btn-default pull-right" ng-click="addDevice()">
<span class="fa fa-plus"></span> <span translate>Add Remote Device</span>
</button>
<span class="pull-right">
<button type="button" class="btn btn-sm btn-default" ng-click="globalChanges()">
<span class="fa fa-fw fa-history"></span> <span translate>Global Changes</span>
</button>
<button type="button" class="btn btn-sm btn-default" ng-click="addDevice()">
<span class="fa fa-plus"></span> <span translate>Add Remote Device</span>
</button>
</span>
<div class="clearfix"></div>
</div>
</div>
@@ -647,6 +663,7 @@
<ng-include src="'syncthing/core/shutdownDialogView.html'"></ng-include>
<ng-include src="'syncthing/device/idqrModalView.html'"></ng-include>
<ng-include src="'syncthing/device/editDeviceModalView.html'"></ng-include>
<ng-include src="'syncthing/device/globalChangesModalView.html'"></ng-include>
<ng-include src="'syncthing/folder/editFolderModalView.html'"></ng-include>
<ng-include src="'syncthing/folder/editIgnoresModalView.html'"></ng-include>
<ng-include src="'syncthing/settings/settingsModalView.html'"></ng-include>

@@ -12,7 +12,7 @@
<p translate>Copyright © 2014-2016 the following Contributors:</p>
<div class="row">
<div class="col-md-12" id="contributor-list">
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin White, Jr., Laurent Etiemble, Leo Arias, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Roman Zaynetdinov, Scott Klupfel, Simon Frei, Stefan Kuntz, Tim Abell, Tim Howes, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Xavier O., Yannic A.
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Heiko Zuerker, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin White, Jr., Laurent Etiemble, Leo Arias, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Roman Zaynetdinov, Scott Klupfel, Simon Frei, Stefan Kuntz, Tim Abell, Tim Howes, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Xavier O., Yannic A.
</div>
</div>
<hr/>

@@ -77,6 +77,8 @@ angular.module('syncthing.core')
STATE_CHANGED: 'StateChanged', // Emitted when a folder changes state
FOLDER_ERRORS: 'FolderErrors', // Emitted when a folder has errors preventing a full sync
FOLDER_SCAN_PROGRESS: 'FolderScanProgress', // Emitted every ScanProgressIntervalS seconds, indicating how far into the scan it is at.
FOLDER_PAUSED: 'FolderPaused', // Emitted when a folder is paused
FOLDER_RESUMED: 'FolderResumed', // Emitted when a folder is resumed

start: function() {
$http.get(urlbase + '/events?limit=1')

@@ -51,6 +51,7 @@ angular.module('syncthing.core')
$scope.failedPageSize = 10;
$scope.scanProgress = {};
$scope.themes = [];
$scope.globalChangeEvents = {};

$scope.localStateTotal = {
bytes: 0,
@@ -186,6 +187,7 @@ angular.module('syncthing.core')

$scope.$on(Events.LOCAL_INDEX_UPDATED, function (event, arg) {
refreshFolderStats();
refreshGlobalChanges();
});

$scope.$on(Events.DEVICE_DISCONNECTED, function (event, arg) {
@@ -233,14 +235,6 @@ angular.module('syncthing.core')
$scope.deviceRejections[arg.data.device] = arg;
});

$scope.$on(Events.DEVICE_PAUSED, function (event, arg) {
$scope.connections[arg.data.device].paused = true;
});

$scope.$on(Events.DEVICE_RESUMED, function (event, arg) {
$scope.connections[arg.data.device].paused = false;
});

$scope.$on(Events.FOLDER_REJECTED, function (event, arg) {
$scope.folderRejections[arg.data.folder + "-" + arg.data.device] = arg;
});
@@ -629,6 +623,15 @@ angular.module('syncthing.core')
}).error($scope.emitHTTPError);
}, 2500);

var refreshGlobalChanges = debounce(function () {
$http.get(urlbase + "/events/disk?limit=15").success(function (data) {
data = data.reverse();
$scope.globalChangeEvents = data;

console.log("refreshGlobalChanges", data);
}).error($scope.emitHTTPError);
}, 2500);

$scope.refresh = function () {
refreshSystem();
refreshDiscoveryCache();
@@ -641,6 +644,10 @@ angular.module('syncthing.core')
return 'unknown';
}

if (folderCfg.paused) {
return 'paused';
}

// after restart syncthing process state may be empty
if (!$scope.model[folderCfg.id].state) {
return 'unknown';
@@ -674,6 +681,9 @@ angular.module('syncthing.core')
if (status === 'idle') {
return 'success';
}
if (status == 'paused') {
return 'default';
}
if (status === 'syncing' || status === 'scanning') {
return 'primary';
}
@@ -790,7 +800,7 @@ angular.module('syncthing.core')
return 'unknown';
}

if ($scope.connections[deviceCfg.deviceID].paused) {
if (deviceCfg.paused) {
return 'paused';
}

@@ -816,7 +826,7 @@ angular.module('syncthing.core')
return 'info';
}

if ($scope.connections[deviceCfg.deviceID].paused) {
if (deviceCfg.paused) {
return 'default';
}

@@ -912,6 +922,16 @@ angular.module('syncthing.core')
return '';
};

$scope.friendlyNameFromShort = function (shortID) {
var matches = $scope.devices.filter(function (n) {
return n.deviceID.substr(0, 7) === shortID;
});
if (matches.length !== 1) {
return shortID;
}
return matches[0].name;
};

$scope.findDevice = function (deviceID) {
var matches = $scope.devices.filter(function (n) {
return n.deviceID === deviceID;
@@ -943,12 +963,23 @@ angular.module('syncthing.core')
return device.deviceID.substr(0, 6);
};

$scope.pauseDevice = function (device) {
$http.post(urlbase + "/system/pause?device=" + device);
$scope.setDevicePause = function (device, pause) {
$scope.devices.forEach(function (cfg) {
if (cfg.deviceID == device) {
cfg.paused = pause;
}
});
$scope.config.devices = $scope.devices;
$scope.saveConfig();
};

$scope.resumeDevice = function (device) {
$http.post(urlbase + "/system/resume?device=" + device);
$scope.setFolderPause = function (folder, pause) {
var cfg = $scope.folders[folder];
if (cfg) {
cfg.paused = pause;
$scope.config.folders = folderList($scope.folders);
$scope.saveConfig();
}
};

$scope.editSettings = function () {
@@ -1268,7 +1299,11 @@ angular.module('syncthing.core')
$scope.folderEditor = form;
break;
}
}
};

$scope.globalChanges = function () {
$('#globalChanges').modal();
};

$scope.editFolder = function (folderCfg) {
$scope.currentFolder = angular.copy(folderCfg);

27
gui/default/syncthing/device/globalChangesModalView.html
Normal file
@@ -0,0 +1,27 @@
<style> th, td { padding: 6px; } </style>
<modal id="globalChanges" status="default" icon="{{'history'}}" heading="{{'Global Changes' | translate}}" large="yes" closeable="yes">
<div class="modal-body">
<table>
<tr>
<th translate>Device</th>
<th translate>Action</th>
<th translate>Type</th>
<th translate>Path</th>
<th translate>Time</th>
</tr>
<tr ng-repeat="changeEvent in globalChangeEvents">
<td ng-if="changeEvent.data.modifiedBy">{{friendlyNameFromShort(changeEvent.data.modifiedBy)}}</td>
<td ng-if="!changeEvent.data.modifiedBy"><span translate>Unknown</span></td>
<td>{{changeEvent.data.action}}</td>
<td>{{changeEvent.data.type}}</td>
<td>{{changeEvent.data.path}}</td>
<td>{{changeEvent.time | date:'medium'}}</td>
</tr>
</table>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default btn-sm" data-dismiss="modal">
<span class="fa fa-times"></span> <span translate>Close</span>
</button>
</div>
</modal>
@@ -84,10 +84,10 @@
<div class="col-md-6">
<div class="form-group">
<label translate>Folder Type</label>
<a href="https://docs.syncthing.net/users/foldermaster.html" target="_blank"><span class="fa fa-book"></span> <span translate>Help</span></a>
<a href="https://docs.syncthing.net/users/foldertypes.html" target="_blank"><span class="fa fa-book"></span> <span translate>Help</span></a>
<select class="form-control" ng-model="currentFolder.type">
<option value="readwrite" translate>Normal</option>
<option value="readonly" translate>Master</option>
<option value="readwrite" translate>Send & Receive</option>
<option value="readonly" translate>Send Only</option>
</select>
<p ng-if="currentFolder.type == 'readonly'" translate class="help-block">Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.</p>
</div>

@@ -55,3 +55,15 @@ go run build.go -goarch armhf snap
go run build.go -goarch arm64 snap

mv *.snap "$WORKSPACE"

if [[ -d /usr/local/oldgo ]]; then
echo
echo Building with minimum supported Go version
export GOROOT=/usr/local/oldgo
export PATH="$GOROOT/bin:$PATH"
go version
echo

rm -rf "$GOPATH/pkg"
go run build.go install all # only compile, don't run lints and stuff
fi

@@ -407,9 +407,9 @@ func convertV13V14(cfg *Configuration) {

    for i, fcfg := range cfg.Folders {
        if fcfg.DeprecatedReadOnly {
            cfg.Folders[i].Type = FolderTypeReadOnly
            cfg.Folders[i].Type = FolderTypeSendOnly
        } else {
            cfg.Folders[i].Type = FolderTypeReadWrite
            cfg.Folders[i].Type = FolderTypeSendReceive
        }
        cfg.Folders[i].DeprecatedReadOnly = false
    }

@@ -96,7 +96,7 @@ func TestDeviceConfig(t *testing.T) {
    ID: "test",
    RawPath: "testdata",
    Devices: []FolderDeviceConfiguration{{DeviceID: device1}, {DeviceID: device4}},
    Type: FolderTypeReadOnly,
    Type: FolderTypeSendOnly,
    RescanIntervalS: 600,
    Copiers: 0,
    Pullers: 0,

@@ -17,6 +17,7 @@ type DeviceConfiguration struct {
    Introducer bool `xml:"introducer,attr" json:"introducer"`
    SkipIntroductionRemovals bool `xml:"skipIntroductionRemovals,attr" json:"skipIntroductionRemovals"`
    IntroducedBy protocol.DeviceID `xml:"introducedBy,attr" json:"introducedBy"`
    Paused bool `xml:"paused" json:"paused"`
}

func NewDeviceConfiguration(id protocol.DeviceID, name string) DeviceConfiguration {

@@ -40,6 +40,8 @@ type FolderConfiguration struct {
|
||||
DisableSparseFiles bool `xml:"disableSparseFiles" json:"disableSparseFiles"`
|
||||
DisableTempIndexes bool `xml:"disableTempIndexes" json:"disableTempIndexes"`
|
||||
Fsync bool `xml:"fsync" json:"fsync"`
|
||||
DisableWeakHash bool `xml:"disableWeakHash" json:"disableWeakHash"`
|
||||
Paused bool `xml:"paused" json:"paused"`
|
||||
|
||||
cachedPath string
|
||||
|
||||
@@ -98,13 +100,13 @@ func (f *FolderConfiguration) CreateMarker() error {

func (f *FolderConfiguration) HasMarker() bool {
_, err := os.Stat(filepath.Join(f.Path(), ".stfolder"))
if err != nil {
return false
}
return true
return err == nil
}

func (f FolderConfiguration) Description() string {
if f.Label == "" {
return f.ID
}
return fmt.Sprintf("%q (%s)", f.Label, f.ID)
}

@@ -9,15 +9,15 @@ package config
type FolderType int

const (
FolderTypeReadWrite FolderType = iota // default is readwrite
FolderTypeReadOnly
FolderTypeSendReceive FolderType = iota // default is sendreceive
FolderTypeSendOnly
)

func (t FolderType) String() string {
switch t {
case FolderTypeReadWrite:
case FolderTypeSendReceive:
return "readwrite"
case FolderTypeReadOnly:
case FolderTypeSendOnly:
return "readonly"
default:
return "unknown"
@@ -30,12 +30,12 @@ func (t FolderType) MarshalText() ([]byte, error) {

func (t *FolderType) UnmarshalText(bs []byte) error {
switch string(bs) {
case "readwrite":
*t = FolderTypeReadWrite
case "readonly":
*t = FolderTypeReadOnly
case "readwrite", "sendreceive":
*t = FolderTypeSendReceive
case "readonly", "sendonly":
*t = FolderTypeSendOnly
default:
*t = FolderTypeReadWrite
*t = FolderTypeSendReceive
}
return nil
}

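Note on the hunk above: the legacy "readwrite"/"readonly" spellings still unmarshal after the rename, so configurations written before this change keep working. A minimal, self-contained sketch of that behaviour follows; the trimmed-down type and the main wrapper are illustrative only, not part of the patch.

```go
package main

import "fmt"

// Trimmed-down copy of the patched type, for illustration only.
type FolderType int

const (
	FolderTypeSendReceive FolderType = iota
	FolderTypeSendOnly
)

func (t *FolderType) UnmarshalText(bs []byte) error {
	switch string(bs) {
	case "readwrite", "sendreceive": // legacy and new spellings both accepted
		*t = FolderTypeSendReceive
	case "readonly", "sendonly":
		*t = FolderTypeSendOnly
	default:
		*t = FolderTypeSendReceive
	}
	return nil
}

func main() {
	var t FolderType
	_ = t.UnmarshalText([]byte("readonly")) // value from a pre-rename config
	fmt.Println(t == FolderTypeSendOnly)    // true
}
```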
@@ -41,6 +41,7 @@ type OptionsConfiguration struct {
|
||||
OverwriteRemoteDevNames bool `xml:"overwriteRemoteDeviceNamesOnConnect" json:"overwriteRemoteDeviceNamesOnConnect" default:"false"`
|
||||
TempIndexMinBlocks int `xml:"tempIndexMinBlocks" json:"tempIndexMinBlocks" default:"10"`
|
||||
UnackedNotificationIDs []string `xml:"unackedNotificationID" json:"unackedNotificationIDs"`
|
||||
TrafficClass int `xml:"trafficClass" json:"trafficClass"`
|
||||
|
||||
DeprecatedUPnPEnabled bool `xml:"upnpEnabled,omitempty" json:"-"`
|
||||
DeprecatedUPnPLeaseM int `xml:"upnpLeaseMinutes,omitempty" json:"-"`
|
||||
|
||||
@@ -323,6 +323,18 @@ func (w *Wrapper) Device(id protocol.DeviceID) (DeviceConfiguration, bool) {
|
||||
return DeviceConfiguration{}, false
|
||||
}
|
||||
|
||||
// Folder returns the configuration for the given folder and an "ok" bool.
|
||||
func (w *Wrapper) Folder(id string) (FolderConfiguration, bool) {
|
||||
w.mut.Lock()
|
||||
defer w.mut.Unlock()
|
||||
for _, folder := range w.cfg.Folders {
|
||||
if folder.ID == id {
|
||||
return folder, true
|
||||
}
|
||||
}
|
||||
return FolderConfiguration{}, false
|
||||
}
|
||||
|
||||
// Save writes the configuration to disk, and generates a ConfigSaved event.
|
||||
func (w *Wrapper) Save() error {
|
||||
fd, err := osutil.CreateAtomic(w.path)
|
||||
|
||||
26
lib/connections/connections_test.go
Normal file
@@ -0,0 +1,26 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package connections

import "testing"
import "net/url"

func TestFixupPort(t *testing.T) {
cases := [][2]string{
{"tcp://1.2.3.4:5", "tcp://1.2.3.4:5"},
{"tcp://1.2.3.4:", "tcp://1.2.3.4:22000"},
{"tcp://1.2.3.4", "tcp://1.2.3.4:22000"},
}

for _, tc := range cases {
u0, _ := url.Parse(tc[0])
u1 := fixupPort(u0).String()
if u1 != tc[1] {
t.Errorf("fixupPort(%q) => %q, expected %q", tc[0], u1, tc[1])
}
}
}

@@ -1,10 +0,0 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// The existence of this file means we get 0% test coverage rather than no
|
||||
// test coverage at all. Remove when implementing an actual test.
|
||||
|
||||
package connections
|
||||
@@ -45,6 +45,11 @@ func (d *relayDialer) Dial(id protocol.DeviceID, uri *url.URL) (internalConn, er
|
||||
return internalConn{}, err
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, d.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
var tc *tls.Conn
|
||||
if inv.ServerSocket {
|
||||
tc = tls.Server(conn, d.tlsCfg)
|
||||
|
||||
@@ -29,6 +29,7 @@ type relayListener struct {
|
||||
onAddressesChangedNotifier
|
||||
|
||||
uri *url.URL
|
||||
cfg *config.Wrapper
|
||||
tlsCfg *tls.Config
|
||||
conns chan internalConn
|
||||
factory listenerFactory
|
||||
@@ -79,6 +80,11 @@ func (t *relayListener) Serve() {
|
||||
l.Infoln(err)
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, t.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
var tc *tls.Conn
|
||||
if inv.ServerSocket {
|
||||
tc = tls.Server(conn, t.tlsCfg)
|
||||
@@ -170,6 +176,7 @@ type relayListenerFactory struct{}
|
||||
func (f *relayListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
|
||||
return &relayListener{
|
||||
uri: uri,
|
||||
cfg: cfg,
|
||||
tlsCfg: tlsCfg,
|
||||
conns: conns,
|
||||
factory: f,
|
||||
|
||||
@@ -41,6 +41,32 @@ const (
|
||||
tlsHandshakeTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// From go/src/crypto/tls/cipher_suites.go
|
||||
var tlsCipherSuiteNames = map[uint16]string{
|
||||
0x0005: "TLS_RSA_WITH_RC4_128_SHA",
|
||||
0x000a: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
0x002f: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
0x0035: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||
0x003c: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||
0x009c: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
0x009d: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
0xc007: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
0xc009: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
0xc00a: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
0xc011: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
0xc012: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
0xc013: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
0xc014: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
0xc023: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||
0xc027: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||
0xc02f: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
0xc02b: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
0xc030: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
0xc02c: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
0xcca8: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
0xcca9: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
}
|
||||
|
||||
// Service listens and dials all configured unconnected devices, via supported
|
||||
// dialers. Successful connections are handed to the model.
|
||||
type Service struct {
|
||||
@@ -272,8 +298,7 @@ next:
|
||||
protoConn := protocol.NewConnection(remoteID, rd, wr, s.model, name, deviceCfg.Compression)
|
||||
modelConn := completeConn{c, protoConn}
|
||||
|
||||
l.Infof("Established secure connection to %s at %s", remoteID, name)
|
||||
l.Debugf("cipher suite: %04X in lan: %t", c.ConnectionState().CipherSuite, !limit)
|
||||
l.Infof("Established secure connection to %s at %s (%s)", remoteID, name, tlsCipherSuiteNames[c.ConnectionState().CipherSuite])
|
||||
|
||||
s.model.AddConnection(modelConn, hello)
|
||||
s.curConMut.Lock()
|
||||
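Note on the hunk above: the new Infof line resolves the numeric suite ID through the tlsCipherSuiteNames map introduced earlier in this file. A rough sketch of the same lookup against any TLS connection; the dial target is a placeholder and the map is abbreviated to two entries.

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// Abbreviated copy of the lookup table from the patch; the real map covers
// all suites known to crypto/tls at the time.
var tlsCipherSuiteNames = map[uint16]string{
	0xc02f: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
	0xc030: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
}

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", nil) // placeholder host
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()

	suite := conn.ConnectionState().CipherSuite
	name, ok := tlsCipherSuiteNames[suite]
	if !ok {
		name = fmt.Sprintf("%04X", suite) // fall back to the raw ID
	}
	fmt.Println("negotiated cipher suite:", name)
}
```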
@@ -318,8 +343,7 @@ func (s *Service) connect() {
|
||||
continue
|
||||
}
|
||||
|
||||
paused := s.model.IsPaused(deviceID)
|
||||
if paused {
|
||||
if deviceCfg.Paused {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -116,7 +116,6 @@ type Model interface {
|
||||
protocol.Model
|
||||
AddConnection(conn Connection, hello protocol.HelloResult)
|
||||
ConnectedTo(remoteID protocol.DeviceID) bool
|
||||
IsPaused(remoteID protocol.DeviceID) bool
|
||||
OnHello(protocol.DeviceID, net.Addr, protocol.HelloResult) error
|
||||
GetHello(protocol.DeviceID) protocol.HelloIntf
|
||||
}
|
||||
|
||||
@@ -39,6 +39,16 @@ func (d *tcpDialer) Dial(id protocol.DeviceID, uri *url.URL) (internalConn, erro
|
||||
return internalConn{}, err
|
||||
}
|
||||
|
||||
err = dialer.SetTCPOptions(conn)
|
||||
if err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, d.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
tc := tls.Client(conn, d.tlsCfg)
|
||||
err = tlsTimedHandshake(tc)
|
||||
if err != nil {
|
||||
|
||||
@@ -30,6 +30,7 @@ type tcpListener struct {
|
||||
onAddressesChangedNotifier
|
||||
|
||||
uri *url.URL
|
||||
cfg *config.Wrapper
|
||||
tlsCfg *tls.Config
|
||||
stop chan struct{}
|
||||
conns chan internalConn
|
||||
@@ -107,6 +108,11 @@ func (t *tcpListener) Serve() {
|
||||
l.Infoln(err)
|
||||
}
|
||||
|
||||
err = dialer.SetTrafficClass(conn, t.cfg.Options().TrafficClass)
|
||||
if err != nil {
|
||||
l.Debugf("failed to set traffic class: %s", err)
|
||||
}
|
||||
|
||||
tc := tls.Server(conn, t.tlsCfg)
|
||||
err = tlsTimedHandshake(tc)
|
||||
if err != nil {
|
||||
@@ -176,6 +182,7 @@ type tcpListenerFactory struct{}
|
||||
func (f *tcpListenerFactory) New(uri *url.URL, cfg *config.Wrapper, tlsCfg *tls.Config, conns chan internalConn, natService *nat.Service) genericListener {
|
||||
return &tcpListener{
|
||||
uri: fixupPort(uri),
|
||||
cfg: cfg,
|
||||
tlsCfg: tlsCfg,
|
||||
conns: conns,
|
||||
natService: natService,
|
||||
@@ -192,7 +199,7 @@ func fixupPort(uri *url.URL) *url.URL {
|
||||
copyURI := *uri
|
||||
|
||||
host, port, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil && strings.HasPrefix(err.Error(), "missing port") {
|
||||
if err != nil && strings.Contains(err.Error(), "missing port") {
|
||||
// addr is on the form "1.2.3.4"
|
||||
copyURI.Host = net.JoinHostPort(uri.Host, "22000")
|
||||
} else if err == nil && port == "" {
|
||||
|
||||
@@ -209,7 +209,7 @@ func blockKeyInto(o, hash []byte, folder uint32, file string) []byte {
|
||||
}
|
||||
o[0] = KeyTypeBlock
|
||||
binary.BigEndian.PutUint32(o[keyPrefixLen:], folder)
|
||||
copy(o[keyPrefixLen+keyFolderLen:], []byte(hash))
|
||||
copy(o[keyPrefixLen+keyFolderLen:], hash)
|
||||
copy(o[keyPrefixLen+keyFolderLen+keyHashLen:], []byte(file))
|
||||
return o
|
||||
}
|
||||
|
||||
@@ -58,10 +58,7 @@ func setup() (*Instance, *BlockFinder) {
|
||||
func dbEmpty(db *Instance) bool {
|
||||
iter := db.NewIterator(util.BytesPrefix([]byte{KeyTypeBlock}), nil)
|
||||
defer iter.Release()
|
||||
if iter.Next() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return !iter.Next()
|
||||
}
|
||||
|
||||
func TestBlockMapAddUpdateWipe(t *testing.T) {
|
||||
|
||||
@@ -4,7 +4,8 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build ignore // this is a really tedious test for an old issue
|
||||
// this is a really tedious test for an old issue
|
||||
// +build ignore
|
||||
|
||||
package db_test
|
||||
|
||||
|
||||
@@ -635,7 +635,7 @@ func (db *Instance) deviceKeyInto(k []byte, folder, device, file []byte) []byte
|
||||
k[0] = KeyTypeDevice
|
||||
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
|
||||
binary.BigEndian.PutUint32(k[keyPrefixLen+keyFolderLen:], db.deviceIdx.ID(device))
|
||||
copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], []byte(file))
|
||||
copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], file)
|
||||
return k[:reqLen]
|
||||
}
|
||||
|
||||
@@ -670,7 +670,7 @@ func (db *Instance) globalKey(folder, file []byte) []byte {
|
||||
k := make([]byte, keyPrefixLen+keyFolderLen+len(file))
|
||||
k[0] = KeyTypeGlobal
|
||||
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
|
||||
copy(k[keyPrefixLen+keyFolderLen:], []byte(file))
|
||||
copy(k[keyPrefixLen+keyFolderLen:], file)
|
||||
return k
|
||||
}
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ type FileInfoTruncated struct {
|
||||
Permissions uint32 `protobuf:"varint,4,opt,name=permissions,proto3" json:"permissions,omitempty"`
|
||||
ModifiedS int64 `protobuf:"varint,5,opt,name=modified_s,json=modifiedS,proto3" json:"modified_s,omitempty"`
|
||||
ModifiedNs int32 `protobuf:"varint,11,opt,name=modified_ns,json=modifiedNs,proto3" json:"modified_ns,omitempty"`
|
||||
ModifiedBy protocol.ShortID `protobuf:"varint,12,opt,name=modified_by,json=modifiedBy,proto3,customtype=protocol.ShortID" json:"modified_by"`
|
||||
Deleted bool `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted,omitempty"`
|
||||
Invalid bool `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid,omitempty"`
|
||||
NoPermissions bool `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"no_permissions,omitempty"`
|
||||
@@ -226,6 +227,11 @@ func (m *FileInfoTruncated) MarshalTo(data []byte) (int, error) {
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.ModifiedNs))
|
||||
}
|
||||
if m.ModifiedBy != 0 {
|
||||
data[i] = 0x60
|
||||
i++
|
||||
i = encodeVarintStructs(data, i, uint64(m.ModifiedBy))
|
||||
}
|
||||
if len(m.SymlinkTarget) > 0 {
|
||||
data[i] = 0x8a
|
||||
i++
|
||||
@@ -324,6 +330,9 @@ func (m *FileInfoTruncated) ProtoSize() (n int) {
|
||||
if m.ModifiedNs != 0 {
|
||||
n += 1 + sovStructs(uint64(m.ModifiedNs))
|
||||
}
|
||||
if m.ModifiedBy != 0 {
|
||||
n += 1 + sovStructs(uint64(m.ModifiedBy))
|
||||
}
|
||||
l = len(m.SymlinkTarget)
|
||||
if l > 0 {
|
||||
n += 2 + l + sovStructs(uint64(l))
|
||||
@@ -798,6 +807,25 @@ func (m *FileInfoTruncated) Unmarshal(data []byte) error {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 12:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ModifiedBy", wireType)
|
||||
}
|
||||
m.ModifiedBy = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowStructs
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
m.ModifiedBy |= (protocol.ShortID(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 17:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field SymlinkTarget", wireType)
|
||||
|
||||
@@ -28,6 +28,7 @@ message FileInfoTruncated {
|
||||
uint32 permissions = 4;
|
||||
int64 modified_s = 5;
|
||||
int32 modified_ns = 11;
|
||||
uint64 modified_by = 12 [(gogoproto.customtype) = "protocol.ShortID", (gogoproto.nullable) = false];
|
||||
bool deleted = 6;
|
||||
bool invalid = 7;
|
||||
bool no_permissions = 8;
|
||||
|
||||
@@ -10,6 +10,9 @@ import (
"fmt"
"net"
"time"

"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)

// Dial tries dialing via proxy if a proxy is configured, and falls back to
@@ -75,3 +78,22 @@ func SetTCPOptions(conn net.Conn) error {
return fmt.Errorf("unknown connection type %T", conn)
}
}

func SetTrafficClass(conn net.Conn, class int) error {
switch conn := conn.(type) {
case *net.TCPConn:
e1 := ipv4.NewConn(conn).SetTOS(class)
e2 := ipv6.NewConn(conn).SetTrafficClass(class)

if e1 != nil {
return e1
}
return e2

case dialerConn:
return SetTrafficClass(conn.Conn, class)

default:
return fmt.Errorf("unknown connection type %T", conn)
}
}

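Note on the hunks above and below: SetTrafficClass is called by the TCP and relay dialers/listeners right after the raw connection is established, and a failure is only logged, never fatal. A standalone sketch of that call pattern; the address and the class value 0x28 are arbitrary examples, and the helper is a reduced copy so the sketch compiles on its own.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
)

// Reduced copy of the helper from the patch (the dialerConn case is omitted).
func SetTrafficClass(conn net.Conn, class int) error {
	switch conn := conn.(type) {
	case *net.TCPConn:
		e1 := ipv4.NewConn(conn).SetTOS(class)
		e2 := ipv6.NewConn(conn).SetTrafficClass(class)
		if e1 != nil {
			return e1
		}
		return e2
	default:
		return fmt.Errorf("unknown connection type %T", conn)
	}
}

func main() {
	conn, err := net.Dial("tcp", "example.com:22000") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// 0x28 is an arbitrary example value for the TOS / traffic class byte.
	if err := SetTrafficClass(conn, 0x28); err != nil {
		log.Println("failed to set traffic class:", err) // non-fatal, as in the patch
	}
}
```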
@@ -51,7 +51,7 @@ func NewLocal(id protocol.DeviceID, addr string, addrList AddressLister) (Finder
Supervisor: suture.NewSimple("local"),
myID: id,
addrList: addrList,
localBcastTick: time.Tick(BroadcastInterval),
localBcastTick: time.NewTicker(BroadcastInterval).C,
forcedBcastTick: make(chan time.Time),
localBcastStart: time.Now(),
cache: newCache(),

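Note on the hunk above: time.Tick returns only a channel, so the ticker behind it can never be stopped; switching to time.NewTicker(...).C keeps the same receive semantics while leaving a Stop handle available (and quiets the corresponding lint). A small illustrative comparison, with interval and loop chosen only for the demo:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Tick(d) hands back only the channel; the underlying ticker can
	// never be stopped, so it lives for the life of the process.
	leaky := time.Tick(50 * time.Millisecond)

	// NewTicker returns the ticker itself; the channel behaves the same,
	// but Stop() is available once the consumer goes away.
	t := time.NewTicker(50 * time.Millisecond)
	defer t.Stop()

	for i := 0; i < 3; i++ {
		select {
		case <-leaky:
			fmt.Println("tick from time.Tick")
		case <-t.C:
			fmt.Println("tick from NewTicker")
		}
	}
}
```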
@@ -29,6 +29,7 @@ const (
|
||||
DevicePaused
|
||||
DeviceResumed
|
||||
LocalChangeDetected
|
||||
RemoteChangeDetected
|
||||
LocalIndexUpdated
|
||||
RemoteIndexUpdated
|
||||
ItemStarted
|
||||
@@ -42,6 +43,8 @@ const (
|
||||
FolderCompletion
|
||||
FolderErrors
|
||||
FolderScanProgress
|
||||
FolderPaused
|
||||
FolderResumed
|
||||
ListenAddressesChanged
|
||||
LoginAttempt
|
||||
|
||||
@@ -68,6 +71,8 @@ func (t EventType) String() string {
|
||||
return "DeviceRejected"
|
||||
case LocalChangeDetected:
|
||||
return "LocalChangeDetected"
|
||||
case RemoteChangeDetected:
|
||||
return "RemoteChangeDetected"
|
||||
case LocalIndexUpdated:
|
||||
return "LocalIndexUpdated"
|
||||
case RemoteIndexUpdated:
|
||||
@@ -98,6 +103,10 @@ func (t EventType) String() string {
|
||||
return "DeviceResumed"
|
||||
case FolderScanProgress:
|
||||
return "FolderScanProgress"
|
||||
case FolderPaused:
|
||||
return "FolderPaused"
|
||||
case FolderResumed:
|
||||
return "FolderResumed"
|
||||
case ListenAddressesChanged:
|
||||
return "ListenAddressesChanged"
|
||||
case LoginAttempt:
|
||||
|
||||
@@ -247,22 +247,23 @@ func BenchmarkBufferedSub(b *testing.B) {
}

// Receive the events
done := make(chan struct{})
done := make(chan error)
go func() {
defer close(done)
recv := 0
var evs []Event
for i := 0; i < b.N; {
evs = bs.Since(recv, evs[:0])
for _, ev := range evs {
if ev.GlobalID != recv+1 {
b.Fatal("skipped event", ev.GlobalID, recv)
done <- fmt.Errorf("skipped event %v %v", ev.GlobalID, recv)
return
}
recv = ev.GlobalID
coord <- struct{}{}
}
i += len(evs)
}
done <- nil
}()

// Send the events
@@ -276,7 +277,9 @@ func BenchmarkBufferedSub(b *testing.B) {
<-coord
}

<-done
if err := <-done; err != nil {
b.Error(err)
}
b.ReportAllocs()
}

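Note on the hunk above: b.Fatal may only be called from the goroutine running the benchmark, so the receiver goroutine now reports failure back over the done channel instead of failing directly. A standalone sketch of the same pattern; the failing condition is invented for illustration.

```go
package demo

import (
	"fmt"
	"testing"
)

// BenchmarkPattern shows the "report errors via a channel" pattern from the
// patch: the worker goroutine never calls b.Fatal or b.Error itself.
func BenchmarkPattern(b *testing.B) {
	done := make(chan error)

	go func() {
		defer close(done)
		for i := 0; i < b.N; i++ {
			if i < 0 { // placeholder failure condition
				done <- fmt.Errorf("unexpected value %d", i)
				return
			}
		}
		done <- nil
	}()

	// Only the benchmark goroutine is allowed to fail the run.
	if err := <-done; err != nil {
		b.Error(err)
	}
}
```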
@@ -15,7 +15,7 @@ func TestCache(t *testing.T) {
|
||||
c := newCache(nil)
|
||||
|
||||
res, ok := c.get("nonexistent")
|
||||
if res.IsIgnored() || res.IsDeletable() || ok != false {
|
||||
if res.IsIgnored() || res.IsDeletable() || ok {
|
||||
t.Errorf("res %v, ok %v for nonexistent item", res, ok)
|
||||
}
|
||||
|
||||
@@ -25,12 +25,12 @@ func TestCache(t *testing.T) {
|
||||
c.set("false", 0)
|
||||
|
||||
res, ok = c.get("true")
|
||||
if !res.IsIgnored() || !res.IsDeletable() || ok != true {
|
||||
if !res.IsIgnored() || !res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for true item", res, ok)
|
||||
}
|
||||
|
||||
res, ok = c.get("false")
|
||||
if res.IsIgnored() || res.IsDeletable() || ok != true {
|
||||
if res.IsIgnored() || res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for false item", res, ok)
|
||||
}
|
||||
|
||||
@@ -41,12 +41,12 @@ func TestCache(t *testing.T) {
|
||||
// Same values should exist
|
||||
|
||||
res, ok = c.get("true")
|
||||
if !res.IsIgnored() || !res.IsDeletable() || ok != true {
|
||||
if !res.IsIgnored() || !res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for true item", res, ok)
|
||||
}
|
||||
|
||||
res, ok = c.get("false")
|
||||
if res.IsIgnored() || res.IsDeletable() || ok != true {
|
||||
if res.IsIgnored() || res.IsDeletable() || !ok {
|
||||
t.Errorf("res %v, ok %v for false item", res, ok)
|
||||
}
|
||||
|
||||
|
||||
@@ -93,12 +93,12 @@ type Model struct {
|
||||
folderStatRefs map[string]*stats.FolderStatisticsReference // folder -> statsRef
|
||||
fmut sync.RWMutex // protects the above
|
||||
|
||||
conn map[protocol.DeviceID]connections.Connection
|
||||
closed map[protocol.DeviceID]chan struct{}
|
||||
helloMessages map[protocol.DeviceID]protocol.HelloResult
|
||||
devicePaused map[protocol.DeviceID]bool
|
||||
deviceDownloads map[protocol.DeviceID]*deviceDownloadState
|
||||
pmut sync.RWMutex // protects the above
|
||||
conn map[protocol.DeviceID]connections.Connection
|
||||
closed map[protocol.DeviceID]chan struct{}
|
||||
helloMessages map[protocol.DeviceID]protocol.HelloResult
|
||||
deviceDownloads map[protocol.DeviceID]*deviceDownloadState
|
||||
remotePausedFolders map[protocol.DeviceID][]string // deviceID -> folders
|
||||
pmut sync.RWMutex // protects the above
|
||||
}
|
||||
|
||||
type folderFactory func(*Model, config.FolderConfiguration, versioner.Versioner, *fs.MtimeFS) service
|
||||
@@ -134,33 +134,33 @@ func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName,
|
||||
l.Debugln(line)
|
||||
},
|
||||
}),
|
||||
cfg: cfg,
|
||||
db: ldb,
|
||||
finder: db.NewBlockFinder(ldb),
|
||||
progressEmitter: NewProgressEmitter(cfg),
|
||||
id: id,
|
||||
shortID: id.Short(),
|
||||
cacheIgnoredFiles: cfg.Options().CacheIgnoredFiles,
|
||||
protectedFiles: protectedFiles,
|
||||
deviceName: deviceName,
|
||||
clientName: clientName,
|
||||
clientVersion: clientVersion,
|
||||
folderCfgs: make(map[string]config.FolderConfiguration),
|
||||
folderFiles: make(map[string]*db.FileSet),
|
||||
folderDevices: make(folderDeviceSet),
|
||||
deviceFolders: make(map[protocol.DeviceID][]string),
|
||||
deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
|
||||
folderIgnores: make(map[string]*ignore.Matcher),
|
||||
folderRunners: make(map[string]service),
|
||||
folderRunnerTokens: make(map[string][]suture.ServiceToken),
|
||||
folderStatRefs: make(map[string]*stats.FolderStatisticsReference),
|
||||
conn: make(map[protocol.DeviceID]connections.Connection),
|
||||
closed: make(map[protocol.DeviceID]chan struct{}),
|
||||
helloMessages: make(map[protocol.DeviceID]protocol.HelloResult),
|
||||
devicePaused: make(map[protocol.DeviceID]bool),
|
||||
deviceDownloads: make(map[protocol.DeviceID]*deviceDownloadState),
|
||||
fmut: sync.NewRWMutex(),
|
||||
pmut: sync.NewRWMutex(),
|
||||
cfg: cfg,
|
||||
db: ldb,
|
||||
finder: db.NewBlockFinder(ldb),
|
||||
progressEmitter: NewProgressEmitter(cfg),
|
||||
id: id,
|
||||
shortID: id.Short(),
|
||||
cacheIgnoredFiles: cfg.Options().CacheIgnoredFiles,
|
||||
protectedFiles: protectedFiles,
|
||||
deviceName: deviceName,
|
||||
clientName: clientName,
|
||||
clientVersion: clientVersion,
|
||||
folderCfgs: make(map[string]config.FolderConfiguration),
|
||||
folderFiles: make(map[string]*db.FileSet),
|
||||
folderDevices: make(folderDeviceSet),
|
||||
deviceFolders: make(map[protocol.DeviceID][]string),
|
||||
deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
|
||||
folderIgnores: make(map[string]*ignore.Matcher),
|
||||
folderRunners: make(map[string]service),
|
||||
folderRunnerTokens: make(map[string][]suture.ServiceToken),
|
||||
folderStatRefs: make(map[string]*stats.FolderStatisticsReference),
|
||||
conn: make(map[protocol.DeviceID]connections.Connection),
|
||||
closed: make(map[protocol.DeviceID]chan struct{}),
|
||||
helloMessages: make(map[protocol.DeviceID]protocol.HelloResult),
|
||||
deviceDownloads: make(map[protocol.DeviceID]*deviceDownloadState),
|
||||
remotePausedFolders: make(map[protocol.DeviceID][]string),
|
||||
fmut: sync.NewRWMutex(),
|
||||
pmut: sync.NewRWMutex(),
|
||||
}
|
||||
if cfg.Options().ProgressUpdateIntervalS > -1 {
|
||||
go m.progressEmitter.Serve()
|
||||
@@ -183,10 +183,13 @@ func (m *Model) StartDeadlockDetector(timeout time.Duration) {
|
||||
// StartFolder constructs the folder service and starts it.
|
||||
func (m *Model) StartFolder(folder string) {
|
||||
m.fmut.Lock()
|
||||
m.pmut.Lock()
|
||||
folderType := m.startFolderLocked(folder)
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
m.pmut.Unlock()
|
||||
m.fmut.Unlock()
|
||||
|
||||
l.Infoln("Ready to synchronize", folder, fmt.Sprintf("(%s)", folderType))
|
||||
l.Infof("Ready to synchronize %s (%s)", folderCfg.Description(), folderType)
|
||||
}
|
||||
|
||||
func (m *Model) startFolderLocked(folder string) config.FolderType {
|
||||
@@ -217,6 +220,11 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
|
||||
}
|
||||
}
|
||||
|
||||
// Close connections to affected devices
|
||||
for _, id := range cfg.DeviceIDs() {
|
||||
m.closeLocked(id)
|
||||
}
|
||||
|
||||
v, ok := fs.Sequence(protocol.LocalDeviceID), true
|
||||
indexHasFiles := ok && v > 0
|
||||
if !indexHasFiles {
|
||||
@@ -265,7 +273,7 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
|
||||
}
|
||||
|
||||
func (m *Model) warnAboutOverwritingProtectedFiles(folder string) {
|
||||
if m.folderCfgs[folder].Type == config.FolderTypeReadOnly {
|
||||
if m.folderCfgs[folder].Type == config.FolderTypeSendOnly {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -365,13 +373,16 @@ func (m *Model) RestartFolder(cfg config.FolderConfiguration) {
|
||||
m.pmut.Lock()
|
||||
|
||||
m.tearDownFolderLocked(cfg.ID)
|
||||
m.addFolderLocked(cfg)
|
||||
folderType := m.startFolderLocked(cfg.ID)
|
||||
if !cfg.Paused {
|
||||
m.addFolderLocked(cfg)
|
||||
folderType := m.startFolderLocked(cfg.ID)
|
||||
l.Infoln("Restarted folder", cfg.Description(), fmt.Sprintf("(%s)", folderType))
|
||||
} else {
|
||||
l.Infoln("Paused folder", cfg.Description())
|
||||
}
|
||||
|
||||
m.pmut.Unlock()
|
||||
m.fmut.Unlock()
|
||||
|
||||
l.Infoln("Restarted folder", cfg.ID, fmt.Sprintf("(%s)", folderType))
|
||||
}
|
||||
|
||||
type ConnectionInfo struct {
|
||||
@@ -404,7 +415,7 @@ func (m *Model) ConnectionStats() map[string]interface{} {
|
||||
res := make(map[string]interface{})
|
||||
devs := m.cfg.Devices()
|
||||
conns := make(map[string]ConnectionInfo, len(devs))
|
||||
for device := range devs {
|
||||
for device, deviceCfg := range devs {
|
||||
hello := m.helloMessages[device]
|
||||
versionString := hello.ClientVersion
|
||||
if hello.ClientName != "syncthing" {
|
||||
@@ -412,7 +423,7 @@ func (m *Model) ConnectionStats() map[string]interface{} {
|
||||
}
|
||||
ci := ConnectionInfo{
|
||||
ClientVersion: strings.TrimSpace(versionString),
|
||||
Paused: m.devicePaused[device],
|
||||
Paused: deviceCfg.Paused,
|
||||
}
|
||||
if conn, ok := m.conn[device]; ok {
|
||||
ci.Type = conn.Type()
|
||||
@@ -782,7 +793,17 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
|
||||
}
|
||||
|
||||
m.fmut.Lock()
|
||||
var paused []string
|
||||
for _, folder := range cm.Folders {
|
||||
if folder.Paused {
|
||||
paused = append(paused, folder.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
if cfg, ok := m.cfg.Folder(folder.ID); ok && cfg.Paused {
|
||||
continue
|
||||
}
|
||||
|
||||
if !m.folderSharedWithLocked(folder.ID, deviceID) {
|
||||
events.Default.Log(events.FolderRejected, map[string]string{
|
||||
"folder": folder.ID,
|
||||
@@ -870,6 +891,10 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
|
||||
go sendIndexes(conn, folder.ID, fs, m.folderIgnores[folder.ID], startSequence, dbLocation, dropSymlinks)
|
||||
}
|
||||
|
||||
m.pmut.Lock()
|
||||
m.remotePausedFolders[deviceID] = paused
|
||||
m.pmut.Unlock()
|
||||
|
||||
// This breaks if we send multiple CM messages during the same connection.
|
||||
if len(tempIndexFolders) > 0 {
|
||||
m.pmut.RLock()
|
||||
@@ -1057,6 +1082,7 @@ func (m *Model) Closed(conn protocol.Connection, err error) {
|
||||
delete(m.conn, device)
|
||||
delete(m.helloMessages, device)
|
||||
delete(m.deviceDownloads, device)
|
||||
delete(m.remotePausedFolders, device)
|
||||
closed := m.closed[device]
|
||||
delete(m.closed, device)
|
||||
m.pmut.Unlock()
|
||||
@@ -1072,9 +1098,13 @@ func (m *Model) Closed(conn protocol.Connection, err error) {
|
||||
// close will close the underlying connection for a given device
|
||||
func (m *Model) close(device protocol.DeviceID) {
|
||||
m.pmut.Lock()
|
||||
conn, ok := m.conn[device]
|
||||
m.closeLocked(device)
|
||||
m.pmut.Unlock()
|
||||
}
|
||||
|
||||
// closeLocked will close the underlying connection for a given device
|
||||
func (m *Model) closeLocked(device protocol.DeviceID) {
|
||||
conn, ok := m.conn[device]
|
||||
if !ok {
|
||||
// There is no connection to close
|
||||
return
|
||||
@@ -1270,16 +1300,15 @@ func (m *Model) SetIgnores(folder string, content []string) error {
|
||||
// This allows us to extract some information from the Hello message
|
||||
// and add it to a list of known devices ahead of any checks.
|
||||
func (m *Model) OnHello(remoteID protocol.DeviceID, addr net.Addr, hello protocol.HelloResult) error {
|
||||
if m.IsPaused(remoteID) {
|
||||
return errDevicePaused
|
||||
}
|
||||
|
||||
if m.cfg.IgnoredDevice(remoteID) {
|
||||
return errDeviceIgnored
|
||||
}
|
||||
|
||||
if _, ok := m.cfg.Device(remoteID); ok {
|
||||
if cfg, ok := m.cfg.Device(remoteID); ok {
|
||||
// The device exists
|
||||
if cfg.Paused {
|
||||
return errDevicePaused
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1346,10 +1375,11 @@ func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloR
|
||||
l.Infof(`Device %s client is "%s %s" named "%s"`, deviceID, hello.ClientName, hello.ClientVersion, hello.DeviceName)
|
||||
|
||||
conn.Start()
|
||||
m.pmut.Unlock()
|
||||
|
||||
// Acquires fmut, so has to be done outside of pmut.
|
||||
cm := m.generateClusterConfig(deviceID)
|
||||
conn.ClusterConfig(cm)
|
||||
m.pmut.Unlock()
|
||||
|
||||
device, ok := m.cfg.Devices()[deviceID]
|
||||
if ok && (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) {
|
||||
@@ -1361,17 +1391,6 @@ func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloR
|
||||
m.deviceWasSeen(deviceID)
|
||||
}
|
||||
|
||||
func (m *Model) PauseDevice(device protocol.DeviceID) {
|
||||
m.pmut.Lock()
|
||||
m.devicePaused[device] = true
|
||||
conn, ok := m.conn[device]
|
||||
m.pmut.Unlock()
|
||||
if ok {
|
||||
closeRawConn(conn)
|
||||
}
|
||||
events.Default.Log(events.DevicePaused, map[string]string{"device": device.String()})
|
||||
}
|
||||
|
||||
func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, updates []protocol.FileDownloadProgressUpdate) {
|
||||
if !m.folderSharedWith(folder, device) {
|
||||
return
|
||||
@@ -1381,7 +1400,7 @@ func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, update
|
||||
cfg, ok := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
|
||||
if !ok || cfg.Type == config.FolderTypeReadOnly || cfg.DisableTempIndexes {
|
||||
if !ok || cfg.Type == config.FolderTypeSendOnly || cfg.DisableTempIndexes {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1397,20 +1416,6 @@ func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, update
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Model) ResumeDevice(device protocol.DeviceID) {
|
||||
m.pmut.Lock()
|
||||
m.devicePaused[device] = false
|
||||
m.pmut.Unlock()
|
||||
events.Default.Log(events.DeviceResumed, map[string]string{"device": device.String()})
|
||||
}
|
||||
|
||||
func (m *Model) IsPaused(device protocol.DeviceID) bool {
|
||||
m.pmut.Lock()
|
||||
paused := m.devicePaused[device]
|
||||
m.pmut.Unlock()
|
||||
return paused
|
||||
}
|
||||
|
||||
func (m *Model) deviceStatRef(deviceID protocol.DeviceID) *stats.DeviceStatisticsReference {
|
||||
m.fmut.Lock()
|
||||
defer m.fmut.Unlock()
|
||||
@@ -1563,12 +1568,18 @@ func (m *Model) updateLocalsFromScanning(folder string, fs []protocol.FileInfo)
|
||||
m.fmut.RLock()
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
// Fire the LocalChangeDetected event to notify listeners about local updates.
|
||||
m.localChangeDetected(folderCfg, fs)
|
||||
|
||||
m.diskChangeDetected(folderCfg, fs, events.LocalChangeDetected)
|
||||
}
|
||||
|
||||
func (m *Model) updateLocalsFromPulling(folder string, fs []protocol.FileInfo) {
|
||||
m.updateLocals(folder, fs)
|
||||
|
||||
m.fmut.RLock()
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
|
||||
m.diskChangeDetected(folderCfg, fs, events.RemoteChangeDetected)
|
||||
}
|
||||
|
||||
func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
|
||||
@@ -1594,7 +1605,7 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Model) localChangeDetected(folderCfg config.FolderConfiguration, files []protocol.FileInfo) {
|
||||
func (m *Model) diskChangeDetected(folderCfg config.FolderConfiguration, files []protocol.FileInfo, typeOfEvent events.EventType) {
|
||||
path := strings.Replace(folderCfg.Path(), `\\?\`, "", 1)
|
||||
|
||||
for _, file := range files {
|
||||
@@ -1623,12 +1634,14 @@ func (m *Model) localChangeDetected(folderCfg config.FolderConfiguration, files
|
||||
// for windows paths, strip unwanted chars from the front.
|
||||
path := filepath.Join(path, filepath.FromSlash(file.Name))
|
||||
|
||||
events.Default.Log(events.LocalChangeDetected, map[string]string{
|
||||
"folderID": folderCfg.ID,
|
||||
"label": folderCfg.Label,
|
||||
"action": action,
|
||||
"type": objType,
|
||||
"path": path,
|
||||
// Two different events can be fired here based on what EventType is passed into function
|
||||
events.Default.Log(typeOfEvent, map[string]string{
|
||||
"folderID": folderCfg.ID,
|
||||
"label": folderCfg.Label,
|
||||
"action": action,
|
||||
"type": objType,
|
||||
"path": path,
|
||||
"modifiedBy": file.ModifiedBy.String(),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1871,6 +1884,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error
|
||||
Size: f.Size,
|
||||
ModifiedS: f.ModifiedS,
|
||||
ModifiedNs: f.ModifiedNs,
|
||||
ModifiedBy: m.id.Short(),
|
||||
Permissions: f.Permissions,
|
||||
NoPermissions: f.NoPermissions,
|
||||
Invalid: true,
|
||||
@@ -1896,6 +1910,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error
|
||||
Size: 0,
|
||||
ModifiedS: f.ModifiedS,
|
||||
ModifiedNs: f.ModifiedNs,
|
||||
ModifiedBy: m.id.Short(),
|
||||
Deleted: true,
|
||||
Version: f.Version.Update(m.shortID),
|
||||
}
|
||||
@@ -1981,10 +1996,11 @@ func (m *Model) generateClusterConfig(device protocol.DeviceID) protocol.Cluster
|
||||
protocolFolder := protocol.Folder{
|
||||
ID: folder,
|
||||
Label: folderCfg.Label,
|
||||
ReadOnly: folderCfg.Type == config.FolderTypeReadOnly,
|
||||
ReadOnly: folderCfg.Type == config.FolderTypeSendOnly,
|
||||
IgnorePermissions: folderCfg.IgnorePerms,
|
||||
IgnoreDelete: folderCfg.IgnoreDelete,
|
||||
DisableTempIndexes: folderCfg.DisableTempIndexes,
|
||||
Paused: folderCfg.Paused,
|
||||
}
|
||||
|
||||
// Devices are sorted, so we always get the same order.
|
||||
@@ -2194,7 +2210,13 @@ func (m *Model) Availability(folder, file string, version protocol.Vector, block
|
||||
}
|
||||
|
||||
var availabilities []Availability
|
||||
next:
|
||||
for _, device := range fs.Availability(file) {
|
||||
for _, pausedFolder := range m.remotePausedFolders[device] {
|
||||
if pausedFolder == folder {
|
||||
continue next
|
||||
}
|
||||
}
|
||||
_, ok := m.conn[device]
|
||||
if ok {
|
||||
availabilities = append(availabilities, Availability{ID: device, FromTemporary: false})
|
||||
@@ -2352,16 +2374,6 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
l.Debugln(m, "adding folder", folderID)
|
||||
m.AddFolder(cfg)
|
||||
m.StartFolder(folderID)
|
||||
|
||||
// Drop connections to all devices that can now share the new
|
||||
// folder.
|
||||
m.pmut.Lock()
|
||||
for _, dev := range cfg.DeviceIDs() {
|
||||
if conn, ok := m.conn[dev]; ok {
|
||||
closeRawConn(conn)
|
||||
}
|
||||
}
|
||||
m.pmut.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2383,6 +2395,15 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
if !reflect.DeepEqual(fromCfgCopy, toCfgCopy) {
|
||||
m.RestartFolder(toCfg)
|
||||
}
|
||||
|
||||
// Emit the folder pause/resume event
|
||||
if fromCfg.Paused != toCfg.Paused {
|
||||
eventType := events.FolderResumed
|
||||
if toCfg.Paused {
|
||||
eventType = events.FolderPaused
|
||||
}
|
||||
events.Default.Log(eventType, map[string]string{"id": toCfg.ID, "label": toCfg.Label})
|
||||
}
|
||||
}
|
||||
|
||||
// Removing a device. We actually don't need to do anything.
|
||||
@@ -2392,6 +2413,24 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
// At some point model.Close() will get called for that device which will
|
||||
// clean residue device state that is not part of any folder.
|
||||
|
||||
// Pausing a device, unpausing is handled by the connection service.
|
||||
fromDevices := mapDeviceConfigs(from.Devices)
|
||||
toDevices := mapDeviceConfigs(to.Devices)
|
||||
for deviceID, toCfg := range toDevices {
|
||||
fromCfg, ok := fromDevices[deviceID]
|
||||
if !ok || fromCfg.Paused == toCfg.Paused {
|
||||
continue
|
||||
}
|
||||
|
||||
if toCfg.Paused {
|
||||
l.Infoln("Pausing", deviceID)
|
||||
m.close(deviceID)
|
||||
events.Default.Log(events.DevicePaused, map[string]string{"device": deviceID.String()})
|
||||
} else {
|
||||
events.Default.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
|
||||
}
|
||||
}
|
||||
|
||||
// Some options don't require restart as those components handle it fine
|
||||
// by themselves.
|
||||
from.Options.URAccepted = to.Options.URAccepted
|
||||
@@ -2433,6 +2472,16 @@ func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
|
||||
return m
|
||||
}
|
||||
|
||||
// mapDeviceConfigs returns a map of device ID to device configuration for the given
|
||||
// slice of folder configurations.
|
||||
func mapDeviceConfigs(devices []config.DeviceConfiguration) map[protocol.DeviceID]config.DeviceConfiguration {
|
||||
m := make(map[protocol.DeviceID]config.DeviceConfiguration, len(devices))
|
||||
for _, dev := range devices {
|
||||
m[dev.DeviceID] = dev
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func symlinkInvalid(folder string, fi db.FileIntf) bool {
|
||||
if !symlinks.Supported && fi.IsSymlink() && !fi.IsInvalid() && !fi.IsDeleted() {
|
||||
symlinkWarning.Do(func() {
|
||||
|
||||
@@ -1028,7 +1028,7 @@ func TestROScanRecovery(t *testing.T) {
|
||||
fcfg := config.FolderConfiguration{
|
||||
ID: "default",
|
||||
RawPath: "testdata/rotestfolder",
|
||||
Type: config.FolderTypeReadOnly,
|
||||
Type: config.FolderTypeSendOnly,
|
||||
RescanIntervalS: 1,
|
||||
}
|
||||
cfg := config.Wrap("/tmp/test", config.Configuration{
|
||||
@@ -1115,7 +1115,7 @@ func TestRWScanRecovery(t *testing.T) {
|
||||
fcfg := config.FolderConfiguration{
|
||||
ID: "default",
|
||||
RawPath: "testdata/rwtestfolder",
|
||||
Type: config.FolderTypeReadWrite,
|
||||
Type: config.FolderTypeSendReceive,
|
||||
RescanIntervalS: 1,
|
||||
}
|
||||
cfg := config.Wrap("/tmp/test", config.Configuration{
|
||||
@@ -1763,11 +1763,11 @@ func TestIssue3028(t *testing.T) {
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
|
||||
// Ugly hack for testing: reach into the model for the rwfolder and wait
|
||||
// Ugly hack for testing: reach into the model for the SendReceiveFolder and wait
|
||||
// for it to complete the initial scan. The risk is that it otherwise
|
||||
// runs during our modifications and screws up the test.
|
||||
m.fmut.RLock()
|
||||
folder := m.folderRunners["default"].(*rwFolder)
|
||||
folder := m.folderRunners["default"].(*sendReceiveFolder)
|
||||
m.fmut.RUnlock()
|
||||
<-folder.initialScanCompleted
|
||||
|
||||
@@ -1822,7 +1822,7 @@ func TestIssue3164(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fl := rwFolder{
|
||||
fl := sendReceiveFolder{
|
||||
dbUpdates: make(chan dbUpdateJob, 1),
|
||||
dir: "testdata",
|
||||
}
|
||||
@@ -2090,6 +2090,8 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIssue3496(t *testing.T) {
|
||||
t.Skip("This test deletes files that the other test depend on. Needs fixing.")
|
||||
|
||||
// It seems like lots of deleted files can cause negative completion
|
||||
// percentages. Lets make sure that doesn't happen. Also do some general
|
||||
// checks on the completion calculation stuff.
|
||||
@@ -2196,6 +2198,97 @@ func TestIssue3829(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoRequestsFromPausedDevices(t *testing.T) {
|
||||
dbi := db.OpenMemory()
|
||||
|
||||
fcfg := config.NewFolderConfiguration("default", "testdata")
|
||||
fcfg.Devices = []config.FolderDeviceConfiguration{
|
||||
{DeviceID: device1},
|
||||
{DeviceID: device2},
|
||||
}
|
||||
cfg := config.Configuration{
|
||||
Folders: []config.FolderConfiguration{fcfg},
|
||||
Devices: []config.DeviceConfiguration{
|
||||
config.NewDeviceConfiguration(device1, "device1"),
|
||||
config.NewDeviceConfiguration(device2, "device2"),
|
||||
},
|
||||
Options: config.OptionsConfiguration{
|
||||
// Don't remove temporaries directly on startup
|
||||
KeepTemporariesH: 1,
|
||||
},
|
||||
}
|
||||
|
||||
wcfg := config.Wrap("/tmp/test", cfg)
|
||||
|
||||
m := NewModel(wcfg, protocol.LocalDeviceID, "device", "syncthing", "dev", dbi, nil)
|
||||
m.AddFolder(fcfg)
|
||||
m.StartFolder(fcfg.ID)
|
||||
m.ServeBackground()
|
||||
|
||||
file := testDataExpected["foo"]
|
||||
files := m.folderFiles["default"]
|
||||
files.Update(device1, []protocol.FileInfo{file})
|
||||
files.Update(device2, []protocol.FileInfo{file})
|
||||
|
||||
avail := m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 0 {
|
||||
t.Errorf("should not be available, no connections")
|
||||
}
|
||||
|
||||
addFakeConn(m, device1)
|
||||
addFakeConn(m, device2)
|
||||
|
||||
// !!! This is not what I'd expect to happen, as we don't even know if the peer has the original index !!!
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 2 {
|
||||
t.Errorf("should have two available")
|
||||
}
|
||||
|
||||
cc := protocol.ClusterConfig{
|
||||
Folders: []protocol.Folder{
|
||||
{
|
||||
ID: "default",
|
||||
Devices: []protocol.Device{
|
||||
{ID: device1},
|
||||
{ID: device2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
m.ClusterConfig(device1, cc)
|
||||
m.ClusterConfig(device2, cc)
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 2 {
|
||||
t.Errorf("should have two available")
|
||||
}
|
||||
|
||||
m.Closed(&fakeConnection{id: device1}, errDeviceUnknown)
|
||||
m.Closed(&fakeConnection{id: device2}, errDeviceUnknown)
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 0 {
|
||||
t.Errorf("should have no available")
|
||||
}
|
||||
|
||||
// Test that remote paused folders are not used.
|
||||
|
||||
addFakeConn(m, device1)
|
||||
addFakeConn(m, device2)
|
||||
|
||||
m.ClusterConfig(device1, cc)
|
||||
ccp := cc
|
||||
ccp.Folders[0].Paused = true
|
||||
m.ClusterConfig(device1, ccp)
|
||||
|
||||
avail = m.Availability("default", file.Name, file.Version, file.Blocks[0])
|
||||
if len(avail) != 1 {
|
||||
t.Errorf("should have one available")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRootedJoinedPath(t *testing.T) {
|
||||
type testcase struct {
|
||||
root string
|
||||
|
||||
@@ -15,25 +15,27 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
folderFactories[config.FolderTypeReadOnly] = newROFolder
|
||||
folderFactories[config.FolderTypeSendOnly] = newSendOnlyFolder
|
||||
}
|
||||
|
||||
type roFolder struct {
|
||||
type sendOnlyFolder struct {
|
||||
folder
|
||||
config.FolderConfiguration
|
||||
}
|
||||
|
||||
func newROFolder(model *Model, cfg config.FolderConfiguration, _ versioner.Versioner, _ *fs.MtimeFS) service {
|
||||
return &roFolder{
|
||||
func newSendOnlyFolder(model *Model, cfg config.FolderConfiguration, _ versioner.Versioner, _ *fs.MtimeFS) service {
|
||||
return &sendOnlyFolder{
|
||||
folder: folder{
|
||||
stateTracker: newStateTracker(cfg.ID),
|
||||
scan: newFolderScanner(cfg),
|
||||
stop: make(chan struct{}),
|
||||
model: model,
|
||||
},
|
||||
FolderConfiguration: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *roFolder) Serve() {
|
||||
func (f *sendOnlyFolder) Serve() {
|
||||
l.Debugln(f, "starting")
|
||||
defer l.Debugln(f, "exiting")
|
||||
|
||||
@@ -49,7 +51,7 @@ func (f *roFolder) Serve() {
|
||||
|
||||
case <-f.scan.timer.C:
|
||||
if err := f.model.CheckFolderHealth(f.folderID); err != nil {
|
||||
l.Infoln("Skipping folder", f.folderID, "scan due to folder error:", err)
|
||||
l.Infoln("Skipping scan of", f.Description(), "due to folder error:", err)
|
||||
f.scan.Reschedule()
|
||||
continue
|
||||
}
|
||||
@@ -67,7 +69,7 @@ func (f *roFolder) Serve() {
|
||||
}
|
||||
|
||||
if !initialScanCompleted {
|
||||
l.Infoln("Completed initial scan (ro) of folder", f.folderID)
|
||||
l.Infoln("Completed initial scan (ro) of", f.Description())
|
||||
initialScanCompleted = true
|
||||
}
|
||||
|
||||
@@ -86,6 +88,6 @@ func (f *roFolder) Serve() {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *roFolder) String() string {
|
||||
return fmt.Sprintf("roFolder/%s@%p", f.folderID, f)
|
||||
func (f *sendOnlyFolder) String() string {
|
||||
return fmt.Sprintf("sendOnlyFolder/%s@%p", f.folderID, f)
|
||||
}
|
||||
|
||||
@@ -28,10 +28,11 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/symlinks"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/versioner"
|
||||
"github.com/syncthing/syncthing/lib/weakhash"
|
||||
)
|
||||
|
||||
func init() {
|
||||
folderFactories[config.FolderTypeReadWrite] = newRWFolder
|
||||
folderFactories[config.FolderTypeSendReceive] = newSendReceiveFolder
|
||||
}
|
||||
|
||||
// A pullBlockState is passed to the puller routine for each block that needs
|
||||
@@ -77,24 +78,15 @@ type dbUpdateJob struct {
|
||||
jobType int
|
||||
}
|
||||
|
||||
type rwFolder struct {
|
||||
type sendReceiveFolder struct {
|
||||
folder
|
||||
config.FolderConfiguration
|
||||
|
||||
mtimeFS *fs.MtimeFS
|
||||
dir string
|
||||
versioner versioner.Versioner
|
||||
ignorePerms bool
|
||||
order config.PullOrder
|
||||
maxConflicts int
|
||||
sleep time.Duration
|
||||
pause time.Duration
|
||||
allowSparse bool
|
||||
checkFreeSpace bool
|
||||
ignoreDelete bool
|
||||
fsync bool
|
||||
|
||||
copiers int
|
||||
pullers int
|
||||
mtimeFS *fs.MtimeFS
|
||||
dir string
|
||||
versioner versioner.Versioner
|
||||
sleep time.Duration
|
||||
pause time.Duration
|
||||
|
||||
queue *jobQueue
|
||||
dbUpdates chan dbUpdateJob
|
||||
@@ -107,27 +99,19 @@ type rwFolder struct {
|
||||
initialScanCompleted chan (struct{}) // exposed for testing
|
||||
}
|
||||
|
||||
func newRWFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, mtimeFS *fs.MtimeFS) service {
|
||||
f := &rwFolder{
|
||||
func newSendReceiveFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, mtimeFS *fs.MtimeFS) service {
|
||||
f := &sendReceiveFolder{
|
||||
folder: folder{
|
||||
stateTracker: newStateTracker(cfg.ID),
|
||||
scan: newFolderScanner(cfg),
|
||||
stop: make(chan struct{}),
|
||||
model: model,
|
||||
},
|
||||
FolderConfiguration: cfg,
|
||||
|
||||
mtimeFS: mtimeFS,
|
||||
dir: cfg.Path(),
|
||||
versioner: ver,
|
||||
ignorePerms: cfg.IgnorePerms,
|
||||
copiers: cfg.Copiers,
|
||||
pullers: cfg.Pullers,
|
||||
order: cfg.Order,
|
||||
maxConflicts: cfg.MaxConflicts,
|
||||
allowSparse: !cfg.DisableSparseFiles,
|
||||
checkFreeSpace: cfg.MinDiskFreePct != 0,
|
||||
ignoreDelete: cfg.IgnoreDelete,
|
||||
fsync: cfg.Fsync,
|
||||
mtimeFS: mtimeFS,
|
||||
dir: cfg.Path(),
|
||||
versioner: ver,
|
||||
|
||||
queue: newJobQueue(),
|
||||
pullTimer: time.NewTimer(time.Second),
|
||||
@@ -138,42 +122,42 @@ func newRWFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Ver
|
||||
initialScanCompleted: make(chan struct{}),
|
||||
}
|
||||
|
||||
f.configureCopiersAndPullers(cfg)
|
||||
f.configureCopiersAndPullers()
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *rwFolder) configureCopiersAndPullers(cfg config.FolderConfiguration) {
|
||||
if f.copiers == 0 {
|
||||
f.copiers = defaultCopiers
|
||||
func (f *sendReceiveFolder) configureCopiersAndPullers() {
|
||||
if f.Copiers == 0 {
|
||||
f.Copiers = defaultCopiers
|
||||
}
|
||||
if f.pullers == 0 {
|
||||
f.pullers = defaultPullers
|
||||
if f.Pullers == 0 {
|
||||
f.Pullers = defaultPullers
|
||||
}
|
||||
|
||||
if cfg.PullerPauseS == 0 {
|
||||
if f.PullerPauseS == 0 {
|
||||
f.pause = defaultPullerPause
|
||||
} else {
|
||||
f.pause = time.Duration(cfg.PullerPauseS) * time.Second
|
||||
f.pause = time.Duration(f.PullerPauseS) * time.Second
|
||||
}
|
||||
|
||||
if cfg.PullerSleepS == 0 {
|
||||
if f.PullerSleepS == 0 {
|
||||
f.sleep = defaultPullerSleep
|
||||
} else {
|
||||
f.sleep = time.Duration(cfg.PullerSleepS) * time.Second
|
||||
f.sleep = time.Duration(f.PullerSleepS) * time.Second
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to check whether either the ignorePerm flag has been
|
||||
// set on the local host or the FlagNoPermBits has been set on the file/dir
|
||||
// which is being pulled.
|
||||
func (f *rwFolder) ignorePermissions(file protocol.FileInfo) bool {
|
||||
return f.ignorePerms || file.NoPermissions
|
||||
func (f *sendReceiveFolder) ignorePermissions(file protocol.FileInfo) bool {
|
||||
return f.IgnorePerms || file.NoPermissions
|
||||
}
|
||||
|
||||
// Serve will run scans and pulls. It will return when Stop()ed or on a
|
||||
// critical error.
|
||||
func (f *rwFolder) Serve() {
|
||||
func (f *sendReceiveFolder) Serve() {
|
||||
l.Debugln(f, "starting")
|
||||
defer l.Debugln(f, "exiting")
|
||||
|
||||
@@ -228,7 +212,7 @@ func (f *rwFolder) Serve() {
|
||||
}
|
||||
|
||||
if err := f.model.CheckFolderHealth(f.folderID); err != nil {
|
||||
l.Infoln("Skipping folder", f.folderID, "pull due to folder error:", err)
|
||||
l.Infoln("Skipping pull of", f.Description(), "due to folder error:", err)
|
||||
f.pullTimer.Reset(f.sleep)
|
||||
continue
|
||||
}
|
||||
@@ -301,7 +285,7 @@ func (f *rwFolder) Serve() {
|
||||
select {
|
||||
case <-f.initialScanCompleted:
|
||||
default:
|
||||
l.Infoln("Completed initial scan (rw) of folder", f.folderID)
|
||||
l.Infoln("Completed initial scan (rw) of", f.Description())
|
||||
close(f.initialScanCompleted)
|
||||
}
|
||||
|
||||
@@ -314,7 +298,7 @@ func (f *rwFolder) Serve() {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *rwFolder) IndexUpdated() {
|
||||
func (f *sendReceiveFolder) IndexUpdated() {
|
||||
select {
|
||||
case f.remoteIndex <- struct{}{}:
|
||||
default:
|
||||
@@ -325,15 +309,15 @@ func (f *rwFolder) IndexUpdated() {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *rwFolder) String() string {
|
||||
return fmt.Sprintf("rwFolder/%s@%p", f.folderID, f)
|
||||
func (f *sendReceiveFolder) String() string {
|
||||
return fmt.Sprintf("sendReceiveFolder/%s@%p", f.folderID, f)
|
||||
}
|
||||
|
||||
// pullerIteration runs a single puller iteration for the given folder and
|
||||
// returns the number items that should have been synced (even those that
|
||||
// might have failed). One puller iteration handles all files currently
|
||||
// flagged as needed in the folder.
|
||||
func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
func (f *sendReceiveFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
pullChan := make(chan pullBlockState)
|
||||
copyChan := make(chan copyBlocksState)
|
||||
finisherChan := make(chan *sharedPullerState)
|
||||
@@ -343,7 +327,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
pullWg := sync.NewWaitGroup()
|
||||
doneWg := sync.NewWaitGroup()
|
||||
|
||||
l.Debugln(f, "c", f.copiers, "p", f.pullers)
|
||||
l.Debugln(f, "c", f.Copiers, "p", f.Pullers)
|
||||
|
||||
f.dbUpdates = make(chan dbUpdateJob)
|
||||
updateWg.Add(1)
|
||||
@@ -353,7 +337,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
updateWg.Done()
|
||||
}()
|
||||
|
||||
for i := 0; i < f.copiers; i++ {
|
||||
for i := 0; i < f.Copiers; i++ {
|
||||
copyWg.Add(1)
|
||||
go func() {
|
||||
// copierRoutine finishes when copyChan is closed
|
||||
@@ -362,7 +346,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < f.pullers; i++ {
|
||||
for i := 0; i < f.Pullers; i++ {
|
||||
pullWg.Add(1)
|
||||
go func() {
|
||||
// pullerRoutine finishes when pullChan is closed
|
||||
@@ -391,7 +375,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
|
||||
// pile.
|
||||
|
||||
folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
|
||||
if shouldIgnore(intf, ignores, f.ignoreDelete, defTempNamer) {
|
||||
if shouldIgnore(intf, ignores, f.IgnoreDelete, defTempNamer) {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -488,7 +472,7 @@ func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {

// Now do the file queue. Reorder it according to configuration.

switch f.order {
switch f.Order {
case config.OrderRandom:
f.queue.Shuffle()
case config.OrderAlphabetic:
@@ -510,7 +494,7 @@ nextFile:
select {
case <-f.stop:
// Stop processing files if the puller has been told to stop.
break
break nextFile
default:
}

@@ -600,7 +584,7 @@ nextFile:
}

// handleDir creates or updates the given directory
func (f *rwFolder) handleDir(file protocol.FileInfo) {
func (f *sendReceiveFolder) handleDir(file protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -702,7 +686,7 @@ func (f *rwFolder) handleDir(file protocol.FileInfo) {
}

// handleSymlink creates or updates the given symlink
func (f *rwFolder) handleSymlink(file protocol.FileInfo) {
func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -776,7 +760,7 @@ func (f *rwFolder) handleSymlink(file protocol.FileInfo) {
}

// deleteDir attempts to delete the given directory
func (f *rwFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -834,7 +818,7 @@ func (f *rwFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
}

// deleteFile attempts to delete the given file
func (f *rwFolder) deleteFile(file protocol.FileInfo) {
func (f *sendReceiveFolder) deleteFile(file protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -892,7 +876,7 @@ func (f *rwFolder) deleteFile(file protocol.FileInfo) {

// renameFile attempts to rename an existing file to a destination
// and set the right attributes on it.
func (f *rwFolder) renameFile(source, target protocol.FileInfo) {
func (f *sendReceiveFolder) renameFile(source, target protocol.FileInfo) {
// Used in the defer closure below, updated by the function body. Take
// care not declare another err.
var err error
@@ -1016,7 +1000,7 @@ func (f *rwFolder) renameFile(source, target protocol.FileInfo) {

// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)

if hasCurFile && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
@@ -1130,7 +1114,7 @@ func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
blocksSize = file.Size
}

if f.checkFreeSpace {
if f.MinDiskFreePct > 0 {
if free, err := osutil.DiskFreeBytes(f.dir); err == nil && free < blocksSize {
l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
f.newError(file.Name, errors.New("insufficient space"))
@@ -1165,11 +1149,11 @@ func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
ignorePerms: f.ignorePermissions(file),
version: curFile.Version,
mut: sync.NewRWMutex(),
sparse: f.allowSparse,
sparse: !f.DisableSparseFiles,
created: time.Now(),
}

l.Debugf("%v need file %s; copy %d, reused %v", f, file.Name, len(blocks), reused)
l.Debugf("%v need file %s; copy %d, reused %v", f, file.Name, len(blocks), len(reused))

cs := copyBlocksState{
sharedPullerState: &s,
@@ -1180,7 +1164,7 @@ func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks

// shortcutFile sets file mode and modification time, when that's the only
// thing that has changed.
func (f *rwFolder) shortcutFile(file protocol.FileInfo) error {
func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo) error {
realName, err := rootedJoinedPath(f.dir, file.Name)
if err != nil {
f.newError(file.Name, err)
@@ -1207,7 +1191,7 @@ func (f *rwFolder) shortcutFile(file protocol.FileInfo) error {

// copierRoutine reads copierStates until the in channel closes and performs
// the relevant copies when possible, or passes it to the puller routine.
func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
buf := make([]byte, protocol.BlockSize)

for state := range in {
@@ -1231,8 +1215,23 @@ func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pull
}
f.model.fmut.RUnlock()

var weakHashFinder *weakhash.Finder
if !f.DisableWeakHash {
hashesToFind := make([]uint32, 0, len(state.blocks))
for _, block := range state.blocks {
if block.WeakHash != 0 {
hashesToFind = append(hashesToFind, block.WeakHash)
}
}

weakHashFinder, err = weakhash.NewFinder(state.realName, protocol.BlockSize, hashesToFind)
if err != nil {
l.Debugln("weak hasher", err)
}
}

for _, block := range state.blocks {
if f.allowSparse && state.reused == 0 && block.IsEmpty() {
if !f.DisableSparseFiles && state.reused == 0 && block.IsEmpty() {
// The block is a block of all zeroes, and we are not reusing
// a temp file, so there is no need to do anything with it.
// If we were reusing a temp file and had this block to copy,
@@ -1245,45 +1244,70 @@ func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pull
}

buf = buf[:int(block.Size)]
found := f.model.finder.Iterate(folders, block.Hash, func(folder, file string, index int32) bool {
inFile, err := rootedJoinedPath(folderRoots[folder], file)
if err != nil {
return false
}
fd, err := os.Open(inFile)
if err != nil {
return false
}

_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
fd.Close()
if err != nil {
return false
}

hash, err := scanner.VerifyBuffer(buf, block)
if err != nil {
if hash != nil {
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
err = f.model.finder.Fix(folder, file, index, block.Hash, hash)
if err != nil {
l.Warnln("finder fix:", err)
}
} else {
l.Debugln("Finder failed to verify buffer", err)
}
return false
found, err := weakHashFinder.Iterate(block.WeakHash, buf, func(offset int64) bool {
if _, err := scanner.VerifyBuffer(buf, block); err != nil {
return true
}

_, err = dstFd.WriteAt(buf, block.Offset)
if err != nil {
state.fail("dst write", err)

}
if file == state.file.Name {
if offset == block.Offset {
state.copiedFromOrigin()
} else {
state.copiedFromOriginShifted()
}
return true

return false
})
if err != nil {
l.Debugln("weak hasher iter", err)
}

if !found {
found = f.model.finder.Iterate(folders, block.Hash, func(folder, file string, index int32) bool {
inFile, err := rootedJoinedPath(folderRoots[folder], file)
if err != nil {
return false
}
fd, err := os.Open(inFile)
if err != nil {
return false
}

_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
fd.Close()
if err != nil {
return false
}

hash, err := scanner.VerifyBuffer(buf, block)
if err != nil {
if hash != nil {
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
err = f.model.finder.Fix(folder, file, index, block.Hash, hash)
if err != nil {
l.Warnln("finder fix:", err)
}
} else {
l.Debugln("Finder failed to verify buffer", err)
}
return false
}

_, err = dstFd.WriteAt(buf, block.Offset)
if err != nil {
state.fail("dst write", err)
}
if file == state.file.Name {
state.copiedFromOrigin()
}
return true
})
}

if state.failed() != nil {
break
@@ -1300,11 +1324,12 @@ func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pull
state.copyDone(block)
}
}
weakHashFinder.Close()
out <- state.sharedPullerState
}
}

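An editorial aside on the change above: the reworked copierRoutine now consults a rolling weak-hash scan of the existing file (weakhash.Finder) for each wanted block before falling back to the global block finder, so data that merely shifted inside the file can be copied locally and is counted through copiedFromOriginShifted(). The following is only a minimal, self-contained sketch of rolling weak-hash matching; the checksum, the block size and every identifier are assumptions for illustration, not the weakhash package's actual API.

package main

import (
    "bytes"
    "fmt"
)

const blockSize = 4 // tiny block size for the example; real blocks are much larger

// weakHash is a simple Adler-style checksum used only for this sketch.
func weakHash(data []byte) (a, b uint32) {
    for _, c := range data {
        a += uint32(c)
        b += a
    }
    return a, b
}

// findShifted slides a blockSize window over haystack, rolling the weak hash
// one byte at a time, and returns every offset whose weak hash matches want.
// A caller would still verify each candidate with a strong hash before copying.
func findShifted(haystack []byte, want uint32) []int64 {
    if len(haystack) < blockSize {
        return nil
    }
    var offsets []int64
    a, b := weakHash(haystack[:blockSize])
    for i := 0; ; i++ {
        if (b<<16)|a == want {
            offsets = append(offsets, int64(i))
        }
        if i+blockSize >= len(haystack) {
            break
        }
        // Roll the window: drop haystack[i], add haystack[i+blockSize].
        out, in := uint32(haystack[i]), uint32(haystack[i+blockSize])
        a = a - out + in
        b = b - uint32(blockSize)*out + a
    }
    return offsets
}

func main() {
    oldFile := []byte("xyabcdefgh") // contains the wanted block, shifted by two bytes
    block := []byte("abcd")         // one block of the desired file
    a, b := weakHash(block)
    for _, off := range findShifted(oldFile, (b<<16)|a) {
        if bytes.Equal(oldFile[off:off+blockSize], block) { // stand-in for strong verification
            fmt.Println("block found at shifted offset", off)
        }
    }
}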
func (f *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
for state := range in {
if state.failed() != nil {
out <- state.sharedPullerState
@@ -1320,7 +1345,7 @@ func (f *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPul
continue
}

if f.allowSparse && state.reused == 0 && state.block.IsEmpty() {
if !f.DisableSparseFiles && state.reused == 0 && state.block.IsEmpty() {
// There is no need to request a block of all zeroes. Pretend we
// requested it and handled it correctly.
state.pullDone(state.block)
@@ -1377,7 +1402,7 @@ func (f *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPul
}
}

func (f *rwFolder) performFinish(state *sharedPullerState) error {
func (f *sendReceiveFolder) performFinish(state *sharedPullerState) error {
// Set the correct permission bits on the new file
if !f.ignorePermissions(state.file) {
if err := os.Chmod(state.tempName, os.FileMode(state.file.Permissions&0777)); err != nil {
@@ -1439,7 +1464,7 @@ func (f *rwFolder) performFinish(state *sharedPullerState) error {
return nil
}

func (f *rwFolder) finisherRoutine(in <-chan *sharedPullerState) {
func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState) {
for state := range in {
if closed, err := state.finalClose(); closed {
l.Debugln(f, "closing", state.file.Name)
@@ -1470,17 +1495,17 @@ func (f *rwFolder) finisherRoutine(in <-chan *sharedPullerState) {
}

// Moves the given filename to the front of the job queue
func (f *rwFolder) BringToFront(filename string) {
func (f *sendReceiveFolder) BringToFront(filename string) {
f.queue.BringToFront(filename)
}

func (f *rwFolder) Jobs() ([]string, []string) {
func (f *sendReceiveFolder) Jobs() ([]string, []string) {
return f.queue.Jobs()
}

// dbUpdaterRoutine aggregates db updates and commits them in batches no
// larger than 1000 items, and no more delayed than 2 seconds.
func (f *rwFolder) dbUpdaterRoutine() {
func (f *sendReceiveFolder) dbUpdaterRoutine() {
const (
maxBatchSize = 1000
maxBatchTime = 2 * time.Second
@@ -1493,7 +1518,7 @@ func (f *rwFolder) dbUpdaterRoutine() {

var changedFiles []string
var changedDirs []string
if f.fsync {
if f.Fsync {
changedFiles = make([]string, 0, maxBatchSize)
changedDirs = make([]string, 0, maxBatchSize)
}
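As background for the hunks above and below, dbUpdaterRoutine follows the common Go batching pattern implied by maxBatchSize and maxBatchTime: accumulate updates from a channel and flush when either the size cap or the time cap is reached. A stand-alone, hedged sketch of that pattern with invented names (not the actual syncthing routine):

package main

import (
    "fmt"
    "time"
)

const (
    maxBatchSize = 1000
    maxBatchTime = 2 * time.Second
)

// batcher drains jobs from in and flushes them in groups of at most
// maxBatchSize, waiting no longer than maxBatchTime for a partial batch.
func batcher(in <-chan string, flush func([]string)) {
    batch := make([]string, 0, maxBatchSize)
    tick := time.NewTicker(maxBatchTime)
    defer tick.Stop()

    for {
        select {
        case job, ok := <-in:
            if !ok {
                if len(batch) > 0 {
                    flush(batch)
                }
                return
            }
            batch = append(batch, job)
            if len(batch) == maxBatchSize {
                flush(batch)
                batch = batch[:0]
            }
        case <-tick.C:
            if len(batch) > 0 {
                flush(batch)
                batch = batch[:0]
            }
        }
    }
}

func main() {
    in := make(chan string)
    go func() {
        for i := 0; i < 5; i++ {
            in <- fmt.Sprintf("update-%d", i)
        }
        close(in)
    }()
    batcher(in, func(b []string) { fmt.Println("committing", len(b), "updates") })
}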
@@ -1518,7 +1543,7 @@ func (f *rwFolder) dbUpdaterRoutine() {

for _, job := range batch {
files = append(files, job.file)
if f.fsync {
if f.Fsync {
// collect changed files and dirs
switch job.jobType {
case dbUpdateHandleFile, dbUpdateShortcutFile:
@@ -1544,7 +1569,7 @@ func (f *rwFolder) dbUpdaterRoutine() {
lastFile = job.file
}

if f.fsync {
if f.Fsync {
// sync files and dirs to disk
syncFilesOnce(changedFiles, osutil.SyncFile)
changedFiles = changedFiles[:0]
@@ -1591,7 +1616,7 @@ loop:
}
}

func (f *rwFolder) inConflict(current, replacement protocol.Vector) bool {
func (f *sendReceiveFolder) inConflict(current, replacement protocol.Vector) bool {
if current.Concurrent(replacement) {
// Obvious case
return true
@@ -1617,7 +1642,7 @@ func removeAvailability(availabilities []Availability, availability Availability
return availabilities
}

func (f *rwFolder) moveForConflict(name string) error {
func (f *sendReceiveFolder) moveForConflict(name string) error {
if strings.Contains(filepath.Base(name), ".sync-conflict-") {
l.Infoln("Conflict for", name, "which is already a conflict copy; not copying again.")
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
@@ -1626,7 +1651,7 @@ func (f *rwFolder) moveForConflict(name string) error {
return nil
}

if f.maxConflicts == 0 {
if f.MaxConflicts == 0 {
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
return err
}
@@ -1644,11 +1669,11 @@ func (f *rwFolder) moveForConflict(name string) error {
// matter, go ahead as if the move succeeded.
err = nil
}
if f.maxConflicts > -1 {
if f.MaxConflicts > -1 {
matches, gerr := osutil.Glob(withoutExt + ".sync-conflict-????????-??????" + ext)
if gerr == nil && len(matches) > f.maxConflicts {
if gerr == nil && len(matches) > f.MaxConflicts {
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
for _, match := range matches[f.maxConflicts:] {
for _, match := range matches[f.MaxConflicts:] {
gerr = os.Remove(match)
if gerr != nil {
l.Debugln(f, "removing extra conflict", gerr)
@@ -1661,7 +1686,7 @@ func (f *rwFolder) moveForConflict(name string) error {
return err
}

func (f *rwFolder) newError(path string, err error) {
func (f *sendReceiveFolder) newError(path string, err error) {
f.errorsMut.Lock()
defer f.errorsMut.Unlock()

@@ -1675,13 +1700,13 @@ func (f *rwFolder) newError(path string, err error) {
f.errors[path] = err.Error()
}

func (f *rwFolder) clearErrors() {
func (f *sendReceiveFolder) clearErrors() {
f.errorsMut.Lock()
f.errors = make(map[string]string)
f.errorsMut.Unlock()
}

func (f *rwFolder) currentErrors() []fileError {
func (f *sendReceiveFolder) currentErrors() []fileError {
f.errorsMut.Lock()
errors := make([]fileError, 0, len(f.errors))
for path, err := range f.errors {

@@ -7,12 +7,15 @@
package model

import (
"crypto/rand"
"io"
"os"
"path/filepath"
"testing"
"time"

"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/sync"
@@ -66,12 +69,14 @@ func setUpModel(file protocol.FileInfo) *Model {
return model
}

func setUpRwFolder(model *Model) rwFolder {
return rwFolder{
func setUpSendReceiveFolder(model *Model) sendReceiveFolder {
return sendReceiveFolder{
folder: folder{
stateTracker: newStateTracker("default"),
model: model,
},

mtimeFS: fs.NewMtimeFS(db.NewNamespacedKV(model.db, "mtime")),
dir: "testdata",
queue: newJobQueue(),
errors: make(map[string]string),
@@ -95,7 +100,7 @@ func TestHandleFile(t *testing.T) {
requiredFile.Blocks = blocks[1:]

m := setUpModel(existingFile)
f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState, 1)

f.handleFile(requiredFile, copyChan, nil)
@@ -136,7 +141,7 @@ func TestHandleFileWithTemp(t *testing.T) {
requiredFile.Blocks = blocks[1:]

m := setUpModel(existingFile)
f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState, 1)

f.handleFile(requiredFile, copyChan, nil)
@@ -184,7 +189,7 @@ func TestCopierFinder(t *testing.T) {
requiredFile.Name = "file2"

m := setUpModel(existingFile)
f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, 4)
finisherChan := make(chan *sharedPullerState, 1)
@@ -199,7 +204,7 @@ func TestCopierFinder(t *testing.T) {

select {
case <-pullChan:
t.Fatal("Finisher channel has data to be read")
t.Fatal("Pull channel has data to be read")
case <-finisherChan:
t.Fatal("Finisher channel has data to be read")
default:
@@ -240,6 +245,133 @@ func TestCopierFinder(t *testing.T) {
os.Remove(tempFile)
}

func TestWeakHash(t *testing.T) {
tempFile := filepath.Join("testdata", defTempNamer.TempName("weakhash"))
var shift int64 = 10
var size int64 = 1 << 20
expectBlocks := int(size / protocol.BlockSize)
expectPulls := int(shift / protocol.BlockSize)
if shift > 0 {
expectPulls++
}

cleanup := func() {
for _, path := range []string{tempFile, "testdata/weakhash"} {
os.Remove(path)
}
}

cleanup()
defer cleanup()

f, err := os.Create("testdata/weakhash")
if err != nil {
t.Error(err)
}
defer f.Close()
_, err = io.CopyN(f, rand.Reader, size)
if err != nil {
t.Error(err)
}
info, err := f.Stat()
if err != nil {
t.Error(err)
}

// Create two files, second file has `shifted` bytes random prefix, yet
// both are of the same length, for example:
// File 1: abcdefgh
// File 2: xyabcdef
f.Seek(0, os.SEEK_SET)
existing, err := scanner.Blocks(f, protocol.BlockSize, size, nil)
if err != nil {
t.Error(err)
}

f.Seek(0, os.SEEK_SET)
remainder := io.LimitReader(f, size-shift)
prefix := io.LimitReader(rand.Reader, shift)
nf := io.MultiReader(prefix, remainder)
desired, err := scanner.Blocks(nf, protocol.BlockSize, size, nil)
if err != nil {
t.Error(err)
}

existingFile := protocol.FileInfo{
Name: "weakhash",
Blocks: existing,
Size: size,
ModifiedS: info.ModTime().Unix(),
ModifiedNs: int32(info.ModTime().Nanosecond()),
}
desiredFile := protocol.FileInfo{
Name: "weakhash",
Size: size,
Blocks: desired,
ModifiedS: info.ModTime().Unix() + 1,
}

// Setup the model/pull environment
m := setUpModel(existingFile)
fo := setUpSendReceiveFolder(m)
copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, expectBlocks)
finisherChan := make(chan *sharedPullerState, 1)

// Run a single fetcher routine
go fo.copierRoutine(copyChan, pullChan, finisherChan)

// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
fo.DisableWeakHash = true
fo.handleFile(desiredFile, copyChan, finisherChan)

var pulls []pullBlockState
for len(pulls) < expectBlocks {
select {
case pull := <-pullChan:
pulls = append(pulls, pull)
case <-time.After(10 * time.Second):
t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
}
}
finish := <-finisherChan

select {
case <-pullChan:
t.Fatal("Pull channel has data to be read")
case <-finisherChan:
t.Fatal("Finisher channel has data to be read")
default:
}

finish.fd.Close()
if err := os.Remove(tempFile); err != nil && !os.IsNotExist(err) {
t.Error(err)
}

// Test 2 - using weak hash, expectPulls blocks pulled.
fo.DisableWeakHash = false
fo.handleFile(desiredFile, copyChan, finisherChan)

pulls = pulls[:0]
for len(pulls) < expectPulls {
select {
case pull := <-pullChan:
pulls = append(pulls, pull)
case <-time.After(10 * time.Second):
t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
}
}

finish = <-finisherChan
finish.fd.Close()

expectShifted := expectBlocks - expectPulls
if finish.copyOriginShifted != expectShifted {
t.Errorf("did not copy %d shifted", expectShifted)
}
}

// Test that updating a file removes it's old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
iterFn := func(folder, file string, index int32) bool {
@@ -293,7 +425,7 @@ func TestLastResortPulling(t *testing.T) {
return true
}

f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)

copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, 1)
@@ -331,7 +463,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
m.AddFolder(defaultFolderConfig)

f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)

// queue.Done should be called by the finisher routine
f.queue.Push("filex", 0, time.Time{})
@@ -404,7 +536,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
m.AddFolder(defaultFolderConfig)

f := setUpRwFolder(m)
f := setUpSendReceiveFolder(m)

// queue.Done should be called by the finisher routine
f.queue.Push("filex", 0, time.Time{})

@@ -31,30 +31,32 @@ type sharedPullerState struct {
created time.Time

// Mutable, must be locked for access
err error // The first error we hit
fd *os.File // The fd of the temp file
copyTotal int // Total number of copy actions for the whole job
pullTotal int // Total number of pull actions for the whole job
copyOrigin int // Number of blocks copied from the original file
copyNeeded int // Number of copy actions still pending
pullNeeded int // Number of block pulls still pending
updated time.Time // Time when any of the counters above were last updated
closed bool // True if the file has been finalClosed.
available []int32 // Indexes of the blocks that are available in the temporary file
availableUpdated time.Time // Time when list of available blocks was last updated
mut sync.RWMutex // Protects the above
err error // The first error we hit
fd *os.File // The fd of the temp file
copyTotal int // Total number of copy actions for the whole job
pullTotal int // Total number of pull actions for the whole job
copyOrigin int // Number of blocks copied from the original file
copyOriginShifted int // Number of blocks copied from the original file but shifted
copyNeeded int // Number of copy actions still pending
pullNeeded int // Number of block pulls still pending
updated time.Time // Time when any of the counters above were last updated
closed bool // True if the file has been finalClosed.
available []int32 // Indexes of the blocks that are available in the temporary file
availableUpdated time.Time // Time when list of available blocks was last updated
mut sync.RWMutex // Protects the above
}

// A momentary state representing the progress of the puller
type pullerProgress struct {
Total int `json:"total"`
Reused int `json:"reused"`
CopiedFromOrigin int `json:"copiedFromOrigin"`
CopiedFromElsewhere int `json:"copiedFromElsewhere"`
Pulled int `json:"pulled"`
Pulling int `json:"pulling"`
BytesDone int64 `json:"bytesDone"`
BytesTotal int64 `json:"bytesTotal"`
Total int `json:"total"`
Reused int `json:"reused"`
CopiedFromOrigin int `json:"copiedFromOrigin"`
CopiedFromOriginShifted int `json:"copiedFromOriginShifted"`
CopiedFromElsewhere int `json:"copiedFromElsewhere"`
Pulled int `json:"pulled"`
Pulling int `json:"pulling"`
BytesDone int64 `json:"bytesDone"`
BytesTotal int64 `json:"bytesTotal"`
}

// A lockedWriterAt synchronizes WriteAt calls with an external mutex.
@@ -241,6 +243,14 @@ func (s *sharedPullerState) copiedFromOrigin() {
s.mut.Unlock()
}

func (s *sharedPullerState) copiedFromOriginShifted() {
s.mut.Lock()
s.copyOrigin++
s.copyOriginShifted++
s.updated = time.Now()
s.mut.Unlock()
}

func (s *sharedPullerState) pullStarted() {
s.mut.Lock()
s.copyTotal--

@@ -37,6 +37,7 @@ func (d *deadlockDetector) Watch(name string, mut sync.Locker) {

go func() {
mut.Lock()
_ = 1 // empty critical section
mut.Unlock()
ok <- true
}()

@@ -22,10 +22,7 @@ func SyncFile(path string) error {
}
defer fd.Close()
// MacOS and Windows do not flush the disk cache
if err := fd.Sync(); err != nil {
return err
}
return nil
return fd.Sync()
}

func SyncDir(path string) error {

@@ -246,6 +246,7 @@ type Folder struct {
IgnorePermissions bool `protobuf:"varint,4,opt,name=ignore_permissions,json=ignorePermissions,proto3" json:"ignore_permissions,omitempty"`
IgnoreDelete bool `protobuf:"varint,5,opt,name=ignore_delete,json=ignoreDelete,proto3" json:"ignore_delete,omitempty"`
DisableTempIndexes bool `protobuf:"varint,6,opt,name=disable_temp_indexes,json=disableTempIndexes,proto3" json:"disable_temp_indexes,omitempty"`
Paused bool `protobuf:"varint,7,opt,name=paused,proto3" json:"paused,omitempty"`
Devices []Device `protobuf:"bytes,16,rep,name=devices" json:"devices"`
}

@@ -298,12 +299,13 @@ type FileInfo struct {
Permissions uint32 `protobuf:"varint,4,opt,name=permissions,proto3" json:"permissions,omitempty"`
ModifiedS int64 `protobuf:"varint,5,opt,name=modified_s,json=modifiedS,proto3" json:"modified_s,omitempty"`
ModifiedNs int32 `protobuf:"varint,11,opt,name=modified_ns,json=modifiedNs,proto3" json:"modified_ns,omitempty"`
ModifiedBy ShortID `protobuf:"varint,12,opt,name=modified_by,json=modifiedBy,proto3,customtype=ShortID" json:"modified_by"`
Deleted bool `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted,omitempty"`
Invalid bool `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid,omitempty"`
NoPermissions bool `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"no_permissions,omitempty"`
Version Vector `protobuf:"bytes,9,opt,name=version" json:"version"`
Sequence int64 `protobuf:"varint,10,opt,name=sequence,proto3" json:"sequence,omitempty"`
Blocks []BlockInfo `protobuf:"bytes,16,rep,name=Blocks" json:"Blocks"`
Blocks []BlockInfo `protobuf:"bytes,16,rep,name=Blocks,json=blocks" json:"Blocks"`
SymlinkTarget string `protobuf:"bytes,17,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"`
}

@@ -312,9 +314,10 @@ func (*FileInfo) ProtoMessage() {}
func (*FileInfo) Descriptor() ([]byte, []int) { return fileDescriptorBep, []int{7} }

type BlockInfo struct {
Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
Size int32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
Hash []byte `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"`
Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
Size int32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
Hash []byte `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"`
WeakHash uint32 `protobuf:"varint,4,opt,name=weak_hash,json=weakHash,proto3" json:"weak_hash,omitempty"`
}

func (m *BlockInfo) Reset() { *m = BlockInfo{} }
@@ -380,7 +383,7 @@ type FileDownloadProgressUpdate struct {
UpdateType FileDownloadProgressUpdateType `protobuf:"varint,1,opt,name=update_type,json=updateType,proto3,enum=protocol.FileDownloadProgressUpdateType" json:"update_type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Version Vector `protobuf:"bytes,3,opt,name=version" json:"version"`
BlockIndexes []int32 `protobuf:"varint,4,rep,name=block_indexes,json=blockIndexes" json:"block_indexes,omitempty"`
BlockIndexes []int32 `protobuf:"varint,4,rep,packed,name=block_indexes,json=blockIndexes" json:"block_indexes,omitempty"`
}

func (m *FileDownloadProgressUpdate) Reset() { *m = FileDownloadProgressUpdate{} }
@@ -591,6 +594,16 @@ func (m *Folder) MarshalTo(data []byte) (int, error) {
}
i++
}
if m.Paused {
data[i] = 0x38
i++
if m.Paused {
data[i] = 1
} else {
data[i] = 0
}
i++
}
if len(m.Devices) > 0 {
for _, msg := range m.Devices {
data[i] = 0x82
@@ -857,6 +870,11 @@ func (m *FileInfo) MarshalTo(data []byte) (int, error) {
i++
i = encodeVarintBep(data, i, uint64(m.ModifiedNs))
}
if m.ModifiedBy != 0 {
data[i] = 0x60
i++
i = encodeVarintBep(data, i, uint64(m.ModifiedBy))
}
if len(m.Blocks) > 0 {
for _, msg := range m.Blocks {
data[i] = 0x82
@@ -913,6 +931,11 @@ func (m *BlockInfo) MarshalTo(data []byte) (int, error) {
i = encodeVarintBep(data, i, uint64(len(m.Hash)))
i += copy(data[i:], m.Hash)
}
if m.WeakHash != 0 {
data[i] = 0x20
i++
i = encodeVarintBep(data, i, uint64(m.WeakHash))
}
return i, nil
}

@@ -1140,11 +1163,22 @@ func (m *FileDownloadProgressUpdate) MarshalTo(data []byte) (int, error) {
}
i += n3
if len(m.BlockIndexes) > 0 {
for _, num := range m.BlockIndexes {
data[i] = 0x20
i++
i = encodeVarintBep(data, i, uint64(num))
data5 := make([]byte, len(m.BlockIndexes)*10)
var j4 int
for _, num1 := range m.BlockIndexes {
num := uint64(num1)
for num >= 1<<7 {
data5[j4] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j4++
}
data5[j4] = uint8(num)
j4++
}
data[i] = 0x22
i++
i = encodeVarintBep(data, i, uint64(j4))
i += copy(data[i:], data5[:j4])
}
return i, nil
}
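For orientation, the generated marshalling code above switches block_indexes to packed encoding: a single key byte (0x22, i.e. field 4 with wire type 2), a varint payload length, and then the values as consecutive varints. Below is a small independent sketch of the same packing, relying only on the standard varint rules; the function names are made up for the example.

package main

import "fmt"

// putUvarint appends the protobuf base-128 varint encoding of v.
func putUvarint(buf []byte, v uint64) []byte {
    for v >= 0x80 {
        buf = append(buf, byte(v)|0x80)
        v >>= 7
    }
    return append(buf, byte(v))
}

// packInt32s encodes values as a packed repeated field with the given number:
// key byte (field<<3 | wire type 2), payload length, then the varints.
func packInt32s(field int, values []int32) []byte {
    var payload []byte
    for _, v := range values {
        payload = putUvarint(payload, uint64(v))
    }
    out := []byte{byte(field<<3 | 2)}
    out = putUvarint(out, uint64(len(payload)))
    return append(out, payload...)
}

func main() {
    // Field 4; 0x22 is the same key byte the generated code writes.
    fmt.Printf("% x\n", packInt32s(4, []int32{1, 2, 300}))
}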
@@ -1283,6 +1317,9 @@ func (m *Folder) ProtoSize() (n int) {
if m.DisableTempIndexes {
n += 2
}
if m.Paused {
n += 2
}
if len(m.Devices) > 0 {
for _, e := range m.Devices {
l = e.ProtoSize()
@@ -1397,6 +1434,9 @@ func (m *FileInfo) ProtoSize() (n int) {
if m.ModifiedNs != 0 {
n += 1 + sovBep(uint64(m.ModifiedNs))
}
if m.ModifiedBy != 0 {
n += 1 + sovBep(uint64(m.ModifiedBy))
}
if len(m.Blocks) > 0 {
for _, e := range m.Blocks {
l = e.ProtoSize()
@@ -1423,6 +1463,9 @@ func (m *BlockInfo) ProtoSize() (n int) {
if l > 0 {
n += 1 + l + sovBep(uint64(l))
}
if m.WeakHash != 0 {
n += 1 + sovBep(uint64(m.WeakHash))
}
return n
}

@@ -1525,9 +1568,11 @@ func (m *FileDownloadProgressUpdate) ProtoSize() (n int) {
l = m.Version.ProtoSize()
n += 1 + l + sovBep(uint64(l))
if len(m.BlockIndexes) > 0 {
l = 0
for _, e := range m.BlockIndexes {
n += 1 + sovBep(uint64(e))
l += sovBep(uint64(e))
}
n += 1 + sovBep(uint64(l)) + l
}
return n
}
@@ -2034,6 +2079,26 @@ func (m *Folder) Unmarshal(data []byte) error {
}
}
m.DisableTempIndexes = bool(v != 0)
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBep
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Paused = bool(v != 0)
case 16:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
@@ -2832,6 +2897,25 @@ func (m *FileInfo) Unmarshal(data []byte) error {
break
}
}
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ModifiedBy", wireType)
}
m.ModifiedBy = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBep
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ModifiedBy |= (ShortID(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 16:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType)
@@ -3011,6 +3095,25 @@ func (m *BlockInfo) Unmarshal(data []byte) error {
m.Hash = []byte{}
}
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field WeakHash", wireType)
}
m.WeakHash = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBep
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.WeakHash |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipBep(data[iNdEx:])
@@ -3754,25 +3857,67 @@ func (m *FileDownloadProgressUpdate) Unmarshal(data []byte) error {
}
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field BlockIndexes", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBep
if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBep
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
packedLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if iNdEx >= l {
if packedLen < 0 {
return ErrInvalidLengthBep
}
postIndex := iNdEx + packedLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
for iNdEx < postIndex {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBep
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.BlockIndexes = append(m.BlockIndexes, v)
}
} else if wireType == 0 {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowBep
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.BlockIndexes = append(m.BlockIndexes, v)
} else {
return fmt.Errorf("proto: wrong wireType = %d for field BlockIndexes", wireType)
}
m.BlockIndexes = append(m.BlockIndexes, v)
default:
iNdEx = preIndex
skippy, err := skipBep(data[iNdEx:])
@@ -4029,108 +4174,110 @@ var (
)

var fileDescriptorBep = []byte{
|
||||
// 1645 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x73, 0xdb, 0x5a,
|
||||
0x15, 0x8f, 0x6d, 0xf9, 0xdf, 0xb5, 0x93, 0xe7, 0xdc, 0xa6, 0x79, 0x46, 0xcd, 0x4b, 0x82, 0xde,
|
||||
0x2b, 0x04, 0x0f, 0x4d, 0xa1, 0x05, 0x3a, 0xc3, 0x00, 0x33, 0x8e, 0xad, 0xa4, 0x9a, 0x3a, 0xb2,
|
||||
0x2b, 0xdb, 0x29, 0x65, 0x81, 0x46, 0xb6, 0xae, 0x1d, 0x4d, 0x64, 0x5d, 0x23, 0xc9, 0x6d, 0xc3,
|
||||
0x47, 0x80, 0x2f, 0xc0, 0x86, 0x99, 0x6e, 0xd9, 0xf3, 0x21, 0xca, 0xae, 0xd3, 0x25, 0x8b, 0x0e,
|
||||
0x94, 0x0d, 0x4b, 0x36, 0xec, 0x39, 0xf7, 0x5e, 0x49, 0x96, 0xf3, 0x87, 0xe9, 0xe2, 0x2d, 0x32,
|
||||
0xd6, 0x3d, 0xe7, 0x77, 0xcf, 0xb9, 0xe7, 0x77, 0xfe, 0xdc, 0x1b, 0x54, 0x1e, 0x91, 0xf9, 0xe1,
|
||||
0xdc, 0xa7, 0x21, 0xc5, 0x25, 0xfe, 0x33, 0xa6, 0xae, 0xfc, 0x60, 0xea, 0x84, 0xe7, 0x8b, 0xd1,
|
||||
0xe1, 0x98, 0xce, 0x1e, 0x4e, 0xe9, 0x94, 0x3e, 0xe4, 0x9a, 0xd1, 0x62, 0xc2, 0x57, 0x7c, 0xc1,
|
||||
0xbf, 0xc4, 0x46, 0x65, 0x8e, 0xf2, 0x4f, 0x89, 0xeb, 0x52, 0xbc, 0x87, 0x2a, 0x36, 0x79, 0xe5,
|
||||
0x8c, 0x89, 0xe9, 0x59, 0x33, 0x52, 0xcf, 0xec, 0x67, 0x0e, 0xca, 0x06, 0x12, 0x22, 0x1d, 0x24,
|
||||
0x0c, 0x30, 0x76, 0x1d, 0xe2, 0x85, 0x02, 0x90, 0x15, 0x00, 0x21, 0xe2, 0x80, 0xfb, 0x68, 0x23,
|
||||
0x02, 0xbc, 0x22, 0x7e, 0xe0, 0x50, 0xaf, 0x9e, 0xe3, 0x98, 0x75, 0x21, 0x3d, 0x13, 0x42, 0x25,
|
||||
0x40, 0x85, 0xa7, 0xc4, 0xb2, 0x89, 0x8f, 0x7f, 0x80, 0xa4, 0xf0, 0x72, 0x2e, 0x7c, 0x6d, 0x3c,
|
||||
0xba, 0x7b, 0x18, 0xc7, 0x70, 0x78, 0x4a, 0x82, 0xc0, 0x9a, 0x92, 0x01, 0x28, 0x0d, 0x0e, 0xc1,
|
||||
0xbf, 0x02, 0xe7, 0x74, 0x36, 0xf7, 0x41, 0xc1, 0x0c, 0x67, 0xf9, 0x8e, 0x9d, 0x6b, 0x3b, 0x5a,
|
||||
0x4b, 0x8c, 0x91, 0xde, 0xa0, 0x34, 0xd1, 0x7a, 0xcb, 0x5d, 0x04, 0x21, 0xf1, 0x5b, 0xd4, 0x9b,
|
||||
0x38, 0x53, 0xfc, 0x23, 0x54, 0x9c, 0x50, 0x17, 0x4e, 0x11, 0x80, 0xfb, 0xdc, 0x41, 0xe5, 0x51,
|
||||
0x6d, 0x69, 0xec, 0x98, 0x2b, 0x8e, 0xa4, 0x77, 0x1f, 0xf7, 0xd6, 0x8c, 0x18, 0xa6, 0xfc, 0x31,
|
||||
0x8b, 0x0a, 0x42, 0x83, 0xb7, 0x51, 0xd6, 0xb1, 0x05, 0x45, 0x47, 0x85, 0x4f, 0x1f, 0xf7, 0xb2,
|
||||
0x5a, 0xdb, 0x00, 0x09, 0xde, 0x42, 0x79, 0xd7, 0x1a, 0x11, 0x37, 0x22, 0x47, 0x2c, 0xf0, 0x3d,
|
||||
0x54, 0xf6, 0x21, 0x60, 0x93, 0x7a, 0xee, 0x25, 0xa7, 0xa4, 0x64, 0x94, 0x98, 0xa0, 0x0b, 0x6b,
|
||||
0xfc, 0x00, 0x61, 0x67, 0xea, 0x51, 0x9f, 0x98, 0x73, 0xe2, 0xcf, 0x1c, 0x7e, 0xda, 0xa0, 0x2e,
|
||||
0x71, 0xd4, 0xa6, 0xd0, 0xf4, 0x96, 0x0a, 0xfc, 0x35, 0x5a, 0x8f, 0xe0, 0x36, 0x71, 0x49, 0x48,
|
||||
0xea, 0x79, 0x8e, 0xac, 0x0a, 0x61, 0x9b, 0xcb, 0x20, 0xb6, 0x2d, 0xdb, 0x09, 0xac, 0x91, 0x4b,
|
||||
0xcc, 0x90, 0xcc, 0xe6, 0xa6, 0xe3, 0xd9, 0xe4, 0x0d, 0x09, 0xea, 0x05, 0x8e, 0xc5, 0x91, 0x6e,
|
||||
0x00, 0x2a, 0x4d, 0x68, 0x18, 0x1b, 0x22, 0xd3, 0x41, 0xbd, 0x76, 0x95, 0x8d, 0x36, 0x57, 0xc4,
|
||||
0x6c, 0x44, 0x30, 0xe5, 0x3f, 0xc0, 0x86, 0xd0, 0xe0, 0xef, 0x25, 0x6c, 0x54, 0x8f, 0xb6, 0x19,
|
||||
0xea, 0xef, 0x1f, 0xf7, 0x4a, 0x42, 0xa7, 0xb5, 0x53, 0xec, 0x60, 0x24, 0xa5, 0x2a, 0x87, 0x7f,
|
||||
0xe3, 0x1d, 0x54, 0xb6, 0x6c, 0x9b, 0x65, 0x09, 0x5c, 0xe7, 0xc0, 0x75, 0xd9, 0x58, 0x0a, 0xf0,
|
||||
0x93, 0xd5, 0xac, 0x4b, 0x57, 0xeb, 0xe4, 0xb6, 0x74, 0x33, 0xca, 0xc7, 0xc4, 0x8f, 0x2a, 0x35,
|
||||
0xcf, 0xfd, 0x95, 0x98, 0x80, 0xd7, 0xe9, 0x77, 0x51, 0x75, 0x66, 0xbd, 0x31, 0x03, 0xf2, 0xbb,
|
||||
0x05, 0xf1, 0xc6, 0x84, 0xd3, 0x92, 0x33, 0x2a, 0x20, 0xeb, 0x47, 0x22, 0xbc, 0x8b, 0x90, 0xe3,
|
||||
0x85, 0x3e, 0xb5, 0x17, 0xb0, 0xab, 0x5e, 0xe4, 0xbc, 0xa5, 0x24, 0xf8, 0xa7, 0xa8, 0xc4, 0x49,
|
||||
0x35, 0x21, 0xf0, 0x12, 0x68, 0xa5, 0x23, 0x39, 0x0a, 0xbc, 0xc8, 0x29, 0xe5, 0x71, 0xc7, 0x9f,
|
||||
0x46, 0x91, 0x63, 0x35, 0x1b, 0xff, 0x02, 0xc9, 0xc1, 0x85, 0xc3, 0x12, 0x22, 0x2c, 0x85, 0x70,
|
||||
0x56, 0xd3, 0x27, 0x33, 0xfa, 0xca, 0x72, 0x83, 0x7a, 0x99, 0xbb, 0xa9, 0x33, 0x84, 0x96, 0x02,
|
||||
0x18, 0x91, 0x5e, 0xe9, 0xa2, 0x3c, 0xb7, 0x08, 0xe5, 0x57, 0x10, 0x45, 0x19, 0x75, 0x69, 0xb4,
|
||||
0xc2, 0x87, 0x28, 0x3f, 0x71, 0x5c, 0x20, 0x32, 0xcb, 0x73, 0x88, 0x53, 0x15, 0x0d, 0x62, 0xcd,
|
||||
0x9b, 0xd0, 0x28, 0x8b, 0x02, 0xa6, 0x0c, 0x51, 0x85, 0x1b, 0x1c, 0xce, 0x6d, 0x0b, 0xca, 0xe6,
|
||||
0xdb, 0x32, 0xfb, 0xb7, 0x1c, 0x2a, 0xc5, 0x9a, 0x24, 0xe9, 0x99, 0x54, 0xd2, 0x1b, 0x51, 0xdf,
|
||||
0x8b, 0x2e, 0xde, 0xbe, 0x6e, 0x2f, 0xd5, 0xf8, 0xb0, 0x3f, 0x70, 0x7e, 0x4f, 0x78, 0xdf, 0xe4,
|
||||
0x0c, 0xfe, 0x8d, 0xf7, 0x51, 0xe5, 0x6a, 0xb3, 0xac, 0x1b, 0x69, 0x11, 0xfe, 0x0a, 0xa1, 0x19,
|
||||
0xb5, 0x9d, 0x89, 0x43, 0x6c, 0x33, 0xe0, 0x05, 0x90, 0x33, 0xca, 0xb1, 0xa4, 0x8f, 0xeb, 0xac,
|
||||
0xdc, 0x59, 0xab, 0xd8, 0x51, 0x4f, 0xc4, 0x4b, 0xa6, 0x71, 0x3c, 0x60, 0x1b, 0xf2, 0x2a, 0xb2,
|
||||
0x1e, 0x2f, 0xd9, 0x74, 0xf3, 0xe8, 0x4a, 0x93, 0x96, 0x38, 0x60, 0xdd, 0xa3, 0xe9, 0x06, 0x85,
|
||||
0x4e, 0x8a, 0xa7, 0x1f, 0xcb, 0xe7, 0x4a, 0x27, 0x9d, 0x91, 0x71, 0x48, 0x93, 0xb9, 0x12, 0xc1,
|
||||
0xb0, 0x8c, 0x4a, 0x49, 0x29, 0x22, 0x7e, 0xd2, 0x64, 0xcd, 0x66, 0x6e, 0x12, 0x07, 0x78, 0xac,
|
||||
0x80, 0x3a, 0x6f, 0x24, 0xa1, 0xe9, 0x01, 0xfe, 0x31, 0x2a, 0x1c, 0xb9, 0x74, 0x7c, 0x11, 0xf7,
|
||||
0xed, 0x9d, 0xa5, 0x37, 0x2e, 0x4f, 0x65, 0x27, 0x02, 0xb2, 0x40, 0x82, 0xcb, 0x99, 0xeb, 0x78,
|
||||
0x17, 0x66, 0x68, 0xf9, 0x53, 0x12, 0xd6, 0x37, 0xc5, 0x98, 0x8e, 0xa4, 0x03, 0x2e, 0xfc, 0xb9,
|
||||
0xf4, 0xa7, 0xb7, 0x7b, 0x6b, 0xca, 0x73, 0x54, 0x4e, 0xec, 0xb0, 0x02, 0xa1, 0x93, 0x49, 0x00,
|
||||
0x3b, 0x32, 0xfc, 0x9c, 0xd1, 0x2a, 0xc9, 0x51, 0x96, 0x1f, 0x4f, 0xe4, 0x08, 0x64, 0xe7, 0x56,
|
||||
0x70, 0xce, 0xf3, 0x56, 0x35, 0xf8, 0x77, 0x64, 0xf2, 0x97, 0xa8, 0x20, 0x88, 0xc0, 0x8f, 0x51,
|
||||
0x69, 0x4c, 0x17, 0x5e, 0xb8, 0x1c, 0xc2, 0x9b, 0xe9, 0xde, 0xe6, 0x9a, 0xe8, 0xf0, 0x09, 0x50,
|
||||
0x39, 0x46, 0xc5, 0x48, 0x05, 0x91, 0xc4, 0x83, 0x47, 0x3a, 0xba, 0x1b, 0xf7, 0x5f, 0xff, 0x9c,
|
||||
0xfa, 0xe1, 0xca, 0xdc, 0x81, 0xa9, 0x0c, 0x29, 0x5c, 0x88, 0xf3, 0x49, 0x86, 0x58, 0x28, 0x7f,
|
||||
0xcd, 0xa0, 0xa2, 0xc1, 0x78, 0x0e, 0xc2, 0xd4, 0x3c, 0xcf, 0xaf, 0xcc, 0xf3, 0x65, 0x47, 0x64,
|
||||
0x57, 0x3a, 0x22, 0x2e, 0xea, 0x5c, 0xaa, 0xa8, 0x97, 0xe4, 0x48, 0x37, 0x92, 0x93, 0xbf, 0x81,
|
||||
0x9c, 0xc2, 0x92, 0x1c, 0x96, 0x96, 0x89, 0x4f, 0x67, 0x7c, 0x62, 0x53, 0xdf, 0xf2, 0x2f, 0xa3,
|
||||
0x02, 0x5c, 0x67, 0xd2, 0x41, 0x2c, 0x54, 0x4c, 0x54, 0x32, 0x48, 0x30, 0x87, 0x52, 0x23, 0xb7,
|
||||
0x1e, 0x1b, 0xcc, 0x43, 0x43, 0x5b, 0xfc, 0xd0, 0x60, 0x9e, 0x7d, 0xe3, 0xef, 0x23, 0x69, 0x4c,
|
||||
0x6d, 0x71, 0xe4, 0x8d, 0x74, 0x99, 0xa8, 0xbe, 0x4f, 0xe1, 0x52, 0xb4, 0xa1, 0xe1, 0x18, 0x00,
|
||||
0x1e, 0x04, 0xb5, 0x36, 0x7d, 0xed, 0xb9, 0xd4, 0xb2, 0x7b, 0x3e, 0x9d, 0xb2, 0x89, 0x7a, 0xeb,
|
||||
0x64, 0x68, 0xa3, 0xe2, 0x82, 0xcf, 0x8e, 0x78, 0x36, 0x7c, 0xb3, 0xda, 0xcb, 0x57, 0x0d, 0x89,
|
||||
0x41, 0x13, 0x37, 0x40, 0xb4, 0x55, 0xf9, 0x90, 0x41, 0xf2, 0xed, 0x68, 0xac, 0xa1, 0x8a, 0x40,
|
||||
0x9a, 0xa9, 0xc7, 0xc2, 0xc1, 0xe7, 0x38, 0xe2, 0x63, 0x04, 0x2d, 0x92, 0xef, 0x1b, 0x6f, 0xa0,
|
||||
0x54, 0xc3, 0xe6, 0x3e, 0xaf, 0x61, 0xe1, 0x0e, 0x1e, 0xb1, 0x9e, 0x48, 0xee, 0x55, 0x09, 0x62,
|
||||
0xcf, 0x1b, 0xd5, 0x91, 0x68, 0x14, 0x2e, 0x53, 0x0a, 0x48, 0xea, 0x39, 0xde, 0x54, 0xd9, 0x43,
|
||||
0xf9, 0x96, 0x4b, 0x79, 0xb2, 0x0a, 0x70, 0xe9, 0x07, 0xe0, 0x26, 0xe2, 0x50, 0xac, 0x1a, 0x1f,
|
||||
0xb2, 0xa8, 0x92, 0x7a, 0xef, 0xc0, 0x79, 0x36, 0x5a, 0x9d, 0x61, 0x7f, 0xa0, 0x1a, 0x66, 0xab,
|
||||
0xab, 0x1f, 0x6b, 0x27, 0xb5, 0x35, 0x79, 0xe7, 0x0f, 0x7f, 0xde, 0xaf, 0xcf, 0x96, 0xa0, 0xd5,
|
||||
0xa7, 0x0c, 0xb8, 0xd0, 0xf4, 0xb6, 0xfa, 0xeb, 0x5a, 0x46, 0xde, 0x02, 0x60, 0x2d, 0x05, 0x14,
|
||||
0xf7, 0xc5, 0x0f, 0x51, 0x95, 0x03, 0xcc, 0x61, 0xaf, 0xdd, 0x1c, 0xa8, 0xb5, 0xac, 0x2c, 0x03,
|
||||
0x6e, 0xfb, 0x2a, 0x2e, 0xe2, 0xfb, 0x6b, 0xe8, 0x0b, 0xf5, 0xf9, 0x50, 0xed, 0x0f, 0x6a, 0x39,
|
||||
0x79, 0x1b, 0x80, 0x38, 0x05, 0x8c, 0x3b, 0xe6, 0x3e, 0x94, 0xa1, 0xda, 0xef, 0x75, 0xf5, 0xbe,
|
||||
0x5a, 0x93, 0xe4, 0x2f, 0x01, 0x75, 0x67, 0x05, 0x15, 0x55, 0xe8, 0xcf, 0xd0, 0x66, 0xbb, 0xfb,
|
||||
0x42, 0xef, 0x74, 0x9b, 0x6d, 0xb3, 0x67, 0x74, 0x4f, 0x60, 0x4f, 0xbf, 0x96, 0x97, 0xf7, 0x00,
|
||||
0x7f, 0x2f, 0x85, 0xbf, 0x56, 0x70, 0x5f, 0x01, 0x7b, 0x9a, 0x7e, 0x52, 0x2b, 0xc8, 0x77, 0x00,
|
||||
0xfa, 0x45, 0x0a, 0xca, 0x48, 0x65, 0x11, 0xb7, 0x3a, 0x5d, 0x70, 0x5d, 0xbc, 0x16, 0x31, 0x27,
|
||||
0xbb, 0xf1, 0x5b, 0x84, 0xaf, 0xbf, 0x08, 0xf1, 0x37, 0x48, 0xd2, 0xbb, 0xba, 0x0a, 0x84, 0xf2,
|
||||
0xf8, 0xaf, 0x23, 0x74, 0xea, 0x11, 0xac, 0xa0, 0x5c, 0xe7, 0x37, 0x3f, 0x01, 0x32, 0xbf, 0x03,
|
||||
0xa0, 0xbb, 0xd7, 0x41, 0xa0, 0x6c, 0x50, 0x54, 0x49, 0x1b, 0x56, 0x50, 0xe9, 0x54, 0x1d, 0x34,
|
||||
0x81, 0xdc, 0x26, 0x18, 0xe7, 0x47, 0x8a, 0xd5, 0xa7, 0x24, 0xb4, 0x78, 0x03, 0xee, 0xa0, 0xbc,
|
||||
0xae, 0x9e, 0xa9, 0x06, 0x18, 0xde, 0x04, 0xc0, 0x7a, 0x0c, 0xd0, 0x09, 0xd4, 0x15, 0x3c, 0x38,
|
||||
0x0a, 0xcd, 0xce, 0x8b, 0xe6, 0xcb, 0x3e, 0x24, 0x07, 0x83, 0x7a, 0x23, 0x56, 0x37, 0xdd, 0xd7,
|
||||
0xd6, 0x65, 0xd0, 0xf8, 0x6f, 0x06, 0x55, 0xd3, 0xb7, 0x23, 0x6c, 0x90, 0x8e, 0xb5, 0x8e, 0x1a,
|
||||
0xbb, 0x4b, 0xeb, 0xd8, 0x37, 0x3e, 0x40, 0xe5, 0xb6, 0x66, 0xa8, 0xad, 0x41, 0xd7, 0x78, 0x19,
|
||||
0xc7, 0x92, 0x06, 0xb5, 0x1d, 0x9f, 0x17, 0x37, 0x7b, 0x81, 0x56, 0xfb, 0x2f, 0x4f, 0x3b, 0x9a,
|
||||
0xfe, 0xcc, 0xe4, 0x16, 0xb3, 0xf2, 0x3d, 0x00, 0x7f, 0x99, 0x06, 0xf7, 0xc5, 0xcd, 0xc0, 0x0d,
|
||||
0x3f, 0x41, 0x9b, 0x31, 0x7c, 0xe9, 0x20, 0x27, 0xef, 0xc3, 0x9e, 0x9d, 0x1b, 0xf6, 0x2c, 0xfd,
|
||||
0x3c, 0x46, 0x5f, 0xc4, 0x1b, 0x87, 0xfa, 0x33, 0x1d, 0xca, 0x02, 0x2a, 0x67, 0x17, 0xb6, 0xc9,
|
||||
0x37, 0x6c, 0x1b, 0x7a, 0x17, 0x1e, 0x14, 0x45, 0xe3, 0x2f, 0x19, 0x54, 0x4e, 0x26, 0x14, 0xe3,
|
||||
0x59, 0xef, 0x9a, 0xaa, 0x61, 0x74, 0x8d, 0x38, 0xf0, 0x44, 0xa9, 0x53, 0xfe, 0x09, 0xaf, 0xbb,
|
||||
0xe2, 0x89, 0xaa, 0xab, 0x86, 0xd6, 0x8a, 0xfb, 0x21, 0x81, 0x9c, 0x10, 0x8f, 0xf8, 0xce, 0x18,
|
||||
0xfe, 0xef, 0xa8, 0x82, 0x99, 0xfe, 0xb0, 0xf5, 0x34, 0x8e, 0x98, 0x17, 0x70, 0xca, 0x54, 0x7f,
|
||||
0x31, 0x3e, 0xe7, 0xd1, 0x36, 0x58, 0xeb, 0x9c, 0x35, 0x3b, 0x5a, 0x5b, 0x40, 0x73, 0x72, 0x1d,
|
||||
0xa0, 0x5b, 0x09, 0x54, 0x13, 0xaf, 0x03, 0x86, 0x6d, 0xd8, 0x68, 0xf7, 0xff, 0xcf, 0x22, 0x78,
|
||||
0xb8, 0x14, 0x9a, 0xbd, 0x9e, 0xaa, 0xb7, 0xe3, 0xd3, 0x2f, 0x75, 0xcd, 0xf9, 0x9c, 0x78, 0x36,
|
||||
0x43, 0x1c, 0x77, 0x8d, 0x13, 0x75, 0x10, 0x1f, 0x7e, 0x89, 0x38, 0xa6, 0xec, 0x5e, 0x3e, 0xda,
|
||||
0x79, 0xf7, 0xcf, 0xdd, 0xb5, 0xf7, 0xf0, 0xf7, 0xee, 0xd3, 0x6e, 0xe6, 0x3d, 0xfc, 0xfd, 0xe3,
|
||||
0xd3, 0xee, 0xda, 0xbf, 0xe1, 0xf7, 0xed, 0xbf, 0x76, 0x33, 0xa3, 0x02, 0x9f, 0x5d, 0x8f, 0xff,
|
||||
0x17, 0x00, 0x00, 0xff, 0xff, 0x20, 0x74, 0xd7, 0x8f, 0x1b, 0x0e, 0x00, 0x00,
|
||||
// 1670 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x73, 0xdb, 0x4c,
|
||||
0x19, 0x8f, 0x6d, 0xf9, 0xdf, 0xda, 0xc9, 0xeb, 0x6c, 0xd3, 0xbc, 0x46, 0xcd, 0x9b, 0x04, 0xbd,
|
||||
0x6f, 0x21, 0x78, 0x68, 0x0a, 0x2d, 0xd0, 0x19, 0x06, 0x98, 0x71, 0x6c, 0x25, 0xd1, 0xd4, 0x91,
|
||||
0x8d, 0x6c, 0xa7, 0x94, 0x03, 0x1a, 0xd9, 0x5a, 0x3b, 0x9a, 0xc8, 0x5a, 0x23, 0xc9, 0x6d, 0xc3,
|
||||
0x47, 0xe0, 0x13, 0x70, 0x61, 0xa6, 0x33, 0x9c, 0xb8, 0xf3, 0x21, 0xca, 0xad, 0xd3, 0x23, 0x87,
|
||||
0x0e, 0x94, 0x0b, 0x47, 0x2e, 0xdc, 0x79, 0x76, 0x57, 0x92, 0xe5, 0xfc, 0x61, 0x7a, 0xe0, 0x90,
|
||||
0xb1, 0xf6, 0x79, 0x7e, 0xfb, 0x3c, 0xfb, 0xfc, 0xf9, 0x3d, 0xbb, 0x41, 0xe5, 0x11, 0x99, 0x1f,
|
||||
0xce, 0x7d, 0x1a, 0x52, 0x5c, 0xe2, 0x3f, 0x63, 0xea, 0xca, 0x8f, 0xa6, 0x4e, 0x78, 0xb1, 0x18,
|
||||
0x1d, 0x8e, 0xe9, 0xec, 0xf1, 0x94, 0x4e, 0xe9, 0x63, 0xae, 0x19, 0x2d, 0x26, 0x7c, 0xc5, 0x17,
|
||||
0xfc, 0x4b, 0x6c, 0x54, 0xe6, 0x28, 0x7f, 0x4a, 0x5c, 0x97, 0xe2, 0x3d, 0x54, 0xb1, 0xc9, 0x2b,
|
||||
0x67, 0x4c, 0x4c, 0xcf, 0x9a, 0x91, 0x7a, 0x66, 0x3f, 0x73, 0x50, 0x36, 0x90, 0x10, 0xe9, 0x20,
|
||||
0x61, 0x80, 0xb1, 0xeb, 0x10, 0x2f, 0x14, 0x80, 0xac, 0x00, 0x08, 0x11, 0x07, 0x3c, 0x44, 0x1b,
|
||||
0x11, 0xe0, 0x15, 0xf1, 0x03, 0x87, 0x7a, 0xf5, 0x1c, 0xc7, 0xac, 0x0b, 0xe9, 0xb9, 0x10, 0x2a,
|
||||
0x01, 0x2a, 0x9c, 0x12, 0xcb, 0x26, 0x3e, 0xfe, 0x1e, 0x92, 0xc2, 0xab, 0xb9, 0xf0, 0xb5, 0xf1,
|
||||
0xe4, 0xfe, 0x61, 0x1c, 0xc3, 0xe1, 0x19, 0x09, 0x02, 0x6b, 0x4a, 0x06, 0xa0, 0x34, 0x38, 0x04,
|
||||
0xff, 0x02, 0x9c, 0xd3, 0xd9, 0xdc, 0x07, 0x05, 0x33, 0x9c, 0xe5, 0x3b, 0x76, 0x6e, 0xec, 0x68,
|
||||
0x2d, 0x31, 0x46, 0x7a, 0x83, 0xd2, 0x44, 0xeb, 0x2d, 0x77, 0x11, 0x84, 0xc4, 0x6f, 0x51, 0x6f,
|
||||
0xe2, 0x4c, 0xf1, 0x0f, 0x50, 0x71, 0x42, 0x5d, 0x38, 0x45, 0x00, 0xee, 0x73, 0x07, 0x95, 0x27,
|
||||
0xb5, 0xa5, 0xb1, 0x63, 0xae, 0x38, 0x92, 0xde, 0x7d, 0xdc, 0x5b, 0x33, 0x62, 0x98, 0xf2, 0xa7,
|
||||
0x2c, 0x2a, 0x08, 0x0d, 0xde, 0x46, 0x59, 0xc7, 0x16, 0x29, 0x3a, 0x2a, 0x7c, 0xfa, 0xb8, 0x97,
|
||||
0xd5, 0xda, 0x06, 0x48, 0xf0, 0x16, 0xca, 0xbb, 0xd6, 0x88, 0xb8, 0x51, 0x72, 0xc4, 0x02, 0x3f,
|
||||
0x40, 0x65, 0x1f, 0x02, 0x36, 0xa9, 0xe7, 0x5e, 0xf1, 0x94, 0x94, 0x8c, 0x12, 0x13, 0x74, 0x61,
|
||||
0x8d, 0x1f, 0x21, 0xec, 0x4c, 0x3d, 0xea, 0x13, 0x73, 0x4e, 0xfc, 0x99, 0xc3, 0x4f, 0x1b, 0xd4,
|
||||
0x25, 0x8e, 0xda, 0x14, 0x9a, 0xde, 0x52, 0x81, 0xbf, 0x46, 0xeb, 0x11, 0xdc, 0x26, 0x2e, 0x09,
|
||||
0x49, 0x3d, 0xcf, 0x91, 0x55, 0x21, 0x6c, 0x73, 0x19, 0xc4, 0xb6, 0x65, 0x3b, 0x81, 0x35, 0x72,
|
||||
0x89, 0x19, 0x92, 0xd9, 0xdc, 0x74, 0x3c, 0x9b, 0xbc, 0x21, 0x41, 0xbd, 0xc0, 0xb1, 0x38, 0xd2,
|
||||
0x0d, 0x40, 0xa5, 0x09, 0x0d, 0x04, 0x54, 0x98, 0x5b, 0x8b, 0x80, 0xd8, 0xf5, 0x22, 0xc7, 0x44,
|
||||
0x2b, 0x96, 0x25, 0xd1, 0x01, 0x41, 0xbd, 0x76, 0x3d, 0x4b, 0x6d, 0xae, 0x88, 0xb3, 0x14, 0xc1,
|
||||
0x94, 0x7f, 0x43, 0x96, 0x84, 0x06, 0x7f, 0x27, 0xc9, 0x52, 0xf5, 0x68, 0x9b, 0xa1, 0xfe, 0xf6,
|
||||
0x71, 0xaf, 0x24, 0x74, 0x5a, 0x3b, 0x95, 0x35, 0x8c, 0xa4, 0x54, 0x47, 0xf1, 0x6f, 0xbc, 0x83,
|
||||
0xca, 0x96, 0x6d, 0xb3, 0xea, 0x81, 0xeb, 0x1c, 0xb8, 0x2e, 0x1b, 0x4b, 0x01, 0x7e, 0xb6, 0xda,
|
||||
0x0d, 0xd2, 0xf5, 0xfe, 0xb9, 0xab, 0x0d, 0x58, 0x29, 0xc6, 0xc4, 0x8f, 0x3a, 0x38, 0xcf, 0xfd,
|
||||
0x95, 0x98, 0x80, 0xf7, 0xef, 0xb7, 0x51, 0x75, 0x66, 0xbd, 0x31, 0x03, 0xf2, 0xdb, 0x05, 0xf1,
|
||||
0xc6, 0x84, 0xa7, 0x2b, 0x67, 0x54, 0x40, 0xd6, 0x8f, 0x44, 0x78, 0x17, 0x21, 0xc7, 0x0b, 0x7d,
|
||||
0x6a, 0x2f, 0x60, 0x57, 0x94, 0xab, 0x94, 0x04, 0xff, 0x18, 0x95, 0x78, 0xb2, 0x4d, 0x08, 0xbc,
|
||||
0x04, 0x5a, 0xe9, 0x48, 0x8e, 0x02, 0x2f, 0xf2, 0x54, 0xf3, 0xb8, 0xe3, 0x4f, 0xa3, 0xc8, 0xb1,
|
||||
0x9a, 0x8d, 0x7f, 0x86, 0xe4, 0xe0, 0xd2, 0x61, 0x85, 0x12, 0x96, 0x42, 0x38, 0xab, 0xe9, 0x93,
|
||||
0x19, 0x7d, 0x65, 0xb9, 0x41, 0xbd, 0xcc, 0xdd, 0xd4, 0x19, 0x42, 0x4b, 0x01, 0x8c, 0x48, 0xaf,
|
||||
0x74, 0x51, 0x9e, 0x5b, 0x64, 0x55, 0x14, 0xcd, 0x1a, 0xb1, 0x37, 0x5a, 0xe1, 0x43, 0x94, 0x9f,
|
||||
0x38, 0x2e, 0x24, 0x32, 0xcb, 0x6b, 0x88, 0x53, 0x9d, 0x0e, 0x62, 0xcd, 0x9b, 0xd0, 0xa8, 0x8a,
|
||||
0x02, 0xa6, 0x0c, 0x51, 0x85, 0x1b, 0x1c, 0xce, 0x6d, 0x0b, 0xda, 0xe9, 0xff, 0x65, 0xf6, 0xaf,
|
||||
0x39, 0x54, 0x8a, 0x35, 0x49, 0xd1, 0x33, 0xa9, 0xa2, 0x37, 0xa2, 0x79, 0x20, 0xd8, 0xbd, 0x7d,
|
||||
0xd3, 0x5e, 0x6a, 0x20, 0xc0, 0xfe, 0xc0, 0xf9, 0x1d, 0xe1, 0x7c, 0xca, 0x19, 0xfc, 0x1b, 0xef,
|
||||
0xa3, 0xca, 0x75, 0x12, 0xad, 0x1b, 0x69, 0x11, 0xfe, 0x0a, 0xa1, 0x19, 0xb5, 0x9d, 0x89, 0x43,
|
||||
0x6c, 0x33, 0xe0, 0x0d, 0x90, 0x33, 0xca, 0xb1, 0xa4, 0x8f, 0xeb, 0xac, 0xdd, 0x19, 0x85, 0xec,
|
||||
0x88, 0x2b, 0xf1, 0x92, 0x69, 0x1c, 0x0f, 0xb2, 0xed, 0xc4, 0x0c, 0x89, 0x97, 0x6c, 0xea, 0x79,
|
||||
0x74, 0x85, 0xbc, 0x25, 0x0e, 0x58, 0xf7, 0x68, 0x9a, 0xb8, 0xc0, 0xa4, 0x78, 0x2a, 0xb2, 0x7a,
|
||||
0xae, 0x30, 0xe9, 0x9c, 0x8c, 0x43, 0x9a, 0xcc, 0x9b, 0x08, 0x86, 0x65, 0x54, 0x4a, 0x5a, 0x11,
|
||||
0xf1, 0x93, 0x26, 0x6b, 0x36, 0x8b, 0x93, 0x38, 0xc0, 0x63, 0x05, 0xd4, 0x79, 0x23, 0x09, 0x4d,
|
||||
0x0f, 0xf0, 0x0f, 0x51, 0xe1, 0xc8, 0xa5, 0xe3, 0xcb, 0x98, 0xb7, 0xf7, 0x96, 0xde, 0xb8, 0x3c,
|
||||
0x55, 0x9d, 0xc2, 0x88, 0x03, 0x59, 0x20, 0xc1, 0xd5, 0xcc, 0x75, 0xbc, 0x4b, 0x33, 0xb4, 0xfc,
|
||||
0x29, 0x09, 0xeb, 0x9b, 0x62, 0x7c, 0x47, 0xd2, 0x01, 0x17, 0xfe, 0x54, 0xfa, 0xc3, 0xdb, 0xbd,
|
||||
0x35, 0xc5, 0x43, 0xe5, 0xc4, 0x0e, 0x6b, 0x10, 0x3a, 0x99, 0x04, 0xb0, 0x23, 0xc3, 0xcf, 0x19,
|
||||
0xad, 0x92, 0x1a, 0x65, 0xf9, 0xf1, 0x44, 0x8d, 0x40, 0x76, 0x61, 0x05, 0x17, 0xbc, 0x6e, 0x55,
|
||||
0x83, 0x7f, 0x33, 0x56, 0xbe, 0x26, 0xd6, 0xa5, 0xc9, 0x15, 0xa2, 0x6a, 0x25, 0x26, 0x38, 0x85,
|
||||
0x75, 0xe4, 0xef, 0xe7, 0xa8, 0x20, 0xb2, 0x84, 0x9f, 0xa2, 0xd2, 0x98, 0x2e, 0xbc, 0x70, 0x39,
|
||||
0xb9, 0x37, 0xd3, 0xc4, 0xe7, 0x9a, 0x28, 0xb2, 0x04, 0xa8, 0x1c, 0xa3, 0x62, 0xa4, 0x82, 0x30,
|
||||
0xe3, 0xa9, 0x24, 0x1d, 0xdd, 0x8f, 0xc9, 0xd9, 0xbf, 0xa0, 0x7e, 0xb8, 0x32, 0x94, 0x60, 0x94,
|
||||
0x43, 0x7d, 0x17, 0xe2, 0xf0, 0x92, 0x21, 0x16, 0xca, 0x5f, 0x32, 0xa8, 0x68, 0xb0, 0x22, 0x04,
|
||||
0x61, 0xea, 0x12, 0xc8, 0xaf, 0x5c, 0x02, 0x4b, 0xba, 0x64, 0x57, 0xe8, 0x12, 0x77, 0x7c, 0x2e,
|
||||
0xd5, 0xf1, 0xcb, 0xcc, 0x49, 0xb7, 0x66, 0x2e, 0x7f, 0x4b, 0xe6, 0x0a, 0xa9, 0xcc, 0x41, 0xcd,
|
||||
0x26, 0x3e, 0x9d, 0xf1, 0x31, 0x4f, 0x7d, 0xcb, 0xbf, 0x8a, 0xba, 0x73, 0x9d, 0x49, 0x07, 0xb1,
|
||||
0x50, 0x31, 0x51, 0xc9, 0x20, 0xc1, 0x1c, 0xfa, 0x90, 0xdc, 0x79, 0x6c, 0x30, 0x0f, 0x6c, 0xb7,
|
||||
0xf8, 0xa1, 0xc1, 0x3c, 0xfb, 0xc6, 0xdf, 0x45, 0xd2, 0x98, 0xda, 0xe2, 0xc8, 0x1b, 0xe9, 0x1e,
|
||||
0x52, 0x7d, 0x9f, 0xc2, 0x4d, 0x6a, 0x03, 0x1b, 0x19, 0x00, 0x5e, 0x11, 0xb5, 0x36, 0x7d, 0xed,
|
||||
0xb9, 0xd4, 0xb2, 0x7b, 0x3e, 0x9d, 0xb2, 0x71, 0x7b, 0xe7, 0xd8, 0x68, 0xa3, 0xe2, 0x82, 0x0f,
|
||||
0x96, 0x78, 0x70, 0x7c, 0xb3, 0x4a, 0xf4, 0xeb, 0x86, 0xc4, 0x14, 0x8a, 0xd9, 0x11, 0x6d, 0x55,
|
||||
0x3e, 0x64, 0x90, 0x7c, 0x37, 0x1a, 0x6b, 0xa8, 0x22, 0x90, 0x66, 0xea, 0x85, 0x71, 0xf0, 0x39,
|
||||
0x8e, 0xf8, 0x8c, 0x41, 0x8b, 0xe4, 0xfb, 0xd6, 0xeb, 0x29, 0xc5, 0xe6, 0xdc, 0xe7, 0xb1, 0x19,
|
||||
0x2e, 0x6e, 0xce, 0xb3, 0xe4, 0x32, 0x96, 0x20, 0xf6, 0xbc, 0x51, 0x1d, 0x09, 0x16, 0x71, 0x99,
|
||||
0x52, 0x40, 0x52, 0xcf, 0xf1, 0xa6, 0xca, 0x1e, 0xca, 0xb7, 0x5c, 0xca, 0x8b, 0x55, 0x80, 0x97,
|
||||
0x42, 0x00, 0x6e, 0xa2, 0x1c, 0x8a, 0x55, 0xe3, 0x43, 0x16, 0x55, 0x52, 0x8f, 0x24, 0x38, 0xcf,
|
||||
0x46, 0xab, 0x33, 0xec, 0x0f, 0x54, 0xc3, 0x6c, 0x75, 0xf5, 0x63, 0xed, 0xa4, 0xb6, 0x26, 0xef,
|
||||
0xfc, 0xfe, 0x8f, 0xfb, 0xf5, 0xd9, 0x12, 0xb4, 0xfa, 0xfe, 0x01, 0x17, 0x9a, 0xde, 0x56, 0x7f,
|
||||
0x55, 0xcb, 0xc8, 0x5b, 0x00, 0xac, 0xa5, 0x80, 0xe2, 0x32, 0xf9, 0x3e, 0xaa, 0x72, 0x80, 0x39,
|
||||
0xec, 0xb5, 0x9b, 0x03, 0xb5, 0x96, 0x95, 0x65, 0xc0, 0x6d, 0x5f, 0xc7, 0x45, 0xf9, 0xfe, 0x1a,
|
||||
0x78, 0xa1, 0xfe, 0x72, 0xa8, 0xf6, 0x07, 0xb5, 0x9c, 0xbc, 0x0d, 0x40, 0x9c, 0x02, 0xc6, 0x8c,
|
||||
0x79, 0x08, 0x6d, 0xa8, 0xf6, 0x7b, 0x5d, 0xbd, 0xaf, 0xd6, 0x24, 0xf9, 0x4b, 0x40, 0xdd, 0x5b,
|
||||
0x41, 0x45, 0x1d, 0xfa, 0x13, 0xb4, 0xd9, 0xee, 0xbe, 0xd0, 0x3b, 0xdd, 0x66, 0xdb, 0xec, 0x19,
|
||||
0xdd, 0x13, 0xd8, 0xd3, 0xaf, 0xe5, 0xe5, 0x3d, 0xc0, 0x3f, 0x48, 0xe1, 0x6f, 0x34, 0xdc, 0x57,
|
||||
0x90, 0x3d, 0x4d, 0x3f, 0xa9, 0x15, 0xe4, 0x7b, 0x00, 0xfd, 0x22, 0x05, 0x65, 0x49, 0x65, 0x11,
|
||||
0xb7, 0x3a, 0x5d, 0x70, 0x5d, 0xbc, 0x11, 0x31, 0x4f, 0x76, 0xe3, 0x37, 0x08, 0xdf, 0x7c, 0x46,
|
||||
0xe2, 0x6f, 0x90, 0xa4, 0x77, 0x75, 0x15, 0x12, 0xca, 0xe3, 0xbf, 0x89, 0xd0, 0xa9, 0x47, 0xb0,
|
||||
0x82, 0x72, 0x9d, 0x5f, 0xff, 0x08, 0x92, 0xf9, 0x2d, 0x00, 0xdd, 0xbf, 0x09, 0x02, 0x65, 0x83,
|
||||
0xa2, 0x4a, 0xda, 0xb0, 0x82, 0x4a, 0x67, 0xea, 0xa0, 0x09, 0xc9, 0x6d, 0x82, 0x71, 0x7e, 0xa4,
|
||||
0x58, 0x7d, 0x46, 0x42, 0x8b, 0x13, 0x70, 0x07, 0xe5, 0x75, 0xf5, 0x5c, 0x35, 0xc0, 0xf0, 0x26,
|
||||
0x00, 0xd6, 0x63, 0x80, 0x4e, 0xa0, 0xaf, 0xe0, 0x35, 0x52, 0x68, 0x76, 0x5e, 0x34, 0x5f, 0xf6,
|
||||
0xa1, 0x38, 0x18, 0xd4, 0x1b, 0xb1, 0xba, 0xe9, 0xbe, 0xb6, 0xae, 0x82, 0xc6, 0x7f, 0x32, 0xa8,
|
||||
0x9a, 0xbe, 0x3a, 0x61, 0x83, 0x74, 0xac, 0x75, 0xd4, 0xd8, 0x5d, 0x5a, 0xc7, 0xbe, 0xf1, 0x01,
|
||||
0x2a, 0xb7, 0x35, 0x43, 0x6d, 0x0d, 0xba, 0xc6, 0xcb, 0x38, 0x96, 0x34, 0xa8, 0xed, 0xf8, 0xbc,
|
||||
0xb9, 0xd9, 0xb3, 0xb5, 0xda, 0x7f, 0x79, 0xd6, 0xd1, 0xf4, 0xe7, 0x26, 0xb7, 0x98, 0x95, 0x1f,
|
||||
0x00, 0xf8, 0xcb, 0x34, 0xb8, 0x2f, 0xae, 0x0d, 0x6e, 0xf8, 0x19, 0xda, 0x8c, 0xe1, 0x4b, 0x07,
|
||||
0x39, 0x79, 0x1f, 0xf6, 0xec, 0xdc, 0xb2, 0x67, 0xe9, 0xe7, 0x29, 0xfa, 0x22, 0xde, 0x38, 0xd4,
|
||||
0x9f, 0xeb, 0xd0, 0x16, 0xd0, 0x39, 0xbb, 0xb0, 0x4d, 0xbe, 0x65, 0xdb, 0xd0, 0xbb, 0xf4, 0xa0,
|
||||
0x29, 0x1a, 0x7f, 0xce, 0xa0, 0x72, 0x32, 0xa1, 0x58, 0x9e, 0xf5, 0xae, 0xa9, 0x1a, 0x46, 0xd7,
|
||||
0x88, 0x03, 0x4f, 0x94, 0x3a, 0xe5, 0x9f, 0xf0, 0xf4, 0x2b, 0x9e, 0xa8, 0xba, 0x6a, 0x68, 0xad,
|
||||
0x98, 0x0f, 0x09, 0xe4, 0x84, 0x78, 0xc4, 0x77, 0xc6, 0xf0, 0xcf, 0x4a, 0x15, 0xcc, 0xf4, 0x87,
|
||||
0xad, 0xd3, 0x38, 0x62, 0xde, 0xc0, 0x29, 0x53, 0xfd, 0xc5, 0xf8, 0x82, 0x47, 0xdb, 0x60, 0xd4,
|
||||
0x39, 0x6f, 0x76, 0xb4, 0xb6, 0x80, 0xe6, 0xe4, 0x3a, 0x40, 0xb7, 0x12, 0xa8, 0x26, 0x9e, 0x0e,
|
||||
0x0c, 0xdb, 0xb0, 0xd1, 0xee, 0xff, 0x9e, 0x45, 0xf0, 0xaa, 0x29, 0x34, 0x7b, 0x3d, 0x55, 0x6f,
|
||||
0xc7, 0xa7, 0x5f, 0xea, 0x9a, 0xf3, 0x39, 0xf1, 0x6c, 0x86, 0x38, 0xee, 0x1a, 0x27, 0xea, 0x20,
|
||||
0x3e, 0xfc, 0x12, 0x71, 0x4c, 0xd9, 0xa5, 0x7d, 0xb4, 0xf3, 0xee, 0x1f, 0xbb, 0x6b, 0xef, 0xe1,
|
||||
0xef, 0xdd, 0xa7, 0xdd, 0xcc, 0x7b, 0xf8, 0xfb, 0xfb, 0xa7, 0xdd, 0xb5, 0x7f, 0xc1, 0xef, 0xdb,
|
||||
0x7f, 0xee, 0x66, 0x46, 0x05, 0x3e, 0xbb, 0x9e, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x93, 0x35,
|
||||
0x80, 0x49, 0x50, 0x0e, 0x00, 0x00,
|
||||
}
|
||||
|
||||
@@ -58,6 +58,7 @@ message Folder {
|
||||
bool ignore_permissions = 4;
|
||||
bool ignore_delete = 5;
|
||||
bool disable_temp_indexes = 6;
|
||||
bool paused = 7;
|
||||
|
||||
repeated Device devices = 16 [(gogoproto.nullable) = false];
|
||||
}
|
||||
@@ -100,6 +101,7 @@ message FileInfo {
    uint32 permissions = 4;
    int64 modified_s = 5;
    int32 modified_ns = 11;
    uint64 modified_by = 12 [(gogoproto.customtype) = "ShortID", (gogoproto.nullable) = false];
    bool deleted = 6;
    bool invalid = 7;
    bool no_permissions = 8;
@@ -120,9 +122,10 @@ enum FileInfoType {

message BlockInfo {
    option (gogoproto.goproto_stringer) = false;
    int64 offset = 1;
    int32 size = 2;
    bytes hash = 3;
    int64 offset = 1;
    int32 size = 2;
    bytes hash = 3;
    uint32 weak_hash = 4;
}

message Vector {
@@ -114,7 +114,7 @@ func (f FileInfo) WinsConflict(other FileInfo) bool {
}

func (b BlockInfo) String() string {
    return fmt.Sprintf("Block{%d/%d/%x}", b.Offset, b.Size, b.Hash)
    return fmt.Sprintf("Block{%d/%d/%d/%x}", b.Offset, b.Size, b.WeakHash, b.Hash)
}

// IsEmpty returns true if the block is a full block of zeroes.
@@ -148,5 +148,8 @@ func NewIndexID() IndexID {

func (f Folder) Description() string {
    // used by logging stuff
    if f.Label == "" {
        return f.ID
    }
    return fmt.Sprintf("%q (%s)", f.Label, f.ID)
}
@@ -17,10 +17,10 @@ func (p *bufferPool) get(size int) []byte {
        return p.new(size)
    }

    bs := intf.([]byte)
    bs := *intf.(*[]byte)
    if cap(bs) < size {
        // Buffer was too small, leave it for someone else and allocate.
        p.put(bs)
        p.pool.Put(intf)
        return p.new(size)
    }
@@ -43,7 +43,7 @@ func (p *bufferPool) upgrade(bs []byte, size int) []byte {

// put returns the buffer to the pool
func (p *bufferPool) put(bs []byte) {
    p.pool.Put(bs)
    p.pool.Put(&bs)
}

// new creates a new buffer of the requested size, taking the minimum
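For context on the two bufferPool changes above: storing a bare []byte in a sync.Pool forces an allocation every time the slice header is boxed into an interface{}, which is why the pool now stores *[]byte and dereferences on the way out. A minimal sketch of that pattern (not the exact code from this commit; names and sizes are illustrative):

// Sketch of a sync.Pool holding *[]byte rather than []byte, so returning a
// buffer to the pool does not allocate a fresh interface value each time.
package main

import (
    "fmt"
    "sync"
)

type bufferPool struct {
    pool    sync.Pool
    minSize int
}

func (p *bufferPool) get(size int) []byte {
    if size < p.minSize {
        size = p.minSize
    }
    if intf := p.pool.Get(); intf != nil {
        bs := *intf.(*[]byte) // dereference the stored pointer to get the slice
        if cap(bs) >= size {
            return bs[:size]
        }
        p.pool.Put(intf) // too small for this request; leave it for another caller
    }
    return make([]byte, size)
}

func (p *bufferPool) put(bs []byte) {
    p.pool.Put(&bs) // store a pointer, not the slice itself
}

func main() {
    p := &bufferPool{minSize: 128 << 10}
    buf := p.get(1024)
    fmt.Println(len(buf), cap(buf)) // 131072 131072
    p.put(buf)
}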
@@ -88,6 +88,9 @@ func (n *DeviceID) MarshalText() ([]byte, error) {
}

func (s ShortID) String() string {
    if s == 0 {
        return ""
    }
    var bs [8]byte
    binary.BigEndian.PutUint64(bs[:], uint64(s))
    return base32.StdEncoding.EncodeToString(bs[:])[:7]
@@ -105,11 +105,7 @@ func readHello(c io.Reader) (HelloResult, error) {
        if err := hello.UnmarshalXDR(buf); err != nil {
            return HelloResult{}, err
        }
        res := HelloResult{
            DeviceName: hello.DeviceName,
            ClientName: hello.ClientName,
            ClientVersion: hello.ClientVersion,
        }
        res := HelloResult(hello)
        return res, ErrTooOldVersion13

    case 0x00010001, 0x00010000:
@@ -27,7 +27,7 @@ func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileI

func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, fromTemporary bool, buf []byte) error {
    if strings.Contains(name, `\`) {
        l.Warnln("Dropping request for %s, contains invalid path separator", name)
        l.Warnf("Dropping request for %s, contains invalid path separator", name)
        return ErrNoSuchFile
    }
@@ -39,7 +39,7 @@ func fixupFiles(files []FileInfo) []FileInfo {
    var out []FileInfo
    for i := range files {
        if strings.Contains(files[i].Name, `\`) {
            l.Warnln("Dropping index entry for %s, contains invalid path separator", files[i].Name)
            l.Warnf("Dropping index entry for %s, contains invalid path separator", files[i].Name)
            if out == nil {
                // Most incoming updates won't contain anything invalid, so
                // we delay the allocation and copy to output slice until we
@@ -21,8 +21,6 @@ const (

    // MaxMessageLen is the largest message size allowed on the wire. (500 MB)
    MaxMessageLen = 500 * 1000 * 1000

    hdrSize = 6
)

const (
@@ -108,7 +106,7 @@ type rawConnection struct {
    outbox      chan asyncMessage
    closed      chan struct{}
    once        sync.Once
    pool        sync.Pool
    pool        bufferPool
    compression Compression
}
@@ -149,19 +147,15 @@ func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiv
    cw := &countingWriter{Writer: writer}

    c := rawConnection{
        id: deviceID,
        name: name,
        receiver: nativeModel{receiver},
        cr: cr,
        cw: cw,
        awaiting: make(map[int32]chan asyncResult),
        outbox: make(chan asyncMessage),
        closed: make(chan struct{}),
        pool: sync.Pool{
            New: func() interface{} {
                return make([]byte, BlockSize)
            },
        },
        id:          deviceID,
        name:        name,
        receiver:    nativeModel{receiver},
        cr:          cr,
        cw:          cw,
        awaiting:    make(map[int32]chan asyncResult),
        outbox:      make(chan asyncMessage),
        closed:      make(chan struct{}),
        pool:        bufferPool{minSize: BlockSize},
        compression: compress,
    }
@@ -518,13 +512,13 @@ func (c *rawConnection) handleRequest(req Request) {
    var done chan struct{}

    if usePool {
        buf = c.pool.Get().([]byte)[:size]
        buf = c.pool.get(size)
        done = make(chan struct{})
    } else {
        buf = make([]byte, size)
    }

    err := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), req.Hash, req.FromTemporary, buf)
    err := c.receiver.Request(c.id, req.Folder, req.Name, req.Offset, req.Hash, req.FromTemporary, buf)
    if err != nil {
        c.send(&Response{
            ID: req.ID,
@@ -541,7 +535,7 @@ func (c *rawConnection) handleRequest(req Request) {

    if usePool {
        <-done
        c.pool.Put(buf)
        c.pool.put(buf)
    }
}
@@ -764,11 +758,12 @@ func (c *rawConnection) close(err error) {
// results in an effective ping interval of somewhere between
// PingSendInterval/2 and PingSendInterval.
func (c *rawConnection) pingSender() {
    ticker := time.Tick(PingSendInterval / 2)
    ticker := time.NewTicker(PingSendInterval / 2)
    defer ticker.Stop()

    for {
        select {
        case <-ticker:
        case <-ticker.C:
            d := time.Since(c.cw.Last())
            if d < PingSendInterval/2 {
                l.Debugln(c.id, "ping skipped after wr", d)
@@ -788,11 +783,12 @@ func (c *rawConnection) pingSender() {
// but we expect pings in the absence of other messages) within the last
// ReceiveTimeout. If not, we close the connection with an ErrTimeout.
func (c *rawConnection) pingReceiver() {
    ticker := time.Tick(ReceiveTimeout / 2)
    ticker := time.NewTicker(ReceiveTimeout / 2)
    defer ticker.Stop()

    for {
        select {
        case <-ticker:
        case <-ticker.C:
            d := time.Since(c.cr.Last())
            if d > ReceiveTimeout {
                l.Debugln(c.id, "ping timeout", d)
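The ticker changes above swap time.Tick for time.NewTicker because a ticker created by time.Tick can never be stopped or collected, while NewTicker plus a deferred Stop releases it when the goroutine exits. A minimal sketch of the pattern, with illustrative names and intervals:

// Sketch: a stoppable periodic goroutine using time.NewTicker.
package main

import (
    "fmt"
    "time"
)

func pinger(interval time.Duration, stop <-chan struct{}) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop() // releases the ticker when the loop returns

    for {
        select {
        case <-ticker.C:
            fmt.Println("tick")
        case <-stop:
            return
        }
    }
}

func main() {
    stop := make(chan struct{})
    go pinger(10*time.Millisecond, stop)
    time.Sleep(50 * time.Millisecond)
    close(stop)
}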
@@ -437,8 +437,6 @@ func (p *Process) eventLoop() {
        }
        p.eventMut.Unlock()

        time.Sleep(250 * time.Millisecond)

        events, err := p.Events(since)
        if err != nil {
            if time.Since(start) < 5*time.Second {
@@ -13,6 +13,7 @@ import (

    "github.com/syncthing/syncthing/lib/protocol"
    "github.com/syncthing/syncthing/lib/sha256"
    "github.com/syncthing/syncthing/lib/weakhash"
)

var SHA256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}
@@ -25,6 +26,7 @@ type Counter interface {
func Blocks(r io.Reader, blocksize int, sizehint int64, counter Counter) ([]protocol.BlockInfo, error) {
    hf := sha256.New()
    hashLength := hf.Size()
    whf := weakhash.NewHash(blocksize)

    var blocks []protocol.BlockInfo
    var hashes, thisHash []byte
@@ -44,7 +46,7 @@ func Blocks(r io.Reader, blocksize int, sizehint int64, counter Counter) ([]prot
    var offset int64
    for {
        lr := io.LimitReader(r, int64(blocksize))
        n, err := copyBuffer(hf, lr, buf)
        n, err := io.CopyBuffer(hf, io.TeeReader(lr, whf), buf)
        if err != nil {
            return nil, err
        }
@@ -54,7 +56,7 @@ func Blocks(r io.Reader, blocksize int, sizehint int64, counter Counter) ([]prot
        }

        if counter != nil {
            counter.Update(int64(n))
            counter.Update(n)
        }

        // Carve out a hash-sized chunk of "hashes" to store the hash for this
@@ -63,15 +65,17 @@ func Blocks(r io.Reader, blocksize int, sizehint int64, counter Counter) ([]prot
        thisHash, hashes = hashes[:hashLength], hashes[hashLength:]

        b := protocol.BlockInfo{
            Size: int32(n),
            Offset: offset,
            Hash: thisHash,
            Size:     int32(n),
            Offset:   offset,
            Hash:     thisHash,
            WeakHash: whf.Sum32(),
        }

        blocks = append(blocks, b)
        offset += int64(n)
        offset += n

        hf.Reset()
        whf.Reset()
    }

    if len(blocks) == 0 {
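In the Blocks change above, io.TeeReader feeds every byte that goes into the SHA-256 hash into the rolling weak hash as well, so both hashes for a block are produced in a single pass over the data. A minimal sketch of that idea, assuming the lib/weakhash package added by this commit; the data and block size are made up:

// Sketch: hash one block with a strong and a weak hash in one pass.
package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
    "io"

    "github.com/syncthing/syncthing/lib/weakhash"
)

func main() {
    const blocksize = 16
    data := bytes.NewReader(bytes.Repeat([]byte("x"), blocksize))

    strong := sha256.New()
    weak := weakhash.NewHash(blocksize)

    // TeeReader copies everything read for the strong hash into the weak hash too.
    if _, err := io.Copy(strong, io.TeeReader(data, weak)); err != nil {
        panic(err)
    }

    fmt.Printf("strong: %x weak: %d\n", strong.Sum(nil), weak.Sum32())
}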
@@ -123,9 +127,12 @@ func BlockDiff(src, tgt []protocol.BlockInfo) (have, need []protocol.BlockInfo)
// list and actual reader contents
func Verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error {
    hf := sha256.New()
    // A 32k buffer is used for copying into the hash function.
    buf := make([]byte, 32<<10)

    for i, block := range blocks {
        lr := &io.LimitedReader{R: r, N: int64(blocksize)}
        _, err := io.Copy(hf, lr)
        _, err := io.CopyBuffer(hf, lr, buf)
        if err != nil {
            return err
        }
@@ -180,48 +187,3 @@ func BlocksEqual(src, tgt []protocol.BlockInfo) bool {
    }
    return true
}

// This is a copy & paste of io.copyBuffer from the Go 1.5 standard library,
// as we want this but also want to build with Go 1.3+.

// copyBuffer is the actual implementation of Copy and CopyBuffer.
// if buf is nil, one is allocated.
func copyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
    // If the reader has a WriteTo method, use it to do the copy.
    // Avoids an allocation and a copy.
    if wt, ok := src.(io.WriterTo); ok {
        return wt.WriteTo(dst)
    }
    // Similarly, if the writer has a ReadFrom method, use it to do the copy.
    if rt, ok := dst.(io.ReaderFrom); ok {
        return rt.ReadFrom(src)
    }
    if buf == nil {
        buf = make([]byte, 32*1024)
    }
    for {
        nr, er := src.Read(buf)
        if nr > 0 {
            nw, ew := dst.Write(buf[0:nr])
            if nw > 0 {
                written += int64(nw)
            }
            if ew != nil {
                err = ew
                break
            }
            if nr != nw {
                err = io.ErrShortWrite
                break
            }
        }
        if er == io.EOF {
            break
        }
        if er != nil {
            err = er
            break
        }
    }
    return written, err
}
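The private copyBuffer above could be deleted because io.CopyBuffer, available since Go 1.5, provides the same reuse of a caller-supplied scratch buffer, which is what the Verify change now does per block. A small illustrative sketch, not taken from the repository:

// Sketch: one scratch buffer reused across per-block copies via io.CopyBuffer.
package main

import (
    "crypto/sha256"
    "fmt"
    "io"
    "strings"
)

func main() {
    src := strings.NewReader("some file content split into blocks")
    hf := sha256.New()
    buf := make([]byte, 32<<10) // reused for every block instead of allocating

    for {
        lr := &io.LimitedReader{R: src, N: 8} // one 8-byte "block" at a time
        n, err := io.CopyBuffer(hf, lr, buf)
        if err != nil {
            panic(err)
        }
        if n == 0 {
            break
        }
        fmt.Printf("%d bytes -> %x\n", n, hf.Sum(nil))
        hf.Reset()
    }
}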
@@ -163,7 +163,7 @@ func (w *walker) walk() (chan protocol.FileInfo, error) {

        for file := range toHashChan {
            filesToHash = append(filesToHash, file)
            total += int64(file.Size)
            total += file.Size
        }

        realToHashChan := make(chan protocol.FileInfo)
@@ -326,6 +327,7 @@ func (w *walker) walkRegular(relPath string, info os.FileInfo, fchan chan protoc
        NoPermissions: w.IgnorePerms,
        ModifiedS:     info.ModTime().Unix(),
        ModifiedNs:    int32(info.ModTime().Nanosecond()),
        ModifiedBy:    w.ShortID,
        Size:          info.Size(),
    }
    l.Debugln("to hash:", relPath, f)
@@ -361,6 +362,7 @@ func (w *walker) walkDir(relPath string, info os.FileInfo, dchan chan protocol.F
        NoPermissions: w.IgnorePerms,
        ModifiedS:     info.ModTime().Unix(),
        ModifiedNs:    int32(info.ModTime().Nanosecond()),
        ModifiedBy:    w.ShortID,
    }
    l.Debugln("dir:", relPath, f)

@@ -17,7 +17,7 @@ import (
)

var (
    threshold = time.Duration(100 * time.Millisecond)
    threshold = 100 * time.Millisecond
    l         = logger.DefaultLogger.NewFacility("sync", "Mutexes")

    // We make an exception in this package and have an actual "if debug { ...
@@ -80,7 +80,7 @@ func (h holder) String() string {
    if h.at == "" {
        return "not held"
    }
    return fmt.Sprintf("at %s goid: %d for %s", h.at, h.goid, time.Now().Sub(h.time))
    return fmt.Sprintf("at %s goid: %d for %s", h.at, h.goid, time.Since(h.time))
}

type loggedMutex struct {
@@ -95,7 +95,7 @@ func (m *loggedMutex) Lock() {

func (m *loggedMutex) Unlock() {
    currentHolder := m.holder.Load().(holder)
    duration := time.Now().Sub(currentHolder.time)
    duration := time.Since(currentHolder.time)
    if duration >= threshold {
        l.Debugf("Mutex held for %v. Locked at %s unlocked at %s", duration, currentHolder.at, getHolder().at)
    }
@@ -147,7 +147,7 @@ func (m *loggedRWMutex) Lock() {

func (m *loggedRWMutex) Unlock() {
    currentHolder := m.holder.Load().(holder)
    duration := time.Now().Sub(currentHolder.time)
    duration := time.Since(currentHolder.time)
    if duration >= threshold {
        l.Debugf("RWMutex held for %v. Locked at %s unlocked at %s", duration, currentHolder.at, getHolder().at)
    }
@@ -201,7 +201,7 @@ type loggedWaitGroup struct {
func (wg *loggedWaitGroup) Wait() {
    start := time.Now()
    wg.WaitGroup.Wait()
    duration := time.Now().Sub(start)
    duration := time.Since(start)
    if duration >= threshold {
        l.Debugf("WaitGroup took %v at %s", duration, getHolder())
    }
@@ -157,6 +157,7 @@ func TestRWMutex(t *testing.T) {
    }()

    mut.Lock()
    _ = 1 // skip empty critical section check
    mut.Unlock()

    if len(messages) != 2 {
@@ -170,6 +171,7 @@ func TestRWMutex(t *testing.T) {
    mut.RLock()
    mut.RLock()
    mut.RLock()
    _ = 1 // skip empty critical section check
    mut.RUnlock()
    mut.RUnlock()
    mut.RUnlock()
@@ -22,7 +22,7 @@ func TestSetDefaults(t *testing.T) {
        t.Error("int failed")
    } else if x.C != 0 {
        t.Errorf("float failed")
    } else if x.D != false {
    } else if x.D {
        t.Errorf("bool failed")
    }
@@ -36,7 +36,7 @@ func TestSetDefaults(t *testing.T) {
        t.Error("int failed")
    } else if x.C != 2.2 {
        t.Errorf("float failed")
    } else if x.D != true {
    } else if !x.D {
        t.Errorf("bool failed")
    }
}
@@ -73,11 +73,14 @@ func NewStaggered(folderID, folderPath string, params map[string]string) Version
    l.Debugf("instantiated %#v", s)

    go func() {
        // TODO: This should be converted to a Serve() method.
        s.clean()
        if testCleanDone != nil {
            close(testCleanDone)
        }
        for range time.Tick(time.Duration(cleanInterval) * time.Second) {
        tck := time.NewTicker(time.Duration(cleanInterval) * time.Second)
        defer tck.Stop()
        for range tck.C {
            s.clean()
        }
    }()
30 lib/weakhash/benchmark_test.go Normal file
@@ -0,0 +1,30 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package weakhash

import (
    "os"
    "testing"
)

const testFile = "../model/testdata/~syncthing~file.tmp"

func BenchmarkFind1MFile(b *testing.B) {
    b.ReportAllocs()
    b.SetBytes(1 << 20)
    for i := 0; i < b.N; i++ {
        fd, err := os.Open(testFile)
        if err != nil {
            b.Fatal(err)
        }
        _, err = Find(fd, []uint32{0, 1, 2}, 128<<10)
        if err != nil {
            b.Fatal(err)
        }
        fd.Close()
    }
}
169 lib/weakhash/weakhash.go Normal file
@@ -0,0 +1,169 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package weakhash

import (
    "bufio"
    "hash"
    "io"
    "os"
)

const (
    Size = 4
)

func NewHash(size int) hash.Hash32 {
    return &digest{
        buf:  make([]byte, size),
        size: size,
    }
}

// Find finds all the blocks of the given size within io.Reader that match
// the hashes provided, and returns a hash -> slice of offsets within reader
// map, that produces the same weak hash.
func Find(ir io.Reader, hashesToFind []uint32, size int) (map[uint32][]int64, error) {
    if ir == nil {
        return nil, nil
    }

    r := bufio.NewReader(ir)
    hf := NewHash(size)

    n, err := io.CopyN(hf, r, int64(size))
    if err == io.EOF {
        return nil, nil
    }
    if err != nil {
        return nil, err
    }
    if n != int64(size) {
        return nil, io.ErrShortBuffer
    }

    offsets := make(map[uint32][]int64)
    for _, hashToFind := range hashesToFind {
        offsets[hashToFind] = nil
    }

    var i int64
    var hash uint32
    for {
        hash = hf.Sum32()
        if existing, ok := offsets[hash]; ok {
            offsets[hash] = append(existing, i)
        }
        i++

        bt, err := r.ReadByte()
        if err == io.EOF {
            break
        } else if err != nil {
            return offsets, err
        }
        hf.Write([]byte{bt})
    }
    return offsets, nil
}

// Using this: http://tutorials.jenkov.com/rsync/checksums.html
// Example implementations: https://gist.github.com/csabahenk/1096262/revisions
// Alternative that could be used is adler32 http://blog.liw.fi/posts/rsync-in-python/#comment-fee8d5e07794fdba3fe2d76aa2706a13
type digest struct {
    buf  []byte
    size int
    a    uint16
    b    uint16
    j    int
}

func (d *digest) Write(data []byte) (int, error) {
    for _, c := range data {
        // TODO: Use this in Go 1.6
        // d.a = d.a - uint16(d.buf[d.j]) + uint16(c)
        // d.b = d.b - uint16(d.size)*uint16(d.buf[d.j]) + d.a
        d.a -= uint16(d.buf[d.j])
        d.a += uint16(c)
        d.b -= uint16(d.size) * uint16(d.buf[d.j])
        d.b += d.a

        d.buf[d.j] = c
        d.j = (d.j + 1) % d.size
    }
    return len(data), nil
}

func (d *digest) Reset() {
    for i := range d.buf {
        d.buf[i] = 0x0
    }
    d.a = 0
    d.b = 0
    d.j = 0
}

func (d *digest) Sum(b []byte) []byte {
    r := d.Sum32()
    return append(b, byte(r>>24), byte(r>>16), byte(r>>8), byte(r))
}

func (d *digest) Sum32() uint32 { return uint32(d.a) | (uint32(d.b) << 16) }
func (digest) Size() int        { return Size }
func (digest) BlockSize() int   { return 1 }

func NewFinder(path string, size int, hashesToFind []uint32) (*Finder, error) {
    file, err := os.Open(path)
    if err != nil {
        return nil, err
    }

    offsets, err := Find(file, hashesToFind, size)
    if err != nil {
        file.Close()
        return nil, err
    }

    return &Finder{
        file:    file,
        size:    size,
        offsets: offsets,
    }, nil
}

type Finder struct {
    file    *os.File
    size    int
    offsets map[uint32][]int64
}

// Iterate iterates all available blocks that match the provided hash, reads
// them into buf, and calls the iterator function. The iterator function should
// return whether it wishes to continue iterating.
func (h *Finder) Iterate(hash uint32, buf []byte, iterFunc func(int64) bool) (bool, error) {
    if h == nil || hash == 0 || len(buf) != h.size {
        return false, nil
    }

    for _, offset := range h.offsets[hash] {
        _, err := h.file.ReadAt(buf, offset)
        if err != nil {
            return false, err
        }
        if !iterFunc(offset) {
            return true, nil
        }
    }
    return false, nil
}

// Close releases any resource associated with the finder
func (h *Finder) Close() {
    if h != nil {
        h.file.Close()
    }
}
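The digest above is a rolling checksum in the rsync style: a is the byte sum of the current window and b accumulates the running a values, so pushing one byte updates the hash in constant time while producing the same value as hashing that window from scratch. A small sketch demonstrating that property with the package added in this commit; the sample data is arbitrary:

// Sketch: rolling the weak hash forward one byte equals hashing the new window fresh.
package main

import (
    "fmt"

    "github.com/syncthing/syncthing/lib/weakhash"
)

func main() {
    data := []byte("abcdefgh")
    const window = 4

    // Roll: hash bytes 0..3, then push byte 4 so the window becomes 1..4.
    rolling := weakhash.NewHash(window)
    rolling.Write(data[:window])
    rolling.Write(data[window : window+1])

    // From scratch: hash bytes 1..4 directly.
    fresh := weakhash.NewHash(window)
    fresh.Write(data[1 : window+1])

    fmt.Println(rolling.Sum32() == fresh.Sum32()) // true
}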
188 lib/weakhash/weakhash_test.go Normal file
@@ -0,0 +1,188 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// The existence of this file means we get 0% test coverage rather than no
// test coverage at all. Remove when implementing an actual test.

package weakhash

import (
    "bytes"
    "io/ioutil"
    "os"
    "reflect"
    "testing"
)

var payload = []byte("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz")
var hashes = []uint32{
    64225674, 64881038, 65536402, 66191766, 66847130, 67502494, 68157858, 68813222, 69468586, 70123950, 70779314, 71434678, 72090042,
    72745406, 73400770, 74056134, 74711498, 75366862, 76022226, 76677590, 77332954, 77988318, 78643682, 77595084, 74842550, 70386080,
    64225674, 64881038, 65536402, 66191766, 66847130, 67502494, 68157858, 68813222, 69468586, 70123950, 70779314, 71434678, 72090042,
    72745406, 73400770, 74056134, 74711498, 75366862, 76022226, 76677590, 77332954, 77988318, 78643682, 77595084, 74842550, 70386080,
    64225674, 64881038, 65536402, 66191766, 66847130, 67502494, 68157858, 68813222, 69468586, 70123950, 70779314, 71434678, 72090042,
    72745406, 73400770, 74056134, 74711498, 75366862, 76022226, 76677590, 77332954, 77988318, 78643682, 77595084, 74842550, 70386080,
    64225674, 64881038, 65536402, 66191766, 66847130, 67502494, 68157858, 68813222, 69468586, 70123950, 70779314, 71434678, 72090042,
    72745406, 73400770, 74056134, 74711498, 75366862, 76022226, 76677590, 77332954, 77988318, 78643682, 71893365, 71893365,
}

// Tested using an alternative C implementation at https://gist.github.com/csabahenk/1096262
func TestHashCorrect(t *testing.T) {
    h := NewHash(Size)
    pos := 0
    for pos < Size {
        h.Write([]byte{payload[pos]})
        pos++
    }

    for i := 0; pos < len(payload); i++ {
        if h.Sum32() != hashes[i] {
            t.Errorf("mismatch at %d", i)
        }
        h.Write([]byte{payload[pos]})
        pos++
    }
}

func TestFinder(t *testing.T) {
    f, err := ioutil.TempFile("", "")
    if err != nil {
        t.Error(err)
    }
    defer os.Remove(f.Name())
    defer f.Close()

    if _, err := f.Write(payload); err != nil {
        t.Error(err)
    }

    hashes := []uint32{64881038, 65536402}
    finder, err := NewFinder(f.Name(), 4, hashes)
    if err != nil {
        t.Error(err)
    }
    defer finder.Close()

    expected := map[uint32][]int64{
        64881038: []int64{1, 27, 53, 79},
        65536402: []int64{2, 28, 54, 80},
    }
    actual := make(map[uint32][]int64)

    b := make([]byte, Size)

    for _, hash := range hashes {
        _, err := finder.Iterate(hash, b[:4], func(offset int64) bool {
            if !bytes.Equal(b, payload[offset:offset+4]) {
                t.Errorf("Not equal at %d: %s != %s", offset, string(b), string(payload[offset:offset+4]))
            }
            actual[hash] = append(actual[hash], offset)
            return true
        })
        if err != nil {
            t.Error(err)
        }
    }

    if !reflect.DeepEqual(actual, expected) {
        t.Errorf("Not equal: %#v != %#v", actual, expected)
    }
}
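As the tests above suggest, Find scans a reader one byte at a time and reports every offset whose window matches one of the weak hashes being looked for. A minimal usage sketch using the new API; the input data and block size are illustrative only:

// Sketch: locate every offset in a reader with a given weak hash.
package main

import (
    "bytes"
    "fmt"

    "github.com/syncthing/syncthing/lib/weakhash"
)

func main() {
    data := []byte("hello, hello, hello, world")
    const blocksize = 5

    // Weak hash of the block we want to find ("hello").
    h := weakhash.NewHash(blocksize)
    h.Write(data[:blocksize])
    want := h.Sum32()

    // Offsets of every window in data that produces the same weak hash.
    offsets, err := weakhash.Find(bytes.NewReader(data), []uint32{want}, blocksize)
    if err != nil {
        panic(err)
    }
    fmt.Println(offsets[want]) // e.g. [0 7 14]
}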
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "STDISCOSRV" "1" "December 17, 2016" "v0.14" "Syncthing"
.TH "STDISCOSRV" "1" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
stdiscosrv \- Syncthing Discovery Server
.
@@ -245,7 +245,7 @@ ssl_verify_client optional_no_ca;
.UNINDENT
.sp
The following is a complete example Nginx configuration file. With this setup,
clients can use \fI\%https://discovery.mydomain.com\fP as the discovery server URL in
clients can use \fI\%https://discovery.example.com\fP as the discovery server URL in
the Syncthing settings.
.INDENT 0.0
.INDENT 3.5
@@ -262,18 +262,18 @@ proxy_set_header X\-Real\-IP $remote_addr;
proxy_set_header X\-Forwarded\-For $proxy_add_x_forwarded_for;
proxy_set_header X\-Forwarded\-Proto $proxy_x_forwarded_proto;
proxy_set_header X\-SSL\-Cert $ssl_client_cert;
upstream discovery.mydomain.com {
upstream discovery.example.com {
    # Local IP address:port for discovery server
    server 172.17.0.6:8443;
    server 192.0.2.1:8443;
}
server {
    server_name discovery.mydomain.com;
    server_name discovery.example.com;
    listen 80;
    access_log /var/log/nginx/access.log vhost;
    return 301 https://$host$request_uri;
}
server {
    server_name discovery.mydomain.com;
    server_name discovery.example.com;
    listen 443 ssl http2;
    access_log /var/log/nginx/access.log vhost;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
@@ -281,13 +281,13 @@ server {
    ssl_prefer_server_ciphers on;
    ssl_session_timeout 5m;
    ssl_session_cache shared:SSL:50m;
    ssl_certificate /etc/nginx/certs/discovery.mydomain.com.crt;
    ssl_certificate_key /etc/nginx/certs/discovery.mydomain.com.key;
    ssl_dhparam /etc/nginx/certs/discovery.mydomain.com.dhparam.pem;
    ssl_certificate /etc/nginx/certs/discovery.example.com.crt;
    ssl_certificate_key /etc/nginx/certs/discovery.example.com.key;
    ssl_dhparam /etc/nginx/certs/discovery.example.com.dhparam.pem;
    add_header Strict\-Transport\-Security "max\-age=31536000";
    ssl_verify_client optional_no_ca;
    location / {
        proxy_pass http://discovery.mydomain.com;
        proxy_pass http://discovery.example.com;
    }
}
.ft P
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "STRELAYSRV" "1" "December 17, 2016" "v0.14" "Syncthing"
.TH "STRELAYSRV" "1" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
strelaysrv \- Syncthing Relay Server
.
@@ -48,7 +48,7 @@ strelaysrv [\-debug] [\-ext\-address=<address>] [\-global\-rate=<bytes/s>] [\-ke
Syncthing relies on a network of community\-contributed relay servers. Anyone
can run a relay server, and it will automatically join the relay pool and be
available to Syncthing users. The current list of relays can be found at
\fI\%https://relays.syncthing.net\fP\&.
\fI\%http://relays.syncthing.net/\fP\&.
.SH OPTIONS
.INDENT 0.0
.TP
@@ -105,7 +105,7 @@ How often pings are sent (default 1m0s).
.TP
.B \-pools=<pool addresses>
Comma separated list of relay pool addresses to join (default
"\fI\%https://relays.syncthing.net/endpoint\fP"). Blank to disable announcement to
"\fI\%http://relays.syncthing.net/endpoint\fP"). Blank to disable announcement to
a pool, thereby remaining a private relay.
.UNINDENT
.INDENT 0.0
@@ -196,7 +196,7 @@ although your milage may vary.
.SH FIREWALL CONSIDERATIONS
.sp
The relay server listens on two ports by default. One for data connections and the other
for providing public statistics at \fI\%https://relays.syncthing.net\fP\&. The firewall, such as
for providing public statistics at \fI\%http://relays.syncthing.net/\fP\&. The firewall, such as
\fBiptables\fP, must permit incoming TCP connections to the following ports:
.INDENT 0.0
.IP \(bu 2
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-BEP" "7" "December 17, 2016" "v0.14" "Syncthing"
.TH "SYNCTHING-BEP" "7" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
syncthing-bep \- Block Exchange Protocol v1
.
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-CONFIG" "5" "December 17, 2016" "v0.14" "Syncthing"
.TH "SYNCTHING-CONFIG" "5" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
syncthing-config \- Syncthing Configuration
.
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-DEVICE-IDS" "7" "December 17, 2016" "v0.14" "Syncthing"
.TH "SYNCTHING-DEVICE-IDS" "7" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
syncthing-device-ids \- Understanding Device IDs
.
@@ -123,7 +123,7 @@ MFZWI3DBONSGYYLTMRWGC43ENRQXGZDMMFZWI3DBONSGYYLTMRWA====
.sp
The padding (\fB====\fP) is stripped away, the device ID split into four
groups, and \fI\%check
digits\fP <\fBhttps://forum.syncthing.net/t/v0-9-0-new-device-id-format/478\fP>
digits\fP <\fBhttps://forum.syncthing.net/t/v0-9-0-new-node-id-format/478\fP>
are added for each group. For presentation purposes the device ID is
grouped with dashes, resulting in the final value:
.INDENT 0.0
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-EVENT-API" "7" "December 17, 2016" "v0.14" "Syncthing"
.TH "SYNCTHING-EVENT-API" "7" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
syncthing-event-api \- Event API
.
@@ -113,12 +113,12 @@ itself.
    "id": 50,
    "type": "ConfigSaved",
    "time": "2014\-12\-13T00:09:13.5166486Z",
    "data":{
    "data": {
        "Version": 7,
        "Options": { ... },
        "GUI": { ... },
        "Devices": [ ... ],
        "Folders": [ ... ]
        "Options": {"..."},
        "GUI": {"..."},
        "Devices": [{"..."}],
        "Folders": [{"..."}]
    }
}
.ft P
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-FAQ" "7" "December 17, 2016" "v0.14" "Syncthing"
.TH "SYNCTHING-FAQ" "7" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
syncthing-faq \- Frequently Asked Questions
.
@@ -56,10 +56,10 @@ Phone, Amazon Kindle Fire and BSD. [1] Syncthing is an open source file
synchronization tool.
.sp
Syncthing uses an open and documented protocol, and likewise the security
mechanisms in use are well defined and visible in the source code. BitTorrent
mechanisms in use are well defined and visible in the source code. Resilio
Sync uses an undocumented, closed protocol with unknown security properties.
.IP [1] 5
\fI\%http://en.wikipedia.org/wiki/BitTorrent_Sync\fP
\fI\%https://en.wikipedia.org/wiki/Resilio_Sync\fP
.SH USAGE
.SS What things are synced?
.sp
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-GLOBALDISCO" "7" "December 17, 2016" "v0.14" "Syncthing"
.TH "SYNCTHING-GLOBALDISCO" "7" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
syncthing-globaldisco \- Global Discovery Protocol v3
.
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-LOCALDISCO" "7" "December 17, 2016" "v0.14" "Syncthing"
.TH "SYNCTHING-LOCALDISCO" "7" "December 22, 2016" "v0.14" "Syncthing"
.SH NAME
syncthing-localdisco \- Local Discovery Protocol v4
.
Some files were not shown because too many files have changed in this diff.