Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-05 04:19:10 -05:00)

Compare commits: 25 commits
| SHA1 |
|---|
| 03e17ec1dd |
| 892e470ba5 |
| f8b835c6be |
| 5fab25effd |
| d746b043c8 |
| beb643c666 |
| d039df46c0 |
| b7e5948b09 |
| e3dd072022 |
| b98a8bafb3 |
| 4b5c44b61c |
| 41c131d840 |
| 66df2e57ae |
| 4a6e9c189c |
| 2eece95ed2 |
| 008b07a52f |
| 4fd01ae552 |
| 8e6cf4bf4e |
| ace8451604 |
| feffc0416f |
| 42dfa45d52 |
| 706926543e |
| a024cefd35 |
| 42dcb3d2cc |
| ed1852f8f6 |

.gitignore (vendored): 8 changes

@@ -1,6 +1,4 @@
syncthing
!gui/syncthing
!Godeps/_workspace/src/github.com/syncthing
./syncthing
syncthing.exe
*.tar.gz
*.zip
@@ -11,6 +9,8 @@ files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
!gui/scripts/syncthing
syncthing.md5
syncthing.exe.md5
RELEASE
deb

AUTHORS: 6 changes

@@ -36,7 +36,6 @@ Frank Isemann <frank@isemann.name>
Gilli Sigurdsson <gilli@vx.is>
Jacek Szafarkiewicz <szafar@linux.pl>
Jakob Borg <jakob@nym.se>
Jake Peterson <jake@acogdev.com>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Malec <dzardacz@gmail.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
@@ -49,8 +48,7 @@ Lord Landon Agahnim <lordlandon@gmail.com>
Marc Laporte <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol <kilburn@la3.org>
Marcin Dziadus <dziadus.marcin@gmail.com>
Mateusz Naściszewski <matin1111@wp.pl>
Matt Burke <mburke@amplify.com> <burkemw3@gmail.com>
Matt Burke <mburke@amplify.com>
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Tilli <pyfisch@gmail.com>
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
@@ -61,11 +59,9 @@ Piotr Bejda <piotrb10@gmail.com>
Ryan Sullivan <kayoticsully@gmail.com>
Sergey Mishin <ralder@yandex.ru>
Stefan Tatschner <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
Stefan Kuntz <stefan.github@gmail.com> <Stefan.github@gmail.com>
Tim Abell <tim@timwise.co.uk>
Tobias Nygren <tnn@nygren.pp.se>
Tomas Cerveny <kozec@kozec.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
Vil Brekin <vilbrekin@gmail.com>
Yannic A. <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>

Godeps/Godeps.json (generated): 28 changes

@@ -1,25 +1,29 @@
{
"ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "go1.5.1",
"GoVersion": "go1.5",
"Packages": [
"./cmd/..."
],
"Deps": [
{
"ImportPath": "github.com/bkaradzic/go-lz4",
"Rev": "74ddf82598bc4745b965729e9c6a463bedd33049"
"Rev": "d47913b1412890a261b9fefae99d72d2bf5aebd8"
},
{
"ImportPath": "github.com/calmh/du",
"Rev": "3c0690cca16228b97741327b1b6781397afbdb24"
},
{
"ImportPath": "github.com/calmh/logger",
"Rev": "c96f6a1a8c7b6bf2f4860c667867d90174799eb2"
},
{
"ImportPath": "github.com/calmh/luhn",
"Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
},
{
"ImportPath": "github.com/calmh/xdr",
"Rev": "47c0042d09a827b81ee62497f99e5e0c7f0bd31c"
"Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
},
{
"ImportPath": "github.com/golang/snappy",
@@ -33,9 +37,13 @@
"ImportPath": "github.com/kardianos/osext",
"Rev": "6e7f843663477789fac7c02def0d0909e969b4e5"
},
{
"ImportPath": "github.com/syncthing/protocol",
"Rev": "388a29bbe21d8772ee4c29f4520aa8040309607d"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
"Rev": "b743d92d3215f11c9b5ce8830fafe1f16786adf4"
},
{
"ImportPath": "github.com/thejerf/suture",
@@ -56,23 +64,19 @@
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "81bf7719a6b7ce9b665598222362b50122dfc13b"
"Rev": "c16968172724c0b5e8bdc6ad33f5a79443a44cd7"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "81bf7719a6b7ce9b665598222362b50122dfc13b"
"Rev": "c16968172724c0b5e8bdc6ad33f5a79443a44cd7"
},
{
"ImportPath": "golang.org/x/net/internal/iana",
"Rev": "4b709d93778b93d2f34943e3142c71578d83ad31"
"Rev": "66f0418ca41253f8d1a024eb9754e9441a8e79b9"
},
{
"ImportPath": "golang.org/x/net/ipv6",
"Rev": "4b709d93778b93d2f34943e3142c71578d83ad31"
},
{
"ImportPath": "golang.org/x/net/proxy",
"Rev": "4b709d93778b93d2f34943e3142c71578d83ad31"
"Rev": "66f0418ca41253f8d1a024eb9754e9441a8e79b9"
},
{
"ImportPath": "golang.org/x/text/transform",

Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml (generated, vendored): 3 changes

@@ -4,6 +4,5 @@ go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.4
- tip

lib/logger/LICENSE → Godeps/_workspace/src/github.com/calmh/logger/LICENSE (generated, vendored): 0 changes
Godeps/_workspace/src/github.com/calmh/logger/README.md (generated, vendored, Normal file): 15 changes

@@ -0,0 +1,15 @@
logger
======

A small wrapper around `log` to provide log levels.

Documentation
-------------

http://godoc.org/github.com/calmh/logger

License
-------

MIT

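The README above describes calmh/logger as a thin wrapper around the standard `log` package that adds log levels. As a rough illustration only (not part of the diff), the sketch below exercises the API visible in the vendored logger.go further down: DefaultLogger, AddHandler, the Level* constants and the leveled print methods.

```go
package main

import "github.com/calmh/logger"

func main() {
	// DefaultLogger writes to stdout with a time prefix (see New() in logger.go).
	l := logger.DefaultLogger

	// Register a handler that is called for every WARNING-level message,
	// in addition to the line being written to stdout.
	l.AddHandler(logger.LevelWarn, func(level logger.LogLevel, msg string) {
		// msg arrives with trailing whitespace trimmed; forward it somewhere.
		_ = msg
	})

	l.Infoln("starting up")
	l.Warnf("%d connections dropped", 3)
}
```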
Godeps/_workspace/src/github.com/calmh/logger/logger.go (generated, vendored, Normal file): 187 changes
@@ -0,0 +1,187 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package logger implements a standardized logger with callback functionality
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type LogLevel int
|
||||
|
||||
const (
|
||||
LevelDebug LogLevel = iota
|
||||
LevelVerbose
|
||||
LevelInfo
|
||||
LevelOK
|
||||
LevelWarn
|
||||
LevelFatal
|
||||
NumLevels
|
||||
)
|
||||
|
||||
// A MessageHandler is called with the log level and message text.
|
||||
type MessageHandler func(l LogLevel, msg string)
|
||||
|
||||
type Logger struct {
|
||||
logger *log.Logger
|
||||
handlers [NumLevels][]MessageHandler
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
// The default logger logs to standard output with a time prefix.
|
||||
var DefaultLogger = New()
|
||||
|
||||
func New() *Logger {
|
||||
if os.Getenv("LOGGER_DISCARD") != "" {
|
||||
// Hack to completely disable logging, for example when running benchmarks.
|
||||
return &Logger{
|
||||
logger: log.New(ioutil.Discard, "", 0),
|
||||
}
|
||||
}
|
||||
|
||||
return &Logger{
|
||||
logger: log.New(os.Stdout, "", log.Ltime),
|
||||
}
|
||||
}
|
||||
|
||||
// AddHandler registers a new MessageHandler to receive messages with the
|
||||
// specified log level or above.
|
||||
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
l.handlers[level] = append(l.handlers[level], h)
|
||||
}
|
||||
|
||||
// See log.SetFlags
|
||||
func (l *Logger) SetFlags(flag int) {
|
||||
l.logger.SetFlags(flag)
|
||||
}
|
||||
|
||||
// See log.SetPrefix
|
||||
func (l *Logger) SetPrefix(prefix string) {
|
||||
l.logger.SetPrefix(prefix)
|
||||
}
|
||||
|
||||
func (l *Logger) callHandlers(level LogLevel, s string) {
|
||||
for _, h := range l.handlers[level] {
|
||||
h(level, strings.TrimSpace(s))
|
||||
}
|
||||
}
|
||||
|
||||
// Debugln logs a line with a DEBUG prefix.
|
||||
func (l *Logger) Debugln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "DEBUG: "+s)
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Debugf logs a formatted line with a DEBUG prefix.
|
||||
func (l *Logger) Debugf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "DEBUG: "+s)
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Infoln logs a line with a VERBOSE prefix.
|
||||
func (l *Logger) Verboseln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "VERBOSE: "+s)
|
||||
l.callHandlers(LevelVerbose, s)
|
||||
}
|
||||
|
||||
// Infof logs a formatted line with a VERBOSE prefix.
|
||||
func (l *Logger) Verbosef(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "VERBOSE: "+s)
|
||||
l.callHandlers(LevelVerbose, s)
|
||||
}
|
||||
|
||||
// Infoln logs a line with an INFO prefix.
|
||||
func (l *Logger) Infoln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "INFO: "+s)
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Infof logs a formatted line with an INFO prefix.
|
||||
func (l *Logger) Infof(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "INFO: "+s)
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Okln logs a line with an OK prefix.
|
||||
func (l *Logger) Okln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "OK: "+s)
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Okf logs a formatted line with an OK prefix.
|
||||
func (l *Logger) Okf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "OK: "+s)
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Warnln logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "WARNING: "+s)
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Warnf logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "WARNING: "+s)
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Fatalln logs a line with a FATAL prefix and exits the process with exit
|
||||
// code 1.
|
||||
func (l *Logger) Fatalln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "FATAL: "+s)
|
||||
l.callHandlers(LevelFatal, s)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalf logs a formatted line with a FATAL prefix and exits the process with
|
||||
// exit code 1.
|
||||
func (l *Logger) Fatalf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "FATAL: "+s)
|
||||
l.callHandlers(LevelFatal, s)
|
||||
os.Exit(1)
|
||||
}
|
||||
Godeps/_workspace/src/github.com/calmh/logger/logger_test.go (generated, vendored, Normal file): 58 changes
@@ -0,0 +1,58 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package logger
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAPI(t *testing.T) {
|
||||
l := New()
|
||||
l.SetFlags(0)
|
||||
l.SetPrefix("testing")
|
||||
|
||||
debug := 0
|
||||
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug))
|
||||
info := 0
|
||||
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info))
|
||||
warn := 0
|
||||
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
|
||||
ok := 0
|
||||
l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok))
|
||||
|
||||
l.Debugf("test %d", 0)
|
||||
l.Debugln("test", 0)
|
||||
l.Infof("test %d", 1)
|
||||
l.Infoln("test", 1)
|
||||
l.Warnf("test %d", 2)
|
||||
l.Warnln("test", 2)
|
||||
l.Okf("test %d", 3)
|
||||
l.Okln("test", 3)
|
||||
|
||||
if debug != 2 {
|
||||
t.Errorf("Debug handler called %d != 2 times", debug)
|
||||
}
|
||||
if info != 2 {
|
||||
t.Errorf("Info handler called %d != 2 times", info)
|
||||
}
|
||||
if warn != 2 {
|
||||
t.Errorf("Warn handler called %d != 2 times", warn)
|
||||
}
|
||||
if ok != 2 {
|
||||
t.Errorf("Ok handler called %d != 2 times", ok)
|
||||
}
|
||||
}
|
||||
|
||||
func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) {
|
||||
return func(l LogLevel, msg string) {
|
||||
*counter++
|
||||
if l != expectl {
|
||||
t.Errorf("Incorrect message level %d != %d", l, expectl)
|
||||
}
|
||||
if !strings.HasSuffix(msg, expectmsg) {
|
||||
t.Errorf("%q does not end with %q", msg, expectmsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml (generated, vendored): 2 changes

@@ -4,7 +4,7 @@ go:

install:
- export PATH=$PATH:$HOME/gopath/bin
- go get golang.org/x/tools/cover
- go get code.google.com/p/go.tools/cmd/cover
- go get github.com/mattn/goveralls

script:

Godeps/_workspace/src/github.com/calmh/xdr/README.md (generated, vendored): 2 changes

@@ -1,7 +1,7 @@
xdr
===

[](https://circleci.com/gh/calmh/xdr)
[](https://travis-ci.org/calmh/xdr)
[](https://coveralls.io/r/calmh/xdr?branch=master)
[](http://godoc.org/github.com/calmh/xdr)
[](http://opensource.org/licenses/MIT)

Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go (generated, vendored): 37 changes
@@ -28,7 +28,6 @@ type fieldInfo struct {
|
||||
Encoder string // the encoder name, i.e. "Uint64" for Read/WriteUint64
|
||||
Convert string // what to convert to when encoding, i.e. "uint64"
|
||||
Max int // max size for slices and strings
|
||||
Submax int // max size for strings inside slices
|
||||
}
|
||||
|
||||
type structInfo struct {
|
||||
@@ -157,11 +156,7 @@ func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Submax 1}}
|
||||
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Submax}})
|
||||
{{else}}
|
||||
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{end}}
|
||||
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}[i]).DecodeXDRFrom(xr)
|
||||
{{end}}
|
||||
@@ -171,7 +166,7 @@ func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
return xr.Error()
|
||||
}`))
|
||||
|
||||
var maxRe = regexp.MustCompile(`(?:\Wmax:)(\d+)(?:\s*,\s*(\d+))?`)
|
||||
var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)
|
||||
|
||||
type typeSet struct {
|
||||
Type string
|
||||
@@ -203,15 +198,11 @@ func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
}
|
||||
|
||||
fn := sf.Names[0].Name
|
||||
var max1, max2 int
|
||||
var max = 0
|
||||
if sf.Comment != nil {
|
||||
c := sf.Comment.List[0].Text
|
||||
m := maxRe.FindStringSubmatch(c)
|
||||
if len(m) >= 2 {
|
||||
max1, _ = strconv.Atoi(m[1])
|
||||
}
|
||||
if len(m) >= 3 {
|
||||
max2, _ = strconv.Atoi(m[2])
|
||||
if m := maxRe.FindStringSubmatch(c); m != nil {
|
||||
max, _ = strconv.Atoi(m[1])
|
||||
}
|
||||
if strings.Contains(c, "noencode") {
|
||||
continue
|
||||
@@ -229,16 +220,14 @@ func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max1,
|
||||
Submax: max2,
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
FieldType: tn,
|
||||
Max: max1,
|
||||
Submax: max2,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -256,8 +245,7 @@ func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max1,
|
||||
Submax: max2,
|
||||
Max: max,
|
||||
}
|
||||
} else if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = fieldInfo{
|
||||
@@ -267,16 +255,14 @@ func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max1,
|
||||
Submax: max2,
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Max: max1,
|
||||
Submax: max2,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -284,8 +270,7 @@ func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
FieldType: ft.Sel.Name,
|
||||
Max: max1,
|
||||
Submax: max2,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
lib/protocol/AUTHORS → Godeps/_workspace/src/github.com/syncthing/protocol/AUTHORS (generated, vendored): 0 changes
lib/protocol/LICENSE → Godeps/_workspace/src/github.com/syncthing/protocol/LICENSE (generated, vendored): 0 changes
Godeps/_workspace/src/github.com/syncthing/protocol/debug.go (generated, vendored, Normal file): 15 changes

@@ -0,0 +1,15 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import (
"os"
"strings"

"github.com/calmh/logger"
)

var (
debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all"
l = logger.DefaultLogger
)

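debug.go above wires the package's debug flag to the STTRACE environment variable and shares the default logger. The snippet below is a hypothetical, standalone illustration of the same guard pattern that the protocol.go hunks later in this diff add around the l.Debugf calls; it uses the standard library log package and a made-up facility name "example" rather than the vendored logger.

```go
package main

import (
	"log"
	"os"
	"strings"
)

// The flag is computed once from STTRACE; every debug line is wrapped in
// "if debug { ... }" so the formatting work is skipped when tracing is off.
var debug = strings.Contains(os.Getenv("STTRACE"), "example") || os.Getenv("STTRACE") == "all"

func main() {
	if debug {
		log.Printf("read header (msglen=%d)", 128)
	}
}
```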
lib/protocol/doc.go → Godeps/_workspace/src/github.com/syncthing/protocol/doc.go (generated, vendored): 0 changes
lib/protocol/errors.go → Godeps/_workspace/src/github.com/syncthing/protocol/errors.go (generated, vendored, Executable file → Normal file): 8 changes

@@ -14,10 +14,10 @@ const (
)

var (
ErrNoError error
ErrGeneric = errors.New("generic error")
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
ErrNoError error = nil
ErrGeneric = errors.New("generic error")
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
)

var lookupError = map[int32]error{

lib/protocol/fuzz.go → Godeps/_workspace/src/github.com/syncthing/protocol/fuzz.go (generated, vendored): 0 changes
lib/protocol/message.go → Godeps/_workspace/src/github.com/syncthing/protocol/message.go (generated, vendored): 20 changes
@@ -1,6 +1,6 @@
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
//go:generate -command genxdr go run ../../Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
|
||||
//go:generate -command genxdr go run ../syncthing/Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
|
||||
//go:generate genxdr -o message_xdr.go message.go
|
||||
|
||||
package protocol
|
||||
@@ -8,7 +8,7 @@ package protocol
|
||||
import "fmt"
|
||||
|
||||
type IndexMessage struct {
|
||||
Folder string // max:256
|
||||
Folder string
|
||||
Files []FileInfo // max:1000000
|
||||
Flags uint32
|
||||
Options []Option // max:64
|
||||
@@ -20,7 +20,6 @@ type FileInfo struct {
|
||||
Modified int64
|
||||
Version Vector
|
||||
LocalVersion int64
|
||||
CachedSize int64 // noencode (cache only)
|
||||
Blocks []BlockInfo // max:1000000
|
||||
}
|
||||
|
||||
@@ -33,13 +32,9 @@ func (f FileInfo) Size() (bytes int64) {
|
||||
if f.IsDeleted() || f.IsDirectory() {
|
||||
return 128
|
||||
}
|
||||
if f.CachedSize > 0 {
|
||||
return f.CachedSize
|
||||
}
|
||||
for _, b := range f.Blocks {
|
||||
bytes += int64(b.Size)
|
||||
}
|
||||
f.CachedSize = bytes
|
||||
return
|
||||
}
|
||||
|
||||
@@ -99,7 +94,7 @@ func (b BlockInfo) String() string {
|
||||
}
|
||||
|
||||
type RequestMessage struct {
|
||||
Folder string // max:256
|
||||
Folder string // max:64
|
||||
Name string // max:8192
|
||||
Offset int64
|
||||
Size int32
|
||||
@@ -114,7 +109,6 @@ type ResponseMessage struct {
|
||||
}
|
||||
|
||||
type ClusterConfigMessage struct {
|
||||
DeviceName string // max:64
|
||||
ClientName string // max:64
|
||||
ClientVersion string // max:64
|
||||
Folders []Folder // max:1000000
|
||||
@@ -131,18 +125,14 @@ func (o *ClusterConfigMessage) GetOption(key string) string {
|
||||
}
|
||||
|
||||
type Folder struct {
|
||||
ID string // max:256
|
||||
ID string // max:64
|
||||
Devices []Device // max:1000000
|
||||
Flags uint32
|
||||
Options []Option // max:64
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
ID []byte // max:32
|
||||
Name string // max:64
|
||||
Addresses []string // max:64,2083
|
||||
Compression uint32
|
||||
CertName string // max:64
|
||||
ID []byte // max:32
|
||||
MaxLocalVersion int64
|
||||
Flags uint32
|
||||
Options []Option // max:64
|
||||
@@ -41,7 +41,7 @@ IndexMessage Structure:
|
||||
|
||||
|
||||
struct IndexMessage {
|
||||
string Folder<256>;
|
||||
string Folder<>;
|
||||
FileInfo Files<1000000>;
|
||||
unsigned int Flags;
|
||||
Option Options<64>;
|
||||
@@ -74,9 +74,6 @@ func (o IndexMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (o IndexMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Folder); l > 256 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 256)
|
||||
}
|
||||
xw.WriteString(o.Folder)
|
||||
if l := len(o.Files); l > 1000000 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Files", l, 1000000)
|
||||
@@ -114,7 +111,7 @@ func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
|
||||
}
|
||||
|
||||
func (o *IndexMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Folder = xr.ReadStringMax(256)
|
||||
o.Folder = xr.ReadString()
|
||||
_FilesSize := int(xr.ReadUint32())
|
||||
if _FilesSize < 0 {
|
||||
return xdr.ElementSizeExceeded("Files", _FilesSize, 1000000)
|
||||
@@ -383,7 +380,7 @@ RequestMessage Structure:
|
||||
|
||||
|
||||
struct RequestMessage {
|
||||
string Folder<256>;
|
||||
string Folder<64>;
|
||||
string Name<8192>;
|
||||
hyper Offset;
|
||||
int Size;
|
||||
@@ -419,8 +416,8 @@ func (o RequestMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (o RequestMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Folder); l > 256 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 256)
|
||||
if l := len(o.Folder); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 64)
|
||||
}
|
||||
xw.WriteString(o.Folder)
|
||||
if l := len(o.Name); l > 8192 {
|
||||
@@ -459,7 +456,7 @@ func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
|
||||
}
|
||||
|
||||
func (o *RequestMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Folder = xr.ReadStringMax(256)
|
||||
o.Folder = xr.ReadStringMax(64)
|
||||
o.Name = xr.ReadStringMax(8192)
|
||||
o.Offset = int64(xr.ReadUint64())
|
||||
o.Size = int32(xr.ReadUint32())
|
||||
@@ -557,12 +554,6 @@ ClusterConfigMessage Structure:
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Device Name |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Device Name (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Client Name |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
@@ -590,7 +581,6 @@ ClusterConfigMessage Structure:
|
||||
|
||||
|
||||
struct ClusterConfigMessage {
|
||||
string DeviceName<64>;
|
||||
string ClientName<64>;
|
||||
string ClientVersion<64>;
|
||||
Folder Folders<1000000>;
|
||||
@@ -624,10 +614,6 @@ func (o ClusterConfigMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (o ClusterConfigMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.DeviceName); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("DeviceName", l, 64)
|
||||
}
|
||||
xw.WriteString(o.DeviceName)
|
||||
if l := len(o.ClientName); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ClientName", l, 64)
|
||||
}
|
||||
@@ -671,7 +657,6 @@ func (o *ClusterConfigMessage) UnmarshalXDR(bs []byte) error {
|
||||
}
|
||||
|
||||
func (o *ClusterConfigMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.DeviceName = xr.ReadStringMax(64)
|
||||
o.ClientName = xr.ReadStringMax(64)
|
||||
o.ClientVersion = xr.ReadStringMax(64)
|
||||
_FoldersSize := int(xr.ReadUint32())
|
||||
@@ -729,7 +714,7 @@ Folder Structure:
|
||||
|
||||
|
||||
struct Folder {
|
||||
string ID<256>;
|
||||
string ID<64>;
|
||||
Device Devices<1000000>;
|
||||
unsigned int Flags;
|
||||
Option Options<64>;
|
||||
@@ -762,8 +747,8 @@ func (o Folder) AppendXDR(bs []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (o Folder) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.ID); l > 256 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 256)
|
||||
if l := len(o.ID); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 64)
|
||||
}
|
||||
xw.WriteString(o.ID)
|
||||
if l := len(o.Devices); l > 1000000 {
|
||||
@@ -802,7 +787,7 @@ func (o *Folder) UnmarshalXDR(bs []byte) error {
|
||||
}
|
||||
|
||||
func (o *Folder) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.ID = xr.ReadStringMax(256)
|
||||
o.ID = xr.ReadStringMax(64)
|
||||
_DevicesSize := int(xr.ReadUint32())
|
||||
if _DevicesSize < 0 {
|
||||
return xdr.ElementSizeExceeded("Devices", _DevicesSize, 1000000)
|
||||
@@ -842,28 +827,6 @@ Device Structure:
|
||||
\ ID (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Name |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Name (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of Addresses |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Addresses |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Addresses (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Compression |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Cert Name |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Cert Name (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ Max Local Version (64 bits) +
|
||||
| |
|
||||
@@ -880,10 +843,6 @@ Device Structure:
|
||||
|
||||
struct Device {
|
||||
opaque ID<32>;
|
||||
string Name<64>;
|
||||
string Addresses<64>;
|
||||
unsigned int Compression;
|
||||
string CertName<64>;
|
||||
hyper MaxLocalVersion;
|
||||
unsigned int Flags;
|
||||
Option Options<64>;
|
||||
@@ -920,22 +879,6 @@ func (o Device) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 32)
|
||||
}
|
||||
xw.WriteBytes(o.ID)
|
||||
if l := len(o.Name); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 64)
|
||||
}
|
||||
xw.WriteString(o.Name)
|
||||
if l := len(o.Addresses); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Addresses", l, 64)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Addresses)))
|
||||
for i := range o.Addresses {
|
||||
xw.WriteString(o.Addresses[i])
|
||||
}
|
||||
xw.WriteUint32(o.Compression)
|
||||
if l := len(o.CertName); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("CertName", l, 64)
|
||||
}
|
||||
xw.WriteString(o.CertName)
|
||||
xw.WriteUint64(uint64(o.MaxLocalVersion))
|
||||
xw.WriteUint32(o.Flags)
|
||||
if l := len(o.Options); l > 64 {
|
||||
@@ -964,20 +907,6 @@ func (o *Device) UnmarshalXDR(bs []byte) error {
|
||||
|
||||
func (o *Device) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.ID = xr.ReadBytesMax(32)
|
||||
o.Name = xr.ReadStringMax(64)
|
||||
_AddressesSize := int(xr.ReadUint32())
|
||||
if _AddressesSize < 0 {
|
||||
return xdr.ElementSizeExceeded("Addresses", _AddressesSize, 64)
|
||||
}
|
||||
if _AddressesSize > 64 {
|
||||
return xdr.ElementSizeExceeded("Addresses", _AddressesSize, 64)
|
||||
}
|
||||
o.Addresses = make([]string, _AddressesSize)
|
||||
for i := range o.Addresses {
|
||||
o.Addresses[i] = xr.ReadStringMax(2083)
|
||||
}
|
||||
o.Compression = xr.ReadUint32()
|
||||
o.CertName = xr.ReadStringMax(64)
|
||||
o.MaxLocalVersion = int64(xr.ReadUint64())
|
||||
o.Flags = xr.ReadUint32()
|
||||
_OptionsSize := int(xr.ReadUint32())
|
||||
lib/protocol/protocol.go → Godeps/_workspace/src/github.com/syncthing/protocol/protocol.go (generated, vendored): 159 changes
@@ -15,10 +15,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockSize is the standard ata block size (128 KiB)
|
||||
// Data block size (128 KiB)
|
||||
BlockSize = 128 << 10
|
||||
|
||||
// MaxMessageLen is the largest message size allowed on the wire. (64 MiB)
|
||||
// We reject messages larger than this when encountered on the wire. (64 MiB)
|
||||
MaxMessageLen = 64 << 20
|
||||
)
|
||||
|
||||
@@ -28,6 +28,7 @@ const (
|
||||
messageTypeRequest = 2
|
||||
messageTypeResponse = 3
|
||||
messageTypePing = 4
|
||||
messageTypePong = 5
|
||||
messageTypeIndexUpdate = 6
|
||||
messageTypeClose = 7
|
||||
)
|
||||
@@ -61,13 +62,6 @@ const (
|
||||
FlagRequestTemporary uint32 = 1 << iota
|
||||
)
|
||||
|
||||
// ClusterConfigMessage.Folders flags
|
||||
const (
|
||||
FlagFolderReadOnly uint32 = 1 << 0
|
||||
FlagFolderIgnorePerms = 1 << 1
|
||||
FlagFolderIgnoreDelete = 1 << 2
|
||||
)
|
||||
|
||||
// ClusterConfigMessage.Folders.Devices flags
|
||||
const (
|
||||
FlagShareTrusted uint32 = 1 << 0
|
||||
@@ -77,12 +71,13 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
ErrClosed = errors.New("connection closed")
|
||||
ErrTimeout = errors.New("read timeout")
|
||||
ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
|
||||
ErrClosed = errors.New("connection closed")
|
||||
)
|
||||
|
||||
// Specific variants of empty messages...
|
||||
type pingMessage struct{ EmptyMessage }
|
||||
type pongMessage struct{ EmptyMessage }
|
||||
|
||||
type Model interface {
|
||||
// An index was received from the peer device
|
||||
@@ -151,13 +146,9 @@ type isEofer interface {
|
||||
IsEOF() bool
|
||||
}
|
||||
|
||||
const (
|
||||
// PingSendInterval is how often we make sure to send a message, by
|
||||
// triggering pings if necessary.
|
||||
PingSendInterval = 90 * time.Second
|
||||
// ReceiveTimeout is the longest we'll wait for a message from the other
|
||||
// side before closing the connection.
|
||||
ReceiveTimeout = 300 * time.Second
|
||||
var (
|
||||
PingTimeout = 30 * time.Second
|
||||
PingIdleTime = 60 * time.Second
|
||||
)
|
||||
|
||||
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
|
||||
@@ -189,8 +180,7 @@ func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiv
|
||||
func (c *rawConnection) Start() {
|
||||
go c.readerLoop()
|
||||
go c.writerLoop()
|
||||
go c.pingSender()
|
||||
go c.pingReceiver()
|
||||
go c.pingerLoop()
|
||||
go c.idGenerator()
|
||||
}
|
||||
|
||||
@@ -288,7 +278,18 @@ func (c *rawConnection) ping() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return c.send(id, messageTypePing, nil, nil)
|
||||
rc := make(chan asyncResult, 1)
|
||||
c.awaitingMut.Lock()
|
||||
c.awaiting[id] = rc
|
||||
c.awaitingMut.Unlock()
|
||||
|
||||
ok := c.send(id, messageTypePing, nil, nil)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
res, ok := <-rc
|
||||
return ok && res.err == nil
|
||||
}
|
||||
|
||||
func (c *rawConnection) readerLoop() (err error) {
|
||||
@@ -351,7 +352,13 @@ func (c *rawConnection) readerLoop() (err error) {
|
||||
if state != stateReady {
|
||||
return fmt.Errorf("protocol error: ping message in state %d", state)
|
||||
}
|
||||
// Nothing
|
||||
c.send(hdr.msgID, messageTypePong, pongMessage{}, nil)
|
||||
|
||||
case pongMessage:
|
||||
if state != stateReady {
|
||||
return fmt.Errorf("protocol error: pong message in state %d", state)
|
||||
}
|
||||
c.handlePong(hdr.msgID)
|
||||
|
||||
case CloseMessage:
|
||||
return errors.New(msg.Reason)
|
||||
@@ -376,7 +383,9 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
||||
hdr = decodeHeader(binary.BigEndian.Uint32(c.rdbuf0[0:4]))
|
||||
msglen := int(binary.BigEndian.Uint32(c.rdbuf0[4:8]))
|
||||
|
||||
l.Debugf("read header %v (msglen=%d)", hdr, msglen)
|
||||
if debug {
|
||||
l.Debugf("read header %v (msglen=%d)", hdr, msglen)
|
||||
}
|
||||
|
||||
if msglen > MaxMessageLen {
|
||||
err = fmt.Errorf("message length %d exceeds maximum %d", msglen, MaxMessageLen)
|
||||
@@ -398,7 +407,9 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
l.Debugf("read %d bytes", len(c.rdbuf0))
|
||||
if debug {
|
||||
l.Debugf("read %d bytes", len(c.rdbuf0))
|
||||
}
|
||||
|
||||
msgBuf := c.rdbuf0
|
||||
if hdr.compression && msglen > 0 {
|
||||
@@ -408,10 +419,12 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
||||
return
|
||||
}
|
||||
msgBuf = c.rdbuf1
|
||||
l.Debugf("decompressed to %d bytes", len(msgBuf))
|
||||
if debug {
|
||||
l.Debugf("decompressed to %d bytes", len(msgBuf))
|
||||
}
|
||||
}
|
||||
|
||||
if shouldDebug() {
|
||||
if debug {
|
||||
if len(msgBuf) > 1024 {
|
||||
l.Debugf("message data:\n%s", hex.Dump(msgBuf[:1024]))
|
||||
} else {
|
||||
@@ -454,6 +467,9 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
||||
case messageTypePing:
|
||||
msg = pingMessage{}
|
||||
|
||||
case messageTypePong:
|
||||
msg = pongMessage{}
|
||||
|
||||
case messageTypeClusterConfig:
|
||||
var cc ClusterConfigMessage
|
||||
err = cc.UnmarshalXDR(msgBuf)
|
||||
@@ -478,12 +494,16 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
||||
}
|
||||
|
||||
func (c *rawConnection) handleIndex(im IndexMessage) {
|
||||
l.Debugf("Index(%v, %v, %d file, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
|
||||
if debug {
|
||||
l.Debugf("Index(%v, %v, %d file, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
|
||||
}
|
||||
c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
|
||||
}
|
||||
|
||||
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
|
||||
l.Debugf("queueing IndexUpdate(%v, %v, %d files, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
|
||||
if debug {
|
||||
l.Debugf("queueing IndexUpdate(%v, %v, %d files, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
|
||||
}
|
||||
c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
|
||||
}
|
||||
|
||||
@@ -633,7 +653,9 @@ func (c *rawConnection) writerLoop() {
|
||||
binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(tempBuf)))
|
||||
msgBuf = msgBuf[0 : len(tempBuf)+8]
|
||||
|
||||
l.Debugf("write compressed message; %v (len=%d)", hm.hdr, len(tempBuf))
|
||||
if debug {
|
||||
l.Debugf("write compressed message; %v (len=%d)", hm.hdr, len(tempBuf))
|
||||
}
|
||||
} else {
|
||||
// No point in compressing very short messages
|
||||
hm.hdr.compression = false
|
||||
@@ -647,10 +669,14 @@ func (c *rawConnection) writerLoop() {
|
||||
msgBuf = msgBuf[0 : len(uncBuf)+8]
|
||||
copy(msgBuf[8:], uncBuf)
|
||||
|
||||
l.Debugf("write uncompressed message; %v (len=%d)", hm.hdr, len(uncBuf))
|
||||
if debug {
|
||||
l.Debugf("write uncompressed message; %v (len=%d)", hm.hdr, len(uncBuf))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
l.Debugf("write empty message; %v", hm.hdr)
|
||||
if debug {
|
||||
l.Debugf("write empty message; %v", hm.hdr)
|
||||
}
|
||||
binary.BigEndian.PutUint32(msgBuf[4:8], 0)
|
||||
msgBuf = msgBuf[:8]
|
||||
}
|
||||
@@ -660,7 +686,9 @@ func (c *rawConnection) writerLoop() {
|
||||
if err == nil {
|
||||
var n int
|
||||
n, err = c.cw.Write(msgBuf)
|
||||
l.Debugf("wrote %d bytes on the wire", n)
|
||||
if debug {
|
||||
l.Debugf("wrote %d bytes on the wire", n)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
c.close(err)
|
||||
@@ -701,48 +729,43 @@ func (c *rawConnection) idGenerator() {
|
||||
}
|
||||
}
|
||||
|
||||
// The pingSender makes sure that we've sent a message within the last
|
||||
// PingSendInterval. If we already have something sent in the last
|
||||
// PingSendInterval/2, we do nothing. Otherwise we send a ping message. This
|
||||
// results in an effecting ping interval of somewhere between
|
||||
// PingSendInterval/2 and PingSendInterval.
|
||||
func (c *rawConnection) pingSender() {
|
||||
ticker := time.Tick(PingSendInterval / 2)
|
||||
|
||||
func (c *rawConnection) pingerLoop() {
|
||||
var rc = make(chan bool, 1)
|
||||
ticker := time.Tick(PingIdleTime / 2)
|
||||
for {
|
||||
select {
|
||||
case <-ticker:
|
||||
d := time.Since(c.cw.Last())
|
||||
if d < PingSendInterval/2 {
|
||||
l.Debugln(c.id, "ping skipped after wr", d)
|
||||
if d := time.Since(c.cr.Last()); d < PingIdleTime {
|
||||
if debug {
|
||||
l.Debugln(c.id, "ping skipped after rd", d)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
l.Debugln(c.id, "ping -> after", d)
|
||||
c.ping()
|
||||
|
||||
case <-c.closed:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The pingReciever checks that we've received a message (any message will do,
|
||||
// but we expect pings in the absence of other messages) within the last
|
||||
// ReceiveTimeout. If not, we close the connection with an ErrTimeout.
|
||||
func (c *rawConnection) pingReceiver() {
|
||||
ticker := time.Tick(ReceiveTimeout / 2)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker:
|
||||
d := time.Since(c.cr.Last())
|
||||
if d > ReceiveTimeout {
|
||||
l.Debugln(c.id, "ping timeout", d)
|
||||
c.close(ErrTimeout)
|
||||
if d := time.Since(c.cw.Last()); d < PingIdleTime {
|
||||
if debug {
|
||||
l.Debugln(c.id, "ping skipped after wr", d)
|
||||
}
|
||||
continue
|
||||
}
|
||||
go func() {
|
||||
if debug {
|
||||
l.Debugln(c.id, "ping ->")
|
||||
}
|
||||
rc <- c.ping()
|
||||
}()
|
||||
select {
|
||||
case ok := <-rc:
|
||||
if debug {
|
||||
l.Debugln(c.id, "<- pong")
|
||||
}
|
||||
if !ok {
|
||||
c.close(fmt.Errorf("ping failure"))
|
||||
}
|
||||
case <-time.After(PingTimeout):
|
||||
c.close(fmt.Errorf("ping timeout"))
|
||||
case <-c.closed:
|
||||
return
|
||||
}
|
||||
|
||||
l.Debugln(c.id, "last read within", d)
|
||||
|
||||
case <-c.closed:
|
||||
return
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -20,21 +20,11 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
c0ID = NewDeviceID([]byte{1})
|
||||
c1ID = NewDeviceID([]byte{2})
|
||||
quickCfg = &quick.Config{}
|
||||
c0ID = NewDeviceID([]byte{1})
|
||||
c1ID = NewDeviceID([]byte{2})
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
flag.Parse()
|
||||
if flag.Lookup("test.short").Value.String() != "false" {
|
||||
quickCfg.MaxCount = 10
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestHeaderFunctions(t *testing.T) {
|
||||
t.Parallel()
|
||||
f := func(ver, id, typ int) bool {
|
||||
ver = int(uint(ver) % 16)
|
||||
id = int(uint(id) % 4096)
|
||||
@@ -49,7 +39,6 @@ func TestHeaderFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeaderLayout(t *testing.T) {
|
||||
t.Parallel()
|
||||
var e, a uint32
|
||||
|
||||
// Version are the first four bits
|
||||
@@ -75,7 +64,6 @@ func TestHeaderLayout(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
t.Parallel()
|
||||
ar, aw := io.Pipe()
|
||||
br, bw := io.Pipe()
|
||||
|
||||
@@ -94,8 +82,95 @@ func TestPing(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingErr(t *testing.T) {
|
||||
e := errors.New("something broke")
|
||||
|
||||
for i := 0; i < 32; i++ {
|
||||
for j := 0; j < 32; j++ {
|
||||
m0 := newTestModel()
|
||||
m1 := newTestModel()
|
||||
|
||||
ar, aw := io.Pipe()
|
||||
br, bw := io.Pipe()
|
||||
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
|
||||
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
|
||||
|
||||
c0 := NewConnection(c0ID, ar, ebw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
||||
c0.Start()
|
||||
c1 := NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
|
||||
c1.Start()
|
||||
c0.ClusterConfig(ClusterConfigMessage{})
|
||||
c1.ClusterConfig(ClusterConfigMessage{})
|
||||
|
||||
res := c0.ping()
|
||||
if (i < 8 || j < 8) && res {
|
||||
// This should have resulted in failure, as there is no way an empty ClusterConfig plus a Ping message fits in eight bytes.
|
||||
t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
|
||||
} else if (i >= 28 && j >= 28) && !res {
|
||||
// This should have worked though, as 28 bytes is plenty for both.
|
||||
t.Errorf("Unexpected ping fail; i=%d, j=%d", i, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// func TestRequestResponseErr(t *testing.T) {
|
||||
// e := errors.New("something broke")
|
||||
|
||||
// var pass bool
|
||||
// for i := 0; i < 48; i++ {
|
||||
// for j := 0; j < 38; j++ {
|
||||
// m0 := newTestModel()
|
||||
// m0.data = []byte("response data")
|
||||
// m1 := newTestModel()
|
||||
|
||||
// ar, aw := io.Pipe()
|
||||
// br, bw := io.Pipe()
|
||||
// eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
|
||||
// ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
|
||||
|
||||
// NewConnection(c0ID, ar, ebw, m0, nil)
|
||||
// c1 := NewConnection(c1ID, br, eaw, m1, nil).(wireFormatConnection).next.(*rawConnection)
|
||||
|
||||
// d, err := c1.Request("default", "tn", 1234, 5678)
|
||||
// if err == e || err == ErrClosed {
|
||||
// t.Logf("Error at %d+%d bytes", i, j)
|
||||
// if !m1.isClosed() {
|
||||
// t.Fatal("c1 not closed")
|
||||
// }
|
||||
// if !m0.isClosed() {
|
||||
// t.Fatal("c0 not closed")
|
||||
// }
|
||||
// continue
|
||||
// }
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// if string(d) != "response data" {
|
||||
// t.Fatalf("Incorrect response data %q", string(d))
|
||||
// }
|
||||
// if m0.folder != "default" {
|
||||
// t.Fatalf("Incorrect folder %q", m0.folder)
|
||||
// }
|
||||
// if m0.name != "tn" {
|
||||
// t.Fatalf("Incorrect name %q", m0.name)
|
||||
// }
|
||||
// if m0.offset != 1234 {
|
||||
// t.Fatalf("Incorrect offset %d", m0.offset)
|
||||
// }
|
||||
// if m0.size != 5678 {
|
||||
// t.Fatalf("Incorrect size %d", m0.size)
|
||||
// }
|
||||
// t.Logf("Pass at %d+%d bytes", i, j)
|
||||
// pass = true
|
||||
// }
|
||||
// }
|
||||
// if !pass {
|
||||
// t.Fatal("Never passed")
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestVersionErr(t *testing.T) {
|
||||
t.Parallel()
|
||||
m0 := newTestModel()
|
||||
m1 := newTestModel()
|
||||
|
||||
@@ -123,7 +198,6 @@ func TestVersionErr(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTypeErr(t *testing.T) {
|
||||
t.Parallel()
|
||||
m0 := newTestModel()
|
||||
m1 := newTestModel()
|
||||
|
||||
@@ -151,7 +225,6 @@ func TestTypeErr(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestClose(t *testing.T) {
|
||||
t.Parallel()
|
||||
m0 := newTestModel()
|
||||
m1 := newTestModel()
|
||||
|
||||
@@ -187,9 +260,10 @@ func TestClose(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestElementSizeExceededNested(t *testing.T) {
|
||||
t.Parallel()
|
||||
m := ClusterConfigMessage{
|
||||
ClientName: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon",
|
||||
Folders: []Folder{
|
||||
{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
|
||||
},
|
||||
}
|
||||
_, err := m.EncodeXDR(ioutil.Discard)
|
||||
if err == nil {
|
||||
@@ -198,14 +272,17 @@ func TestElementSizeExceededNested(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMarshalIndexMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
var quickCfg = &quick.Config{MaxCountScale: 10}
|
||||
if testing.Short() {
|
||||
quickCfg = nil
|
||||
}
|
||||
|
||||
f := func(m1 IndexMessage) bool {
|
||||
for i, f := range m1.Files {
|
||||
m1.Files[i].CachedSize = 0
|
||||
for j := range f.Blocks {
|
||||
f.Blocks[j].Offset = 0
|
||||
if len(f.Blocks[j].Hash) == 0 {
|
||||
f.Blocks[j].Hash = nil
|
||||
for _, f := range m1.Files {
|
||||
for i := range f.Blocks {
|
||||
f.Blocks[i].Offset = 0
|
||||
if len(f.Blocks[i].Hash) == 0 {
|
||||
f.Blocks[i].Hash = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -219,7 +296,11 @@ func TestMarshalIndexMessage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMarshalRequestMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
var quickCfg = &quick.Config{MaxCountScale: 10}
|
||||
if testing.Short() {
|
||||
quickCfg = nil
|
||||
}
|
||||
|
||||
f := func(m1 RequestMessage) bool {
|
||||
return testMarshal(t, "request", &m1, &RequestMessage{})
|
||||
}
|
||||
@@ -230,7 +311,11 @@ func TestMarshalRequestMessage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMarshalResponseMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
var quickCfg = &quick.Config{MaxCountScale: 10}
|
||||
if testing.Short() {
|
||||
quickCfg = nil
|
||||
}
|
||||
|
||||
f := func(m1 ResponseMessage) bool {
|
||||
if len(m1.Data) == 0 {
|
||||
m1.Data = nil
|
||||
@@ -244,7 +329,11 @@ func TestMarshalResponseMessage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMarshalClusterConfigMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
var quickCfg = &quick.Config{MaxCountScale: 10}
|
||||
if testing.Short() {
|
||||
quickCfg = nil
|
||||
}
|
||||
|
||||
f := func(m1 ClusterConfigMessage) bool {
|
||||
return testMarshal(t, "clusterconfig", &m1, &ClusterConfigMessage{})
|
||||
}
|
||||
@@ -255,7 +344,11 @@ func TestMarshalClusterConfigMessage(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMarshalCloseMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
var quickCfg = &quick.Config{MaxCountScale: 10}
|
||||
if testing.Short() {
|
||||
quickCfg = nil
|
||||
}
|
||||
|
||||
f := func(m1 CloseMessage) bool {
|
||||
return testMarshal(t, "close", &m1, &CloseMessage{})
|
||||
}
|
||||
lib/protocol/vector.go → Godeps/_workspace/src/github.com/syncthing/protocol/vector.go (generated, vendored): 62 changes
@@ -4,7 +4,7 @@ package protocol
|
||||
|
||||
// The Vector type represents a version vector. The zero value is a usable
|
||||
// version vector. The vector has slice semantics and some operations on it
|
||||
// are "append-like" in that they may return the same vector modified, or v
|
||||
// are "append-like" in that they may return the same vector modified, or a
|
||||
// new allocated Vector with the modified contents.
|
||||
type Vector []Counter
|
||||
|
||||
@@ -33,43 +33,43 @@ func (v Vector) Update(ID uint64) Vector {
|
||||
return nv
|
||||
}
|
||||
}
|
||||
// Append a new index
|
||||
// Append a new new index
|
||||
return append(v, Counter{ID, 1})
|
||||
}
|
||||
|
||||
// Merge returns the vector containing the maximum indexes from v and b. If it
|
||||
// is possible, the vector v is updated and returned. If it is not, a copy
|
||||
// Merge returns the vector containing the maximum indexes from a and b. If it
|
||||
// is possible, the vector a is updated and returned. If it is not, a copy
|
||||
// will be created, updated and returned.
|
||||
func (v Vector) Merge(b Vector) Vector {
|
||||
var vi, bi int
|
||||
func (a Vector) Merge(b Vector) Vector {
|
||||
var ai, bi int
|
||||
for bi < len(b) {
|
||||
if vi == len(v) {
|
||||
// We've reach the end of v, all that remains are appends
|
||||
return append(v, b[bi:]...)
|
||||
if ai == len(a) {
|
||||
// We've reach the end of a, all that remains are appends
|
||||
return append(a, b[bi:]...)
|
||||
}
|
||||
|
||||
if v[vi].ID > b[bi].ID {
|
||||
if a[ai].ID > b[bi].ID {
|
||||
// The index from b should be inserted here
|
||||
n := make(Vector, len(v)+1)
|
||||
copy(n, v[:vi])
|
||||
n[vi] = b[bi]
|
||||
copy(n[vi+1:], v[vi:])
|
||||
v = n
|
||||
n := make(Vector, len(a)+1)
|
||||
copy(n, a[:ai])
|
||||
n[ai] = b[bi]
|
||||
copy(n[ai+1:], a[ai:])
|
||||
a = n
|
||||
}
|
||||
|
||||
if v[vi].ID == b[bi].ID {
|
||||
if val := b[bi].Value; val > v[vi].Value {
|
||||
v[vi].Value = val
|
||||
if a[ai].ID == b[bi].ID {
|
||||
if v := b[bi].Value; v > a[ai].Value {
|
||||
a[ai].Value = v
|
||||
}
|
||||
}
|
||||
|
||||
if bi < len(b) && v[vi].ID == b[bi].ID {
|
||||
if bi < len(b) && a[ai].ID == b[bi].ID {
|
||||
bi++
|
||||
}
|
||||
vi++
|
||||
ai++
|
||||
}
|
||||
|
||||
return v
|
||||
return a
|
||||
}
|
||||
|
||||
// Copy returns an identical vector that is not shared with v.
|
||||
@@ -80,27 +80,27 @@ func (v Vector) Copy() Vector {
|
||||
}
|
||||
|
||||
// Equal returns true when the two vectors are equivalent.
|
||||
func (v Vector) Equal(b Vector) bool {
|
||||
return v.Compare(b) == Equal
|
||||
func (a Vector) Equal(b Vector) bool {
|
||||
return a.Compare(b) == Equal
|
||||
}
|
||||
|
||||
// LesserEqual returns true when the two vectors are equivalent or v is Lesser
|
||||
// LesserEqual returns true when the two vectors are equivalent or a is Lesser
|
||||
// than b.
|
||||
func (v Vector) LesserEqual(b Vector) bool {
|
||||
comp := v.Compare(b)
|
||||
func (a Vector) LesserEqual(b Vector) bool {
|
||||
comp := a.Compare(b)
|
||||
return comp == Lesser || comp == Equal
|
||||
}
|
||||
|
||||
// GreaterEqual returns true when the two vectors are equivalent or v is Greater
|
||||
// LesserEqual returns true when the two vectors are equivalent or a is Greater
|
||||
// than b.
|
||||
func (v Vector) GreaterEqual(b Vector) bool {
|
||||
comp := v.Compare(b)
|
||||
func (a Vector) GreaterEqual(b Vector) bool {
|
||||
comp := a.Compare(b)
|
||||
return comp == Greater || comp == Equal
|
||||
}
|
||||
|
||||
// Concurrent returns true when the two vectors are concrurrent.
|
||||
func (v Vector) Concurrent(b Vector) bool {
|
||||
comp := v.Compare(b)
|
||||
func (a Vector) Concurrent(b Vector) bool {
|
||||
comp := a.Compare(b)
|
||||
return comp == ConcurrentGreater || comp == ConcurrentLesser
|
||||
}
|
||||
|
||||
@@ -123,12 +123,12 @@ func TestMerge(t *testing.T) {
|
||||
func TestCounterValue(t *testing.T) {
|
||||
v0 := Vector{Counter{42, 1}, Counter{64, 5}}
|
||||
if v0.Counter(42) != 1 {
|
||||
t.Errorf("Counter error, %d != %d", v0.Counter(42), 1)
|
||||
t.Error("Counter error, %d != %d", v0.Counter(42), 1)
|
||||
}
|
||||
if v0.Counter(64) != 5 {
|
||||
t.Errorf("Counter error, %d != %d", v0.Counter(64), 5)
|
||||
t.Error("Counter error, %d != %d", v0.Counter(64), 5)
|
||||
}
|
||||
if v0.Counter(72) != 0 {
|
||||
t.Errorf("Counter error, %d != %d", v0.Counter(72), 0)
|
||||
t.Error("Counter error, %d != %d", v0.Counter(72), 0)
|
||||
}
|
||||
}
|
||||
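The vector.go hunks above mostly rename the method receiver, but the doc comment they touch is the interesting part: Vector is a version vector with append-like slice semantics, so Update and Merge may return either the same underlying slice or a newly allocated one, and the result must always be reassigned. The following is a minimal, hypothetical usage sketch of that API, assuming the vendored github.com/syncthing/protocol import path shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/syncthing/protocol"
)

func main() {
	// The zero value is a usable version vector.
	var a, b protocol.Vector

	// Update has append-like semantics: always keep the returned value.
	a = a.Update(42) // device 42 made a change
	b = b.Update(64) // device 64 made a change, concurrently

	fmt.Println(a.Concurrent(b)) // true: neither vector dominates the other

	// Merge takes the per-device maximum; again, keep the returned vector.
	a = a.Merge(b)
	fmt.Println(a.GreaterEqual(b))  // true after the merge
	fmt.Println(a.Counter(42) == 1) // device 42's counter is 1
}
```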
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go (generated, vendored): 1 change

@@ -201,7 +201,6 @@ func (p *BufferPool) String() string {

func (p *BufferPool) drain() {
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:

Godeps/_workspace/src/github.com/thejerf/suture/README.md (generated, vendored): 10 changes

@@ -6,8 +6,10 @@ Suture
Suture provides Erlang-ish supervisor trees for Go. "Supervisor trees" ->
"sutree" -> "suture" -> holds your code together when it's trying to die.

This library has hit maturity, and isn't expected to be changed
radically. This can also be imported via gopkg.in/thejerf/suture.v1 .
This is intended to be a production-quality library going into code that I
will be very early on the phone tree to support when it goes down. However,
it has not been deployed into something quite that serious yet. (I will
update this statement when that changes.)

It is intended to deal gracefully with the real failure cases that can
occur with supervision trees (such as burning all your CPU time endlessly
@@ -22,6 +24,10 @@ This module is fully covered with [godoc](http://godoc.org/github.com/thejerf/su
including an example, usage, and everything else you might expect from a
README.md on GitHub. (DRY.)

This is not currently tagged with particular git tags for Go as this is
currently considered to be alpha code. As I move this into production and
feel more confident about it, I'll give it relevant tags.

Code Signing
------------

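The README above explains that suture provides Erlang-style supervisor trees and can be imported via gopkg.in/thejerf/suture.v1. As a rough, hypothetical sketch only (based on the v1-era API: a Service implements Serve and Stop, and a Supervisor restarts it when Serve returns), usage looks roughly like this:

```go
package main

import (
	"time"

	"github.com/thejerf/suture"
)

// worker is a hypothetical Service: Serve runs until told to stop, and the
// supervisor restarts it if it returns unexpectedly (for example after a crash).
type worker struct {
	stop chan struct{}
}

func (w *worker) Serve() {
	for {
		select {
		case <-w.stop:
			return
		case <-time.After(time.Second):
			// do periodic work here
		}
	}
}

func (w *worker) Stop() {
	close(w.stop)
}

func main() {
	sup := suture.NewSimple("main")
	sup.Add(&worker{stop: make(chan struct{})})
	sup.Serve() // blocks, supervising and restarting the worker as needed
}
```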
Godeps/_workspace/src/golang.org/x/net/internal/iana/const.go (generated, vendored): 2 changes

@@ -2,7 +2,7 @@
// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
package iana // import "golang.org/x/net/internal/iana"
package iana

// Differentiated Services Field Codepoints (DSCP), Updated: 2013-06-25
const (

Godeps/_workspace/src/golang.org/x/net/ipv6/doc.go (generated, vendored): 6 changes

@@ -42,7 +42,7 @@
// The outgoing packets will be labeled DiffServ assured forwarding
// class 1 low drop precedence, known as AF11 packets.
//
// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil {
// if err := ipv6.NewConn(c).SetTrafficClass(DiffServAF11); err != nil {
// // error handling
// }
// if _, err := c.Write(data); err != nil {
@@ -124,7 +124,7 @@
//
// The application can also send both unicast and multicast packets.
//
// p.SetTrafficClass(0x0)
// p.SetTrafficClass(DiffServCS0)
// p.SetHopLimit(16)
// if _, err := p.WriteTo(data[:n], nil, src); err != nil {
// // error handling
@@ -237,4 +237,4 @@
// MLDv1 and starts to listen to multicast traffic.
// In the fallback case, ExcludeSourceSpecificGroup and
// IncludeSourceSpecificGroup may return an error.
package ipv6 // import "golang.org/x/net/ipv6"
package ipv6

18
Godeps/_workspace/src/golang.org/x/net/proxy/direct.go
generated
vendored
@@ -1,18 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proxy

import (
"net"
)

type direct struct{}

// Direct is a direct proxy: one that makes network connections directly.
var Direct = direct{}

func (direct) Dial(network, addr string) (net.Conn, error) {
return net.Dial(network, addr)
}
140
Godeps/_workspace/src/golang.org/x/net/proxy/per_host.go
generated
vendored
@@ -1,140 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the hostname
|
||||
// requested matches one of a number of exceptions.
|
||||
type PerHost struct {
|
||||
def, bypass Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
|
||||
return &PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *PerHost) dialerForRequest(host string) Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone "example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a hostname
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a hostname that will use the bypass proxy.
|
||||
func (p *PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
55
Godeps/_workspace/src/golang.org/x/net/proxy/per_host_test.go
generated
vendored
@@ -1,55 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type recordingProxy struct {
|
||||
addrs []string
|
||||
}
|
||||
|
||||
func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) {
|
||||
r.addrs = append(r.addrs, addr)
|
||||
return nil, errors.New("recordingProxy")
|
||||
}
|
||||
|
||||
func TestPerHost(t *testing.T) {
|
||||
var def, bypass recordingProxy
|
||||
perHost := NewPerHost(&def, &bypass)
|
||||
perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16")
|
||||
|
||||
expectedDef := []string{
|
||||
"example.com:123",
|
||||
"1.2.3.4:123",
|
||||
"[1001::]:123",
|
||||
}
|
||||
expectedBypass := []string{
|
||||
"localhost:123",
|
||||
"zone:123",
|
||||
"foo.zone:123",
|
||||
"127.0.0.1:123",
|
||||
"10.1.2.3:123",
|
||||
"[1000::]:123",
|
||||
}
|
||||
|
||||
for _, addr := range expectedDef {
|
||||
perHost.Dial("tcp", addr)
|
||||
}
|
||||
for _, addr := range expectedBypass {
|
||||
perHost.Dial("tcp", addr)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedDef, def.addrs) {
|
||||
t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef)
|
||||
}
|
||||
if !reflect.DeepEqual(expectedBypass, bypass.addrs) {
|
||||
t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass)
|
||||
}
|
||||
}
|
||||
94
Godeps/_workspace/src/golang.org/x/net/proxy/proxy.go
generated
vendored
@@ -1,94 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
package proxy // import "golang.org/x/net/proxy"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func FromEnvironment() Dialer {
|
||||
allProxy := os.Getenv("all_proxy")
|
||||
if len(allProxy) == 0 {
|
||||
return Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return Direct
|
||||
}
|
||||
proxy, err := FromURL(proxyURL, Direct)
|
||||
if err != nil {
|
||||
return Direct
|
||||
}
|
||||
|
||||
noProxy := os.Getenv("no_proxy")
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := NewPerHost(proxy, Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
|
||||
if proxySchemes == nil {
|
||||
proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
|
||||
}
|
||||
proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
|
||||
var auth *Auth
|
||||
if u.User != nil {
|
||||
auth = new(Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxySchemes != nil {
|
||||
if f, ok := proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
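The package comments in the deleted proxy.go above describe the Dialer, FromEnvironment, FromURL and SOCKS5 entry points. As a quick orientation, here is a sketch (my own, not part of the diff) of how a caller dials through a SOCKS5 proxy with that API; the proxy URL and target host are placeholders.

```go
package main

import (
	"log"
	"net/url"

	"golang.org/x/net/proxy"
)

func main() {
	// Build a dialer from a SOCKS5 URL, falling back to direct connections
	// for the underlying transport.
	u, err := url.Parse("socks5://user:password@127.0.0.1:1080")
	if err != nil {
		log.Fatal(err)
	}
	dialer, err := proxy.FromURL(u, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	// Alternatively, honour the all_proxy / no_proxy environment variables.
	_ = proxy.FromEnvironment()

	// Dial the target through the proxy, exactly like net.Dial would.
	conn, err := dialer.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```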
142
Godeps/_workspace/src/golang.org/x/net/proxy/proxy_test.go
generated
vendored
@@ -1,142 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFromURL(t *testing.T) {
|
||||
endSystem, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("net.Listen failed: %v", err)
|
||||
}
|
||||
defer endSystem.Close()
|
||||
gateway, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("net.Listen failed: %v", err)
|
||||
}
|
||||
defer gateway.Close()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg)
|
||||
|
||||
url, err := url.Parse("socks5://user:password@" + gateway.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("url.Parse failed: %v", err)
|
||||
}
|
||||
proxy, err := FromURL(url, Direct)
|
||||
if err != nil {
|
||||
t.Fatalf("FromURL failed: %v", err)
|
||||
}
|
||||
_, port, err := net.SplitHostPort(endSystem.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("net.SplitHostPort failed: %v", err)
|
||||
}
|
||||
if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil {
|
||||
t.Fatalf("FromURL.Dial failed: %v", err)
|
||||
} else {
|
||||
c.Close()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestSOCKS5(t *testing.T) {
|
||||
endSystem, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("net.Listen failed: %v", err)
|
||||
}
|
||||
defer endSystem.Close()
|
||||
gateway, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("net.Listen failed: %v", err)
|
||||
}
|
||||
defer gateway.Close()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg)
|
||||
|
||||
proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct)
|
||||
if err != nil {
|
||||
t.Fatalf("SOCKS5 failed: %v", err)
|
||||
}
|
||||
if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil {
|
||||
t.Fatalf("SOCKS5.Dial failed: %v", err)
|
||||
} else {
|
||||
c.Close()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
c, err := gateway.Accept()
|
||||
if err != nil {
|
||||
t.Errorf("net.Listener.Accept failed: %v", err)
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
b := make([]byte, 32)
|
||||
var n int
|
||||
if typ == socks5Domain {
|
||||
n = 4
|
||||
} else {
|
||||
n = 3
|
||||
}
|
||||
if _, err := io.ReadFull(c, b[:n]); err != nil {
|
||||
t.Errorf("io.ReadFull failed: %v", err)
|
||||
return
|
||||
}
|
||||
if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil {
|
||||
t.Errorf("net.Conn.Write failed: %v", err)
|
||||
return
|
||||
}
|
||||
if typ == socks5Domain {
|
||||
n = 16
|
||||
} else {
|
||||
n = 10
|
||||
}
|
||||
if _, err := io.ReadFull(c, b[:n]); err != nil {
|
||||
t.Errorf("io.ReadFull failed: %v", err)
|
||||
return
|
||||
}
|
||||
if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ {
|
||||
t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3])
|
||||
return
|
||||
}
|
||||
if typ == socks5Domain {
|
||||
copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9})
|
||||
b = append(b, []byte("localhost")...)
|
||||
} else {
|
||||
copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4})
|
||||
}
|
||||
host, port, err := net.SplitHostPort(endSystem.Addr().String())
|
||||
if err != nil {
|
||||
t.Errorf("net.SplitHostPort failed: %v", err)
|
||||
return
|
||||
}
|
||||
b = append(b, []byte(net.ParseIP(host).To4())...)
|
||||
p, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
t.Errorf("strconv.Atoi failed: %v", err)
|
||||
return
|
||||
}
|
||||
b = append(b, []byte{byte(p >> 8), byte(p)}...)
|
||||
if _, err := c.Write(b); err != nil {
|
||||
t.Errorf("net.Conn.Write failed: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
210
Godeps/_workspace/src/golang.org/x/net/proxy/socks5.go
generated
vendored
@@ -1,210 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928.
|
||||
func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) {
|
||||
s := &socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward Dialer
|
||||
}
|
||||
|
||||
const socks5Version = 5
|
||||
|
||||
const (
|
||||
socks5AuthNone = 0
|
||||
socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const socks5Connect = 1
|
||||
|
||||
const (
|
||||
socks5IP4 = 1
|
||||
socks5Domain = 3
|
||||
socks5IP6 = 4
|
||||
)
|
||||
|
||||
var socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the network net via the SOCKS5 proxy.
|
||||
func (s *socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
closeConn := &conn
|
||||
defer func() {
|
||||
if closeConn != nil {
|
||||
(*closeConn).Close()
|
||||
}
|
||||
}()
|
||||
|
||||
host, portStr, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return nil, errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return nil, errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
if buf[1] == socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return nil, errors.New("proxy: destination hostname too long: " + host)
|
||||
}
|
||||
buf = append(buf, socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(socks5Errors) {
|
||||
failure = socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
closeConn = nil
|
||||
return conn, nil
|
||||
}
|
||||
6
NICKS
@@ -7,10 +7,8 @@ LordLandon <lordlandon@gmail.com>
Moter8 <moter8@gmail.com>
Nutomic <me@nutomic.com>
Rewt0r <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Stefan-Code <stefan.github@gmail.com> <Stefan.github@gmail.com>
Vilbrekin <vilbrekin@gmail.com>
Zillode <zillode@zillode.be>
acogdev <jake@acogdev.com>
alex2108 <register-github@alex-graf.de>
andrew-d <andrew@du.nham.ca>
asdil12 <dominik@heidler.eu>
@@ -20,7 +18,7 @@ brbecker <brbecker@gmail.com>
brendanlong <self@brendanlong.com>
brgmnn <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
bsidhom <bsidhom@gmail.com>
burkemw3 <mburke@amplify.com> <burkemw3@gmail.com>
burkemw3 <mburke@amplify.com>
calmh <jakob@nym.se>
canton7 <antony.male@gmail.com>
cdata <chris@scriptolo.gy>
@@ -29,7 +27,6 @@ ceh <emil@hessman.se>
cqcallaw <enlightened.despot@gmail.com>
dva <denisva@gmail.com>
dzarda <dzardacz@gmail.com>
eipiminus1 <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
facastagnini <federico.castagnini@gmail.com>
filoozoom <philippe@schommers.be>
frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
@@ -45,7 +42,6 @@ kozec <kozec@kozec.com>
krozycki <rozycki.karol@gmail.com>
marcindziadus <dziadus.marcin@gmail.com>
marclaporte <marc@marclaporte.com>
mateon1 <matin1111@wp.pl>
mogwa1 <devriesb@gmail.com>
moshen <moshen.colin@gmail.com>
mvdan <mvdan@mvdan.cc>
49
README.md
@@ -1,16 +1,17 @@
# Syncthing
Syncthing
=========

[](http://build.syncthing.net/job/syncthing/lastBuild/)
[](https://ci.appveyor.com/project/calmh/syncthing)
[](http://godoc.org/github.com/syncthing/syncthing)
[](https://www.mozilla.org/MPL/2.0/)
[](http://build.syncthing.net/job/syncthing/lastBuild/)
[](http://godoc.org/github.com/syncthing/syncthing)
[](https://www.mozilla.org/MPL/2.0/)

This is the Syncthing project which pursues the following goals:

1. Define a protocol for synchronization of a folder between a number of
collaborating devices. This protocol should be well defined, unambiguous,
easily understood, free to use, efficient, secure and language neutral.
This is called the [Block Exchange Protocol][1].
This is called the [Block Exchange
Protocol](https://github.com/syncthing/specs/blob/master/BEPv1.md).

2. Provide the reference implementation to demonstrate the usability of
said protocol. This is the `syncthing` utility. We hope that
@@ -20,38 +21,38 @@ The two are evolving together; the protocol is not to be considered
stable until Syncthing 1.0 is released, at which point it is locked down
for incompatible changes.

## Getting Started
Getting Started
---------------

Take a look at the [getting started guide][2].
Take a look at the [getting started
guide](http://docs.syncthing.net/intro/getting-started.html).

There are a few examples for keeping Syncthing running in the background
on your system in [the etc directory][3].
on your system in [the etc directory](https://github.com/syncthing/syncthing/blob/master/etc).

There is an IRC channel, `#syncthing` on [Freenode][4], for talking directly
There is an IRC channel, `#syncthing` on Freenode, for talking directly
to developers and users.

## Building
Building
--------

Building Syncthing from source is easy, and there's a [guide][5].
Building Syncthing from source is easy, and there's a
[guide](http://docs.syncthing.net/dev/building.html).
that describes it for both Unix and Windows systems.

## Signed Releases
Signed Releases
---------------

As of v0.10.15 and onwards, git tags and release binaries are GPG signed
with the key D26E6ED000654A3E (see https://syncthing.net/security.html).
For release binaries, MD5 and SHA1 checksums are calculated and signed,
available in the md5sum.txt.asc and sha1sum.txt.asc files.

## Documentation
Documentation
=============

Please see the [Syncthing documentation site][6].
Please see the [Syncthing
documentation site](http://docs.syncthing.net/).

All code is licensed under the [MPLv2 License][7].

[1]: http://docs.syncthing.net/specs/bep-v1.html
[2]: http://docs.syncthing.net/intro/getting-started.html
[3]: https://github.com/syncthing/syncthing/blob/master/etc
[4]: https://webchat.freenode.net/
[5]: http://docs.syncthing.net/dev/building.html
[6]: http://docs.syncthing.net/
[7]: https://github.com/syncthing/syncthing/blob/master/LICENSE
All code is licensed under the
[MPLv2 License](https://github.com/syncthing/syncthing/blob/master/LICENSE).
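The Signed Releases section in the README diff above mentions that MD5 and SHA1 checksums are published for release binaries, and build.go (further down in this diff) writes a matching `.md5` file next to each built binary. The sketch below (not part of the repository) shows one way a downloaded binary could be checked against such a checksum file; the file names are placeholders.

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	fd, err := os.Open("syncthing") // downloaded binary (placeholder name)
	if err != nil {
		log.Fatal(err)
	}
	defer fd.Close()

	// Hash the binary the same way build.go's md5File does.
	h := md5.New()
	if _, err := io.Copy(h, fd); err != nil {
		log.Fatal(err)
	}
	got := []byte(fmt.Sprintf("%x", h.Sum(nil)))

	// Compare against the published checksum file.
	want, err := ioutil.ReadFile("syncthing.md5") // placeholder name
	if err != nil {
		log.Fatal(err)
	}

	if !bytes.Equal(got, bytes.TrimSpace(want)) {
		log.Fatal("checksum mismatch")
	}
	fmt.Println("checksum OK")
}
```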
@@ -1,12 +0,0 @@
version: '{branch}-{build}'
clone_folder: C:\src\github.com\syncthing\syncthing
init:
- go version
environment:
GOPATH: C:\
build_script:
- go run build.go zip
test_script:
- go run build.go test
artifacts:
- path: '*.zip'
48
build.go
@@ -13,6 +13,7 @@ import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -186,7 +187,7 @@ func setup() {
|
||||
|
||||
func test(pkg string) {
|
||||
setBuildEnv()
|
||||
runPrint("go", "test", "-short", "-race", "-timeout", "60s", pkg)
|
||||
runPrint("go", "test", "-short", "-timeout", "60s", pkg)
|
||||
}
|
||||
|
||||
func bench(pkg string) {
|
||||
@@ -195,11 +196,7 @@ func bench(pkg string) {
|
||||
}
|
||||
|
||||
func install(pkg string, tags []string) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
os.Setenv("GOBIN", filepath.Join(cwd, "bin"))
|
||||
os.Setenv("GOBIN", "./bin")
|
||||
args := []string{"install", "-v", "-ldflags", ldflags()}
|
||||
if len(tags) > 0 {
|
||||
args = append(args, "-tags", strings.Join(tags, ","))
|
||||
@@ -218,7 +215,7 @@ func build(pkg string, tags []string) {
|
||||
binary += ".exe"
|
||||
}
|
||||
|
||||
rmr(binary)
|
||||
rmr(binary, binary+".md5")
|
||||
args := []string{"build", "-ldflags", ldflags()}
|
||||
if len(tags) > 0 {
|
||||
args = append(args, "-tags", strings.Join(tags, ","))
|
||||
@@ -229,6 +226,13 @@ func build(pkg string, tags []string) {
|
||||
args = append(args, pkg)
|
||||
setBuildEnv()
|
||||
runPrint("go", args...)
|
||||
|
||||
// Create an md5 checksum of the binary, to be included in the archive for
|
||||
// automatic upgrades.
|
||||
err := md5File(binary)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func buildTar() {
|
||||
@@ -245,6 +249,7 @@ func buildTar() {
|
||||
{src: "LICENSE", dst: name + "/LICENSE.txt"},
|
||||
{src: "AUTHORS", dst: name + "/AUTHORS.txt"},
|
||||
{src: "syncthing", dst: name + "/syncthing"},
|
||||
{src: "syncthing.md5", dst: name + "/syncthing.md5"},
|
||||
}
|
||||
|
||||
for _, file := range listFiles("etc") {
|
||||
@@ -272,6 +277,7 @@ func buildZip() {
|
||||
{src: "LICENSE", dst: name + "/LICENSE.txt"},
|
||||
{src: "AUTHORS", dst: name + "/AUTHORS.txt"},
|
||||
{src: "syncthing.exe", dst: name + "/syncthing.exe"},
|
||||
{src: "syncthing.exe.md5", dst: name + "/syncthing.exe.md5"},
|
||||
}
|
||||
|
||||
for _, file := range listFiles("extra") {
|
||||
@@ -405,7 +411,7 @@ func assets() {
|
||||
}
|
||||
|
||||
func xdr() {
|
||||
runPrint("go", "generate", "./lib/discover", "./lib/db", "./lib/protocol")
|
||||
runPrint("go", "generate", "./lib/discover", "./lib/db")
|
||||
}
|
||||
|
||||
func translate() {
|
||||
@@ -706,6 +712,32 @@ func zipFile(out string, files []archiveFile) {
|
||||
}
|
||||
}
|
||||
|
||||
func md5File(file string) error {
|
||||
fd, err := os.Open(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
h := md5.New()
|
||||
_, err = io.Copy(h, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := os.Create(file + ".md5")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(out, "%x\n", h.Sum(nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return out.Close()
|
||||
}
|
||||
|
||||
func vet(pkg string) {
|
||||
bs, err := runError("go", "vet", pkg)
|
||||
if err != nil && err.Error() == "exit status 3" || bytes.Contains(bs, []byte("no such tool \"vet\"")) {
|
||||
|
||||
4
build.sh
@@ -108,10 +108,10 @@ case "${1:-default}" in
# For every package in the repo
for dir in $(go list ./...) ; do
# run the tests
GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -race -coverprofile=profile.out $dir
godep go test -coverprofile=profile.out $dir
if [ -f profile.out ] ; then
# and if there was test output, append it to coverage.out
grep -v "mode: " profile.out >> coverage.out
grep -v "mode: set" profile.out >> coverage.out
rm profile.out
fi
done
@@ -12,7 +12,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/scanner"
|
||||
)
|
||||
|
||||
@@ -68,7 +68,7 @@ func main() {
|
||||
if *standardBlocks || blockSize < protocol.BlockSize {
|
||||
blockSize = protocol.BlockSize
|
||||
}
|
||||
bs, err := scanner.Blocks(fd, blockSize, fi.Size(), nil)
|
||||
bs, err := scanner.Blocks(fd, blockSize, fi.Size())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -7,105 +7,37 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var timeout = 5 * time.Second
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
var server string
|
||||
|
||||
flag.StringVar(&server, "server", "", "Announce server (blank for default set)")
|
||||
flag.DurationVar(&timeout, "timeout", timeout, "Query timeout")
|
||||
flag.Usage = usage
|
||||
flag.StringVar(&server, "server", "udp4://announce.syncthing.net:22026", "Announce server")
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() != 1 {
|
||||
flag.Usage()
|
||||
if len(flag.Args()) != 1 || server == "" {
|
||||
log.Printf("Usage: %s [-server=\"udp4://announce.syncthing.net:22026\"] <device>", os.Args[0])
|
||||
os.Exit(64)
|
||||
}
|
||||
|
||||
id, err := protocol.DeviceIDFromString(flag.Args()[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
log.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if server != "" {
|
||||
checkServers(id, server)
|
||||
} else {
|
||||
checkServers(id, config.DefaultDiscoveryServers...)
|
||||
discoverer := discover.NewDiscoverer(protocol.LocalDeviceID, nil)
|
||||
discoverer.StartGlobal([]string{server}, 1)
|
||||
for _, addr := range discoverer.Lookup(id) {
|
||||
log.Println(addr)
|
||||
}
|
||||
}
|
||||
|
||||
type checkResult struct {
|
||||
server string
|
||||
direct []string
|
||||
relays []discover.Relay
|
||||
error
|
||||
}
|
||||
|
||||
func checkServers(deviceID protocol.DeviceID, servers ...string) {
|
||||
t0 := time.Now()
|
||||
resc := make(chan checkResult)
|
||||
for _, srv := range servers {
|
||||
srv := srv
|
||||
go func() {
|
||||
res := checkServer(deviceID, srv)
|
||||
res.server = srv
|
||||
resc <- res
|
||||
}()
|
||||
}
|
||||
|
||||
for _ = range servers {
|
||||
res := <-resc
|
||||
|
||||
u, _ := url.Parse(res.server)
|
||||
fmt.Printf("%s (%v):\n", u.Host, time.Since(t0))
|
||||
|
||||
if res.error != nil {
|
||||
fmt.Println(" " + res.error.Error())
|
||||
}
|
||||
for _, addr := range res.direct {
|
||||
fmt.Println(" address:", addr)
|
||||
}
|
||||
for _, rel := range res.relays {
|
||||
fmt.Printf(" relay: %s (%d ms)\n", rel.URL, rel.Latency)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkServer(deviceID protocol.DeviceID, server string) checkResult {
|
||||
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, nil)
|
||||
if err != nil {
|
||||
return checkResult{error: err}
|
||||
}
|
||||
|
||||
res := make(chan checkResult, 1)
|
||||
|
||||
time.AfterFunc(timeout, func() {
|
||||
res <- checkResult{error: errors.New("timeout")}
|
||||
})
|
||||
|
||||
go func() {
|
||||
direct, relays, err := disco.Lookup(deviceID)
|
||||
res <- checkResult{direct: direct, relays: relays, error: err}
|
||||
}()
|
||||
|
||||
return <-res
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Printf("Usage:\n\t%s [options] <device ID>\n\nOptions:\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
func dump(ldb *leveldb.DB) {
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
var dev protocol.DeviceID
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
devBytes := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
copy(dev[:], devBytes)
|
||||
fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.UnmarshalXDR(it.Value())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf(" N:%q\n F:%#o\n M:%d\n V:%v\n S:%d\n B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
name := nulString(key[1+64:])
|
||||
fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
hash := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
fmt.Printf("[dstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
fmt.Printf("[fstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeVirtualMtime:
|
||||
fmt.Printf("[mtime]\n %x\n %x\n", it.Key(), it.Value())
|
||||
|
||||
default:
|
||||
fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"fmt"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
// An IntHeap is a min-heap of ints.
|
||||
type SizedElement struct {
|
||||
key string
|
||||
size int
|
||||
}
|
||||
|
||||
type ElementHeap []SizedElement
|
||||
|
||||
func (h ElementHeap) Len() int { return len(h) }
|
||||
func (h ElementHeap) Less(i, j int) bool { return h[i].size > h[j].size }
|
||||
func (h ElementHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
||||
|
||||
func (h *ElementHeap) Push(x interface{}) {
|
||||
*h = append(*h, x.(SizedElement))
|
||||
}
|
||||
|
||||
func (h *ElementHeap) Pop() interface{} {
|
||||
old := *h
|
||||
n := len(old)
|
||||
x := old[n-1]
|
||||
*h = old[0 : n-1]
|
||||
return x
|
||||
}
|
||||
|
||||
func dumpsize(ldb *leveldb.DB) {
|
||||
h := &ElementHeap{}
|
||||
heap.Init(h)
|
||||
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
var dev protocol.DeviceID
|
||||
var ele SizedElement
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
devBytes := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
copy(dev[:], devBytes)
|
||||
ele.key = fmt.Sprintf("DEVICE:%s:%s:%s", dev, folder, name)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
name := nulString(key[1+64:])
|
||||
ele.key = fmt.Sprintf("GLOBAL:%s:%s", folder, name)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
hash := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
ele.key = fmt.Sprintf("BLOCK:%s:%x:%s", folder, hash, name)
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
ele.key = fmt.Sprintf("DEVICESTATS:%s", key[1:])
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
ele.key = fmt.Sprintf("FOLDERSTATS:%s", key[1:])
|
||||
|
||||
case db.KeyTypeVirtualMtime:
|
||||
ele.key = fmt.Sprintf("MTIME:%s", key[1:])
|
||||
|
||||
default:
|
||||
ele.key = fmt.Sprintf("UNKNOWN:%x", key)
|
||||
}
|
||||
ele.size = len(it.Value())
|
||||
heap.Push(h, ele)
|
||||
}
|
||||
|
||||
for h.Len() > 0 {
|
||||
ele = heap.Pop(h).(SizedElement)
|
||||
fmt.Println(ele.key, ele.size)
|
||||
}
|
||||
}
|
||||
@@ -7,33 +7,25 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var mode string
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
flag.StringVar(&mode, "mode", "dump", "Mode of operation: dump, dumpsize")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
path := flag.Arg(0)
|
||||
if path == "" {
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.11.0.db")
|
||||
}
|
||||
|
||||
fmt.Println("Path:", path)
|
||||
|
||||
ldb, err := leveldb.OpenFile(path, &opt.Options{
|
||||
ldb, err := leveldb.OpenFile(flag.Arg(0), &opt.Options{
|
||||
ErrorIfMissing: true,
|
||||
Strict: opt.StrictAll,
|
||||
OpenFilesCacheCapacity: 100,
|
||||
@@ -42,11 +34,53 @@ func main() {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if mode == "dump" {
|
||||
dump(ldb)
|
||||
} else if mode == "dumpsize" {
|
||||
dumpsize(ldb)
|
||||
} else {
|
||||
fmt.Println("Unknown mode")
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
var dev protocol.DeviceID
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
devBytes := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
copy(dev[:], devBytes)
|
||||
fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.UnmarshalXDR(it.Value())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf(" N:%q\n F:%#o\n M:%d\n V:%v\n S:%d\n B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
name := nulString(key[1+64:])
|
||||
fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
hash := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
fmt.Printf("[dstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
fmt.Printf("[fstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
|
||||
default:
|
||||
fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func nulString(bs []byte) string {
|
||||
for i := range bs {
|
||||
if bs[i] == 0 {
|
||||
return string(bs[:i])
|
||||
}
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
)
|
||||
|
||||
func nulString(bs []byte) string {
|
||||
for i := range bs {
|
||||
if bs[i] == 0 {
|
||||
return string(bs[:i])
|
||||
}
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func defaultConfigDir() string {
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
if p := os.Getenv("LocalAppData"); p != "" {
|
||||
return filepath.Join(p, "Syncthing")
|
||||
}
|
||||
return filepath.Join(os.Getenv("AppData"), "Syncthing")
|
||||
|
||||
case "darwin":
|
||||
dir, err := osutil.ExpandTilde("~/Library/Application Support/Syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return dir
|
||||
|
||||
default:
|
||||
if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
|
||||
return filepath.Join(xdgCfg, "syncthing")
|
||||
}
|
||||
dir, err := osutil.ExpandTilde("~/.config/syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
}
|
||||
@@ -1,115 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/signature"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() < 1 {
|
||||
log.Println(`Usage:
|
||||
stsigtool <command>
|
||||
|
||||
Where command is one of:
|
||||
|
||||
gen
|
||||
- generate a new key pair
|
||||
|
||||
sign <privkeyfile> <datafile>
|
||||
- sign a file
|
||||
|
||||
verify <signaturefile> <datafile>
|
||||
- verify a signature, using the built in public key
|
||||
|
||||
verify <signaturefile> <datafile> <pubkeyfile>
|
||||
- verify a signature, using the specified public key file
|
||||
`)
|
||||
}
|
||||
|
||||
switch flag.Arg(0) {
|
||||
case "gen":
|
||||
gen()
|
||||
case "sign":
|
||||
sign(flag.Arg(1), flag.Arg(2))
|
||||
case "verify":
|
||||
if flag.NArg() == 4 {
|
||||
verifyWithFile(flag.Arg(1), flag.Arg(2), flag.Arg(3))
|
||||
} else {
|
||||
verifyWithKey(flag.Arg(1), flag.Arg(2), upgrade.SigningKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func gen() {
|
||||
priv, pub, err := signature.GenerateKeys()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
os.Stdout.Write(priv)
|
||||
os.Stdout.Write(pub)
|
||||
}
|
||||
|
||||
func sign(keyname, dataname string) {
|
||||
privkey, err := ioutil.ReadFile(keyname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fd, err := os.Open(dataname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
sig, err := signature.Sign(privkey, fd)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
os.Stdout.Write(sig)
|
||||
}
|
||||
|
||||
func verifyWithFile(signame, dataname, keyname string) {
|
||||
pubkey, err := ioutil.ReadFile(keyname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
verifyWithKey(signame, dataname, pubkey)
|
||||
}
|
||||
|
||||
func verifyWithKey(signame, dataname string, pubkey []byte) {
|
||||
sig, err := ioutil.ReadFile(signame)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fd, err := os.Open(dataname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
err = signature.Verify(pubkey, sig, fd)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Println("correct signature")
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
type addressLister struct {
|
||||
upnpSvc *upnpSvc
|
||||
cfg *config.Wrapper
|
||||
}
|
||||
|
||||
func newAddressLister(upnpSvc *upnpSvc, cfg *config.Wrapper) *addressLister {
|
||||
return &addressLister{
|
||||
upnpSvc: upnpSvc,
|
||||
cfg: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
// ExternalAddresses returns a list of addresses that are our best guess for
|
||||
// where we are reachable from the outside. As a special case, we may return
|
||||
// one or more addresses with an empty IP address (0.0.0.0 or ::) and just
|
||||
// port number - this means that the outside address of a NAT gateway should
|
||||
// be substituted.
|
||||
func (e *addressLister) ExternalAddresses() []string {
|
||||
return e.addresses(false)
|
||||
}
|
||||
|
||||
// AllAddresses returns a list of addresses that are our best guess for where
|
||||
// we are reachable from the local network. Same conditions as
|
||||
// ExternalAddresses, but private IPv4 addresses are included.
|
||||
func (e *addressLister) AllAddresses() []string {
|
||||
return e.addresses(true)
|
||||
}
|
||||
|
||||
func (e *addressLister) addresses(includePrivateIPV4 bool) []string {
|
||||
var addrs []string
|
||||
|
||||
// Grab our listen addresses from the config. Unspecified ones are passed
|
||||
// on verbatim (to be interpreted by a global discovery server or local
|
||||
// discovery peer). Public addresses are passed on verbatim. Private
|
||||
// addresses are filtered.
|
||||
for _, addrStr := range e.cfg.Options().ListenAddress {
|
||||
addrURL, err := url.Parse(addrStr)
|
||||
if err != nil {
|
||||
l.Infoln("Listen address", addrStr, "is invalid:", err)
|
||||
continue
|
||||
}
|
||||
addr, err := net.ResolveTCPAddr("tcp", addrURL.Host)
|
||||
if err != nil {
|
||||
l.Infoln("Listen address", addrStr, "is invalid:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if addr.IP == nil || addr.IP.IsUnspecified() {
|
||||
// Address like 0.0.0.0:22000 or [::]:22000 or :22000; include as is.
|
||||
addrs = append(addrs, tcpAddr(addr.String()))
|
||||
} else if isPublicIPv4(addr.IP) || isPublicIPv6(addr.IP) {
|
||||
// A public address; include as is.
|
||||
addrs = append(addrs, tcpAddr(addr.String()))
|
||||
} else if includePrivateIPV4 && addr.IP.To4().IsGlobalUnicast() {
|
||||
// A private IPv4 address.
|
||||
addrs = append(addrs, tcpAddr(addr.String()))
|
||||
}
|
||||
}
|
||||
|
||||
// Get an external port mapping from the upnpSvc, if it has one. If so,
|
||||
// add it as another unspecified address.
|
||||
if e.upnpSvc != nil {
|
||||
if port := e.upnpSvc.ExternalPort(); port != 0 {
|
||||
addrs = append(addrs, fmt.Sprintf("tcp://:%d", port))
|
||||
}
|
||||
}
|
||||
|
||||
return addrs
|
||||
}
|
||||
|
||||
func isPublicIPv4(ip net.IP) bool {
|
||||
ip = ip.To4()
|
||||
if ip == nil {
|
||||
// Not an IPv4 address (IPv6)
|
||||
return false
|
||||
}
|
||||
|
||||
// IsGlobalUnicast below only checks that it's not link local or
|
||||
// multicast, and we want to exclude private (NAT:ed) addresses as well.
|
||||
rfc1918 := []net.IPNet{
|
||||
{IP: net.IP{10, 0, 0, 0}, Mask: net.IPMask{255, 0, 0, 0}},
|
||||
{IP: net.IP{172, 16, 0, 0}, Mask: net.IPMask{255, 240, 0, 0}},
|
||||
{IP: net.IP{192, 168, 0, 0}, Mask: net.IPMask{255, 255, 0, 0}},
|
||||
}
|
||||
for _, n := range rfc1918 {
|
||||
if n.Contains(ip) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return ip.IsGlobalUnicast()
|
||||
}
|
||||
|
||||
func isPublicIPv6(ip net.IP) bool {
|
||||
if ip.To4() != nil {
|
||||
// Not an IPv6 address (IPv4)
|
||||
// (To16() returns a v6 mapped v4 address so can't be used to check
|
||||
// that it's an actual v6 address)
|
||||
return false
|
||||
}
|
||||
|
||||
return ip.IsGlobalUnicast()
|
||||
}
|
||||
|
||||
func tcpAddr(host string) string {
|
||||
u := url.URL{
|
||||
Scheme: "tcp",
|
||||
Host: host,
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
372
cmd/syncthing/connections.go
Normal file
@@ -0,0 +1,372 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/thejerf/suture"
|
||||
)
|
||||
|
||||
// The connection service listens on TLS and dials configured unconnected
|
||||
// devices. Successful connections are handed to the model.
|
||||
type connectionSvc struct {
|
||||
*suture.Supervisor
|
||||
cfg *config.Wrapper
|
||||
myID protocol.DeviceID
|
||||
model *model.Model
|
||||
tlsCfg *tls.Config
|
||||
conns chan *tls.Conn
|
||||
}
|
||||
|
||||
func newConnectionSvc(cfg *config.Wrapper, myID protocol.DeviceID, model *model.Model, tlsCfg *tls.Config) *connectionSvc {
|
||||
svc := &connectionSvc{
|
||||
Supervisor: suture.NewSimple("connectionSvc"),
|
||||
cfg: cfg,
|
||||
myID: myID,
|
||||
model: model,
|
||||
tlsCfg: tlsCfg,
|
||||
conns: make(chan *tls.Conn),
|
||||
}
|
||||
|
||||
// There are several moving parts here; one routine per listening address
|
||||
// to handle incoming connections, one routine to periodically attempt
|
||||
// outgoing connections, and lastly one routine to the the common handling
|
||||
// regardless of whether the connection was incoming or outgoing. It ends
|
||||
// up as in the diagram below. We embed a Supervisor to manage the
|
||||
// routines (i.e. log and restart if they crash or exit, etc).
|
||||
//
|
||||
// +-----------------+
|
||||
// Incoming | +---------------+-+ +-----------------+
|
||||
// Connections | | | | | Outgoing
|
||||
// -------------->| | svc.listen | | | Connections
|
||||
// | | (1 per listen | | svc.connect |-------------->
|
||||
// | | address) | | |
|
||||
// +-+ | | |
|
||||
// +-----------------+ +-----------------+
|
||||
// v v
|
||||
// | |
|
||||
// | |
|
||||
// +------------+-----------+
|
||||
// |
|
||||
// | svc.conns
|
||||
// v
|
||||
// +-----------------+
|
||||
// | |
|
||||
// | |
|
||||
// | svc.handle |------> model.AddConnection()
|
||||
// | |
|
||||
// | |
|
||||
// +-----------------+
|
||||
//
|
||||
// TODO: Clean shutdown, and/or handling config changes on the fly. We
|
||||
// partly do this now - new devices and addresses will be picked up, but
|
||||
// not new listen addresses and we don't support disconnecting devices
|
||||
// that are removed and so on...
|
||||
|
||||
svc.Add(serviceFunc(svc.connect))
|
||||
for _, addr := range svc.cfg.Options().ListenAddress {
|
||||
addr := addr
|
||||
listener := serviceFunc(func() {
|
||||
svc.listen(addr)
|
||||
})
|
||||
svc.Add(listener)
|
||||
}
|
||||
svc.Add(serviceFunc(svc.handle))
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
func (s *connectionSvc) handle() {
|
||||
next:
|
||||
for conn := range s.conns {
|
||||
cs := conn.ConnectionState()
|
||||
|
||||
// We should have negotiated the next level protocol "bep/1.0" as part
|
||||
// of the TLS handshake. Unfortunately this can't be a hard error,
|
||||
// because there are implementations out there that don't support
|
||||
// protocol negotiation (iOS for one...).
|
||||
if !cs.NegotiatedProtocolIsMutual || cs.NegotiatedProtocol != bepProtocolName {
|
||||
l.Infof("Peer %s did not negotiate bep/1.0", conn.RemoteAddr())
|
||||
}
|
||||
|
||||
// We should have received exactly one certificate from the other
|
||||
// side. If we didn't, they don't have a device ID and we drop the
|
||||
// connection.
|
||||
certs := cs.PeerCertificates
|
||||
if cl := len(certs); cl != 1 {
|
||||
l.Infof("Got peer certificate list of length %d != 1 from %s; protocol error", cl, conn.RemoteAddr())
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
remoteCert := certs[0]
|
||||
remoteID := protocol.NewDeviceID(remoteCert.Raw)
|
||||
|
||||
// The device ID should not be that of ourselves. It can happen
|
||||
// though, especially in the presence of NAT hairpinning, multiple
|
||||
// clients between the same NAT gateway, and global discovery.
|
||||
if remoteID == myID {
|
||||
l.Infof("Connected to myself (%s) - should not happen", remoteID)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// We should not already be connected to the other party. TODO: This
|
||||
// could use some better handling. If the old connection is dead but
|
||||
// hasn't timed out yet we may want to drop *that* connection and keep
|
||||
// this one. But in case we are two devices connecting to each other
|
||||
// in parallel we don't want to do that or we end up with no
|
||||
// connections still established...
|
||||
if s.model.ConnectedTo(remoteID) {
|
||||
l.Infof("Connected to already connected device (%s)", remoteID)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
for deviceID, deviceCfg := range s.cfg.Devices() {
|
||||
if deviceID == remoteID {
|
||||
// Verify the name on the certificate. By default we set it to
|
||||
// "syncthing" when generating, but the user may have replaced
|
||||
// the certificate and used another name.
|
||||
certName := deviceCfg.CertName
|
||||
if certName == "" {
|
||||
certName = tlsDefaultCommonName
|
||||
}
|
||||
err := remoteCert.VerifyHostname(certName)
|
||||
if err != nil {
|
||||
// Incorrect certificate name is something the user most
|
||||
// likely wants to know about, since it's an advanced
|
||||
// config. Warn instead of Info.
|
||||
l.Warnf("Bad certificate from %s (%v): %v", remoteID, conn.RemoteAddr(), err)
|
||||
conn.Close()
|
||||
continue next
|
||||
}
|
||||
|
||||
// If rate limiting is set, and based on the address we should
|
||||
// limit the connection, then we wrap it in a limiter.
|
||||
|
||||
limit := s.shouldLimit(conn.RemoteAddr())
|
||||
|
||||
wr := io.Writer(conn)
|
||||
if limit && writeRateLimit != nil {
|
||||
wr = &limitedWriter{conn, writeRateLimit}
|
||||
}
|
||||
|
||||
rd := io.Reader(conn)
|
||||
if limit && readRateLimit != nil {
|
||||
rd = &limitedReader{conn, readRateLimit}
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
|
||||
protoConn := protocol.NewConnection(remoteID, rd, wr, s.model, name, deviceCfg.Compression)
|
||||
|
||||
l.Infof("Established secure connection to %s at %s", remoteID, name)
|
||||
if debugNet {
|
||||
l.Debugf("cipher suite: %04X in lan: %t", conn.ConnectionState().CipherSuite, !limit)
|
||||
}
|
||||
|
||||
s.model.AddConnection(conn, protoConn)
|
||||
continue next
|
||||
}
|
||||
}
|
||||
|
||||
if !s.cfg.IgnoredDevice(remoteID) {
|
||||
events.Default.Log(events.DeviceRejected, map[string]string{
|
||||
"device": remoteID.String(),
|
||||
"address": conn.RemoteAddr().String(),
|
||||
})
|
||||
l.Infof("Connection from %s with unknown device ID %s", conn.RemoteAddr(), remoteID)
|
||||
} else {
|
||||
l.Infof("Connection from %s with ignored device ID %s", conn.RemoteAddr(), remoteID)
|
||||
}
|
||||
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
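The handle() routine above accepts a freshly handshaken TLS connection, logs (but does not fail) when the next-level protocol was not negotiated, and then derives the remote device ID from the single peer certificate. A standalone sketch of those checks; the hex-encoded SHA-256 is only illustrative, since syncthing's real device IDs come from protocol.NewDeviceID with its own encoding:

package sketch

import (
	"crypto/sha256"
	"crypto/tls"
	"encoding/hex"
	"fmt"
	"log"
)

// peerIDFromState mirrors the checks in handle(): tolerate a missing
// protocol negotiation, require exactly one peer certificate, and derive a
// stable identifier from its raw bytes.
func peerIDFromState(cs tls.ConnectionState, wantProto string) (string, error) {
	if cs.NegotiatedProtocol != wantProto {
		log.Printf("peer did not negotiate %q", wantProto)
	}
	if n := len(cs.PeerCertificates); n != 1 {
		return "", fmt.Errorf("got %d peer certificates, expected exactly 1", n)
	}
	sum := sha256.Sum256(cs.PeerCertificates[0].Raw)
	return hex.EncodeToString(sum[:]), nil
}
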
func (s *connectionSvc) listen(addr string) {
|
||||
if debugNet {
|
||||
l.Debugln("listening on", addr)
|
||||
}
|
||||
|
||||
tcaddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
l.Fatalln("listen (BEP):", err)
|
||||
}
|
||||
listener, err := net.ListenTCP("tcp", tcaddr)
|
||||
if err != nil {
|
||||
l.Fatalln("listen (BEP):", err)
|
||||
}
|
||||
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
l.Warnln("Accepting connection:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if debugNet {
|
||||
l.Debugln("connect from", conn.RemoteAddr())
|
||||
}
|
||||
|
||||
tcpConn := conn.(*net.TCPConn)
|
||||
s.setTCPOptions(tcpConn)
|
||||
|
||||
tc := tls.Server(conn, s.tlsCfg)
|
||||
err = tc.Handshake()
|
||||
if err != nil {
|
||||
l.Infoln("TLS handshake:", err)
|
||||
tc.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
s.conns <- tc
|
||||
}
|
||||
}
|
||||
|
||||
func (s *connectionSvc) connect() {
|
||||
delay := time.Second
|
||||
for {
|
||||
nextDevice:
|
||||
for deviceID, deviceCfg := range s.cfg.Devices() {
|
||||
if deviceID == myID {
|
||||
continue
|
||||
}
|
||||
|
||||
if s.model.ConnectedTo(deviceID) {
|
||||
continue
|
||||
}
|
||||
|
||||
var addrs []string
|
||||
for _, addr := range deviceCfg.Addresses {
|
||||
if addr == "dynamic" {
|
||||
if discoverer != nil {
|
||||
t := discoverer.Lookup(deviceID)
|
||||
if len(t) == 0 {
|
||||
continue
|
||||
}
|
||||
addrs = append(addrs, t...)
|
||||
}
|
||||
} else {
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
host, port, err := net.SplitHostPort(addr)
|
||||
if err != nil && strings.HasPrefix(err.Error(), "missing port") {
|
||||
// addr is of the form "1.2.3.4"
|
||||
addr = net.JoinHostPort(addr, "22000")
|
||||
} else if err == nil && port == "" {
|
||||
// addr is of the form "1.2.3.4:"
|
||||
addr = net.JoinHostPort(host, "22000")
|
||||
}
|
||||
if debugNet {
|
||||
l.Debugln("dial", deviceCfg.DeviceID, addr)
|
||||
}
|
||||
|
||||
raddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
if debugNet {
|
||||
l.Debugln(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
conn, err := net.DialTCP("tcp", nil, raddr)
|
||||
if err != nil {
|
||||
if debugNet {
|
||||
l.Debugln(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
s.setTCPOptions(conn)
|
||||
|
||||
tc := tls.Client(conn, s.tlsCfg)
|
||||
err = tc.Handshake()
|
||||
if err != nil {
|
||||
l.Infoln("TLS handshake:", err)
|
||||
tc.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
s.conns <- tc
|
||||
continue nextDevice
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
delay *= 2
|
||||
if maxD := time.Duration(s.cfg.Options().ReconnectIntervalS) * time.Second; delay > maxD {
|
||||
delay = maxD
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
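The dial loop in connect() above normalizes each address before resolving it, supplying the default BEP port 22000 for bare-IP and "host:" forms, and it backs off between rounds by doubling the delay up to ReconnectIntervalS seconds. A sketch of the normalization step; the Contains check is a slightly more lenient variant of the prefix match used above:

package sketch

import (
	"net"
	"strings"
)

// normalizeAddr fills in the assumed default BEP port 22000 when the
// configured address has no port, mirroring the fixup in connect().
func normalizeAddr(addr string) string {
	host, port, err := net.SplitHostPort(addr)
	switch {
	case err != nil && strings.Contains(err.Error(), "missing port"):
		return net.JoinHostPort(addr, "22000")
	case err == nil && port == "":
		return net.JoinHostPort(host, "22000")
	}
	return addr
}
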
func (*connectionSvc) setTCPOptions(conn *net.TCPConn) {
|
||||
var err error
|
||||
if err = conn.SetLinger(0); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
if err = conn.SetNoDelay(false); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
if err = conn.SetKeepAlive(true); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *connectionSvc) shouldLimit(addr net.Addr) bool {
	if s.cfg.Options().LimitBandwidthInLan {
		return true
	}

	tcpaddr, ok := addr.(*net.TCPAddr)
	if !ok {
		return true
	}
	for _, lan := range lans {
		if lan.Contains(tcpaddr.IP) {
			return false
		}
	}
	return !tcpaddr.IP.IsLoopback()
}

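shouldLimit() consults a package-level list of local networks; later in this patch that list is built from detected LANs plus CIDR strings from an AlwaysLocalNets-style option. A small sketch of both halves:

package sketch

import "net"

// parseLANs parses CIDR strings once up front, skipping malformed entries,
// the same way the setup code later in this patch does.
func parseLANs(cidrs []string) []*net.IPNet {
	var lans []*net.IPNet
	for _, c := range cidrs {
		if _, ipnet, err := net.ParseCIDR(c); err == nil {
			lans = append(lans, ipnet)
		}
	}
	return lans
}

// isLAN applies the same decision shouldLimit makes, inverted: an address
// inside any known local network, or a loopback address, counts as local.
func isLAN(lans []*net.IPNet, ip net.IP) bool {
	for _, lan := range lans {
		if lan.Contains(ip) {
			return true
		}
	}
	return ip.IsLoopback()
}
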
func (s *connectionSvc) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}

func (s *connectionSvc) CommitConfiguration(from, to config.Configuration) bool {
	// We require a restart if a device has been removed.

	newDevices := make(map[protocol.DeviceID]bool, len(to.Devices))
	for _, dev := range to.Devices {
		newDevices[dev.DeviceID] = true
	}

	for _, dev := range from.Devices {
		if !newDevices[dev.DeviceID] {
			return false
		}
	}

	return true
}

@@ -9,16 +9,10 @@ package main

import (
	"os"
	"strings"

	"github.com/syncthing/syncthing/lib/logger"
)

var (
	l           = logger.DefaultLogger.NewFacility("main", "Main package")
	httpl       = logger.DefaultLogger.NewFacility("http", "REST API")
	debugNet    = strings.Contains(os.Getenv("STTRACE"), "net") || os.Getenv("STTRACE") == "all"
	debugHTTP   = strings.Contains(os.Getenv("STTRACE"), "http") || os.Getenv("STTRACE") == "all"
	debugSuture = strings.Contains(os.Getenv("STTRACE"), "suture") || os.Getenv("STTRACE") == "all"
)

func init() {
	l.SetDebug("main", strings.Contains(os.Getenv("STTRACE"), "main") || os.Getenv("STTRACE") == "all")
	l.SetDebug("http", strings.Contains(os.Getenv("STTRACE"), "http") || os.Getenv("STTRACE") == "all")
}

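Both the old boolean flags and the new SetDebug calls above follow the same STTRACE convention. A one-function sketch of that convention:

package sketch

import (
	"os"
	"strings"
)

// facilityEnabled: a facility is traced when its name appears in STTRACE,
// or when STTRACE is set to "all".
func facilityEnabled(name string) bool {
	v := os.Getenv("STTRACE")
	return v == "all" || strings.Contains(v, name)
}
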
@@ -20,70 +20,65 @@ import (
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/logger"
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/auto"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
"github.com/vitrun/qart/qr"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
type guiError struct {
|
||||
Time time.Time `json:"time"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
var (
|
||||
configInSync = true
|
||||
guiErrors = []guiError{}
|
||||
guiErrorsMut = sync.NewMutex()
|
||||
startTime = time.Now()
|
||||
)
|
||||
|
||||
type apiSvc struct {
|
||||
id protocol.DeviceID
|
||||
cfg *config.Wrapper
|
||||
cfg config.GUIConfiguration
|
||||
assetDir string
|
||||
model *model.Model
|
||||
eventSub *events.BufferedSubscription
|
||||
discoverer *discover.CachingMux
|
||||
relaySvc *relay.Svc
|
||||
listener net.Listener
|
||||
fss *folderSummarySvc
|
||||
stop chan struct{}
|
||||
systemConfigMut sync.Mutex
|
||||
|
||||
guiErrors *logger.Recorder
|
||||
systemLog *logger.Recorder
|
||||
eventSub *events.BufferedSubscription
|
||||
}
|
||||
|
||||
func newAPISvc(id protocol.DeviceID, cfg *config.Wrapper, assetDir string, m *model.Model, eventSub *events.BufferedSubscription, discoverer *discover.CachingMux, relaySvc *relay.Svc, errors, systemLog *logger.Recorder) (*apiSvc, error) {
|
||||
func newAPISvc(id protocol.DeviceID, cfg config.GUIConfiguration, assetDir string, m *model.Model, eventSub *events.BufferedSubscription) (*apiSvc, error) {
|
||||
svc := &apiSvc{
|
||||
id: id,
|
||||
cfg: cfg,
|
||||
assetDir: assetDir,
|
||||
model: m,
|
||||
eventSub: eventSub,
|
||||
discoverer: discoverer,
|
||||
relaySvc: relaySvc,
|
||||
systemConfigMut: sync.NewMutex(),
|
||||
guiErrors: errors,
|
||||
systemLog: systemLog,
|
||||
eventSub: eventSub,
|
||||
}
|
||||
|
||||
var err error
|
||||
svc.listener, err = svc.getListener(cfg.GUI())
|
||||
svc.listener, err = svc.getListener(cfg)
|
||||
return svc, err
|
||||
}
|
||||
|
||||
func (s *apiSvc) getListener(guiCfg config.GUIConfiguration) (net.Listener, error) {
|
||||
func (s *apiSvc) getListener(cfg config.GUIConfiguration) (net.Listener, error) {
|
||||
cert, err := tls.LoadX509KeyPair(locations[locHTTPSCertFile], locations[locHTTPSKeyFile])
|
||||
if err != nil {
|
||||
l.Infoln("Loading HTTPS certificate:", err)
|
||||
@@ -97,7 +92,7 @@ func (s *apiSvc) getListener(guiCfg config.GUIConfiguration) (net.Listener, erro
|
||||
name = tlsDefaultCommonName
|
||||
}
|
||||
|
||||
cert, err = tlsutil.NewCertificate(locations[locHTTPSCertFile], locations[locHTTPSKeyFile], name, tlsRSABits)
|
||||
cert, err = newCertificate(locations[locHTTPSCertFile], locations[locHTTPSKeyFile], name)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -120,18 +115,20 @@ func (s *apiSvc) getListener(guiCfg config.GUIConfiguration) (net.Listener, erro
|
||||
},
|
||||
}
|
||||
|
||||
rawListener, err := net.Listen("tcp", guiCfg.Address())
|
||||
rawListener, err := net.Listen("tcp", cfg.Address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
listener := &tlsutil.DowngradingListener{rawListener, tlsCfg}
|
||||
listener := &DowngradingListener{rawListener, tlsCfg}
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
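The listener returned by getListener above can serve both plain HTTP and HTTPS on the same port. The real tlsutil.DowngradingListener is not shown in this excerpt; the sketch below illustrates the usual technique of peeking at the first byte to detect a TLS handshake record:

package sketch

import (
	"bufio"
	"crypto/tls"
	"net"
)

// downgradingListener illustrates the idea (assumed, not the actual
// implementation): peek one byte per accepted connection and only wrap it
// in TLS when it looks like a handshake.
type downgradingListener struct {
	net.Listener
	tlsCfg *tls.Config
}

func (l downgradingListener) Accept() (net.Conn, error) {
	conn, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	br := bufio.NewReader(conn)
	peeked, err := br.Peek(1)
	if err != nil {
		conn.Close()
		return nil, err
	}
	bc := bufferedConn{Conn: conn, r: br}
	if peeked[0] == 0x16 { // TLS handshake record type
		return tls.Server(bc, l.tlsCfg), nil
	}
	return bc, nil
}

// bufferedConn makes sure the peeked byte is not lost for later reads.
type bufferedConn struct {
	net.Conn
	r *bufio.Reader
}

func (c bufferedConn) Read(p []byte) (int, error) { return c.r.Read(p) }
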
func (s *apiSvc) Serve() {
|
||||
s.stop = make(chan struct{})
|
||||
|
||||
l.AddHandler(logger.LevelWarn, s.showGuiError)
|
||||
|
||||
// The GET handlers
|
||||
getRestMux := http.NewServeMux()
|
||||
getRestMux.HandleFunc("/rest/db/completion", s.getDBCompletion) // device folder
|
||||
@@ -156,9 +153,6 @@ func (s *apiSvc) Serve() {
|
||||
getRestMux.HandleFunc("/rest/system/status", s.getSystemStatus) // -
|
||||
getRestMux.HandleFunc("/rest/system/upgrade", s.getSystemUpgrade) // -
|
||||
getRestMux.HandleFunc("/rest/system/version", s.getSystemVersion) // -
|
||||
getRestMux.HandleFunc("/rest/system/debug", s.getSystemDebug) // -
|
||||
getRestMux.HandleFunc("/rest/system/log", s.getSystemLog) // [since]
|
||||
getRestMux.HandleFunc("/rest/system/log.txt", s.getSystemLogTxt) // [since]
|
||||
|
||||
// The POST handlers
|
||||
postRestMux := http.NewServeMux()
|
||||
@@ -167,6 +161,7 @@ func (s *apiSvc) Serve() {
|
||||
postRestMux.HandleFunc("/rest/db/override", s.postDBOverride) // folder
|
||||
postRestMux.HandleFunc("/rest/db/scan", s.postDBScan) // folder [sub...] [delay]
|
||||
postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig) // <body>
|
||||
postRestMux.HandleFunc("/rest/system/discovery", s.postSystemDiscovery) // device addr
|
||||
postRestMux.HandleFunc("/rest/system/error", s.postSystemError) // <body>
|
||||
postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
|
||||
postRestMux.HandleFunc("/rest/system/ping", s.restPing) // -
|
||||
@@ -174,9 +169,6 @@ func (s *apiSvc) Serve() {
|
||||
postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart) // -
|
||||
postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown) // -
|
||||
postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // -
|
||||
postRestMux.HandleFunc("/rest/system/pause", s.postSystemPause) // device
|
||||
postRestMux.HandleFunc("/rest/system/resume", s.postSystemResume) // device
|
||||
postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]
|
||||
|
||||
// Debug endpoints, not for general use
|
||||
getRestMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
|
||||
@@ -196,38 +188,37 @@ func (s *apiSvc) Serve() {
|
||||
assets: auto.Assets(),
|
||||
})
|
||||
|
||||
guiCfg := s.cfg.GUI()
|
||||
|
||||
// Wrap everything in CSRF protection. The /rest prefix should be
|
||||
// protected, other requests will grant cookies.
|
||||
handler := csrfMiddleware(s.id.String()[:5], "/rest", guiCfg.APIKey(), mux)
|
||||
handler := csrfMiddleware(s.id.String()[:5], "/rest", s.cfg.APIKey, mux)
|
||||
|
||||
// Add our version and ID as a header to responses
|
||||
handler = withDetailsMiddleware(s.id, handler)
|
||||
|
||||
// Wrap everything in basic auth, if user/password is set.
|
||||
if len(guiCfg.User) > 0 && len(guiCfg.Password) > 0 {
|
||||
handler = basicAuthAndSessionMiddleware("sessionid-"+s.id.String()[:5], guiCfg, handler)
|
||||
if len(s.cfg.User) > 0 && len(s.cfg.Password) > 0 {
|
||||
handler = basicAuthAndSessionMiddleware("sessionid-"+s.id.String()[:5], s.cfg, handler)
|
||||
}
|
||||
|
||||
// Redirect to HTTPS if we are supposed to
|
||||
if guiCfg.UseTLS() {
|
||||
if s.cfg.UseTLS {
|
||||
handler = redirectToHTTPSMiddleware(handler)
|
||||
}
|
||||
|
||||
handler = debugMiddleware(handler)
|
||||
if debugHTTP {
|
||||
handler = debugMiddleware(handler)
|
||||
}
|
||||
|
||||
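The handler built above is wrapped in several layers: CSRF protection, the version/ID header, optional basic auth with sessions, an HTTPS redirect, and optional debug logging. A small sketch of this onion pattern, with a hypothetical header middleware standing in for the real ones:

package sketch

import "net/http"

// withExampleHeader is a stand-in middleware (hypothetical, for
// illustration only): it wraps the next handler and runs before it, which
// is why the wrapping order above determines the processing order.
func withExampleHeader(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Example", "1")
		next.ServeHTTP(w, r)
	})
}

// Usage, mirroring the chain above (names illustrative; the outermost
// wrapper sees each request first):
//
//	handler := withExampleHeader(csrfHandler(authHandler(mux)))
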
srv := http.Server{
|
||||
Handler: handler,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
s.fss = newFolderSummarySvc(s.cfg, s.model)
|
||||
s.fss = newFolderSummarySvc(s.model)
|
||||
defer s.fss.Stop()
|
||||
s.fss.ServeBackground()
|
||||
|
||||
l.Infoln("API listening on", s.listener.Addr())
|
||||
l.Infoln("GUI URL is", guiCfg.URL())
|
||||
err := srv.Serve(s.listener)
|
||||
|
||||
// The return could be due to an intentional close. Wait for the stop
|
||||
@@ -275,6 +266,7 @@ func (s *apiSvc) CommitConfiguration(from, to config.Configuration) bool {
|
||||
// method.
|
||||
return false
|
||||
}
|
||||
s.cfg = to.GUI
|
||||
|
||||
close(s.stop)
|
||||
|
||||
@@ -370,36 +362,6 @@ func (s *apiSvc) getSystemVersion(w http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
}
|
||||
|
||||
func (s *apiSvc) getSystemDebug(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
names := l.Facilities()
|
||||
enabled := l.FacilityDebugging()
|
||||
sort.Strings(enabled)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"facilities": names,
|
||||
"enabled": enabled,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *apiSvc) postSystemDebug(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
q := r.URL.Query()
|
||||
for _, f := range strings.Split(q.Get("enable"), ",") {
|
||||
if f == "" {
|
||||
continue
|
||||
}
|
||||
l.SetDebug(f, true)
|
||||
l.Infof("Enabled debug data for %q", f)
|
||||
}
|
||||
for _, f := range strings.Split(q.Get("disable"), ",") {
|
||||
if f == "" {
|
||||
continue
|
||||
}
|
||||
l.SetDebug(f, false)
|
||||
l.Infof("Disabled debug data for %q", f)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSvc) getDBBrowse(w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
@@ -440,12 +402,12 @@ func (s *apiSvc) getDBCompletion(w http.ResponseWriter, r *http.Request) {
|
||||
func (s *apiSvc) getDBStatus(w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
res := folderSummary(s.cfg, s.model, folder)
|
||||
res := folderSummary(s.model, folder)
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func folderSummary(cfg *config.Wrapper, m *model.Model, folder string) map[string]interface{} {
|
||||
func folderSummary(m *model.Model, folder string) map[string]interface{} {
|
||||
var res = make(map[string]interface{})
|
||||
|
||||
res["invalid"] = cfg.Folders()[folder].Invalid
|
||||
@@ -555,21 +517,22 @@ func (s *apiSvc) getDBFile(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func (s *apiSvc) getSystemConfig(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(s.cfg.Raw())
|
||||
json.NewEncoder(w).Encode(cfg.Raw())
|
||||
}
|
||||
|
||||
func (s *apiSvc) postSystemConfig(w http.ResponseWriter, r *http.Request) {
|
||||
s.systemConfigMut.Lock()
|
||||
defer s.systemConfigMut.Unlock()
|
||||
|
||||
to, err := config.ReadJSON(r.Body, myID)
|
||||
var to config.Configuration
|
||||
err := json.NewDecoder(r.Body).Decode(&to)
|
||||
if err != nil {
|
||||
l.Warnln("decoding posted config:", err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
if to.GUI.Password != s.cfg.GUI().Password {
|
||||
if to.GUI.Password != cfg.GUI().Password {
|
||||
if to.GUI.Password != "" {
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(to.GUI.Password), 0)
|
||||
if err != nil {
|
||||
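The code above re-hashes the GUI password with bcrypt whenever the posted value differs from the stored hash, so only a hash is ever persisted. A short sketch of the hash/verify pair; passing cost 0 makes bcrypt fall back to its default cost:

package sketch

import "golang.org/x/crypto/bcrypt"

// hashPassword mirrors the GenerateFromPassword call above.
func hashPassword(plain string) (string, error) {
	hash, err := bcrypt.GenerateFromPassword([]byte(plain), 0)
	return string(hash), err
}

// checkPassword is the matching verification step used at login time.
func checkPassword(hash, candidate string) bool {
	return bcrypt.CompareHashAndPassword([]byte(hash), []byte(candidate)) == nil
}
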
@@ -584,7 +547,7 @@ func (s *apiSvc) postSystemConfig(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Fixup usage reporting settings
|
||||
|
||||
if curAcc := s.cfg.Options().URAccepted; to.Options.URAccepted > curAcc {
|
||||
if curAcc := cfg.Options().URAccepted; to.Options.URAccepted > curAcc {
|
||||
// UR was enabled
|
||||
to.Options.URAccepted = usageReportVersion
|
||||
to.Options.URUniqueID = randomString(8)
|
||||
@@ -596,9 +559,9 @@ func (s *apiSvc) postSystemConfig(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Activate and save
|
||||
|
||||
resp := s.cfg.Replace(to)
|
||||
resp := cfg.Replace(to)
|
||||
configInSync = !resp.RequiresRestart
|
||||
s.cfg.Save()
|
||||
cfg.Save()
|
||||
}
|
||||
|
||||
func (s *apiSvc) getSystemConfigInsync(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -616,7 +579,7 @@ func (s *apiSvc) postSystemReset(w http.ResponseWriter, r *http.Request) {
|
||||
folder := qs.Get("folder")
|
||||
|
||||
if len(folder) > 0 {
|
||||
if _, ok := s.cfg.Folders()[folder]; !ok {
|
||||
if _, ok := cfg.Folders()[folder]; !ok {
|
||||
http.Error(w, "Invalid folder ID", 500)
|
||||
return
|
||||
}
|
||||
@@ -624,7 +587,7 @@ func (s *apiSvc) postSystemReset(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if len(folder) == 0 {
|
||||
// Reset all folders.
|
||||
for folder := range s.cfg.Folders() {
|
||||
for folder := range cfg.Folders() {
|
||||
s.model.ResetFolder(folder)
|
||||
}
|
||||
s.flushResponse(`{"ok": "resetting database"}`, w)
|
||||
@@ -662,30 +625,8 @@ func (s *apiSvc) getSystemStatus(w http.ResponseWriter, r *http.Request) {
|
||||
res["alloc"] = m.Alloc
|
||||
res["sys"] = m.Sys - m.HeapReleased
|
||||
res["tilde"] = tilde
|
||||
if s.cfg.Options().LocalAnnEnabled || s.cfg.Options().GlobalAnnEnabled {
|
||||
res["discoveryEnabled"] = true
|
||||
discoErrors := make(map[string]string)
|
||||
discoMethods := 0
|
||||
for disco, err := range s.discoverer.ChildErrors() {
|
||||
discoMethods++
|
||||
if err != nil {
|
||||
discoErrors[disco] = err.Error()
|
||||
}
|
||||
}
|
||||
res["discoveryMethods"] = discoMethods
|
||||
res["discoveryErrors"] = discoErrors
|
||||
}
|
||||
if s.relaySvc != nil {
|
||||
res["relaysEnabled"] = true
|
||||
relayClientStatus := make(map[string]bool)
|
||||
relayClientLatency := make(map[string]int)
|
||||
for _, relay := range s.relaySvc.Relays() {
|
||||
latency, ok := s.relaySvc.RelayStatus(relay)
|
||||
relayClientStatus[relay] = ok
|
||||
relayClientLatency[relay] = int(latency / time.Millisecond)
|
||||
}
|
||||
res["relayClientStatus"] = relayClientStatus
|
||||
res["relayClientLatency"] = relayClientLatency
|
||||
if cfg.Options().GlobalAnnEnabled && discoverer != nil {
|
||||
res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
|
||||
}
|
||||
cpuUsageLock.RLock()
|
||||
var cpusum float64
|
||||
@@ -696,7 +637,6 @@ func (s *apiSvc) getSystemStatus(w http.ResponseWriter, r *http.Request) {
|
||||
res["cpuPercent"] = cpusum / float64(len(cpuUsagePercent)) / float64(runtime.NumCPU())
|
||||
res["pathSeparator"] = string(filepath.Separator)
|
||||
res["uptime"] = int(time.Since(startTime).Seconds())
|
||||
res["startTime"] = startTime
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
@@ -704,53 +644,51 @@ func (s *apiSvc) getSystemStatus(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func (s *apiSvc) getSystemError(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(map[string][]logger.Line{
|
||||
"errors": s.guiErrors.Since(time.Time{}),
|
||||
})
|
||||
guiErrorsMut.Lock()
|
||||
json.NewEncoder(w).Encode(map[string][]guiError{"errors": guiErrors})
|
||||
guiErrorsMut.Unlock()
|
||||
}
|
||||
|
||||
func (s *apiSvc) postSystemError(w http.ResponseWriter, r *http.Request) {
|
||||
bs, _ := ioutil.ReadAll(r.Body)
|
||||
r.Body.Close()
|
||||
l.Warnln(string(bs))
|
||||
s.showGuiError(0, string(bs))
|
||||
}
|
||||
|
||||
func (s *apiSvc) postSystemErrorClear(w http.ResponseWriter, r *http.Request) {
|
||||
s.guiErrors.Clear()
|
||||
guiErrorsMut.Lock()
|
||||
guiErrors = []guiError{}
|
||||
guiErrorsMut.Unlock()
|
||||
}
|
||||
|
||||
func (s *apiSvc) getSystemLog(w http.ResponseWriter, r *http.Request) {
|
||||
q := r.URL.Query()
|
||||
since, err := time.Parse(time.RFC3339, q.Get("since"))
|
||||
l.Debugln(err)
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
json.NewEncoder(w).Encode(map[string][]logger.Line{
|
||||
"messages": s.systemLog.Since(since),
|
||||
})
|
||||
func (s *apiSvc) showGuiError(l logger.LogLevel, err string) {
|
||||
guiErrorsMut.Lock()
|
||||
guiErrors = append(guiErrors, guiError{time.Now(), err})
|
||||
if len(guiErrors) > 5 {
|
||||
guiErrors = guiErrors[len(guiErrors)-5:]
|
||||
}
|
||||
guiErrorsMut.Unlock()
|
||||
}
|
||||
|
||||
func (s *apiSvc) getSystemLogTxt(w http.ResponseWriter, r *http.Request) {
|
||||
q := r.URL.Query()
|
||||
since, err := time.Parse(time.RFC3339, q.Get("since"))
|
||||
l.Debugln(err)
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
|
||||
for _, line := range s.systemLog.Since(since) {
|
||||
fmt.Fprintf(w, "%s: %s\n", line.When.Format(time.RFC3339), line.Message)
|
||||
func (s *apiSvc) postSystemDiscovery(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var device = qs.Get("device")
|
||||
var addr = qs.Get("addr")
|
||||
if len(device) != 0 && len(addr) != 0 && discoverer != nil {
|
||||
discoverer.Hint(device, []string{addr})
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSvc) getSystemDiscovery(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
devices := make(map[string]discover.CacheEntry)
|
||||
devices := map[string][]discover.CacheEntry{}
|
||||
|
||||
if s.discoverer != nil {
|
||||
if discoverer != nil {
|
||||
// Device ids can't be marshalled as keys so we need to manually
|
||||
// rebuild this map using strings. Discoverer may be nil if discovery
|
||||
// has not started yet.
|
||||
for device, entry := range s.discoverer.Cache() {
|
||||
devices[device.String()] = entry
|
||||
for device, entries := range discoverer.All() {
|
||||
devices[device.String()] = entries
|
||||
}
|
||||
}
|
||||
|
||||
@@ -759,7 +697,7 @@ func (s *apiSvc) getSystemDiscovery(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func (s *apiSvc) getReport(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(reportData(s.cfg, s.model))
|
||||
json.NewEncoder(w).Encode(reportData(s.model))
|
||||
}
|
||||
|
||||
func (s *apiSvc) getDBIgnores(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -828,7 +766,7 @@ func (s *apiSvc) getSystemUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), 500)
|
||||
return
|
||||
}
|
||||
rel, err := upgrade.LatestRelease(s.cfg.Options().ReleasesURL, Version)
|
||||
rel, err := upgrade.LatestRelease(Version)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
@@ -871,7 +809,7 @@ func (s *apiSvc) getLang(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (s *apiSvc) postSystemUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
rel, err := upgrade.LatestRelease(s.cfg.Options().ReleasesURL, Version)
|
||||
rel, err := upgrade.LatestRelease(Version)
|
||||
if err != nil {
|
||||
l.Warnln("getting latest release:", err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
@@ -892,32 +830,6 @@ func (s *apiSvc) postSystemUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSvc) postSystemPause(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var deviceStr = qs.Get("device")
|
||||
|
||||
device, err := protocol.DeviceIDFromString(deviceStr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
s.model.PauseDevice(device)
|
||||
}
|
||||
|
||||
func (s *apiSvc) postSystemResume(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var deviceStr = qs.Get("device")
|
||||
|
||||
device, err := protocol.DeviceIDFromString(deviceStr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
s.model.ResumeDevice(device)
|
||||
}
|
||||
|
||||
func (s *apiSvc) postDBScan(w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
@@ -969,7 +881,7 @@ func (s *apiSvc) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
|
||||
tot := map[string]float64{}
|
||||
count := map[string]float64{}
|
||||
|
||||
for _, folder := range s.cfg.Folders() {
|
||||
for _, folder := range cfg.Folders() {
|
||||
for _, device := range folder.DeviceIDs() {
|
||||
deviceStr := device.String()
|
||||
if s.model.ConnectedTo(device) {
|
||||
|
||||
@@ -25,9 +25,9 @@ var (
|
||||
)
|
||||
|
||||
func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguration, next http.Handler) http.Handler {
|
||||
apiKey := cfg.APIKey()
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if apiKey != "" && r.Header.Get("X-API-Key") == apiKey {
|
||||
if cfg.APIKey != "" && r.Header.Get("X-API-Key") == cfg.APIKey {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
@@ -43,7 +43,9 @@ func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguratio
|
||||
}
|
||||
}
|
||||
|
||||
httpl.Debugln("Sessionless HTTP request with authentication; this is expensive.")
|
||||
if debugHTTP {
|
||||
l.Debugln("Sessionless HTTP request with authentication; this is expensive.")
|
||||
}
|
||||
|
||||
error := func() {
|
||||
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package connections
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
@@ -12,20 +12,13 @@ import (
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
type LimitedReader struct {
|
||||
reader io.Reader
|
||||
type limitedReader struct {
|
||||
r io.Reader
|
||||
bucket *ratelimit.Bucket
|
||||
}
|
||||
|
||||
func NewReadLimiter(r io.Reader, b *ratelimit.Bucket) *LimitedReader {
|
||||
return &LimitedReader{
|
||||
reader: r,
|
||||
bucket: b,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *LimitedReader) Read(buf []byte) (int, error) {
|
||||
n, err := r.reader.Read(buf)
|
||||
func (r *limitedReader) Read(buf []byte) (int, error) {
|
||||
n, err := r.r.Read(buf)
|
||||
if r.bucket != nil {
|
||||
r.bucket.Wait(int64(n))
|
||||
}
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package connections
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
@@ -12,21 +12,14 @@ import (
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
type LimitedWriter struct {
|
||||
writer io.Writer
|
||||
type limitedWriter struct {
|
||||
w io.Writer
|
||||
bucket *ratelimit.Bucket
|
||||
}
|
||||
|
||||
func NewWriteLimiter(w io.Writer, b *ratelimit.Bucket) *LimitedWriter {
|
||||
return &LimitedWriter{
|
||||
writer: w,
|
||||
bucket: b,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *LimitedWriter) Write(buf []byte) (int, error) {
|
||||
func (w *limitedWriter) Write(buf []byte) (int, error) {
|
||||
if w.bucket != nil {
|
||||
w.bucket.Wait(int64(len(buf)))
|
||||
}
|
||||
return w.writer.Write(buf)
|
||||
return w.w.Write(buf)
|
||||
}
|
||||
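These limited reader/writer wrappers are what handle() puts around a connection when shouldLimit() says so, sharing a token bucket across connections. A sketch of the read-side wiring, restating the type so the snippet stands alone; the 50 KB/s figure is only an example:

package sketch

import (
	"io"
	"net"

	"github.com/juju/ratelimit"
)

type limitedReader struct {
	r      io.Reader
	bucket *ratelimit.Bucket
}

func (r *limitedReader) Read(buf []byte) (int, error) {
	n, err := r.r.Read(buf)
	if r.bucket != nil {
		r.bucket.Wait(int64(n))
	}
	return n, err
}

// wrapConn applies a read-side token bucket to a connection, the way
// handle() does above: 1000*50 tokens/s with a 5 s burst is roughly 50 KB/s.
func wrapConn(conn net.Conn) io.Reader {
	bucket := ratelimit.NewBucketWithRate(float64(1000*50), int64(5*1000*50))
	return &limitedReader{r: conn, bucket: bucket}
}
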
@@ -7,9 +7,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@@ -23,32 +21,31 @@ import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/logger"
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/connections"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/dialer"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay"
|
||||
"github.com/syncthing/syncthing/lib/symlinks"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/thejerf/suture"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
var (
|
||||
Version = "unknown-dev"
|
||||
Codename = "Beryllium Bedbug"
|
||||
Codename = "Aluminium Ant"
|
||||
BuildEnv = "default"
|
||||
BuildStamp = "0"
|
||||
BuildDate time.Time
|
||||
@@ -68,21 +65,11 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
bepProtocolName = "bep/1.0"
|
||||
tlsDefaultCommonName = "syncthing"
|
||||
tlsRSABits = 3072
|
||||
pingEventInterval = time.Minute
|
||||
maxSystemErrors = 5
|
||||
initialSystemLog = 10
|
||||
maxSystemLog = 250
|
||||
bepProtocolName = "bep/1.0"
|
||||
pingEventInterval = time.Minute
|
||||
)
|
||||
|
||||
// The discovery results are sorted by their source priority.
|
||||
const (
|
||||
ipv6LocalDiscoveryPriority = iota
|
||||
ipv4LocalDiscoveryPriority
|
||||
globalDiscoveryPriority
|
||||
)
|
||||
var l = logger.DefaultLogger
|
||||
|
||||
func init() {
|
||||
if Version != "unknown-dev" {
|
||||
@@ -115,12 +102,16 @@ func init() {
|
||||
}
|
||||
|
||||
var (
|
||||
myID protocol.DeviceID
|
||||
confDir string
|
||||
logFlags = log.Ltime
|
||||
stop = make(chan int)
|
||||
cert tls.Certificate
|
||||
lans []*net.IPNet
|
||||
cfg *config.Wrapper
|
||||
myID protocol.DeviceID
|
||||
confDir string
|
||||
logFlags = log.Ltime
|
||||
writeRateLimit *ratelimit.Bucket
|
||||
readRateLimit *ratelimit.Bucket
|
||||
stop = make(chan int)
|
||||
discoverer *discover.Discoverer
|
||||
cert tls.Certificate
|
||||
lans []*net.IPNet
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -150,11 +141,25 @@ Development Settings
|
||||
The following environment variables modify syncthing's behavior in ways that
|
||||
are mostly useful for developers. Use with care.
|
||||
|
||||
STGUIASSETS Directory to load GUI assets from. Overrides compiled in
|
||||
assets.
|
||||
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.
|
||||
|
||||
STTRACE A comma separated string of facilities to trace. The valid
|
||||
facility strings are listed below.
|
||||
facility strings are:
|
||||
|
||||
- "beacon" (the beacon package)
|
||||
- "discover" (the discover package)
|
||||
- "events" (the events package)
|
||||
- "files" (the files package)
|
||||
- "http" (the main package; HTTP requests)
|
||||
- "locks" (the sync package; trace long held locks)
|
||||
- "net" (the main package; connections & network messages)
|
||||
- "model" (the model package)
|
||||
- "scanner" (the scanner package)
|
||||
- "stats" (the stats package)
|
||||
- "suture" (the suture package; service management)
|
||||
- "upnp" (the upnp package)
|
||||
- "xdr" (the xdr package)
|
||||
- "all" (all of the above)
|
||||
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
|
||||
profiler with HTTP access.
|
||||
@@ -177,38 +182,32 @@ are mostly useful for developers. Use with care.
|
||||
|
||||
GOGC Percentage of heap growth at which to trigger GC. Default is
|
||||
100. Lower numbers keep peak memory usage down, at the price
|
||||
of CPU usage (ie. performance).
|
||||
|
||||
|
||||
Debugging Facilities
|
||||
--------------------
|
||||
|
||||
The following are valid values for the STTRACE variable:
|
||||
|
||||
%s`
|
||||
of CPU usage (ie. performance).`
|
||||
)
|
||||
|
||||
// Command line and environment options
|
||||
var (
|
||||
reset bool
|
||||
showVersion bool
|
||||
doUpgrade bool
|
||||
doUpgradeCheck bool
|
||||
upgradeTo string
|
||||
noBrowser bool
|
||||
noConsole bool
|
||||
generateDir string
|
||||
logFile string
|
||||
auditEnabled bool
|
||||
verbose bool
|
||||
paused bool
|
||||
noRestart = os.Getenv("STNORESTART") != ""
|
||||
noUpgrade = os.Getenv("STNOUPGRADE") != ""
|
||||
profiler = os.Getenv("STPROFILER")
|
||||
guiAssets = os.Getenv("STGUIASSETS")
|
||||
cpuProfile = os.Getenv("STCPUPROFILE") != ""
|
||||
stRestarting = os.Getenv("STRESTART") != ""
|
||||
innerProcess = os.Getenv("STNORESTART") != "" || os.Getenv("STMONITORED") != ""
|
||||
reset bool
|
||||
showVersion bool
|
||||
doUpgrade bool
|
||||
doUpgradeCheck bool
|
||||
upgradeTo string
|
||||
noBrowser bool
|
||||
noConsole bool
|
||||
generateDir string
|
||||
logFile string
|
||||
auditEnabled bool
|
||||
verbose bool
|
||||
noRestart = os.Getenv("STNORESTART") != ""
|
||||
noUpgrade = os.Getenv("STNOUPGRADE") != ""
|
||||
guiAddress = os.Getenv("STGUIADDRESS") // legacy
|
||||
guiAuthentication = os.Getenv("STGUIAUTH") // legacy
|
||||
guiAPIKey = os.Getenv("STGUIAPIKEY") // legacy
|
||||
profiler = os.Getenv("STPROFILER")
|
||||
guiAssets = os.Getenv("STGUIASSETS")
|
||||
cpuProfile = os.Getenv("STCPUPROFILE") != ""
|
||||
stRestarting = os.Getenv("STRESTART") != ""
|
||||
innerProcess = os.Getenv("STNORESTART") != "" || os.Getenv("STMONITORED") != ""
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -223,9 +222,9 @@ func main() {
|
||||
flag.StringVar(&logFile, "logfile", "-", "Log file name (use \"-\" for stdout)")
|
||||
}
|
||||
|
||||
var guiAddress, guiAPIKey string
|
||||
flag.StringVar(&generateDir, "generate", "", "Generate key and config in specified dir, then exit")
|
||||
flag.StringVar(&guiAddress, "gui-address", guiAddress, "Override GUI address")
|
||||
flag.StringVar(&guiAuthentication, "gui-authentication", guiAuthentication, "Override GUI authentication; username:password")
|
||||
flag.StringVar(&guiAPIKey, "gui-apikey", guiAPIKey, "Override GUI API key")
|
||||
flag.StringVar(&confDir, "home", "", "Set configuration directory")
|
||||
flag.IntVar(&logFlags, "logflags", logFlags, "Select information in log line prefix")
|
||||
@@ -238,21 +237,10 @@ func main() {
|
||||
flag.StringVar(&upgradeTo, "upgrade-to", upgradeTo, "Force upgrade directly from specified URL")
|
||||
flag.BoolVar(&auditEnabled, "audit", false, "Write events to audit file")
|
||||
flag.BoolVar(&verbose, "verbose", false, "Print verbose log output")
|
||||
flag.BoolVar(&paused, "paused", false, "Start with all devices paused")
|
||||
|
||||
longUsage := fmt.Sprintf(extraUsage, baseDirs["config"], debugFacilities())
|
||||
flag.Usage = usageFor(flag.CommandLine, usage, longUsage)
|
||||
flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, baseDirs["config"]))
|
||||
flag.Parse()
|
||||
|
||||
if guiAddress != "" {
|
||||
// The config picks this up from the environment.
|
||||
os.Setenv("STGUIADDRESS", guiAddress)
|
||||
}
|
||||
if guiAPIKey != "" {
|
||||
// The config picks this up from the environment.
|
||||
os.Setenv("STGUIAPIKEY", guiAPIKey)
|
||||
}
|
||||
|
||||
if noConsole {
|
||||
osutil.HideConsole()
|
||||
}
|
||||
@@ -305,13 +293,10 @@ func main() {
|
||||
l.Warnln("Key exists; will not overwrite.")
|
||||
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
|
||||
} else {
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, tlsDefaultCommonName, tlsRSABits)
|
||||
if err != nil {
|
||||
l.Fatalln("Create certificate:", err)
|
||||
}
|
||||
cert, err = newCertificate(certFile, keyFile, tlsDefaultCommonName)
|
||||
myID = protocol.NewDeviceID(cert.Certificate[0])
|
||||
if err != nil {
|
||||
l.Fatalln("Load certificate:", err)
|
||||
l.Fatalln("load cert:", err)
|
||||
}
|
||||
if err == nil {
|
||||
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
|
||||
@@ -351,11 +336,7 @@ func main() {
|
||||
}
|
||||
|
||||
if doUpgrade || doUpgradeCheck {
|
||||
releasesURL := "https://api.github.com/repos/syncthing/syncthing/releases?per_page=30"
|
||||
if cfg, _, err := loadConfig(locations[locConfigFile]); err == nil {
|
||||
releasesURL = cfg.Options().ReleasesURL
|
||||
}
|
||||
rel, err := upgrade.LatestRelease(releasesURL, Version)
|
||||
rel, err := upgrade.LatestRelease(Version)
|
||||
if err != nil {
|
||||
l.Fatalln("Upgrade:", err) // exits 1
|
||||
}
|
||||
@@ -369,7 +350,7 @@ func main() {
|
||||
|
||||
if doUpgrade {
|
||||
// Use leveldb database locks to protect against concurrent upgrades
|
||||
_, err = db.Open(locations[locDatabase])
|
||||
_, err = leveldb.OpenFile(locations[locDatabase], &opt.Options{OpenFilesCacheCapacity: 100})
|
||||
if err != nil {
|
||||
l.Infoln("Attempting upgrade through running Syncthing...")
|
||||
err = upgradeViaRest()
|
||||
@@ -402,40 +383,21 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func debugFacilities() string {
|
||||
facilities := l.Facilities()
|
||||
|
||||
// Get a sorted list of names
|
||||
var names []string
|
||||
maxLen := 0
|
||||
for name := range facilities {
|
||||
names = append(names, name)
|
||||
if len(name) > maxLen {
|
||||
maxLen = len(name)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
// Format the choices
|
||||
b := new(bytes.Buffer)
|
||||
for _, name := range names {
|
||||
fmt.Fprintf(b, " %-*s - %s\n", maxLen, name, facilities[name])
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func upgradeViaRest() error {
|
||||
cfg, err := config.Load(locations[locConfigFile], protocol.LocalDeviceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target := cfg.GUI().URL()
|
||||
target := cfg.GUI().Address
|
||||
if cfg.GUI().UseTLS {
|
||||
target = "https://" + target
|
||||
} else {
|
||||
target = "http://" + target
|
||||
}
|
||||
r, _ := http.NewRequest("POST", target+"/rest/system/upgrade", nil)
|
||||
r.Header.Set("X-API-Key", cfg.GUI().APIKey())
|
||||
r.Header.Set("X-API-Key", cfg.GUI().APIKey)
|
||||
|
||||
tr := &http.Transport{
|
||||
Dial: dialer.Dial,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client := &http.Client{
|
||||
@@ -463,7 +425,9 @@ func syncthingMain() {
|
||||
// We want any logging it does to go through our log system.
|
||||
mainSvc := suture.New("main", suture.Spec{
|
||||
Log: func(line string) {
|
||||
l.Debugln(line)
|
||||
if debugSuture {
|
||||
l.Debugln(line)
|
||||
}
|
||||
},
|
||||
})
|
||||
mainSvc.ServeBackground()
|
||||
@@ -480,9 +444,6 @@ func syncthingMain() {
|
||||
mainSvc.Add(newVerboseSvc())
|
||||
}
|
||||
|
||||
errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
|
||||
systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)
|
||||
|
||||
// Event subscription for the API; must start early to catch the early events.
|
||||
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)
|
||||
|
||||
@@ -498,10 +459,9 @@ func syncthingMain() {
|
||||
// Ensure that we have a certificate and key.
|
||||
cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
|
||||
if err != nil {
|
||||
l.Infof("Generating RSA key and certificate for %s...", tlsDefaultCommonName)
|
||||
cert, err = tlsutil.NewCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName, tlsRSABits)
|
||||
cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
|
||||
if err != nil {
|
||||
l.Fatalln(err)
|
||||
l.Fatalln("load cert:", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -514,7 +474,6 @@ func syncthingMain() {
|
||||
|
||||
l.Infoln(LongVersion)
|
||||
l.Infoln("My ID:", myID)
|
||||
printHashRate()
|
||||
|
||||
// Emit the Starting event, now that we know who we are.
|
||||
|
||||
@@ -527,21 +486,33 @@ func syncthingMain() {
|
||||
|
||||
cfgFile := locations[locConfigFile]
|
||||
|
||||
var myName string
|
||||
|
||||
// Load the configuration file, if it exists.
|
||||
// If it does not, create a template.
|
||||
|
||||
cfg, myName, err := loadConfig(cfgFile)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
l.Infoln("No config file; starting with empty defaults")
|
||||
myName, _ = os.Hostname()
|
||||
newCfg := defaultConfig(myName)
|
||||
cfg = config.Wrap(cfgFile, newCfg)
|
||||
cfg.Save()
|
||||
l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
|
||||
} else {
|
||||
l.Fatalln("Loading config:", err)
|
||||
if info, err := os.Stat(cfgFile); err == nil {
|
||||
if !info.Mode().IsRegular() {
|
||||
l.Fatalln("Config file is not a file?")
|
||||
}
|
||||
cfg, err = config.Load(cfgFile, myID)
|
||||
if err == nil {
|
||||
myCfg := cfg.Devices()[myID]
|
||||
if myCfg.Name == "" {
|
||||
myName, _ = os.Hostname()
|
||||
} else {
|
||||
myName = myCfg.Name
|
||||
}
|
||||
} else {
|
||||
l.Fatalln("Configuration:", err)
|
||||
}
|
||||
} else {
|
||||
l.Infoln("No config file; starting with empty defaults")
|
||||
myName, _ = os.Hostname()
|
||||
newCfg := defaultConfig(myName)
|
||||
cfg = config.Wrap(cfgFile, newCfg)
|
||||
cfg.Save()
|
||||
l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
|
||||
}
|
||||
|
||||
if cfg.Raw().OriginalVersion != config.CurrentVersion {
|
||||
@@ -596,47 +567,54 @@ func syncthingMain() {
|
||||
symlinks.Supported = false
|
||||
}
|
||||
|
||||
protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
|
||||
protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second
|
||||
|
||||
if opts.MaxSendKbps > 0 {
|
||||
writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
|
||||
}
|
||||
if opts.MaxRecvKbps > 0 {
|
||||
readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
|
||||
}
|
||||
|
||||
if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
|
||||
lans, _ = osutil.GetLans()
|
||||
for _, lan := range opts.AlwaysLocalNets {
|
||||
_, ipnet, err := net.ParseCIDR(lan)
|
||||
if err != nil {
|
||||
l.Infoln("Network", lan, "is malformed:", err)
|
||||
continue
|
||||
}
|
||||
lans = append(lans, ipnet)
|
||||
}
|
||||
|
||||
networks := make([]string, len(lans))
|
||||
for i, lan := range lans {
|
||||
networks[i] = lan.String()
|
||||
networks := make([]string, 0, len(lans))
|
||||
for _, lan := range lans {
|
||||
networks = append(networks, lan.String())
|
||||
}
|
||||
l.Infoln("Local networks:", strings.Join(networks, ", "))
|
||||
}
|
||||
|
||||
dbFile := locations[locDatabase]
|
||||
ldb, err := db.Open(dbFile)
|
||||
ldb, err := leveldb.OpenFile(dbFile, dbOpts())
|
||||
if leveldbIsCorrupted(err) {
|
||||
ldb, err = leveldb.RecoverFile(dbFile, dbOpts())
|
||||
}
|
||||
if leveldbIsCorrupted(err) {
|
||||
// The database is corrupted, and we've tried to recover it but it
|
||||
// didn't work. At this point there isn't much to do beyond dropping
|
||||
// the database and reindexing...
|
||||
l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
|
||||
if err := resetDB(); err != nil {
|
||||
l.Fatalln("Remove database:", err)
|
||||
}
|
||||
ldb, err = leveldb.OpenFile(dbFile, dbOpts())
|
||||
}
|
||||
if err != nil {
|
||||
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
|
||||
}
|
||||
|
||||
protectedFiles := []string{
|
||||
locations[locDatabase],
|
||||
locations[locConfigFile],
|
||||
locations[locCertFile],
|
||||
locations[locKeyFile],
|
||||
}
|
||||
|
||||
// Remove database entries for folders that no longer exist in the config
|
||||
folders := cfg.Folders()
|
||||
for _, folder := range ldb.ListFolders() {
|
||||
for _, folder := range db.ListFolders(ldb) {
|
||||
if _, ok := folders[folder]; !ok {
|
||||
l.Infof("Cleaning data for dropped folder %q", folder)
|
||||
db.DropFolder(ldb, folder)
|
||||
}
|
||||
}
|
||||
|
||||
m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb, protectedFiles)
|
||||
m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
|
||||
cfg.Subscribe(m)
|
||||
|
||||
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
|
||||
@@ -645,13 +623,7 @@ func syncthingMain() {
|
||||
m.StartDeadlockDetector(time.Duration(it) * time.Second)
|
||||
}
|
||||
} else if !IsRelease || IsBeta {
|
||||
m.StartDeadlockDetector(20 * time.Minute)
|
||||
}
|
||||
|
||||
if paused {
|
||||
for device := range cfg.Devices() {
|
||||
m.PauseDevice(device)
|
||||
}
|
||||
m.StartDeadlockDetector(20 * 60 * time.Second)
|
||||
}
|
||||
|
||||
// Clear out old indexes for other devices. Otherwise we'll start up and
|
||||
@@ -676,88 +648,32 @@ func syncthingMain() {
|
||||
|
||||
mainSvc.Add(m)
|
||||
|
||||
// GUI
|
||||
|
||||
setupGUI(mainSvc, cfg, m, apiSub)
|
||||
|
||||
// The default port we announce, possibly modified by setupUPnP next.
|
||||
|
||||
uri, err := url.Parse(opts.ListenAddress[0])
|
||||
if err != nil {
|
||||
l.Fatalf("Failed to parse listen address %s: %v", opts.ListenAddress[0], err)
|
||||
}
|
||||
|
||||
addr, err := net.ResolveTCPAddr("tcp", uri.Host)
|
||||
addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
|
||||
if err != nil {
|
||||
l.Fatalln("Bad listen address:", err)
|
||||
}
|
||||
|
||||
// The externalAddr tracks our external addresses for discovery purposes.
|
||||
|
||||
var addrList *addressLister
|
||||
|
||||
// Start UPnP
|
||||
|
||||
if opts.UPnPEnabled {
|
||||
upnpSvc := newUPnPSvc(cfg, addr.Port)
|
||||
mainSvc.Add(upnpSvc)
|
||||
|
||||
// The external address tracker needs to know about the UPnP service
|
||||
// so it can check for an external mapped port.
|
||||
addrList = newAddressLister(upnpSvc, cfg)
|
||||
} else {
|
||||
addrList = newAddressLister(nil, cfg)
|
||||
}
|
||||
|
||||
// Start relay management
|
||||
|
||||
var relaySvc *relay.Svc
|
||||
if opts.RelaysEnabled && (opts.GlobalAnnEnabled || opts.RelayWithoutGlobalAnn) {
|
||||
relaySvc = relay.NewSvc(cfg, tlsCfg)
|
||||
mainSvc.Add(relaySvc)
|
||||
}
|
||||
|
||||
// Start discovery
|
||||
|
||||
cachedDiscovery := discover.NewCachingMux()
|
||||
mainSvc.Add(cachedDiscovery)
|
||||
localPort := addr.Port
|
||||
discoverer = discovery(localPort)
|
||||
|
||||
if cfg.Options().GlobalAnnEnabled {
|
||||
for _, srv := range cfg.GlobalDiscoveryServers() {
|
||||
l.Infoln("Using discovery server", srv)
|
||||
gd, err := discover.NewGlobal(srv, cert, addrList, relaySvc)
|
||||
if err != nil {
|
||||
l.Warnln("Global discovery:", err)
|
||||
continue
|
||||
}
|
||||
// Start UPnP. The UPnP service will restart global discovery if the
|
||||
// external port changes.
|
||||
|
||||
// Each global discovery server gets its results cached for five
|
||||
// minutes, and is not asked again for a minute when it's returned
|
||||
// unsuccessfully.
|
||||
cachedDiscovery.Add(gd, 5*time.Minute, time.Minute, globalDiscoveryPriority)
|
||||
}
|
||||
if opts.UPnPEnabled {
|
||||
upnpSvc := newUPnPSvc(cfg, localPort)
|
||||
mainSvc.Add(upnpSvc)
|
||||
}
|
||||
|
||||
if cfg.Options().LocalAnnEnabled {
|
||||
// v4 broadcasts
|
||||
bcd, err := discover.NewLocal(myID, fmt.Sprintf(":%d", cfg.Options().LocalAnnPort), addrList, relaySvc)
|
||||
if err != nil {
|
||||
l.Warnln("IPv4 local discovery:", err)
|
||||
} else {
|
||||
cachedDiscovery.Add(bcd, 0, 0, ipv4LocalDiscoveryPriority)
|
||||
}
|
||||
// v6 multicasts
|
||||
mcd, err := discover.NewLocal(myID, cfg.Options().LocalAnnMCAddr, addrList, relaySvc)
|
||||
if err != nil {
|
||||
l.Warnln("IPv6 local discovery:", err)
|
||||
} else {
|
||||
cachedDiscovery.Add(mcd, 0, 0, ipv6LocalDiscoveryPriority)
|
||||
}
|
||||
}
|
||||
|
||||
// GUI
|
||||
|
||||
setupGUI(mainSvc, cfg, m, apiSub, cachedDiscovery, relaySvc, errors, systemLog)
|
||||
|
||||
// Start connection management
|
||||
|
||||
connectionSvc := connections.NewConnectionSvc(cfg, myID, m, tlsCfg, cachedDiscovery, relaySvc, bepProtocolName, tlsDefaultCommonName, lans)
|
||||
connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
|
||||
cfg.Subscribe(connectionSvc)
|
||||
mainSvc.Add(connectionSvc)
|
||||
|
||||
if cpuProfile {
|
||||
@@ -793,7 +709,7 @@ func syncthingMain() {
|
||||
// The usageReportingManager registers itself to listen to configuration
|
||||
// changes, and there's nothing more we need to tell it from the outside.
|
||||
// Hence we don't keep the returned pointer.
|
||||
newUsageReportingManager(cfg, m)
|
||||
newUsageReportingManager(m, cfg)
|
||||
|
||||
if opts.RestartOnWakeup {
|
||||
go standbyMonitor()
|
||||
@@ -803,7 +719,7 @@ func syncthingMain() {
|
||||
if noUpgrade {
|
||||
l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
|
||||
} else if IsRelease {
|
||||
go autoUpgrade(cfg)
|
||||
go autoUpgrade()
|
||||
} else {
|
||||
l.Infof("No automatic upgrades; %s is not a release version.", Version)
|
||||
}
|
||||
@@ -829,43 +745,38 @@ func syncthingMain() {
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// printHashRate prints the hashing performance in MB/s, formatting it with
|
||||
// appropriate precision for the value, i.e. 182 MB/s, 18 MB/s, 1.8 MB/s, 0.18
|
||||
// MB/s.
|
||||
func printHashRate() {
|
||||
hashRate := cpuBench(3, 100*time.Millisecond)
|
||||
func dbOpts() *opt.Options {
|
||||
// Calculate a suitable database block cache capacity.
|
||||
|
||||
decimals := 0
|
||||
if hashRate < 1 {
|
||||
decimals = 2
|
||||
} else if hashRate < 10 {
|
||||
decimals = 1
|
||||
// Default is 8 MiB.
|
||||
blockCacheCapacity := 8 << 20
|
||||
// Increase block cache up to this maximum:
|
||||
const maxCapacity = 64 << 20
|
||||
// ... which we reach when the box has this much RAM:
|
||||
const maxAtRAM = 8 << 30
|
||||
|
||||
if v := cfg.Options().DatabaseBlockCacheMiB; v != 0 {
|
||||
// Use the value from the config, if it's set.
|
||||
blockCacheCapacity = v << 20
|
||||
} else if bytes, err := memorySize(); err == nil {
|
||||
// We start at the default of 8 MiB and use larger values for machines
|
||||
// with more memory.
|
||||
|
||||
if bytes > maxAtRAM {
|
||||
// Cap the cache at maxCapacity when we reach maxAtRAM amount of memory
|
||||
blockCacheCapacity = maxCapacity
|
||||
} else if bytes > maxAtRAM/maxCapacity*int64(blockCacheCapacity) {
|
||||
// Grow from the default to maxCapacity at maxAtRAM amount of memory
|
||||
blockCacheCapacity = int(bytes * maxCapacity / maxAtRAM)
|
||||
}
|
||||
l.Infoln("Database block cache capacity", blockCacheCapacity/1024, "KiB")
|
||||
}
|
||||
|
||||
l.Infof("Single thread hash performance is ~%.*f MB/s", decimals, hashRate)
|
||||
}
|
||||
|
||||
func loadConfig(cfgFile string) (*config.Wrapper, string, error) {
|
||||
info, err := os.Stat(cfgFile)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return &opt.Options{
|
||||
OpenFilesCacheCapacity: 100,
|
||||
BlockCacheCapacity: blockCacheCapacity,
|
||||
WriteBuffer: 4 << 20,
|
||||
}
|
||||
if !info.Mode().IsRegular() {
|
||||
return nil, "", errors.New("configuration is not a file")
|
||||
}
|
||||
|
||||
cfg, err := config.Load(cfgFile, myID)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
myCfg := cfg.Devices()[myID]
|
||||
myName := myCfg.Name
|
||||
if myName == "" {
|
||||
myName, _ = os.Hostname()
|
||||
}
|
||||
|
||||
return cfg, myName, nil
|
||||
}
|
||||
|
||||
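The block cache sizing in dbOpts above boils down to: 8 MiB by default, growing linearly with installed RAM above roughly 1 GiB, and capped at 64 MiB once the machine has 8 GiB. A compact sketch of the same arithmetic:

package sketch

// blockCacheBytes reproduces the sizing rule from dbOpts: default 8 MiB,
// linear growth with RAM above ~1 GiB, capped at 64 MiB at 8 GiB.
func blockCacheBytes(ramBytes int64) int {
	const (
		def   = 8 << 20  // 8 MiB
		max   = 64 << 20 // 64 MiB
		atRAM = 8 << 30  // 8 GiB
	)
	switch {
	case ramBytes > atRAM:
		return max
	case ramBytes > atRAM/max*def: // ~1 GiB
		return int(ramBytes * max / atRAM)
	}
	return def
}
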
func startAuditing(mainSvc *suture.Supervisor) {
|
||||
@@ -885,53 +796,82 @@ func startAuditing(mainSvc *suture.Supervisor) {
|
||||
l.Infoln("Audit log in", auditFile)
|
||||
}
|
||||
|
||||
func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub *events.BufferedSubscription, discoverer *discover.CachingMux, relaySvc *relay.Svc, errors, systemLog *logger.Recorder) {
|
||||
guiCfg := cfg.GUI()
|
||||
func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub *events.BufferedSubscription) {
|
||||
opts := cfg.Options()
|
||||
guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey)
|
||||
|
||||
if !guiCfg.Enabled {
|
||||
return
|
||||
}
|
||||
if guiCfg.Enabled && guiCfg.Address != "" {
|
||||
addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address)
|
||||
if err != nil {
|
||||
l.Fatalf("Cannot start GUI on %q: %v", guiCfg.Address, err)
|
||||
} else {
|
||||
var hostOpen, hostShow string
|
||||
switch {
|
||||
case addr.IP == nil:
|
||||
hostOpen = "localhost"
|
||||
hostShow = "0.0.0.0"
|
||||
case addr.IP.IsUnspecified():
|
||||
hostOpen = "localhost"
|
||||
hostShow = addr.IP.String()
|
||||
default:
|
||||
hostOpen = addr.IP.String()
|
||||
hostShow = hostOpen
|
||||
}
|
||||
|
||||
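The switch above derives two hosts from the configured GUI address: one to print in the log, and one to open in the browser. As a sketch:

package sketch

import "net"

// guiHosts mirrors the switch above: an unspecified or missing IP is shown
// as configured but opened via localhost; a concrete IP is used for both.
func guiHosts(addr *net.TCPAddr) (open, show string) {
	switch {
	case addr.IP == nil:
		return "localhost", "0.0.0.0"
	case addr.IP.IsUnspecified():
		return "localhost", addr.IP.String()
	default:
		return addr.IP.String(), addr.IP.String()
	}
}
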
api, err := newAPISvc(myID, cfg, guiAssets, m, apiSub, discoverer, relaySvc, errors, systemLog)
|
||||
if err != nil {
|
||||
l.Fatalln("Cannot start GUI:", err)
|
||||
}
|
||||
cfg.Subscribe(api)
|
||||
mainSvc.Add(api)
|
||||
var proto = "http"
|
||||
if guiCfg.UseTLS {
|
||||
proto = "https"
|
||||
}
|
||||
|
||||
if cfg.Options().StartBrowser && !noBrowser && !stRestarting {
|
||||
// Can potentially block if the utility we are invoking doesn't
|
||||
// fork, and just execs, hence keep it in its own routine.
|
||||
go openURL(guiCfg.URL())
|
||||
urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
|
||||
l.Infoln("Starting web GUI on", urlShow)
|
||||
api, err := newAPISvc(myID, guiCfg, guiAssets, m, apiSub)
|
||||
if err != nil {
|
||||
l.Fatalln("Cannot start GUI:", err)
|
||||
}
|
||||
cfg.Subscribe(api)
|
||||
mainSvc.Add(api)
|
||||
|
||||
if opts.StartBrowser && !noBrowser && !stRestarting {
|
||||
urlOpen := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostOpen, strconv.Itoa(addr.Port)))
|
||||
// Can potentially block if the utility we are invoking doesn't
|
||||
// fork, and just execs, hence keep it in its own routine.
|
||||
go openURL(urlOpen)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func defaultConfig(myName string) config.Configuration {
|
||||
defaultFolder := config.NewFolderConfiguration("default", locations[locDefFolder])
|
||||
defaultFolder.RescanIntervalS = 60
|
||||
defaultFolder.MinDiskFreePct = 1
|
||||
defaultFolder.Devices = []config.FolderDeviceConfiguration{{DeviceID: myID}}
|
||||
defaultFolder.AutoNormalize = true
|
||||
defaultFolder.MaxConflicts = -1
|
||||
|
||||
thisDevice := config.NewDeviceConfiguration(myID, myName)
|
||||
thisDevice.Addresses = []string{"dynamic"}
|
||||
|
||||
newCfg := config.New(myID)
|
||||
newCfg.Folders = []config.FolderConfiguration{defaultFolder}
|
||||
newCfg.Devices = []config.DeviceConfiguration{thisDevice}
|
||||
newCfg.Folders = []config.FolderConfiguration{
|
||||
{
|
||||
ID: "default",
|
||||
RawPath: locations[locDefFolder],
|
||||
RescanIntervalS: 60,
|
||||
MinDiskFreePct: 1,
|
||||
Devices: []config.FolderDeviceConfiguration{{DeviceID: myID}},
|
||||
},
|
||||
}
|
||||
newCfg.Devices = []config.DeviceConfiguration{
|
||||
{
|
||||
DeviceID: myID,
|
||||
Addresses: []string{"dynamic"},
|
||||
Name: myName,
|
||||
},
|
||||
}
|
||||
|
||||
port, err := getFreePort("127.0.0.1", 8384)
|
||||
if err != nil {
|
||||
l.Fatalln("get free port (GUI):", err)
|
||||
}
|
||||
newCfg.GUI.RawAddress = fmt.Sprintf("127.0.0.1:%d", port)
|
||||
newCfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)
|
||||
|
||||
port, err = getFreePort("0.0.0.0", 22000)
|
||||
if err != nil {
|
||||
l.Fatalln("get free port (BEP):", err)
|
||||
}
|
||||
newCfg.Options.ListenAddress = []string{fmt.Sprintf("tcp://0.0.0.0:%d", port)}
|
||||
newCfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}
|
||||
return newCfg
|
||||
}
|
||||
|
||||
@@ -956,6 +896,23 @@ func shutdown() {
|
||||
stop <- exitSuccess
|
||||
}
|
||||
|
||||
func discovery(extPort int) *discover.Discoverer {
|
||||
opts := cfg.Options()
|
||||
disc := discover.NewDiscoverer(myID, opts.ListenAddress)
|
||||
|
||||
if opts.LocalAnnEnabled {
|
||||
l.Infoln("Starting local discovery announcements")
|
||||
disc.StartLocal(opts.LocalAnnPort, opts.LocalAnnMCAddr)
|
||||
}
|
||||
|
||||
if opts.GlobalAnnEnabled {
|
||||
l.Infoln("Starting global discovery announcements")
|
||||
disc.StartGlobal(opts.GlobalAnnServers, uint16(extPort))
|
||||
}
|
||||
|
||||
return disc
|
||||
}
|
||||
|
||||
func ensureDir(dir string, mode int) {
|
||||
fi, err := os.Stat(dir)
|
||||
if os.IsNotExist(err) {
|
||||
@@ -993,6 +950,48 @@ func getFreePort(host string, ports ...int) (int, error) {
|
||||
return addr.Port, nil
|
||||
}
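
Only the signature of `getFreePort` is visible in this hunk. As a rough illustration of what such a helper can do (not necessarily what this one does), the sketch below tries the preferred ports in order and falls back to an OS-assigned ephemeral port; the name and behaviour are assumptions.

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// getFreePortSketch is a hypothetical stand-in for the helper above: try each
// preferred port, and if none can be bound, let the OS pick a free one.
func getFreePortSketch(host string, ports ...int) (int, error) {
	for _, port := range ports {
		ln, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err == nil {
			ln.Close()
			return port, nil
		}
	}
	ln, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
	if err != nil {
		return 0, err
	}
	defer ln.Close()
	return ln.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := getFreePortSketch("127.0.0.1", 8384, 8385)
	fmt.Println(port, err)
}
```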
|
||||
|
||||
func overrideGUIConfig(cfg config.GUIConfiguration, address, authentication, apikey string) config.GUIConfiguration {
if address != "" {
cfg.Enabled = true

if !strings.Contains(address, "//") {
// Assume just an IP was given. Don't touch the TLS setting.
cfg.Address = address
} else {
parsed, err := url.Parse(address)
if err != nil {
l.Fatalln(err)
}
cfg.Address = parsed.Host
switch parsed.Scheme {
case "http":
cfg.UseTLS = false
case "https":
cfg.UseTLS = true
default:
l.Fatalln("Unknown scheme:", parsed.Scheme)
}
}
}

if authentication != "" {
authenticationParts := strings.SplitN(authentication, ":", 2)

hash, err := bcrypt.GenerateFromPassword([]byte(authenticationParts[1]), 0)
if err != nil {
l.Fatalln("Invalid GUI password:", err)
}

cfg.User = authenticationParts[0]
cfg.Password = string(hash)
}

if apikey != "" {
cfg.APIKey = apikey
}
return cfg
}
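
A usage sketch for the override above. The flag values are made up for the example; in practice they would presumably come from the corresponding `-gui-*` command line options.

```go
// Hypothetical command line values, for illustration only.
guiCfg := overrideGUIConfig(cfg.GUI(), "https://0.0.0.0:8384", "admin:s3cret", "abc123")
// guiCfg.UseTLS is now true, guiCfg.Address is "0.0.0.0:8384",
// guiCfg.User is "admin" and guiCfg.Password holds a bcrypt hash of "s3cret".
```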
|
||||
|
||||
func standbyMonitor() {
|
||||
restartDelay := time.Duration(60 * time.Second)
|
||||
now := time.Now()
|
||||
@@ -1013,7 +1012,7 @@ func standbyMonitor() {
|
||||
}
|
||||
}
|
||||
|
||||
func autoUpgrade(cfg *config.Wrapper) {
|
||||
func autoUpgrade() {
|
||||
timer := time.NewTimer(0)
|
||||
sub := events.Default.Subscribe(events.DeviceConnected)
|
||||
for {
|
||||
@@ -1027,7 +1026,7 @@ func autoUpgrade(cfg *config.Wrapper) {
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
rel, err := upgrade.LatestRelease(cfg.Options().ReleasesURL, Version)
|
||||
rel, err := upgrade.LatestRelease(Version)
|
||||
if err == upgrade.ErrUpgradeUnsupported {
|
||||
events.Default.Unsubscribe(sub)
|
||||
return
|
||||
@@ -1113,3 +1112,19 @@ func checkShortIDs(cfg *config.Wrapper) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A "better" version of leveldb's errors.IsCorrupted.
|
||||
func leveldbIsCorrupted(err error) bool {
|
||||
switch {
|
||||
case err == nil:
|
||||
return false
|
||||
|
||||
case errors.IsCorrupted(err):
|
||||
return true
|
||||
|
||||
case strings.Contains(err.Error(), "corrupted"):
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
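
One plausible call site for the helper above, shown as a sketch (not necessarily how the patch wires it up): fall back to goleveldb's recovery path when opening the database reports corruption.

```go
// Sketch only: dbPath and dbOpts are assumed to exist in the caller.
ldb, err := leveldb.OpenFile(dbPath, dbOpts)
if leveldbIsCorrupted(err) {
	ldb, err = leveldb.RecoverFile(dbPath, dbOpts)
}
if err != nil {
	l.Fatalln("Cannot open database:", err)
}
```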
|
||||
|
||||
@@ -10,10 +10,13 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
func TestFolderErrors(t *testing.T) {
|
||||
@@ -35,11 +38,11 @@ func TestFolderErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
ldb := db.OpenMemory()
|
||||
ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
|
||||
// Case 1 - new folder, directory and marker created
|
||||
|
||||
m := model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m := model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err != nil {
|
||||
@@ -70,7 +73,7 @@ func TestFolderErrors(t *testing.T) {
|
||||
Folders: []config.FolderConfiguration{fcfg},
|
||||
})
|
||||
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err != nil {
|
||||
@@ -93,7 +96,7 @@ func TestFolderErrors(t *testing.T) {
|
||||
{Name: "dummyfile"},
|
||||
})
|
||||
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder marker missing" {
|
||||
@@ -124,7 +127,7 @@ func TestFolderErrors(t *testing.T) {
|
||||
Folders: []config.FolderConfiguration{fcfg},
|
||||
})
|
||||
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder path missing" {
|
||||
|
||||
@@ -28,10 +28,8 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
countRestarts = 4
|
||||
loopThreshold = 60 * time.Second
|
||||
logFileAutoCloseDelay = 5 * time.Second
|
||||
logFileMaxOpenTime = time.Minute
|
||||
countRestarts = 4
|
||||
loopThreshold = 60 * time.Second
|
||||
)
|
||||
|
||||
func monitorMain() {
|
||||
@@ -39,10 +37,16 @@ func monitorMain() {
|
||||
os.Setenv("STMONITORED", "yes")
|
||||
l.SetPrefix("[monitor] ")
|
||||
|
||||
var err error
|
||||
var dst io.Writer = os.Stdout
|
||||
|
||||
if logFile != "-" {
|
||||
var fileDst io.Writer = newAutoclosedFile(logFile, logFileAutoCloseDelay, logFileMaxOpenTime)
|
||||
var fileDst io.Writer
|
||||
|
||||
fileDst, err = os.Create(logFile)
|
||||
if err != nil {
|
||||
l.Fatalln("log file:", err)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// Translate line breaks to Windows standard
|
||||
@@ -258,114 +262,3 @@ func restartMonitorWindows(args []string) error {
|
||||
cmd.Stdin = os.Stdin
|
||||
return cmd.Start()
|
||||
}
|
||||
|
||||
// An autoclosedFile is an io.WriteCloser that opens itself for appending on
// Write() and closes itself after an interval of no writes (closeDelay) or
// when the file has been open for too long (maxOpenTime). A call to Write()
// will return any error that happens on the resulting Open() call too. Errors
// on automatic Close() calls are silently swallowed...
type autoclosedFile struct {
name string // path to write to
closeDelay time.Duration // close after this long inactivity
maxOpenTime time.Duration // or this long after opening

fd io.WriteCloser // underlying WriteCloser
opened time.Time // timestamp when the file was last opened
closed chan struct{} // closed on Close(), stops the closerLoop
closeTimer *time.Timer // fires closeDelay after a write

mut sync.Mutex
}

func newAutoclosedFile(name string, closeDelay, maxOpenTime time.Duration) *autoclosedFile {
f := &autoclosedFile{
name: name,
closeDelay: closeDelay,
maxOpenTime: maxOpenTime,
mut: sync.NewMutex(),
closed: make(chan struct{}),
closeTimer: time.NewTimer(time.Minute),
}
go f.closerLoop()
return f
}

func (f *autoclosedFile) Write(bs []byte) (int, error) {
f.mut.Lock()
defer f.mut.Unlock()

// Make sure the file is open for appending
if err := f.ensureOpen(); err != nil {
return 0, err
}

// If we haven't run into the maxOpenTime, postpone close for another
// closeDelay
if time.Since(f.opened) < f.maxOpenTime {
f.closeTimer.Reset(f.closeDelay)
}

return f.fd.Write(bs)
}

func (f *autoclosedFile) Close() error {
f.mut.Lock()
defer f.mut.Unlock()

// Stop the timer and closerLoop() routine
f.closeTimer.Stop()
close(f.closed)

// Close the file, if it's open
if f.fd != nil {
return f.fd.Close()
}

return nil
}

// Must be called with f.mut held!
func (f *autoclosedFile) ensureOpen() error {
if f.fd != nil {
// File is already open
return nil
}

// We open the file for write only, and create it if it doesn't exist.
flags := os.O_WRONLY | os.O_CREATE
if f.opened.IsZero() {
// This is the first time we are opening the file. We should truncate
// it to better emulate an os.Create() call.
flags |= os.O_TRUNC
} else {
// The file was already opened once, so we should append to it.
flags |= os.O_APPEND
}

fd, err := os.OpenFile(f.name, flags, 0644)
if err != nil {
return err
}

f.fd = fd
f.opened = time.Now()
return nil
}

func (f *autoclosedFile) closerLoop() {
for {
select {
case <-f.closeTimer.C:
// Close the file when the timer expires.
f.mut.Lock()
if f.fd != nil {
f.fd.Close() // errors, schmerrors
f.fd = nil
}
f.mut.Unlock()

case <-f.closed:
return
}
}
}
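
A usage sketch for the type above: route the monitor's log output through an autoclosedFile so the log file handle is only held open while output is actually flowing. The file name and the delays are example values; the monitor itself uses the logFileAutoCloseDelay and logFileMaxOpenTime constants.

```go
// Example wiring only.
dst := newAutoclosedFile("syncthing.log", 5*time.Second, time.Minute)
defer dst.Close()

fmt.Fprintln(dst, "monitor started") // the first write opens (and truncates) the file
// After five idle seconds the file is closed again; the next write reopens it
// in append mode, so nothing already logged is lost.
```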
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/protocol"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -9,7 +9,6 @@ package main
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
@@ -21,7 +20,6 @@ import (
|
||||
type folderSummarySvc struct {
|
||||
*suture.Supervisor
|
||||
|
||||
cfg *config.Wrapper
|
||||
model *model.Model
|
||||
stop chan struct{}
|
||||
immediate chan string
|
||||
@@ -35,10 +33,9 @@ type folderSummarySvc struct {
|
||||
lastEventReqMut sync.Mutex
|
||||
}
|
||||
|
||||
func newFolderSummarySvc(cfg *config.Wrapper, m *model.Model) *folderSummarySvc {
|
||||
func newFolderSummarySvc(m *model.Model) *folderSummarySvc {
|
||||
svc := &folderSummarySvc{
|
||||
Supervisor: suture.NewSimple("folderSummarySvc"),
|
||||
cfg: cfg,
|
||||
model: m,
|
||||
stop: make(chan struct{}),
|
||||
immediate: make(chan string),
|
||||
@@ -165,13 +162,13 @@ func (c *folderSummarySvc) foldersToHandle() []string {
|
||||
func (c *folderSummarySvc) sendSummary(folder string) {
|
||||
// The folder summary contains how many bytes, files etc
|
||||
// are in the folder and how in sync we are.
|
||||
data := folderSummary(c.cfg, c.model, folder)
|
||||
data := folderSummary(c.model, folder)
|
||||
events.Default.Log(events.FolderSummary, map[string]interface{}{
|
||||
"folder": folder,
|
||||
"summary": data,
|
||||
})
|
||||
|
||||
for _, devCfg := range c.cfg.Folders()[folder].Devices {
|
||||
for _, devCfg := range cfg.Folders()[folder].Devices {
|
||||
if devCfg.DeviceID.Equals(myID) {
|
||||
// We already know about ourselves.
|
||||
continue
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package tlsutil
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
mr "math/rand"
|
||||
@@ -23,14 +22,17 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrIdentificationFailed = fmt.Errorf("failed to identify socket type")
|
||||
const (
|
||||
tlsRSABits = 3072
|
||||
tlsDefaultCommonName = "syncthing"
|
||||
)
|
||||
|
||||
func NewCertificate(certFile, keyFile, tlsDefaultCommonName string, tlsRSABits int) (tls.Certificate, error) {
|
||||
func newCertificate(certFile, keyFile, name string) (tls.Certificate, error) {
|
||||
l.Infof("Generating RSA key and certificate for %s...", name)
|
||||
|
||||
priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("generate key: %s", err)
|
||||
l.Fatalln("generate key:", err)
|
||||
}
|
||||
|
||||
notBefore := time.Now()
|
||||
@@ -39,7 +41,7 @@ func NewCertificate(certFile, keyFile, tlsDefaultCommonName string, tlsRSABits i
|
||||
template := x509.Certificate{
|
||||
SerialNumber: new(big.Int).SetInt64(mr.Int63()),
|
||||
Subject: pkix.Name{
|
||||
CommonName: tlsDefaultCommonName,
|
||||
CommonName: name,
|
||||
},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
@@ -47,38 +49,37 @@ func NewCertificate(certFile, keyFile, tlsDefaultCommonName string, tlsRSABits i
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
BasicConstraintsValid: true,
|
||||
SignatureAlgorithm: x509.SHA256WithRSA,
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("create cert: %s", err)
|
||||
l.Fatalln("create cert:", err)
|
||||
}
|
||||
|
||||
certOut, err := os.Create(certFile)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("save cert: %s", err)
|
||||
l.Fatalln("save cert:", err)
|
||||
}
|
||||
err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("save cert: %s", err)
|
||||
l.Fatalln("save cert:", err)
|
||||
}
|
||||
err = certOut.Close()
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("save cert: %s", err)
|
||||
l.Fatalln("save cert:", err)
|
||||
}
|
||||
|
||||
keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("save key: %s", err)
|
||||
l.Fatalln("save key:", err)
|
||||
}
|
||||
err = pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("save key: %s", err)
|
||||
l.Fatalln("save key:", err)
|
||||
}
|
||||
err = keyOut.Close()
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("save key: %s", err)
|
||||
l.Fatalln("save key:", err)
|
||||
}
|
||||
|
||||
return tls.LoadX509KeyPair(certFile, keyFile)
|
||||
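
A call-site sketch for the error-returning variant above (`NewCertificate`): load an existing key pair and only generate a new one when that fails. The paths, the package qualifier and the fallback policy are assumptions for the example.

```go
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
	// Assumed fallback: generate a fresh self-signed certificate.
	cert, err = tlsutil.NewCertificate(certFile, keyFile, "syncthing", 3072)
	if err != nil {
		l.Fatalln("Cannot create certificate:", err)
	}
}
```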
@@ -89,29 +90,15 @@ type DowngradingListener struct {
|
||||
TLSConfig *tls.Config
|
||||
}
|
||||
|
||||
func (l *DowngradingListener) Accept() (net.Conn, error) {
|
||||
conn, isTLS, err := l.AcceptNoWrapTLS()
|
||||
|
||||
// We failed to identify the socket type, pretend that everything is fine,
|
||||
// and pass it to the underlying handler, and let them deal with it.
|
||||
if err == ErrIdentificationFailed {
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return conn, err
|
||||
}
|
||||
|
||||
if isTLS {
|
||||
return tls.Server(conn, l.TLSConfig), nil
|
||||
}
|
||||
return conn, nil
|
||||
type WrappedConnection struct {
|
||||
io.Reader
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (l *DowngradingListener) AcceptNoWrapTLS() (net.Conn, bool, error) {
|
||||
func (l *DowngradingListener) Accept() (net.Conn, error) {
|
||||
conn, err := l.Listener.Accept()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
br := bufio.NewReader(conn)
|
||||
@@ -120,19 +107,20 @@ func (l *DowngradingListener) AcceptNoWrapTLS() (net.Conn, bool, error) {
|
||||
conn.SetReadDeadline(time.Time{})
|
||||
if err != nil {
|
||||
// We hit a read error here, but the Accept() call succeeded so we must not return an error.
|
||||
// We return the connection as is with a special error which handles this
|
||||
// special case in Accept().
|
||||
return conn, false, ErrIdentificationFailed
|
||||
// We return the connection as is and let whoever tries to use it deal with the error.
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
return &UnionedConnection{br, conn}, bs[0] == 0x16, nil
|
||||
wrapper := &WrappedConnection{br, conn}
|
||||
|
||||
// 0x16 is the first byte of a TLS handshake
|
||||
if bs[0] == 0x16 {
|
||||
return tls.Server(wrapper, l.TLSConfig), nil
|
||||
}
|
||||
|
||||
return wrapper, nil
|
||||
}
|
||||
|
||||
type UnionedConnection struct {
|
||||
io.Reader
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (c *UnionedConnection) Read(b []byte) (n int, err error) {
|
||||
func (c *WrappedConnection) Read(b []byte) (n int, err error) {
|
||||
return c.Reader.Read(b)
|
||||
}
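
A usage sketch for the listener above: wrap a plain TCP listener so that TLS and non-TLS clients can share one port. Whether Accept returns a TLS-wrapped connection directly or leaves the wrapping to the caller depends on which of the two variants in this diff is in use; `tlsCfg` and `handleConn` are assumed to exist in the caller.

```go
tcpListener, err := net.Listen("tcp", "0.0.0.0:22000")
if err != nil {
	l.Fatalln("listen:", err)
}
listener := &DowngradingListener{Listener: tcpListener, TLSConfig: tlsCfg}
for {
	conn, err := listener.Accept()
	if err != nil {
		continue
	}
	// TLS handshakes (first byte 0x16) are detected by peeking at the stream;
	// everything else is passed through as a plain, buffered connection.
	go handleConn(conn)
}
```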
|
||||
@@ -11,30 +11,26 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/upnp"
|
||||
)
|
||||
|
||||
// The UPnP service runs a loop for discovery of IGDs (Internet Gateway
|
||||
// Devices) and setup/renewal of a port mapping.
|
||||
type upnpSvc struct {
|
||||
cfg *config.Wrapper
|
||||
localPort int
|
||||
extPort int
|
||||
extPortMut sync.Mutex
|
||||
stop chan struct{}
|
||||
cfg *config.Wrapper
|
||||
localPort int
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
func newUPnPSvc(cfg *config.Wrapper, localPort int) *upnpSvc {
|
||||
return &upnpSvc{
|
||||
cfg: cfg,
|
||||
localPort: localPort,
|
||||
extPortMut: sync.NewMutex(),
|
||||
cfg: cfg,
|
||||
localPort: localPort,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *upnpSvc) Serve() {
|
||||
extPort := 0
|
||||
foundIGD := true
|
||||
s.stop = make(chan struct{})
|
||||
|
||||
@@ -42,15 +38,7 @@ func (s *upnpSvc) Serve() {
|
||||
igds := upnp.Discover(time.Duration(s.cfg.Options().UPnPTimeoutS) * time.Second)
|
||||
if len(igds) > 0 {
|
||||
foundIGD = true
|
||||
s.extPortMut.Lock()
|
||||
oldExtPort := s.extPort
|
||||
s.extPortMut.Unlock()
|
||||
|
||||
newExtPort := s.tryIGDs(igds, oldExtPort)
|
||||
|
||||
s.extPortMut.Lock()
|
||||
s.extPort = newExtPort
|
||||
s.extPortMut.Unlock()
|
||||
extPort = s.tryIGDs(igds, extPort)
|
||||
} else if foundIGD {
|
||||
// Only print a notice if we've previously found an IGD or this is
|
||||
// the first time around.
|
||||
@@ -76,13 +64,6 @@ func (s *upnpSvc) Stop() {
|
||||
close(s.stop)
|
||||
}
|
||||
|
||||
func (s *upnpSvc) ExternalPort() int {
|
||||
s.extPortMut.Lock()
|
||||
port := s.extPort
|
||||
s.extPortMut.Unlock()
|
||||
return port
|
||||
}
|
||||
|
||||
func (s *upnpSvc) tryIGDs(igds []upnp.IGD, prevExtPort int) int {
|
||||
// Let's try all the IGDs we found and use the first one that works.
|
||||
// TODO: Use all of them, and sort out the resulting mess to the
|
||||
@@ -95,10 +76,17 @@ func (s *upnpSvc) tryIGDs(igds []upnp.IGD, prevExtPort int) int {
|
||||
}
|
||||
|
||||
if extPort != prevExtPort {
|
||||
// External port changed; refresh the discovery announcement.
|
||||
// TODO: Don't reach out to some magic global here?
|
||||
l.Infof("New UPnP port mapping: external port %d to local port %d.", extPort, s.localPort)
|
||||
events.Default.Log(events.ExternalPortMappingChanged, map[string]int{"port": extPort})
|
||||
if s.cfg.Options().GlobalAnnEnabled {
|
||||
discoverer.StopGlobal()
|
||||
discoverer.StartGlobal(s.cfg.Options().GlobalAnnServers, uint16(extPort))
|
||||
}
|
||||
}
|
||||
if debugNet {
|
||||
l.Debugf("Created/updated UPnP port mapping for external port %d on device %s.", extPort, igd.FriendlyIdentifier())
|
||||
}
|
||||
l.Debugf("Created/updated UPnP port mapping for external port %d on device %s.", extPort, igd.FriendlyIdentifier())
|
||||
return extPort
|
||||
}
|
||||
|
||||
|
||||
@@ -10,37 +10,30 @@ import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/dialer"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
"github.com/thejerf/suture"
|
||||
)
|
||||
|
||||
// Current version number of the usage report, for acceptance purposes. If
|
||||
// fields are added or changed this integer must be incremented so that users
|
||||
// are prompted for acceptance of the new report.
|
||||
const usageReportVersion = 2
|
||||
const usageReportVersion = 1
|
||||
|
||||
type usageReportingManager struct {
|
||||
cfg *config.Wrapper
|
||||
model *model.Model
|
||||
sup *suture.Supervisor
|
||||
}
|
||||
|
||||
func newUsageReportingManager(cfg *config.Wrapper, m *model.Model) *usageReportingManager {
|
||||
func newUsageReportingManager(m *model.Model, cfg *config.Wrapper) *usageReportingManager {
|
||||
mgr := &usageReportingManager{
|
||||
cfg: cfg,
|
||||
model: m,
|
||||
}
|
||||
|
||||
@@ -61,7 +54,9 @@ func (m *usageReportingManager) VerifyConfiguration(from, to config.Configuratio
|
||||
func (m *usageReportingManager) CommitConfiguration(from, to config.Configuration) bool {
|
||||
if to.Options.URAccepted >= usageReportVersion && m.sup == nil {
|
||||
// Usage reporting was turned on; let's start it.
|
||||
svc := newUsageReportingService(m.cfg, m.model)
|
||||
svc := &usageReportingService{
|
||||
model: m.model,
|
||||
}
|
||||
m.sup = suture.NewSimple("usageReporting")
|
||||
m.sup.Add(svc)
|
||||
m.sup.ServeBackground()
|
||||
@@ -80,9 +75,8 @@ func (m *usageReportingManager) String() string {
|
||||
|
||||
// reportData returns the data to be sent in a usage report. It's used in
|
||||
// various places, so not part of the usageReportingSvc object.
|
||||
func reportData(cfg *config.Wrapper, m *model.Model) map[string]interface{} {
|
||||
func reportData(m *model.Model) map[string]interface{} {
|
||||
res := make(map[string]interface{})
|
||||
res["urVersion"] = usageReportVersion
|
||||
res["uniqueID"] = cfg.Options().URUniqueID
|
||||
res["version"] = Version
|
||||
res["longVersion"] = LongVersion
|
||||
@@ -112,158 +106,45 @@ func reportData(cfg *config.Wrapper, m *model.Model) map[string]interface{} {
|
||||
var mem runtime.MemStats
|
||||
runtime.ReadMemStats(&mem)
|
||||
res["memoryUsageMiB"] = (mem.Sys - mem.HeapReleased) / 1024 / 1024
|
||||
res["sha256Perf"] = cpuBench(5, 125*time.Millisecond)
|
||||
|
||||
var perf float64
|
||||
for i := 0; i < 5; i++ {
|
||||
p := cpuBench()
|
||||
if p > perf {
|
||||
perf = p
|
||||
}
|
||||
}
|
||||
res["sha256Perf"] = perf
|
||||
|
||||
bytes, err := memorySize()
|
||||
if err == nil {
|
||||
res["memorySize"] = bytes / 1024 / 1024
|
||||
}
|
||||
res["numCPU"] = runtime.NumCPU()
|
||||
|
||||
var rescanIntvs []int
|
||||
folderUses := map[string]int{
|
||||
"readonly": 0,
|
||||
"ignorePerms": 0,
|
||||
"ignoreDelete": 0,
|
||||
"autoNormalize": 0,
|
||||
}
|
||||
for _, cfg := range cfg.Folders() {
|
||||
rescanIntvs = append(rescanIntvs, cfg.RescanIntervalS)
|
||||
|
||||
if cfg.ReadOnly {
|
||||
folderUses["readonly"]++
|
||||
}
|
||||
if cfg.IgnorePerms {
|
||||
folderUses["ignorePerms"]++
|
||||
}
|
||||
if cfg.IgnoreDelete {
|
||||
folderUses["ignoreDelete"]++
|
||||
}
|
||||
if cfg.AutoNormalize {
|
||||
folderUses["autoNormalize"]++
|
||||
}
|
||||
}
|
||||
sort.Ints(rescanIntvs)
|
||||
res["rescanIntvs"] = rescanIntvs
|
||||
res["folderUses"] = folderUses
|
||||
|
||||
deviceUses := map[string]int{
|
||||
"introducer": 0,
|
||||
"customCertName": 0,
|
||||
"compressAlways": 0,
|
||||
"compressMetadata": 0,
|
||||
"compressNever": 0,
|
||||
"dynamicAddr": 0,
|
||||
"staticAddr": 0,
|
||||
}
|
||||
for _, cfg := range cfg.Devices() {
|
||||
if cfg.Introducer {
|
||||
deviceUses["introducer"]++
|
||||
}
|
||||
if cfg.CertName != "" && cfg.CertName != "syncthing" {
|
||||
deviceUses["customCertName"]++
|
||||
}
|
||||
if cfg.Compression == protocol.CompressAlways {
|
||||
deviceUses["compressAlways"]++
|
||||
} else if cfg.Compression == protocol.CompressMetadata {
|
||||
deviceUses["compressMetadata"]++
|
||||
} else if cfg.Compression == protocol.CompressNever {
|
||||
deviceUses["compressNever"]++
|
||||
}
|
||||
for _, addr := range cfg.Addresses {
|
||||
if addr == "dynamic" {
|
||||
deviceUses["dynamicAddr"]++
|
||||
} else {
|
||||
deviceUses["staticAddr"]++
|
||||
}
|
||||
}
|
||||
}
|
||||
res["deviceUses"] = deviceUses
|
||||
|
||||
defaultAnnounceServersDNS, defaultAnnounceServersIP, otherAnnounceServers := 0, 0, 0
|
||||
for _, addr := range cfg.Options().GlobalAnnServers {
|
||||
if addr == "default" {
|
||||
defaultAnnounceServersDNS++
|
||||
} else if stringIn(addr, config.DefaultDiscoveryServersIP) {
|
||||
defaultAnnounceServersIP++
|
||||
} else {
|
||||
otherAnnounceServers++
|
||||
}
|
||||
}
|
||||
res["announce"] = map[string]interface{}{
|
||||
"globalEnabled": cfg.Options().GlobalAnnEnabled,
|
||||
"localEnabled": cfg.Options().LocalAnnEnabled,
|
||||
"defaultServersDNS": defaultAnnounceServersDNS,
|
||||
"defaultServersIP": defaultAnnounceServersIP,
|
||||
"otherServers": otherAnnounceServers,
|
||||
}
|
||||
|
||||
defaultRelayServers, otherRelayServers := 0, 0
|
||||
for _, addr := range cfg.Options().RelayServers {
|
||||
switch addr {
|
||||
case "dynamic+https://relays.syncthing.net/endpoint":
|
||||
defaultRelayServers++
|
||||
default:
|
||||
otherRelayServers++
|
||||
}
|
||||
}
|
||||
res["relays"] = map[string]interface{}{
|
||||
"enabled": cfg.Options().RelaysEnabled,
|
||||
"defaultServers": defaultRelayServers,
|
||||
"otherServers": otherRelayServers,
|
||||
}
|
||||
|
||||
res["usesRateLimit"] = cfg.Options().MaxRecvKbps > 0 || cfg.Options().MaxSendKbps > 0
|
||||
|
||||
res["upgradeAllowedManual"] = !(upgrade.DisabledByCompilation || noUpgrade)
|
||||
res["upgradeAllowedAuto"] = !(upgrade.DisabledByCompilation || noUpgrade) && cfg.Options().AutoUpgradeIntervalH > 0
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func stringIn(needle string, haystack []string) bool {
|
||||
for _, s := range haystack {
|
||||
if needle == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type usageReportingService struct {
|
||||
cfg *config.Wrapper
|
||||
model *model.Model
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
func newUsageReportingService(cfg *config.Wrapper, model *model.Model) *usageReportingService {
|
||||
return &usageReportingService{
|
||||
cfg: cfg,
|
||||
model: model,
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *usageReportingService) sendUsageReport() error {
|
||||
d := reportData(s.cfg, s.model)
|
||||
d := reportData(s.model)
|
||||
var b bytes.Buffer
|
||||
json.NewEncoder(&b).Encode(d)
|
||||
|
||||
transp := &http.Transport{}
|
||||
client := &http.Client{Transport: transp}
|
||||
var client = http.DefaultClient
|
||||
if BuildEnv == "android" {
|
||||
// This works around the lack of DNS resolution on Android... :(
|
||||
transp.Dial = func(network, addr string) (net.Conn, error) {
|
||||
return dialer.Dial(network, "194.126.249.13:443")
|
||||
tr := &http.Transport{
|
||||
Dial: func(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, "194.126.249.13:443")
|
||||
},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
}
|
||||
|
||||
if s.cfg.Options().URPostInsecurely {
|
||||
transp.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
_, err := client.Post(s.cfg.Options().URURL, "application/json", &b)
|
||||
_, err := client.Post("https://data.syncthing.net/newdata", "application/json", &b)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -273,7 +154,7 @@ func (s *usageReportingService) Serve() {
|
||||
l.Infoln("Starting usage reporting")
|
||||
defer l.Infoln("Stopping usage reporting")
|
||||
|
||||
t := time.NewTimer(time.Duration(s.cfg.Options().URInitialDelayS) * time.Second) // time to initial report at start
|
||||
t := time.NewTimer(10 * time.Minute) // time to initial report at start
|
||||
for {
|
||||
select {
|
||||
case <-s.stop:
|
||||
@@ -293,17 +174,7 @@ func (s *usageReportingService) Stop() {
|
||||
}
|
||||
|
||||
// cpuBench returns CPU performance as a measure of single threaded SHA-256 MiB/s
|
||||
func cpuBench(iterations int, duration time.Duration) float64 {
|
||||
var perf float64
|
||||
for i := 0; i < iterations; i++ {
|
||||
if v := cpuBenchOnce(duration); v > perf {
|
||||
perf = v
|
||||
}
|
||||
}
|
||||
return perf
|
||||
}
|
||||
|
||||
func cpuBenchOnce(duration time.Duration) float64 {
|
||||
func cpuBench() float64 {
|
||||
chunkSize := 100 * 1 << 10
|
||||
h := sha256.New()
|
||||
bs := make([]byte, chunkSize)
|
||||
@@ -311,7 +182,7 @@ func cpuBenchOnce(duration time.Duration) float64 {
|
||||
|
||||
t0 := time.Now()
|
||||
b := 0
|
||||
for time.Since(t0) < duration {
|
||||
for time.Since(t0) < 125*time.Millisecond {
|
||||
h.Write(bs)
|
||||
b += chunkSize
|
||||
}
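
For illustration, here is a standalone version of the benchmark idea above: hash 100 KiB chunks for a fixed duration, take the best of a few runs, and report MiB/s. The rate calculation at the end is an assumption, since the hunk cuts off before the return statement.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"time"
)

func benchOnce(duration time.Duration) float64 {
	const chunkSize = 100 * 1 << 10
	h := sha256.New()
	bs := make([]byte, chunkSize)

	t0 := time.Now()
	b := 0
	for time.Since(t0) < duration {
		h.Write(bs)
		b += chunkSize
	}
	// Assumed conversion to MiB/s; the real code may round differently.
	return float64(b) / time.Since(t0).Seconds() / (1 << 20)
}

func main() {
	best := 0.0
	for i := 0; i < 5; i++ { // best of five short runs, as reportData does
		if v := benchOnce(125 * time.Millisecond); v > best {
			best = v
		}
	}
	fmt.Printf("~%.0f MiB/s single-threaded SHA-256\n", best)
}
```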
|
||||
|
||||
@@ -8,7 +8,6 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
)
|
||||
@@ -61,7 +60,7 @@ func (s *verboseSvc) WaitForStart() {
|
||||
|
||||
func (s *verboseSvc) formatEvent(ev events.Event) string {
|
||||
switch ev.Type {
|
||||
case events.Ping, events.DownloadProgress, events.LocalIndexUpdated:
|
||||
case events.Ping, events.DownloadProgress:
|
||||
// Skip
|
||||
return ""
|
||||
|
||||
@@ -75,7 +74,7 @@ func (s *verboseSvc) formatEvent(ev events.Event) string {
|
||||
return fmt.Sprintf("Discovered device %v at %v", data["device"], data["addrs"])
|
||||
case events.DeviceConnected:
|
||||
data := ev.Data.(map[string]string)
|
||||
return fmt.Sprintf("Connected to device %v at %v (type %s)", data["id"], data["addr"], data["type"])
|
||||
return fmt.Sprintf("Connected to device %v at %v", data["id"], data["addr"])
|
||||
case events.DeviceDisconnected:
|
||||
data := ev.Data.(map[string]string)
|
||||
return fmt.Sprintf("Disconnected from device %v", data["id"])
|
||||
@@ -87,6 +86,9 @@ func (s *verboseSvc) formatEvent(ev events.Event) string {
|
||||
case events.RemoteIndexUpdated:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Device %v sent an index update for %q with %d items", data["device"], data["folder"], data["items"])
|
||||
case events.LocalIndexUpdated:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Updated index for folder %q with %v items", data["folder"], data["items"])
|
||||
|
||||
case events.DeviceRejected:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
@@ -121,35 +123,6 @@ func (s *verboseSvc) formatEvent(ev events.Event) string {
|
||||
delete(sum, "ignorePatterns")
|
||||
delete(sum, "stateChanged")
|
||||
return fmt.Sprintf("Summary for folder %q is %v", data["folder"], data["summary"])
|
||||
case events.FolderScanProgress:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
folder := data["folder"].(string)
|
||||
current := data["current"].(int64)
|
||||
total := data["total"].(int64)
|
||||
var pct int64
|
||||
if total > 0 {
|
||||
pct = 100 * current / total
|
||||
}
|
||||
return fmt.Sprintf("Scanning folder %q, %d%% done", folder, pct)
|
||||
|
||||
case events.DevicePaused:
|
||||
data := ev.Data.(map[string]string)
|
||||
device := data["device"]
|
||||
return fmt.Sprintf("Device %v was paused", device)
|
||||
case events.DeviceResumed:
|
||||
data := ev.Data.(map[string]string)
|
||||
device := data["device"]
|
||||
return fmt.Sprintf("Device %v was resumed", device)
|
||||
|
||||
case events.ExternalPortMappingChanged:
|
||||
data := ev.Data.(map[string]int)
|
||||
port := data["port"]
|
||||
return fmt.Sprintf("External port mapping changed; new port is %d.", port)
|
||||
case events.RelayStateChanged:
|
||||
data := ev.Data.(map[string][]string)
|
||||
newRelays := data["new"]
|
||||
return fmt.Sprintf("Relay state changed; connected relay(s) are %s.", strings.Join(newRelays, ", "))
|
||||
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s %#v", ev.Type, ev)
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Systemd Configuration
|
||||
|
||||
This directory contains configuration files for running Syncthing under the
|
||||
This directory contains configuration files for running syncthing under the
|
||||
"systemd" service manager on Linux both under either a systemd system service or
|
||||
systemd user service. For further documentation take a look at the [systemd
|
||||
section][1] on http://docs.syncthing.net.
|
||||
section][1] on the Github Wiki.
|
||||
|
||||
[1]: http://docs.syncthing.net/users/autostart.html#systemd
|
||||
[1]: http://docs.syncthing.net/users/autostart.html#systemd
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
# Upstart Configuration
|
||||
|
||||
This directory contains example configuration files for running Syncthing under
|
||||
the "Upstart" service manager on Linux. To have syncthing start when you login
|
||||
place "user/syncthing.conf" in the "/home/[username]/.config/upstart/" folder.
|
||||
To have syncthing start when the system boots, place "system/syncthing.conf"
|
||||
in the "/etc/init/" folder.
|
||||
To manually start syncthing via Upstart when using the system configuration use:
|
||||
|
||||
```
|
||||
sudo initctl start syncthing
|
||||
```
|
||||
|
||||
For further documentation see [http://docs.syncthing.net/users/autostart.html][1].
|
||||
|
||||
[1]: http://docs.syncthing.net/users/autostart.html#Upstart
|
||||
@@ -1,13 +0,0 @@
|
||||
description "Syncthing"
|
||||
|
||||
start on (local-filesystems and net-device-up IFACE!=lo)
|
||||
stop on runlevel [!2345]
|
||||
|
||||
env STNORESTART=yes
|
||||
env HOME=/home/$USER
|
||||
setuid "$USER"
|
||||
setgid "$USER"
|
||||
|
||||
exec /usr/local/bin/syncthing
|
||||
|
||||
respawn
|
||||
@@ -1,21 +0,0 @@
|
||||
# Location of the syncthing executable
|
||||
env SYNCTHING_EXE="/usr/local/bin"
|
||||
|
||||
# Set the name of the application
|
||||
description "Syncthing"
|
||||
|
||||
# Start syncthing when you log in to your desktop
|
||||
start on desktop-start
|
||||
|
||||
# Stop syncthing when you log out of your desktop
|
||||
stop on desktop-end
|
||||
|
||||
# Set STNORESTART to yes to have Upstart monitor the process instead
|
||||
# of having a separate syncthing process do the monitoring
|
||||
env STNORESTART=yes
|
||||
|
||||
# If Upstart detects syncthing has failed - it should restart it
|
||||
respawn
|
||||
|
||||
# the syncthing command Upstart is to execute when it is started up
|
||||
exec $SYNCTHING_EXE -no-browser
|
||||
@@ -21,10 +21,6 @@ ul+h5 {
|
||||
margin-top: 1.5em;
|
||||
}
|
||||
|
||||
#content {
|
||||
margin-bottom: 50px;
|
||||
}
|
||||
|
||||
.panel-progress {
|
||||
background: #3498db;
|
||||
height: 3px;
|
||||
@@ -45,17 +41,8 @@ identicon {
|
||||
position: relative;
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
line-height: 1;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
.identicon {
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
}
|
||||
|
||||
.identicon rect {
|
||||
fill: #333;
|
||||
line-height: 1em;
|
||||
overflow: visible;
|
||||
}
|
||||
|
||||
.checkbox {
|
||||
@@ -68,7 +55,17 @@ identicon {
|
||||
|
||||
.popover {
|
||||
max-width: none;
|
||||
min-width: 250px;
|
||||
}
|
||||
|
||||
.identicon {
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
}
|
||||
|
||||
.panel-heading .identicon {
|
||||
display: block;
|
||||
position: absolute;
|
||||
top: 1px;
|
||||
}
|
||||
|
||||
.panel-heading .fa, .modal-header .fa {
|
||||
@@ -80,6 +77,10 @@ identicon {
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.identicon rect {
|
||||
fill: #666;
|
||||
}
|
||||
|
||||
.text-monospace {
|
||||
font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
|
||||
}
|
||||
|
||||
Binary file not shown.
@@ -2,5 +2,5 @@
|
||||
font-family: 'Raleway';
|
||||
font-style: normal;
|
||||
font-weight: 500;
|
||||
src: local('Raleway Medium'), local('Raleway-Medium'), url(raleway-500.woff) format('woff');
|
||||
src: local('Raleway'), url(raleway-500.woff) format('woff');
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
{
|
||||
"A negative number of days doesn't make sense.": "Няма логика в задаването на отрицателен брой дни.",
|
||||
"A negative number of days doesn't make sense.": "Няма логика на зададен отрицателен брой дни.",
|
||||
"A new major version may not be compatible with previous versions.": "Нова основна версия, която може да не е съвмеситима с предишни версии.",
|
||||
"API Key": "API Ключ",
|
||||
"About": "За програмата",
|
||||
"About": "За Програмата",
|
||||
"Actions": "Действия",
|
||||
"Add": "Добави",
|
||||
"Add Device": "Добави устройство",
|
||||
@@ -13,36 +13,35 @@
|
||||
"Advanced": "Допълнителни",
|
||||
"Advanced Configuration": "Допълнителни настройки",
|
||||
"All Data": "Всички данни",
|
||||
"Allow Anonymous Usage Reporting?": "Разреши анонимно докладване за употребата на програмата?",
|
||||
"Allow Anonymous Usage Reporting?": "Разреши анонимен доклад за ползване на програмата?",
|
||||
"Alphabetic": "Азбучен ред",
|
||||
"An external command handles the versioning. It has to remove the file from the synced folder.": "Друга команда се занимава с версиите. Тази команда трябва да премахни файла от синхронизираната папка.",
|
||||
"Anonymous Usage Reporting": "Анонимен доклад",
|
||||
"Anonymous Usage Reporting": "Анонимен Доклад",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Устройства настроени на introducer компютъра също ще бъдат добавени към този компютър.",
|
||||
"Automatic upgrades": "Автоматично обновяване",
|
||||
"Be careful!": "Внимание!",
|
||||
"Automatic upgrades": "Автоматични ъпдейти",
|
||||
"Be careful!": "Внимавай!",
|
||||
"Bugs": "Бъгове",
|
||||
"CPU Utilization": "Процесор в употреба",
|
||||
"Changelog": "Списък с промени",
|
||||
"CPU Utilization": "Натоварване на Процесора",
|
||||
"Changelog": "Сипъск с промени",
|
||||
"Clean out after": "Изчисти след",
|
||||
"Close": "Затвори",
|
||||
"Command": "Команда",
|
||||
"Comment, when used at the start of a line": "Коментар, използван в началото на реда",
|
||||
"Compression": "Компресиране",
|
||||
"Connection Error": "Грешка при свързването",
|
||||
"Compression": "Компресия",
|
||||
"Connection Error": "Грешка при Свързването",
|
||||
"Copied from elsewhere": "Копиране от някъде другаде",
|
||||
"Copied from original": "Копиран от оригинала",
|
||||
"Copyright © 2015 the following Contributors:": "Всички правата запазени © 2015 Сътрудници:",
|
||||
"Copyright © 2015 the following Contributors:": "Правата запазени © 2015 Сътрудници:",
|
||||
"Delete": "Изтрий",
|
||||
"Deleted": "Изтрито",
|
||||
"Device ID": "Идентификатор на устройство",
|
||||
"Device Identification": "Идентификатор на устройство",
|
||||
"Device Identification": "Идентификация на устройство",
|
||||
"Device Name": "Име на устройство",
|
||||
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Устройство {{device}} ({{address}}) желае да се свърже. Добави ново устройство?",
|
||||
"Devices": "Устройства",
|
||||
"Disconnected": "Не е свързано",
|
||||
"Discovery": "Откриване",
|
||||
"Disconnected": "Прекрати Връзката",
|
||||
"Documentation": "Документация",
|
||||
"Download Rate": "Скорост на сваляне",
|
||||
"Download Rate": "Скорост на Теглене",
|
||||
"Downloaded": "Изтеглен",
|
||||
"Downloading": "Изтегляне",
|
||||
"Edit": "Промени",
|
||||
@@ -50,49 +49,49 @@
|
||||
"Edit Folder": "Промени папка",
|
||||
"Editing": "Променяне",
|
||||
"Enable UPnP": "Включи UPnP",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Въведете адреси разделени със запетая (\"tcp://ip:port\", \"tcp://host:port\") или \"dynamic\", за да автоматично откриване на наличните адреси.",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Въведи адреси разделени със запетая (\"tcp://ip:port\", \"tcp://host:port\") или \"dynamic\", за да извършиш автоматична връзка на адреси.",
|
||||
"Enter ignore patterns, one per line.": "Добави шаблони за игнориране, по един на ред.",
|
||||
"Error": "Грешка",
|
||||
"External File Versioning": "Външно управление на версиите",
|
||||
"External File Versioning": "Външно упраление на версиите",
|
||||
"Failed Items": "Неуспешни",
|
||||
"File Pull Order": "Ред на сваляне",
|
||||
"File Versioning": "Версии на файловете",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Битовете за права за достъп ще бъдат игнорирани, когато се проверява за промени. Ползвайте за файлови системи тип FAT.",
|
||||
"File Pull Order": "По ред на дърпане",
|
||||
"File Versioning": "Файлови Версии",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Битовете за права за достъп са игнорирани, когато се проверява за промени. Използвай с файлови системи тип FAT.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Файловете биват преместени в .stversions папка, когато са заменен или изтрити от Syncthing.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Когато syncthing замени или изтрие файл той се премества в .stversions и преименува с набавени дата и час.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Защитава файловете от промени направени на други устройства, но промените направени на това устройство ще бъдат синхронизирани с останалите устройства.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Когато syncthing замени или изтрие файл той се премества в .stversions и преименува с дабавени дата и час.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Файловете са защитени от промени направени на други устройства, но промени направени на това устройство ще бъдат синхронизирани с другите устройства.",
|
||||
"Folder": "Папка",
|
||||
"Folder ID": "Идентификатор на папката",
|
||||
"Folder ID": "Идентификатор на папка",
|
||||
"Folder Master": "Главна папка",
|
||||
"Folder Path": "Път до папката",
|
||||
"Folders": "Папки",
|
||||
"GUI": "Потребителски интерфейс",
|
||||
"GUI Authentication Password": "Парола за потребителския интерфейс",
|
||||
"GUI Authentication User": "Потребител за потребителския интерфейс",
|
||||
"GUI Listen Addresses": "Адрес за свързване с потребителския интерфейс",
|
||||
"GUI": "Потребителски интефейс",
|
||||
"GUI Authentication Password": "Парола за Потребителския Интерфейс",
|
||||
"GUI Authentication User": "Потребител за Потребителския Интерфейс",
|
||||
"GUI Listen Addresses": "Адрес за Свързване с Потребителския Интерфейс",
|
||||
"Generate": "Генерирай",
|
||||
"Global Discovery": "Глобално откриване",
|
||||
"Global Discovery Server": "Сървър за глобално откриване",
|
||||
"Global Discovery": "Глобавно Откриване",
|
||||
"Global Discovery Server": "Сървър за Глобално Откриване",
|
||||
"Global State": "Глобално състояние",
|
||||
"Help": "Помощ",
|
||||
"Home page": "Начална страница",
|
||||
"Ignore": "Игнорирай",
|
||||
"Ignore Patterns": "Шаблони за игнориране",
|
||||
"Ignore Permissions": "Игнорирай правата за достъп",
|
||||
"Incoming Rate Limit (KiB/s)": "Входящ лимит на скоростта (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Неправилни настройки могат да повредят файловете и да попречат на синхронизирането.",
|
||||
"Ignore Patterns": "Шаблони за Игнориране",
|
||||
"Ignore Permissions": "Игнорирай Права за Достъп",
|
||||
"Incoming Rate Limit (KiB/s)": "Входящ Лимит на Скоростта (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Неправилни настройки могат да повредят съдържанието на папката и да попречат на по-нататъшно синхронизиране.",
|
||||
"Introducer": "Introducer",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Обратното на даденото условие (пр. не изключвай)",
|
||||
"Keep Versions": "Пази версии",
|
||||
"Largest First": " Първо най-големите",
|
||||
"Keep Versions": "Пази Версии",
|
||||
"Largest First": "Най-големите първо",
|
||||
"Last File Received": "Последния получен файл",
|
||||
"Last seen": "Последно видян",
|
||||
"Later": "По-късно",
|
||||
"Local Discovery": "Локално откриване",
|
||||
"Local Discovery": "Локално Откриване",
|
||||
"Local State": "Локално състояние",
|
||||
"Local State (Total)": "Локално състояние (Общо)",
|
||||
"Local State (Total)": "Локално Състояние (Общо)",
|
||||
"Major Upgrade": "Основно Обновяване",
|
||||
"Maximum Age": "Максимална възраст",
|
||||
"Maximum Age": "Максимална Възраст",
|
||||
"Metadata Only": "Само мета информация",
|
||||
"Minimum Free Disk Space": "Минимално свободно дисково пространство",
|
||||
"Move to top of queue": "Премести в началото на опашката",
|
||||
@@ -100,66 +99,66 @@
|
||||
"Never": "Никога",
|
||||
"New Device": "Ново устройство",
|
||||
"New Folder": "Нова папка",
|
||||
"Newest First": "Първо най-новите",
|
||||
"Newest First": "Най-новите първо",
|
||||
"No": "Не",
|
||||
"No File Versioning": "Без версии",
|
||||
"No File Versioning": "Няма Файлови Версии",
|
||||
"Notice": "Известие",
|
||||
"OK": "ОК",
|
||||
"Off": "Изключено",
|
||||
"Oldest First": "Първо най-старите",
|
||||
"Oldest First": "Най-старите първо",
|
||||
"Options": "Настройки",
|
||||
"Out of Sync": "Несинхронизирано",
|
||||
"Out of Sync": "Не синхронизиран",
|
||||
"Out of Sync Items": "Несинхронизирани елементи",
|
||||
"Outgoing Rate Limit (KiB/s)": "Лимит на изходящата скорост (KiB/s)",
|
||||
"Override Changes": "Наложи локалните промени",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Път до папката на това устройство. Ако не съществува ще бъде създадена. Символът тилда (~) може да бъде използван като заместител на",
|
||||
"Outgoing Rate Limit (KiB/s)": "Лимит на Изходящата Скорост (KiB/s)",
|
||||
"Override Changes": "Замени Промените",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Пътят до папката на този компютър. Ще бъде създадена ако не съществува. Символът тилда (~) може да бъде използван като заместител на",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Пътят, където версиите да бъдат складирани(остави празно за папката .stversions).",
|
||||
"Pause": "Пауза",
|
||||
"Paused": "На пауза",
|
||||
"Please consult the release notes before performing a major upgrade.": "Моля прочети бележките по обновяването преди да започнеш.",
|
||||
"Please wait": "Моля изчакай",
|
||||
"Preview": "Преглед",
|
||||
"Preview Usage Report": "Разгледай доклада за използване",
|
||||
"Preview Usage Report": "Разгледай Доклада за Използване",
|
||||
"Quick guide to supported patterns": "Бърз наръчник към поддържаните шаблони",
|
||||
"RAM Utilization": "RAM в употреба",
|
||||
"Random": "Произволен",
|
||||
"RAM Utilization": "RAM Натоварване",
|
||||
"Random": "Произволно",
|
||||
"Relayed via": "Препратено през",
|
||||
"Relays": "Препращачи",
|
||||
"Release Notes": "Бележки по обновяването",
|
||||
"Remove": "Премахни",
|
||||
"Rescan": "Сканирай повторно",
|
||||
"Rescan All": "Сканирай повторно всички",
|
||||
"Rescan Interval": "Интервал за повторно сканиране",
|
||||
"Rescan": "Повторно Сканиране",
|
||||
"Rescan All": "Пълно повторно сканиране",
|
||||
"Rescan Interval": "Интервал за Повторно Сканиране",
|
||||
"Restart": "Рестартирай",
|
||||
"Restart Needed": "Изисква се рестартиране",
|
||||
"Restart Needed": "Изискава се Рестартиране",
|
||||
"Restarting": "Рестартиране",
|
||||
"Resume": "Пусни",
|
||||
"Reused": "Повторно използван",
|
||||
"Save": "Запази",
|
||||
"Scanning": "Сканиране",
|
||||
"Select the devices to share this folder with.": "Изберете устройствата, с които да споделите папката.",
|
||||
"Select the devices to share this folder with.": "Избери устройствата, с които да споделиш тази папка.",
|
||||
"Select the folders to share with this device.": "Изберете папките за споделяне с това устройство.",
|
||||
"Settings": "Настройки",
|
||||
"Share": "Сподели",
|
||||
"Share Folder": "Сподели папка",
|
||||
"Share Folders With Device": "Сподели папки с това устройство",
|
||||
"Share With Devices": "Споделяне с устройства",
|
||||
"Share With Devices": "Сподели с устройства",
|
||||
"Share this folder?": "Сподели тази папка?",
|
||||
"Shared With": "Споделена с",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Идентификаторът на папката трябва да бъде еднакъв на всички устройства.",
|
||||
"Show ID": "Покажи идентификатора",
|
||||
"Shared With": "Споделена със",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Кратък идентификатор на папката. Трябва да бъде същият на всички компютри.",
|
||||
"Show ID": "Покажи Идентификатора",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Покажи вместо идентификатор на устройството в статус на клъстъра. Ще бъде предлагано на други комютри като име по подразбиране.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Покажи вместо идентификатор на устройството в статус на клъстъра. Ще бъде обновено с името по подразбиране изпратено от другия компютър.",
|
||||
"Shutdown": "Спри програмата",
|
||||
"Shutdown": "Спри Програмата",
|
||||
"Shutdown Complete": "Спирането завършено",
|
||||
"Simple File Versioning": "Опростени версии",
|
||||
"Simple File Versioning": "Опростени Файлови Версии",
|
||||
"Single level wildcard (matches within a directory only)": "Маска на едно ниво (покрива само в папка)",
|
||||
"Smallest First": "Първо най-малките",
|
||||
"Source Code": "Сорс код",
|
||||
"Staggered File Versioning": "Наслагващи се версии",
|
||||
"Start Browser": "Стартирай браузъра",
|
||||
"Smallest First": "Най-малките първо",
|
||||
"Source Code": "Сорс Код",
|
||||
"Staggered File Versioning": "Наслагващи се Файлови Версии",
|
||||
"Start Browser": "Стартирай Браузъра",
|
||||
"Statistics": "Статистика",
|
||||
"Stopped": "Без синхронизиране",
|
||||
"Stopped": "Спряна",
|
||||
"Support": "Помощ",
|
||||
"Sync Protocol Listen Addresses": "Адрес за слушане на синхронизиращия протокол",
|
||||
"Syncing": "Синхронизиране",
|
||||
@@ -172,12 +171,12 @@
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Сумарната статистика е публично достъпна на {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Конфигурацията е запазена, но не е активирана. Syncthing трябва да рестартира, за да се активира новата конфигурация.",
|
||||
"The device ID cannot be blank.": "Полето идентификатор на устройство не може да бъде празно.",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Идентификатор на устройство за въвеждане тук, може да бъде намерен в \"Промени > Покажи идентификатора\". Интервалите и тиретата са пожелание (биват прескачани).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Криптираният доклад се изпраща ежедневно. Използва се, за отичане на ползваните платформи, размер на папки и версии на приложението. Ако събираните данни се променят, ще бъдете информиран с подобен на този диалог.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Въведеният идентификатор на устройство не е валиден. Трябва да бъде 52 или 56 символа и да се състои от букви и цифри, като интервалите и тиретата са пожелание.",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Идентификатор на устройство за въвеждане тук, може да бъде намерен в \"Промени > Покажи Идентификатора\". Интервалите и тиретата са пожелание(биват прескачани).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Криптираният доклад се изпраща дневно. Използва се, за да следи общи платформи, размери на папки и версии на приложението. Ако събираните данни се променят, ще бъдете информиран с подобен на този диалог.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Въведни идентификатор на устройство не е валиден. Трябва да бъде 52 или 56 символа и да се състои от букви и цифри, като интервалите и тиретата са пожелание.",
|
||||
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Първият параметър за командата е пътя до папката, а вторият е релативния път в самата папка.",
|
||||
"The folder ID cannot be blank.": "Полето идентификатор на папка не може да бъде празно.",
|
||||
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "Идентификаторът на папка трябва да бъде къс (64 символа или по-малко) състоящ се само от букви, цифри, точка (.), тире (-) и подчерта (_).",
|
||||
"The folder ID cannot be blank.": "Полето идентификатор на папка неможе да бъде празно.",
|
||||
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "Идентификаторът на папка трябва да бъде къс (64 символа или по-малко) състоящ се само от букви, цифри, точка(.), тире(-) и подчерта (_).",
|
||||
"The folder ID must be unique.": "Идентификаторът на папката тряба да бъде уникален.",
|
||||
"The folder path cannot be blank.": "Пътят до папката не може да бъде празен.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Използва се следния интервал: за първия час се пази версия всеки 30 секунди, за първия ден се пази версия всеки час, за първите 30 дена се пази версия всеки ден, до максимума се пази една версия всяка седмица.",
|
||||
@@ -185,29 +184,29 @@
|
||||
"The maximum age must be a number and cannot be blank.": "Максималната възраст трябва да е число и не може д ае празна.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максималното време да се пазят весрсии (в дни, сложи 0, за да пазиш версии завинаги).",
|
||||
"The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).": "Минималното свободно дисково пространство в проценти трябва да е между 0 и 100 (включително).",
|
||||
"The number of days must be a number and cannot be blank.": "Броят дни трябва да бъде число и не може да бъде празно.",
|
||||
"The number of days must be a number and cannot be blank.": "Броят дни трябва да бъде число и неможе да бъде празно.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "Броят дни за запазване на файловете в кошчето. Нула значи завинаги.",
|
||||
"The number of old versions to keep, per file.": "Броят стари версии, които да бъдат пазени за всеки файл.",
|
||||
"The number of versions must be a number and cannot be blank.": "Броят версии трябва да бъде число и не може да бъде празно.",
|
||||
"The path cannot be blank.": "Пътят не може да бъде празен.",
|
||||
"The rate limit must be a non-negative number (0: no limit)": "Ограничението на скоростта трябва да бъде положително число (0: неограничено)",
|
||||
"The path cannot be blank.": "Пътят неможе да бъде празен.",
|
||||
"The rate limit must be a non-negative number (0: no limit)": "Ограничението на скорста трябва да бъде положително число (0: неограничено)",
|
||||
"The rescan interval must be a non-negative number of seconds.": "Интервала на сканиране трябва да бъде не отрицателно число в секунди.",
|
||||
"They are retried automatically and will be synced when the error is resolved.": "Ще бъдат спрени и автоматично синхронизирани, когато грешката бъде оправена.",
|
||||
"This is a major version upgrade.": "Това е нова основна версия.",
|
||||
"Trash Can File Versioning": "Само на файловете в кошчето",
|
||||
"Unknown": "Неясно",
|
||||
"Unshared": "Несподелена",
|
||||
"Trash Can File Versioning": "Версии на файлове в кошчето",
|
||||
"Unknown": "Неясен",
|
||||
"Unshared": "Споделянето прекратено",
|
||||
"Unused": "Неизползван",
|
||||
"Up to Date": "Синхронизирано",
|
||||
"Up to Date": "Актуален",
|
||||
"Updated": "Обновено",
|
||||
"Upgrade": "Обнови",
|
||||
"Upgrade To {%version%}": "Обновен до {{version}}",
|
||||
"Upgrade To {%version%}": "Обновен До {{version}}",
|
||||
"Upgrading": "Обновяване",
|
||||
"Upload Rate": "Скорост на качване",
|
||||
"Uptime": "Работи от",
|
||||
"Use HTTPS for GUI": "Използвай HTTPS за потребителския интерфейс",
|
||||
"Upload Rate": "Скорост на Качване",
|
||||
"Uptime": "Работи вече",
|
||||
"Use HTTPS for GUI": "Използвай HTTPS за Потребителския Интерфейс",
|
||||
"Version": "Версия",
|
||||
"Versions Path": "Път до версиите",
|
||||
"Versions Path": "Път до Версиите",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Версиите биват изтривани автоматично, когато са по-стари от максималната възраст или надминават броя файлове разрешени в даден интервал.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Когато добавяш ново устройство помни, че твоето устройство също трябва да бъде добавено от другата страна.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Когато добавяш нов идентификатор на папка помни, че той се използва за свързване на папките на различни устройства. Главни/малки букви са от значение и трябва да са еднакви на всички устройства.",
|
||||
@@ -215,6 +214,6 @@
|
||||
"You must keep at least one version.": "Трябва да пазиш поне една версия.",
|
||||
"days": "дни",
|
||||
"full documentation": "пълна документация",
|
||||
"items": "елемента",
|
||||
"items": "артикула",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} желае да сподели папка \"{{folder}}\"."
|
||||
}
|
||||
@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "El dispositiu {{device}} ({{address}}) vol conectar-se. Afegir nou dispositiu?",
"Devices": "Dispositius",
"Disconnected": "Desconnectat",
"Discovery": "Discovery",
"Documentation": "Documentació",
"Download Rate": "Tasca de descarrega",
"Downloaded": "Descarregat",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "El dispositiu {{device}} ({{address}}) vol connectar-se. Afegir nou dispositiu?",
"Devices": "Dispositius",
"Disconnected": "Desconnectat",
"Discovery": "Discovery",
"Documentation": "Documentació",
"Download Rate": "Velocitat de descàrrega",
"Downloaded": "Descarregat",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Přístroj {{device}} ({{address}}) žádá o připojení. Chcete ho přidat?",
"Devices": "Přístroje",
"Disconnected": "Odpojen",
"Discovery": "Oznamování",
"Documentation": "Dokumentace",
"Download Rate": "Rychlost stahování",
"Downloaded": "Staženo",
@@ -80,7 +79,7 @@
"Ignore Patterns": "Ignorované vzory",
"Ignore Permissions": "Ignorovat oprávnění",
"Incoming Rate Limit (KiB/s)": "Omezení příchozí rychlosti (KiB/s)",
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Nesprávné nastavení může poškodit obsah Vašich adresářů a učinit Syncthing nefunkční.",
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Nesprávné nastavení může poškodit obsah Vašich složek a učinit Syncthing nefunkční.",
"Introducer": "Zavaděč",
"Inversion of the given condition (i.e. do not exclude)": "Prohození zadané podmínky (např. nevynechat)",
"Keep Versions": "Ponechat verze",

@@ -40,9 +40,8 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Gerät {{device}} ({{address}}) möchte sich verbinden. Gerät hinzufügen?",
"Devices": "Geräte",
"Disconnected": "Getrennt",
"Discovery": "Gerätesuche",
"Documentation": "Dokumentation",
"Download Rate": "Download",
"Download Rate": "Download-Rate",
"Downloaded": "Heruntergeladen",
"Downloading": "Lädt herunter",
"Edit": "Bearbeiten",
@@ -90,7 +89,7 @@
"Later": "Später",
"Local Discovery": "Lokale Gerätesuche",
"Local State": "Lokaler Status",
"Local State (Total)": "Lokaler Status (Gesamt)",
"Local State (Total)": "Lokaler Status (total)",
"Major Upgrade": "Hauptversionsupgrade",
"Maximum Age": "Höchstalter",
"Metadata Only": "Nur Metadaten",
@@ -149,7 +148,7 @@
"Short identifier for the folder. Must be the same on all cluster devices.": "Kurze ID für das Verzeichnis. Muss auf allen Verbunds-Geräten gleich sein.",
"Show ID": "ID anzeigen",
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Wird anstatt der Geräte ID angezeigt. Wird als optionaler Gerätename an die anderen Clients im Cluster weitergegeben.",
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Wird auf diesem Gerät als Gerätename angezeigt und an die anderen Geräte im Geräte-Verbund weitergegeben. Wenn kein Gerätename anegegeben wird, wird der Name des entfernten Gerätes genommen.",
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Wird anstatt der Geräte ID im Verbunds-Status angezeigt.Wird auf den Namen aktualisiert, den das Gerät angibt, wenn nichts eingetragen wird.",
"Shutdown": "Herunterfahren",
"Shutdown Complete": "Vollständig Heruntergefahren",
"Simple File Versioning": "Einfache Dateiversionierung",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Η συσκευή {{device}} ({{address}}) επιθυμεί να συνδεθεί. Προσθήκη της νέας συσκευής1",
"Devices": "Συσκευές",
"Disconnected": "Αποσυνδεδεμένος",
"Discovery": "Discovery",
"Documentation": "Τεκμηρίωση",
"Download Rate": "Ταχύτητα λήψης",
"Downloaded": "Έχει ληφθεί",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Device {{device}} ({{address}}) wants to connect. Add new device?",
"Devices": "Devices",
"Disconnected": "Disconnected",
"Discovery": "Discovery",
"Documentation": "Documentation",
"Download Rate": "Download Rate",
"Downloaded": "Downloaded",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Device {{device}} ({{address}}) wants to connect. Add new device?",
"Devices": "Devices",
"Disconnected": "Disconnected",
"Discovery": "Discovery",
"Documentation": "Documentation",
"Download Rate": "Download Rate",
"Downloaded": "Downloaded",
@@ -50,7 +49,7 @@
"Edit Folder": "Edit Folder",
"Editing": "Editing",
"Enable UPnP": "Enable UPnP",
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.",
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.",
"Enter ignore patterns, one per line.": "Enter ignore patterns, one per line.",
"Error": "Error",
"External File Versioning": "External File Versioning",
@@ -114,8 +113,6 @@
"Override Changes": "Override Changes",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Path where versions should be stored (leave empty for the default .stversions folder in the folder).",
"Pause": "Pause",
"Paused": "Paused",
"Please consult the release notes before performing a major upgrade.": "Please consult the release notes before performing a major upgrade.",
"Please wait": "Please wait",
"Preview": "Preview",
@@ -123,8 +120,6 @@
"Quick guide to supported patterns": "Quick guide to supported patterns",
"RAM Utilization": "RAM Utilization",
"Random": "Random",
"Relayed via": "Relayed via",
"Relays": "Relays",
"Release Notes": "Release Notes",
"Remove": "Remove",
"Rescan": "Rescan",
@@ -133,7 +128,6 @@
"Restart": "Restart",
"Restart Needed": "Restart Needed",
"Restarting": "Restarting",
"Resume": "Resume",
"Reused": "Reused",
"Save": "Save",
"Scanning": "Scanning",
@@ -213,7 +207,6 @@
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.",
"Yes": "Yes",
"You must keep at least one version.": "You must keep at least one version.",
"days": "days",
"full documentation": "full documentation",
"items": "items",
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} wants to share folder \"{{folder}}\"."

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "El dispositivo {{device}} ({{address}}) quiere conectarse. ¿Añadir nuevo dispositivo?",
"Devices": "Dispositivos",
"Disconnected": "Desconectado",
"Discovery": "Discovery",
"Documentation": "Documentación",
"Download Rate": "Velocidad de descarga",
"Downloaded": "Descargado",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "El dispositivo {{device}} ({{address}}) se quiere conectar. ¿Agregar nuevo dispositivo?",
"Devices": "Dispositivos",
"Disconnected": "Desconectado",
"Discovery": "Discovery",
"Documentation": "Documentación",
"Download Rate": "Tasa de descarga",
"Downloaded": "Descargado",
@@ -75,12 +74,12 @@
"Global Discovery Server": "Servidor global de identificación",
"Global State": "Estado global",
"Help": "Ayuda",
"Home page": "Pagina de inicio",
"Home page": "Home page",
"Ignore": "Ignorar",
"Ignore Patterns": "Patrones de exclusión",
"Ignore Permissions": "Ignorar permisos",
"Incoming Rate Limit (KiB/s)": "Límite de velocidad de entrada (KiB/s)",
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Configuración incorrecta puede dañar los contenidos de la carpeta y hacer Syncthing inoperable.",
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Incorrect configuration may damage your folder contents and render Syncthing inoperable.",
"Introducer": "Introductor",
"Inversion of the given condition (i.e. do not exclude)": "Inversión de la condición dada (es decir, no excluir)",
"Keep Versions": "Conservar versiones",
@@ -94,7 +93,7 @@
"Major Upgrade": "Actualización mayor",
"Maximum Age": "Edad máxima",
"Metadata Only": "Sólo metadatos",
"Minimum Free Disk Space": "Espacio mínimo libre en disco",
"Minimum Free Disk Space": "Minimum Free Disk Space",
"Move to top of queue": "Mover al principio de la cola.",
"Multi level wildcard (matches multiple directory levels)": "Carácter comodín multinivel (coincide en el directorio y sus subdirectorios)",
"Never": "Nunca",
@@ -123,8 +122,8 @@
"Quick guide to supported patterns": "Guía rápida sobre los patrones soportados",
"RAM Utilization": "Utilización de RAM",
"Random": "Aleatorio",
"Relayed via": "retransmitida vía",
"Relays": "Relés",
"Relayed via": "Relayed via",
"Relays": "Relays",
"Release Notes": "Notas de lanzamiento",
"Remove": "Eliminar",
"Rescan": "Reescanear",
@@ -213,7 +212,7 @@
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Al agregar un nuevo repositorio, tenga en cuenta que la ID del repositorio se utiliza para conectar los repositorios entre dispositivos. Se distingue entre mayúsculas y minúsculas y debe ser exactamente igual en todos los dispositivos.",
"Yes": "Sí",
"You must keep at least one version.": "Debe mantener al menos una versión",
"days": "días",
"days": "days",
"full documentation": "documentación completa",
"items": "ítems",
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} quiere compartir repositorio \"{{folder}}\"."

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "Negatiivinen määrä päiviä ei ole järjellinen.",
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A new major version may not be compatible with previous versions.": "A new major version may not be compatible with previous versions.",
"API Key": "API-avain",
"About": "Tietoja",
@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Laite {{device}} ({{address}}) haluaa yhdistää. Lisää uusi laite?",
"Devices": "Laitteet",
"Disconnected": "Yhteys katkaistu",
"Discovery": "Discovery",
"Documentation": "Dokumentaatio",
"Download Rate": "Latausmäärä",
"Downloaded": "Ladattu",
@@ -50,7 +49,7 @@
"Edit Folder": "Muokkaa kansiota",
"Editing": "Muokkaus",
"Enable UPnP": "Ota UPnP käyttöön",
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Syötä osoitteet pilkuilla erotettuina (\"tcp://ip:portti, tcp://nimi:portti\") tai \"dynamic\" käyttääksesi osoitteen automaattista selvitystä.",
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.",
"Enter ignore patterns, one per line.": "Syötä ohituslausekkeet, yksi riviä kohden.",
"Error": "Virhe",
"External File Versioning": "Ulkoinen tiedostoversionti",
@@ -58,10 +57,10 @@
"File Pull Order": "File Pull Order",
"File Versioning": "Tiedostoversiointi",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Tiedostojen oikeusbitit jätetään huomiotta etsittäessä muutoksia. Käytä FAT-tiedostojärjestelmissä.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Kun Syncthing poistaa tai korvaa tiedostoja, ne siirretään .stversions-hakemistoon.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Tiedostot siirretään päivämäärällä merkityiksi versioiksi .stversions-kansioon, kun Syncthing korvaa tai poistaa ne.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Tiedostot on suojattu muilla laitteilla tehdyiltä muutoksilta, mutta tällä laitteella tehdyt muutokset lähetetään muuhun ryhmään.",
"Folder": "Kansio",
"Folder": "Folder",
"Folder ID": "Kansion ID",
"Folder Master": "Hallitsijakansio",
"Folder Path": "Kansion polku",
@@ -94,7 +93,7 @@
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maksimi-ikä",
"Metadata Only": "Vain metadata",
"Minimum Free Disk Space": "Vapaan levytilan vähimmäismäärä",
"Minimum Free Disk Space": "Minimum Free Disk Space",
"Move to top of queue": "Siirrä jonon alkuun",
"Multi level wildcard (matches multiple directory levels)": "Monitasoinen jokerimerkki (vaikuttaa useassa kansiotasossa)",
"Never": "Ei koskaan",
@@ -114,8 +113,8 @@
"Override Changes": "Ohita muutokset",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Polku kansioon paikallisella tietokoneella. Kansio luodaan, ellei sitä ole olemassa. Tilde-merkkiä (~) voidaan käyttää oikotienä polulle",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Polku jonne versiot tullaan tallentamaan (jätä tyhjäksi oletusvalintaa .stversions varten).",
"Pause": "Keskeytä",
"Paused": "Keskeytetty",
"Pause": "Pause",
"Paused": "Paused",
"Please consult the release notes before performing a major upgrade.": "Please consult the release notes before performing a major upgrade.",
"Please wait": "Ole hyvä ja odota",
"Preview": "Esikatselu",
@@ -124,16 +123,16 @@
"RAM Utilization": "RAM:n käyttö",
"Random": "Satunnaien",
"Relayed via": "Relayed via",
"Relays": "Välityspalvelimet",
"Release Notes": "Julkaisutiedot",
"Remove": "Poista",
"Relays": "Relays",
"Release Notes": "Release Notes",
"Remove": "Remove",
"Rescan": "Skannaa uudelleen",
"Rescan All": "Skannaa kaikki uudelleen",
"Rescan Interval": "Uudelleenskannauksen aikaväli",
"Restart": "Käynnistä uudelleen",
"Restart Needed": "Uudelleenkäynnistys tarvitaan",
"Restarting": "Käynnistetään uudelleen",
"Resume": "Jatka",
"Resume": "Resume",
"Reused": "Uudelleenkäytetty",
"Save": "Tallenna",
"Scanning": "Skannataan",
@@ -154,11 +153,11 @@
"Shutdown Complete": "Sammutus valmis",
"Simple File Versioning": "Yksinkertainen tiedostoversiointi",
"Single level wildcard (matches within a directory only)": "Yksitasoinen jokerimerkki (vaikuttaa vain kyseisen kansion sisällä)",
"Smallest First": "Pienin ensin",
"Smallest First": "Smallest First",
"Source Code": "Lähdekoodi",
"Staggered File Versioning": "Porrastettu tiedostoversiointi",
"Start Browser": "Käynnistä selain",
"Statistics": "Tilastot",
"Statistics": "Statistics",
"Stopped": "Pysäytetty",
"Support": "Tuki",
"Sync Protocol Listen Addresses": "Synkronointiprotokollan kuunteluosoite",
@@ -181,16 +180,16 @@
"The folder ID must be unique.": "Kansion ID:n tulee olla uniikki.",
"The folder path cannot be blank.": "Kansion polku ei voi olla tyhjä.",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Seuraavat aikavälit ovat käytössä: ensimmäisen tunnin ajalta uusi versio säilytetään joka 30 sekunti, ensimmäisen päivän ajalta uusi versio säilytetään tunneittain ja ensimmäisen 30 päivän aikana uusi versio säilytetään päivittäin. Lopulta uusi versio säilytetään viikoittain, kunnes maksimi-ikä saavutetaan.",
"The following items could not be synchronized.": "Seuraavia nimikkeitä ei voitu synkronoida.",
"The following items could not be synchronized.": "The following items could not be synchronized.",
"The maximum age must be a number and cannot be blank.": "Maksimi-iän tulee olla numero, eikä se voi olla tyhjä.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maksimiaika versioiden säilytykseen (päivissä, aseta 0 säilyttääksesi versiot ikuisesti).",
"The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).": "Vapaan levytilan vähimmäismäärä prosentteina tulee olla positiivinen luku (suljetulta) väliltä 0-100.",
"The number of days must be a number and cannot be blank.": "Päivien määrän tulee olla numero, eikä se voi olla tyhjä.",
"The number of days to keep files in the trash can. Zero means forever.": "Montako päivää tiedostoja säilytetään roskakorissa. Nolla (0) = ikuisesti.",
"The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).": "The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of old versions to keep, per file.": "Säilytettävien vanhojen versioiden määrä tiedostoa kohden.",
"The number of versions must be a number and cannot be blank.": "Versioiden määrän rulee olla numero, eikä se voi olla tyhjä.",
"The path cannot be blank.": "Polku ei voi olla tyhjä.",
"The rate limit must be a non-negative number (0: no limit)": "Nopeusrajan tulee olla positiivinen luku tai nolla. (0: ei rajaa)",
"The rate limit must be a non-negative number (0: no limit)": "The rate limit must be a non-negative number (0: no limit)",
"The rescan interval must be a non-negative number of seconds.": "Uudelleenskannauksen aikavälin tulee olla ei-negatiivinen numero sekunteja.",
"They are retried automatically and will be synced when the error is resolved.": "They are retried automatically and will be synced when the error is resolved.",
"This is a major version upgrade.": "This is a major version upgrade.",
@@ -204,7 +203,7 @@
"Upgrade To {%version%}": "Päivitä versioon {{version}}",
"Upgrading": "Päivitetään",
"Upload Rate": "Lähetysmäärä",
"Uptime": "Päälläoloaika",
"Uptime": "Uptime",
"Use HTTPS for GUI": "Käytä HTTPS:ää GUI:n kanssa",
"Version": "Versio",
"Versions Path": "Versioiden polku",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "L'appareil {{device}} ({{address}}) veut se connecter. Voulez-vous ajouter cette appareil ?",
"Devices": "Appareil",
"Disconnected": "Déconnecté",
"Discovery": "Discovery",
"Documentation": "Documentation",
"Download Rate": "Débit de réception",
"Downloaded": "Téléchargé",

@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "L'appareil {{device}} ({{address}}) veut se connecter. Voulez-vous ajouter cette appareil ?",
"Devices": "Appareil",
"Disconnected": "Déconnecté",
"Discovery": "Discovery",
"Documentation": "Documentation",
"Download Rate": "Débit de réception",
"Downloaded": "Téléchargé",

@@ -31,7 +31,7 @@
"Connection Error": "Kapcsolódási hiba",
"Copied from elsewhere": "Másolva máshonnan",
"Copied from original": "Másolva az eredetiről",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 az alábbi Közreműködők:",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 az alábbi Közreműködők",
"Delete": "Törlés",
"Deleted": "Törölve",
"Device ID": "Eszköz azonosító",
@@ -40,7 +40,6 @@
"Device {%device%} ({%address%}) wants to connect. Add new device?": "{{device}} ({{address}}) csatlakozni szeretne. Hozzáadod az új eszközt?",
"Devices": "Eszközök",
"Disconnected": "Kapcsolat bontva",
"Discovery": "Felfedezés",
"Documentation": "Dokumentáció",
"Download Rate": "Letöltési sebesség",
"Downloaded": "Letöltve",

Some files were not shown because too many files have changed in this diff.