mirror of
https://github.com/syncthing/syncthing.git
synced 2026-01-14 00:39:13 -05:00
Compare commits
50 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ae0e56e98d | ||
|
|
6efe521e44 | ||
|
|
bccd21ac14 | ||
|
|
8449a65cdf | ||
|
|
fc47562983 | ||
|
|
76900ae291 | ||
|
|
ec55559ff1 | ||
|
|
3daa26e1f7 | ||
|
|
9ea8b6f659 | ||
|
|
387f2f0a94 | ||
|
|
b0d95d02be | ||
|
|
3a98f01d31 | ||
|
|
d305752749 | ||
|
|
2ba4b235fc | ||
|
|
6820c0a5d7 | ||
|
|
048883ad27 | ||
|
|
08e7ada242 | ||
|
|
d3ddfa31f7 | ||
|
|
4b899a813e | ||
|
|
15ee9a5cac | ||
|
|
58945a429f | ||
|
|
9f4111015e | ||
|
|
04b960b415 | ||
|
|
33267f2178 | ||
|
|
970e50d1f1 | ||
|
|
d4199c2d08 | ||
|
|
cf4ca7b6a8 | ||
|
|
d8b335ce65 | ||
|
|
39a2934b05 | ||
|
|
7d1c720b84 | ||
|
|
51743461ee | ||
|
|
53cf5ca762 | ||
|
|
b5ef42b0a1 | ||
|
|
0521ddd858 | ||
|
|
b7bb3bfee2 | ||
|
|
4183044e96 | ||
|
|
27448bde20 | ||
|
|
87b9e8fbaf | ||
|
|
9d79859ba6 | ||
|
|
25bb55491a | ||
|
|
198cbacc3e | ||
|
|
3f842221f7 | ||
|
|
5d0183a9ed | ||
|
|
99df4d660b | ||
|
|
f2adfde1a8 | ||
|
|
1e915a2903 | ||
|
|
e2dc3e9ff3 | ||
|
|
f1bb8daaab | ||
|
|
b0fcbebdae | ||
|
|
34f72ecf8f |
@@ -22,3 +22,4 @@ Phill Luby <phill.luby@newredo.com>
|
||||
Ryan Sullivan <kayoticsully@gmail.com>
|
||||
Tully Robinson <tully@tojr.org>
|
||||
Veeti Paananen <veeti.paananen@rojekti.fi>
|
||||
Vil Brekin <vilbrekin@gmail.com>
|
||||
|
||||
10
Godeps/Godeps.json
generated
10
Godeps/Godeps.json
generated
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"ImportPath": "github.com/syncthing/syncthing",
|
||||
"GoVersion": "go1.3.1",
|
||||
"GoVersion": "go1.3.3",
|
||||
"Packages": [
|
||||
"./cmd/..."
|
||||
],
|
||||
@@ -38,13 +38,17 @@
|
||||
"ImportPath": "github.com/bkaradzic/go-lz4",
|
||||
"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/calmh/logger",
|
||||
"Rev": "f50d32b313bec2933a3e1049f7416a29f3413d29"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/calmh/osext",
|
||||
"Rev": "9bf61584e5f1f172e8766ddc9022d9c401faaa5e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/calmh/xdr",
|
||||
"Rev": "a597b63b87d6140f79084c8aab214b4d533833a1"
|
||||
"Rev": "ec3d404f43731551258977b38dd72cf557d00398"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/juju/ratelimit",
|
||||
@@ -52,7 +56,7 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
||||
"Rev": "0d8857b7ec571b0a6c9677d8e6c0a4ceeabd1d71"
|
||||
"Rev": "cd2b8f743192883ab9fbc5f070ebda1dc90f3732"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vitrun/qart/coding",
|
||||
|
||||
19
Godeps/_workspace/src/github.com/calmh/logger/LICENSE
generated
vendored
Normal file
19
Godeps/_workspace/src/github.com/calmh/logger/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (C) 2013 Jakob Borg
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
15
Godeps/_workspace/src/github.com/calmh/logger/README.md
generated
vendored
Normal file
15
Godeps/_workspace/src/github.com/calmh/logger/README.md
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
logger
|
||||
======
|
||||
|
||||
A small wrapper around `log` to provide log levels.
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
http://godoc.org/github.com/calmh/logger
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
MIT
|
||||
|
||||
35
internal/logger/logger.go → Godeps/_workspace/src/github.com/calmh/logger/logger.go
generated
vendored
35
internal/logger/logger.go → Godeps/_workspace/src/github.com/calmh/logger/logger.go
generated
vendored
@@ -1,17 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package logger implements a standardized logger with callback functionality
|
||||
package logger
|
||||
@@ -35,6 +23,7 @@ const (
|
||||
NumLevels
|
||||
)
|
||||
|
||||
// A MessageHandler is called with the log level and message text.
|
||||
type MessageHandler func(l LogLevel, msg string)
|
||||
|
||||
type Logger struct {
|
||||
@@ -43,6 +32,7 @@ type Logger struct {
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
// The default logger logs to standard output with a time prefix.
|
||||
var DefaultLogger = New()
|
||||
|
||||
func New() *Logger {
|
||||
@@ -51,16 +41,20 @@ func New() *Logger {
|
||||
}
|
||||
}
|
||||
|
||||
// AddHandler registers a new MessageHandler to receive messages with the
|
||||
// specified log level or above.
|
||||
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
l.handlers[level] = append(l.handlers[level], h)
|
||||
}
|
||||
|
||||
// See log.SetFlags
|
||||
func (l *Logger) SetFlags(flag int) {
|
||||
l.logger.SetFlags(flag)
|
||||
}
|
||||
|
||||
// See log.SetPrefix
|
||||
func (l *Logger) SetPrefix(prefix string) {
|
||||
l.logger.SetPrefix(prefix)
|
||||
}
|
||||
@@ -71,6 +65,7 @@ func (l *Logger) callHandlers(level LogLevel, s string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Debugln logs a line with a DEBUG prefix.
|
||||
func (l *Logger) Debugln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -79,6 +74,7 @@ func (l *Logger) Debugln(vals ...interface{}) {
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Debugf logs a formatted line with a DEBUG prefix.
|
||||
func (l *Logger) Debugf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -86,6 +82,8 @@ func (l *Logger) Debugf(format string, vals ...interface{}) {
|
||||
l.logger.Output(2, "DEBUG: "+s)
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Infoln logs a line with an INFO prefix.
|
||||
func (l *Logger) Infoln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -94,6 +92,7 @@ func (l *Logger) Infoln(vals ...interface{}) {
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Infof logs a formatted line with an INFO prefix.
|
||||
func (l *Logger) Infof(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -102,6 +101,7 @@ func (l *Logger) Infof(format string, vals ...interface{}) {
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Okln logs a line with an OK prefix.
|
||||
func (l *Logger) Okln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -110,6 +110,7 @@ func (l *Logger) Okln(vals ...interface{}) {
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Okf logs a formatted line with an OK prefix.
|
||||
func (l *Logger) Okf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -118,6 +119,7 @@ func (l *Logger) Okf(format string, vals ...interface{}) {
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Warnln logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -126,6 +128,7 @@ func (l *Logger) Warnln(vals ...interface{}) {
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Warnf logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -134,6 +137,8 @@ func (l *Logger) Warnf(format string, vals ...interface{}) {
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Fatalln logs a line with a FATAL prefix and exits the process with exit
|
||||
// code 1.
|
||||
func (l *Logger) Fatalln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -143,6 +148,8 @@ func (l *Logger) Fatalln(vals ...interface{}) {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalf logs a formatted line with a FATAL prefix and exits the process with
|
||||
// exit code 1.
|
||||
func (l *Logger) Fatalf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -1,17 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package logger
|
||||
|
||||
8
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
8
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
@@ -33,11 +33,15 @@ var s = XDRBenchStruct{
|
||||
S0: "Hello World! String one.",
|
||||
S1: "Hello World! String two.",
|
||||
}
|
||||
var e = s.MarshalXDR()
|
||||
var e []byte
|
||||
|
||||
func init() {
|
||||
e, _ = s.MarshalXDR()
|
||||
}
|
||||
|
||||
func BenchmarkThisMarshal(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
res = s.MarshalXDR()
|
||||
res, _ = s.MarshalXDR()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
40
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
40
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
@@ -72,15 +72,23 @@ func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) MarshalXDR() []byte {
|
||||
func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) AppendXDR(bs []byte) []byte {
|
||||
func (o XDRBenchStruct) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
@@ -88,13 +96,13 @@ func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint32(o.I2)
|
||||
xw.WriteUint16(o.I3)
|
||||
xw.WriteUint8(o.I4)
|
||||
if len(o.Bs0) > 128 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Bs0); l > 128 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Bs0", l, 128)
|
||||
}
|
||||
xw.WriteBytes(o.Bs0)
|
||||
xw.WriteBytes(o.Bs1)
|
||||
if len(o.S0) > 128 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.S0); l > 128 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("S0", l, 128)
|
||||
}
|
||||
xw.WriteString(o.S0)
|
||||
xw.WriteString(o.S1)
|
||||
@@ -150,15 +158,23 @@ func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o repeatReader) MarshalXDR() []byte {
|
||||
func (o repeatReader) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o repeatReader) AppendXDR(bs []byte) []byte {
|
||||
func (o repeatReader) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
26
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
26
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
@@ -53,15 +53,23 @@ func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) MarshalXDR() []byte {
|
||||
func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) []byte {
|
||||
func (o {{.TypeName}}) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
@@ -71,8 +79,8 @@ func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
|
||||
@@ -84,8 +92,8 @@ func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
{{end}}
|
||||
{{else}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
|
||||
@@ -135,7 +143,7 @@ func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
|
||||
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
|
||||
|
||||
12
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
12
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
@@ -24,9 +24,10 @@ type TestStruct struct {
|
||||
UI32 uint32
|
||||
I64 int64
|
||||
UI64 uint64
|
||||
BS []byte
|
||||
S string
|
||||
BS []byte // max:1024
|
||||
S string // max:1024
|
||||
C Opaque
|
||||
SS []string // max:1024
|
||||
}
|
||||
|
||||
type Opaque [32]byte
|
||||
@@ -49,9 +50,12 @@ func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
|
||||
func TestEncDec(t *testing.T) {
|
||||
fn := func(t0 TestStruct) bool {
|
||||
bs := t0.MarshalXDR()
|
||||
bs, err := t0.MarshalXDR()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var t1 TestStruct
|
||||
err := t1.UnmarshalXDR(bs)
|
||||
err = t1.UnmarshalXDR(bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
54
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
54
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
@@ -54,6 +54,14 @@ TestStruct Structure:
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Opaque |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of SS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of SS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ SS (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct TestStruct {
|
||||
@@ -66,9 +74,10 @@ struct TestStruct {
|
||||
unsigned int UI32;
|
||||
hyper I64;
|
||||
unsigned hyper UI64;
|
||||
opaque BS<>;
|
||||
string S<>;
|
||||
opaque BS<1024>;
|
||||
string S<1024>;
|
||||
Opaque C;
|
||||
string SS<1024>;
|
||||
}
|
||||
|
||||
*/
|
||||
@@ -78,15 +87,23 @@ func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o TestStruct) MarshalXDR() []byte {
|
||||
func (o TestStruct) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o TestStruct) AppendXDR(bs []byte) []byte {
|
||||
func (o TestStruct) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
@@ -99,12 +116,25 @@ func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint32(o.UI32)
|
||||
xw.WriteUint64(uint64(o.I64))
|
||||
xw.WriteUint64(o.UI64)
|
||||
if l := len(o.BS); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("BS", l, 1024)
|
||||
}
|
||||
xw.WriteBytes(o.BS)
|
||||
if l := len(o.S); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
|
||||
}
|
||||
xw.WriteString(o.S)
|
||||
_, err := o.C.encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
if l := len(o.SS); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("SS", l, 1024)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.SS)))
|
||||
for i := range o.SS {
|
||||
xw.WriteString(o.SS[i])
|
||||
}
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
@@ -129,8 +159,16 @@ func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
|
||||
o.UI32 = xr.ReadUint32()
|
||||
o.I64 = int64(xr.ReadUint64())
|
||||
o.UI64 = xr.ReadUint64()
|
||||
o.BS = xr.ReadBytes()
|
||||
o.S = xr.ReadString()
|
||||
o.BS = xr.ReadBytesMax(1024)
|
||||
o.S = xr.ReadStringMax(1024)
|
||||
(&o.C).decodeXDR(xr)
|
||||
_SSSize := int(xr.ReadUint32())
|
||||
if _SSSize > 1024 {
|
||||
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
|
||||
}
|
||||
o.SS = make([]string, _SSSize)
|
||||
for i := range o.SS {
|
||||
o.SS[i] = xr.ReadString()
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
10
Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
10
Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
@@ -5,14 +5,12 @@
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var ErrElementSizeExceeded = errors.New("element size exceeded")
|
||||
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
@@ -71,7 +69,7 @@ func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
|
||||
return nil
|
||||
}
|
||||
if max > 0 && l > max {
|
||||
r.err = ErrElementSizeExceeded
|
||||
r.err = ElementSizeExceeded("bytes field", l, max)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -162,3 +160,7 @@ func (r *Reader) Error() error {
|
||||
}
|
||||
return XDRError{"read", r.err}
|
||||
}
|
||||
|
||||
func ElementSizeExceeded(field string, size, limit int) error {
|
||||
return fmt.Errorf("%s exceeds size limit; %d > %d", field, size, limit)
|
||||
}
|
||||
|
||||
5
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
5
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
@@ -5,6 +5,7 @@ package xdr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
@@ -60,7 +61,7 @@ func TestReadBytesMaxInto(t *testing.T) {
|
||||
if read := len(bs); read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
|
||||
}
|
||||
} else if r.err != ErrElementSizeExceeded {
|
||||
} else if !strings.Contains(r.err.Error(), "exceeds size") {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
|
||||
}
|
||||
}
|
||||
@@ -84,7 +85,7 @@ func TestReadStringMax(t *testing.T) {
|
||||
if read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
|
||||
}
|
||||
} else if r.err != ErrElementSizeExceeded {
|
||||
} else if !strings.Contains(r.err.Error(), "exceeds size") {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
|
||||
}
|
||||
}
|
||||
|
||||
31
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
generated
vendored
31
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
generated
vendored
@@ -7,6 +7,7 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -46,7 +47,7 @@ type DB struct {
|
||||
|
||||
// Snapshot.
|
||||
snapsMu sync.Mutex
|
||||
snapsRoot snapshotElement
|
||||
snapsList *list.List
|
||||
|
||||
// Stats.
|
||||
aliveSnaps, aliveIters int32
|
||||
@@ -85,6 +86,8 @@ func openDB(s *session) (*DB, error) {
|
||||
seq: s.stSeq,
|
||||
// MemDB
|
||||
memPool: make(chan *memdb.DB, 1),
|
||||
// Snapshot
|
||||
snapsList: list.New(),
|
||||
// Write
|
||||
writeC: make(chan *Batch),
|
||||
writeMergedC: make(chan bool),
|
||||
@@ -103,7 +106,6 @@ func openDB(s *session) (*DB, error) {
|
||||
// Close
|
||||
closeC: make(chan struct{}),
|
||||
}
|
||||
db.initSnapshot()
|
||||
|
||||
if err := db.recoverJournal(); err != nil {
|
||||
return nil, err
|
||||
@@ -609,7 +611,9 @@ func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
return db.get(key, db.getSeq(), ro)
|
||||
se := db.acquireSnapshot()
|
||||
defer db.releaseSnapshot(se)
|
||||
return db.get(key, se.seq, ro)
|
||||
}
|
||||
|
||||
// NewIterator returns an iterator for the latest snapshot of the
|
||||
@@ -633,9 +637,11 @@ func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Itera
|
||||
return iterator.NewEmptyIterator(err)
|
||||
}
|
||||
|
||||
snap := db.newSnapshot()
|
||||
defer snap.Release()
|
||||
return snap.NewIterator(slice, ro)
|
||||
se := db.acquireSnapshot()
|
||||
defer db.releaseSnapshot(se)
|
||||
// Iterator holds 'version' lock, 'version' is immutable so snapshot
|
||||
// can be released after iterator created.
|
||||
return db.newIterator(se.seq, slice, ro)
|
||||
}
|
||||
|
||||
// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
|
||||
@@ -655,7 +661,7 @@ func (db *DB) GetSnapshot() (*Snapshot, error) {
|
||||
//
|
||||
// Property names:
|
||||
// leveldb.num-files-at-level{n}
|
||||
// Returns the number of filer at level 'n'.
|
||||
// Returns the number of files at level 'n'.
|
||||
// leveldb.stats
|
||||
// Returns statistics of the underlying DB.
|
||||
// leveldb.sstables
|
||||
@@ -685,11 +691,12 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
|
||||
numFilesPrefix := "num-files-at-level"
|
||||
switch {
|
||||
case strings.HasPrefix(p, "num-files-at-level"):
|
||||
case strings.HasPrefix(p, numFilesPrefix):
|
||||
var level uint
|
||||
var rest string
|
||||
n, _ := fmt.Scanf("%d%s", &level, &rest)
|
||||
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
|
||||
if n != 1 || level >= kNumLevels {
|
||||
err = errors.New("leveldb: GetProperty: invalid property: " + name)
|
||||
} else {
|
||||
@@ -796,12 +803,13 @@ func (db *DB) Close() error {
|
||||
default:
|
||||
}
|
||||
|
||||
// Signal all goroutines.
|
||||
close(db.closeC)
|
||||
|
||||
// Wait for the close WaitGroup.
|
||||
// Wait for all gorotines to exit.
|
||||
db.closeW.Wait()
|
||||
|
||||
// Close journal.
|
||||
// Lock writer and closes journal.
|
||||
db.writeLockC <- struct{}{}
|
||||
if db.journal != nil {
|
||||
db.journal.Close()
|
||||
@@ -827,7 +835,6 @@ func (db *DB) Close() error {
|
||||
db.journalWriter = nil
|
||||
db.journalFile = nil
|
||||
db.frozenJournalFile = nil
|
||||
db.snapsRoot = snapshotElement{}
|
||||
db.closer = nil
|
||||
|
||||
return err
|
||||
|
||||
27
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
generated
vendored
27
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
generated
vendored
@@ -538,6 +538,9 @@ type cIdle struct {
|
||||
}
|
||||
|
||||
func (r cIdle) ack(err error) {
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
r.ackC <- err
|
||||
}
|
||||
|
||||
@@ -548,27 +551,33 @@ type cRange struct {
|
||||
}
|
||||
|
||||
func (r cRange) ack(err error) {
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
if r.ackC != nil {
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
r.ackC <- err
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) compSendIdle(compC chan<- cCmd) error {
|
||||
func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
|
||||
ch := make(chan error)
|
||||
defer close(ch)
|
||||
// Send cmd.
|
||||
select {
|
||||
case compC <- cIdle{ch}:
|
||||
case err := <-db.compErrC:
|
||||
return err
|
||||
case err = <-db.compErrC:
|
||||
return
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
// Wait cmd.
|
||||
return <-ch
|
||||
select {
|
||||
case err = <-ch:
|
||||
case err = <-db.compErrC:
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
|
||||
@@ -584,8 +593,10 @@ func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err
|
||||
}
|
||||
// Wait cmd.
|
||||
select {
|
||||
case err = <-db.compErrC:
|
||||
case err = <-ch:
|
||||
case err = <-db.compErrC:
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
3
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
generated
vendored
3
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
generated
vendored
@@ -48,8 +48,7 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
|
||||
i = append(i, fmi)
|
||||
}
|
||||
i = append(i, ti...)
|
||||
strict := db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
|
||||
mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
|
||||
mi := iterator.NewMergedIterator(i, db.s.icmp, true)
|
||||
mi.SetReleaser(&versionReleaser{v: v})
|
||||
return mi
|
||||
}
|
||||
|
||||
66
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
generated
vendored
66
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
generated
vendored
@@ -7,6 +7,7 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -19,51 +20,41 @@ import (
|
||||
type snapshotElement struct {
|
||||
seq uint64
|
||||
ref int
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
next, prev *snapshotElement
|
||||
}
|
||||
|
||||
// Initialize the snapshot.
|
||||
func (db *DB) initSnapshot() {
|
||||
db.snapsRoot.next = &db.snapsRoot
|
||||
db.snapsRoot.prev = &db.snapsRoot
|
||||
e *list.Element
|
||||
}
|
||||
|
||||
// Acquires a snapshot, based on latest sequence.
|
||||
func (db *DB) acquireSnapshot() *snapshotElement {
|
||||
db.snapsMu.Lock()
|
||||
defer db.snapsMu.Unlock()
|
||||
|
||||
seq := db.getSeq()
|
||||
elem := db.snapsRoot.prev
|
||||
if elem == &db.snapsRoot || elem.seq != seq {
|
||||
at := db.snapsRoot.prev
|
||||
next := at.next
|
||||
elem = &snapshotElement{
|
||||
seq: seq,
|
||||
prev: at,
|
||||
next: next,
|
||||
|
||||
if e := db.snapsList.Back(); e != nil {
|
||||
se := e.Value.(*snapshotElement)
|
||||
if se.seq == seq {
|
||||
se.ref++
|
||||
return se
|
||||
} else if seq < se.seq {
|
||||
panic("leveldb: sequence number is not increasing")
|
||||
}
|
||||
at.next = elem
|
||||
next.prev = elem
|
||||
}
|
||||
elem.ref++
|
||||
db.snapsMu.Unlock()
|
||||
return elem
|
||||
se := &snapshotElement{seq: seq, ref: 1}
|
||||
se.e = db.snapsList.PushBack(se)
|
||||
return se
|
||||
}
|
||||
|
||||
// Releases given snapshot element.
|
||||
func (db *DB) releaseSnapshot(elem *snapshotElement) {
|
||||
if !db.isClosed() {
|
||||
db.snapsMu.Lock()
|
||||
elem.ref--
|
||||
if elem.ref == 0 {
|
||||
elem.prev.next = elem.next
|
||||
elem.next.prev = elem.prev
|
||||
elem.next = nil
|
||||
elem.prev = nil
|
||||
} else if elem.ref < 0 {
|
||||
panic("leveldb: Snapshot: negative element reference")
|
||||
}
|
||||
db.snapsMu.Unlock()
|
||||
func (db *DB) releaseSnapshot(se *snapshotElement) {
|
||||
db.snapsMu.Lock()
|
||||
defer db.snapsMu.Unlock()
|
||||
|
||||
se.ref--
|
||||
if se.ref == 0 {
|
||||
db.snapsList.Remove(se.e)
|
||||
se.e = nil
|
||||
} else if se.ref < 0 {
|
||||
panic("leveldb: Snapshot: negative element reference")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,10 +62,11 @@ func (db *DB) releaseSnapshot(elem *snapshotElement) {
|
||||
func (db *DB) minSeq() uint64 {
|
||||
db.snapsMu.Lock()
|
||||
defer db.snapsMu.Unlock()
|
||||
elem := db.snapsRoot.next
|
||||
if elem != &db.snapsRoot {
|
||||
return elem.seq
|
||||
|
||||
if e := db.snapsList.Front(); e != nil {
|
||||
return e.Value.(*snapshotElement).seq
|
||||
}
|
||||
|
||||
return db.getSeq()
|
||||
}
|
||||
|
||||
|
||||
150
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
generated
vendored
150
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
generated
vendored
@@ -7,6 +7,9 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
@@ -1126,8 +1129,7 @@ func TestDb_Snapshot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDb_SnapshotList(t *testing.T) {
|
||||
db := &DB{}
|
||||
db.initSnapshot()
|
||||
db := &DB{snapsList: list.New()}
|
||||
e0a := db.acquireSnapshot()
|
||||
e0b := db.acquireSnapshot()
|
||||
db.seq = 1
|
||||
@@ -1983,7 +1985,6 @@ func TestDb_GoleveldbIssue74(t *testing.T) {
|
||||
t.Fatalf("#%d %d != %d", i, k, n)
|
||||
}
|
||||
}
|
||||
t.Logf("writer done after %d iterations", i)
|
||||
}()
|
||||
go func() {
|
||||
var i int
|
||||
@@ -2022,3 +2023,146 @@ func TestDb_GoleveldbIssue74(t *testing.T) {
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestDb_GetProperties(t *testing.T) {
|
||||
h := newDbHarness(t)
|
||||
defer h.close()
|
||||
|
||||
_, err := h.db.GetProperty("leveldb.num-files-at-level")
|
||||
if err == nil {
|
||||
t.Error("GetProperty() failed to detect missing level")
|
||||
}
|
||||
|
||||
_, err = h.db.GetProperty("leveldb.num-files-at-level0")
|
||||
if err != nil {
|
||||
t.Error("got unexpected error", err)
|
||||
}
|
||||
|
||||
_, err = h.db.GetProperty("leveldb.num-files-at-level0x")
|
||||
if err == nil {
|
||||
t.Error("GetProperty() failed to detect invalid level")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDb_GoleveldbIssue72and83(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 1 * opt.MiB,
|
||||
CachedOpenFiles: 3,
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
const n, wn, dur = 10000, 100, 30 * time.Second
|
||||
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
randomData := func(prefix byte, i int) []byte {
|
||||
data := make([]byte, 1+4+32+64+32)
|
||||
_, err := crand.Reader.Read(data[1 : len(data)-4])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
data[0] = prefix
|
||||
binary.LittleEndian.PutUint32(data[len(data)-4:], uint32(i))
|
||||
return data
|
||||
}
|
||||
|
||||
keys := make([][]byte, n)
|
||||
for i := range keys {
|
||||
keys[i] = randomData(1, 0)
|
||||
}
|
||||
|
||||
until := time.Now().Add(dur)
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(3)
|
||||
var done uint32
|
||||
go func() {
|
||||
i := 0
|
||||
defer func() {
|
||||
t.Logf("WRITER DONE #%d", i)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
b := new(Batch)
|
||||
for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
|
||||
b.Reset()
|
||||
for _, k1 := range keys {
|
||||
k2 := randomData(2, i)
|
||||
b.Put(k2, randomData(42, i))
|
||||
b.Put(k1, k2)
|
||||
}
|
||||
if err := h.db.Write(b, h.wo); err != nil {
|
||||
atomic.StoreUint32(&done, 1)
|
||||
t.Fatalf("WRITER #%d db.Write: %v", i, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
var i int
|
||||
defer func() {
|
||||
t.Logf("READER0 DONE #%d", i)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
wg.Done()
|
||||
}()
|
||||
for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
|
||||
snap := h.getSnapshot()
|
||||
seq := snap.elem.seq
|
||||
if seq == 0 {
|
||||
snap.Release()
|
||||
continue
|
||||
}
|
||||
iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
|
||||
writei := int(snap.elem.seq/(n*2) - 1)
|
||||
var k int
|
||||
for ; iter.Next(); k++ {
|
||||
k1 := iter.Key()
|
||||
k2 := iter.Value()
|
||||
kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-4:]))
|
||||
if writei != kwritei {
|
||||
t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
|
||||
}
|
||||
if _, err := snap.Get(k2, nil); err != nil {
|
||||
t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
|
||||
}
|
||||
}
|
||||
if err := iter.Error(); err != nil {
|
||||
t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, err)
|
||||
}
|
||||
iter.Release()
|
||||
snap.Release()
|
||||
if k > 0 && k != n {
|
||||
t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
var i int
|
||||
defer func() {
|
||||
t.Logf("READER1 DONE #%d", i)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
wg.Done()
|
||||
}()
|
||||
for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
|
||||
iter := h.db.NewIterator(nil, nil)
|
||||
seq := iter.(*dbIter).seq
|
||||
if seq == 0 {
|
||||
iter.Release()
|
||||
continue
|
||||
}
|
||||
writei := int(seq/(n*2) - 1)
|
||||
var k int
|
||||
for ok := iter.Last(); ok; ok = iter.Prev() {
|
||||
k++
|
||||
}
|
||||
if err := iter.Error(); err != nil {
|
||||
t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err)
|
||||
}
|
||||
iter.Release()
|
||||
if m := (writei+1)*n + n; k != m {
|
||||
t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
}
|
||||
|
||||
12
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
generated
vendored
12
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
generated
vendored
@@ -127,21 +127,24 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
|
||||
b.init(wo.GetSync())
|
||||
|
||||
// The write happen synchronously.
|
||||
retry:
|
||||
select {
|
||||
case db.writeC <- b:
|
||||
if <-db.writeMergedC {
|
||||
return <-db.writeAckC
|
||||
}
|
||||
goto retry
|
||||
case db.writeLockC <- struct{}{}:
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
merged := 0
|
||||
danglingMerge := false
|
||||
defer func() {
|
||||
<-db.writeLockC
|
||||
if danglingMerge {
|
||||
db.writeMergedC <- false
|
||||
} else {
|
||||
<-db.writeLockC
|
||||
}
|
||||
for i := 0; i < merged; i++ {
|
||||
db.writeAckC <- err
|
||||
}
|
||||
@@ -170,7 +173,7 @@ drain:
|
||||
db.writeMergedC <- true
|
||||
merged++
|
||||
} else {
|
||||
db.writeMergedC <- false
|
||||
danglingMerge = true
|
||||
break drain
|
||||
}
|
||||
default:
|
||||
@@ -262,6 +265,7 @@ func (db *DB) CompactRange(r util.Range) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Lock writer.
|
||||
select {
|
||||
case db.writeLockC <- struct{}{}:
|
||||
case _, _ = <-db.closeC:
|
||||
|
||||
2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
generated
vendored
2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
generated
vendored
@@ -92,7 +92,7 @@ const (
|
||||
|
||||
// DefaultStrict is the default strict flags. Specify any strict flags
|
||||
// will override default strict flags as whole (i.e. not OR'ed).
|
||||
DefaultStrict = StrictJournalChecksum | StrictBlockChecksum
|
||||
DefaultStrict = StrictJournalChecksum | StrictIterator | StrictBlockChecksum
|
||||
|
||||
// NoStrict disables all strict flags. Override default strict flags.
|
||||
NoStrict = ^StrictAll
|
||||
|
||||
17
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
generated
vendored
17
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
generated
vendored
@@ -28,11 +28,12 @@ var (
|
||||
)
|
||||
|
||||
var (
|
||||
tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
|
||||
tsKeepFS = tsFSEnv == "2"
|
||||
tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
|
||||
tsMU = &sync.Mutex{}
|
||||
tsNum = 0
|
||||
tsFSEnv = os.Getenv("GOLEVELDB_USEFS")
|
||||
tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
|
||||
tsKeepFS = tsFSEnv == "2"
|
||||
tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
|
||||
tsMU = &sync.Mutex{}
|
||||
tsNum = 0
|
||||
)
|
||||
|
||||
type tsLock struct {
|
||||
@@ -413,7 +414,11 @@ func newTestStorage(t *testing.T) *testStorage {
|
||||
num := tsNum
|
||||
tsNum++
|
||||
tsMU.Unlock()
|
||||
path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
|
||||
tempdir := tsTempdir
|
||||
if tempdir == "" {
|
||||
tempdir = os.TempDir()
|
||||
}
|
||||
path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
stor, err = storage.OpenFile(path)
|
||||
if err != nil {
|
||||
|
||||
54
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
generated
vendored
54
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
generated
vendored
@@ -7,9 +7,7 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/cache"
|
||||
@@ -278,8 +276,6 @@ type tOps struct {
|
||||
cache cache.Cache
|
||||
cacheNS cache.Namespace
|
||||
bpool *util.BufferPool
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// Creates an empty table and returns table writer.
|
||||
@@ -326,42 +322,9 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
type trWrapper struct {
|
||||
*table.Reader
|
||||
t *tOps
|
||||
ref int
|
||||
}
|
||||
|
||||
func (w *trWrapper) Release() {
|
||||
if w.ref != 0 && !w.t.closed {
|
||||
panic(fmt.Sprintf("BUG: invalid ref %d, refer to issue #72", w.ref))
|
||||
}
|
||||
w.Reader.Release()
|
||||
}
|
||||
|
||||
type trCacheHandleWrapper struct {
|
||||
cache.Handle
|
||||
t *tOps
|
||||
released bool
|
||||
}
|
||||
|
||||
func (w *trCacheHandleWrapper) Release() {
|
||||
w.t.mu.Lock()
|
||||
defer w.t.mu.Unlock()
|
||||
|
||||
if !w.released {
|
||||
w.released = true
|
||||
w.Value().(*trWrapper).ref--
|
||||
}
|
||||
w.Handle.Release()
|
||||
}
|
||||
|
||||
// Opens table. It returns a cache handle, which should
|
||||
// be released after use.
|
||||
func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
num := f.file.Num()
|
||||
ch = t.cacheNS.Get(num, func() (charge int, value interface{}) {
|
||||
var r storage.Reader
|
||||
@@ -374,13 +337,11 @@ func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
|
||||
if bc := t.s.o.GetBlockCache(); bc != nil {
|
||||
bcacheNS = bc.GetNamespace(num)
|
||||
}
|
||||
return 1, &trWrapper{table.NewReader(r, int64(f.size), bcacheNS, t.bpool, t.s.o), t, 0}
|
||||
return 1, table.NewReader(r, int64(f.size), bcacheNS, t.bpool, t.s.o)
|
||||
})
|
||||
if ch == nil && err == nil {
|
||||
err = ErrClosed
|
||||
}
|
||||
ch.Value().(*trWrapper).ref++
|
||||
ch = &trCacheHandleWrapper{ch, t, false}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -392,7 +353,7 @@ func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []b
|
||||
return nil, nil, err
|
||||
}
|
||||
defer ch.Release()
|
||||
return ch.Value().(*trWrapper).Find(key, ro)
|
||||
return ch.Value().(*table.Reader).Find(key, ro)
|
||||
}
|
||||
|
||||
// Returns approximate offset of the given key.
|
||||
@@ -402,7 +363,7 @@ func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
|
||||
return
|
||||
}
|
||||
defer ch.Release()
|
||||
offset_, err := ch.Value().(*trWrapper).OffsetOf(key)
|
||||
offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
|
||||
return uint64(offset_), err
|
||||
}
|
||||
|
||||
@@ -412,7 +373,7 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
|
||||
if err != nil {
|
||||
return iterator.NewEmptyIterator(err)
|
||||
}
|
||||
iter := ch.Value().(*trWrapper).NewIterator(slice, ro)
|
||||
iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
|
||||
iter.SetReleaser(ch)
|
||||
return iter
|
||||
}
|
||||
@@ -420,9 +381,6 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
|
||||
// Removes table from persistent storage. It waits until
|
||||
// no one use the the table.
|
||||
func (t *tOps) remove(f *tFile) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
num := f.file.Num()
|
||||
t.cacheNS.Delete(num, func(exist, pending bool) {
|
||||
if !pending {
|
||||
@@ -441,10 +399,6 @@ func (t *tOps) remove(f *tFile) {
|
||||
// Closes the table ops instance. It will close all tables,
|
||||
// regadless still used or not.
|
||||
func (t *tOps) close() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
t.closed = true
|
||||
t.cache.Zap()
|
||||
t.bpool.Close()
|
||||
}
|
||||
|
||||
27
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
generated
vendored
27
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
generated
vendored
@@ -19,13 +19,18 @@ import (
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
func (b *block) TestNewIterator(slice *util.Range) iterator.Iterator {
|
||||
return b.newIterator(slice, false, nil)
|
||||
type blockTesting struct {
|
||||
tr *Reader
|
||||
b *block
|
||||
}
|
||||
|
||||
func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
|
||||
return t.tr.newBlockIter(t.b, nil, slice, false)
|
||||
}
|
||||
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Block", func() {
|
||||
Build := func(kv *testutil.KeyValue, restartInterval int) *block {
|
||||
Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
|
||||
// Building the block.
|
||||
bw := &blockWriter{
|
||||
restartInterval: restartInterval,
|
||||
@@ -39,11 +44,13 @@ var _ = testutil.Defer(func() {
|
||||
// Opening the block.
|
||||
data := bw.buf.Bytes()
|
||||
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
|
||||
return &block{
|
||||
tr: &Reader{cmp: comparer.DefaultComparer},
|
||||
data: data,
|
||||
restartsLen: restartsLen,
|
||||
restartsOffset: len(data) - (restartsLen+1)*4,
|
||||
return &blockTesting{
|
||||
tr: &Reader{cmp: comparer.DefaultComparer},
|
||||
b: &block{
|
||||
data: data,
|
||||
restartsLen: restartsLen,
|
||||
restartsOffset: len(data) - (restartsLen+1)*4,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,11 +109,11 @@ var _ = testutil.Defer(func() {
|
||||
for restartInterval := 1; restartInterval <= 5; restartInterval++ {
|
||||
Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
|
||||
// Make block.
|
||||
br := Build(kv, restartInterval)
|
||||
bt := Build(kv, restartInterval)
|
||||
|
||||
Test := func(r *util.Range) func(done Done) {
|
||||
return func(done Done) {
|
||||
iter := br.newIterator(r, false, nil)
|
||||
iter := bt.TestNewIterator(r)
|
||||
Expect(iter.Error()).ShouldNot(HaveOccurred())
|
||||
|
||||
t := testutil.IteratorTesting{
|
||||
|
||||
140
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
generated
vendored
140
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
generated
vendored
@@ -39,7 +39,7 @@ func max(x, y int) int {
|
||||
}
|
||||
|
||||
type block struct {
|
||||
tr *Reader
|
||||
bpool *util.BufferPool
|
||||
data []byte
|
||||
restartsLen int
|
||||
restartsOffset int
|
||||
@@ -47,14 +47,14 @@ type block struct {
|
||||
checksum bool
|
||||
}
|
||||
|
||||
func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) {
|
||||
func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
|
||||
index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
|
||||
offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
|
||||
offset += 1 // shared always zero, since this is a restart point
|
||||
v1, n1 := binary.Uvarint(b.data[offset:]) // key length
|
||||
_, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
|
||||
m := offset + n1 + n2
|
||||
return b.tr.cmp.Compare(b.data[m:m+int(v1)], key) > 0
|
||||
return cmp.Compare(b.data[m:m+int(v1)], key) > 0
|
||||
}) + rstart - 1
|
||||
if index < rstart {
|
||||
// The smallest key is greater-than key sought.
|
||||
@@ -96,48 +96,9 @@ func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error)
|
||||
return
|
||||
}
|
||||
|
||||
func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releaser) *blockIter {
|
||||
bi := &blockIter{
|
||||
block: b,
|
||||
cache: cache,
|
||||
// Valid key should never be nil.
|
||||
key: make([]byte, 0),
|
||||
dir: dirSOI,
|
||||
riStart: 0,
|
||||
riLimit: b.restartsLen,
|
||||
offsetStart: 0,
|
||||
offsetRealStart: 0,
|
||||
offsetLimit: b.restartsOffset,
|
||||
}
|
||||
if slice != nil {
|
||||
if slice.Start != nil {
|
||||
if bi.Seek(slice.Start) {
|
||||
bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
|
||||
bi.offsetStart = b.restartOffset(bi.riStart)
|
||||
bi.offsetRealStart = bi.prevOffset
|
||||
} else {
|
||||
bi.riStart = b.restartsLen
|
||||
bi.offsetStart = b.restartsOffset
|
||||
bi.offsetRealStart = b.restartsOffset
|
||||
}
|
||||
}
|
||||
if slice.Limit != nil {
|
||||
if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
|
||||
bi.offsetLimit = bi.prevOffset
|
||||
bi.riLimit = bi.restartIndex + 1
|
||||
}
|
||||
}
|
||||
bi.reset()
|
||||
if bi.offsetStart > bi.offsetLimit {
|
||||
bi.sErr(errors.New("leveldb/table: Reader: invalid slice range"))
|
||||
}
|
||||
}
|
||||
return bi
|
||||
}
|
||||
|
||||
func (b *block) Release() {
|
||||
b.tr.bpool.Put(b.data)
|
||||
b.tr = nil
|
||||
b.bpool.Put(b.data)
|
||||
b.bpool = nil
|
||||
b.data = nil
|
||||
}
|
||||
|
||||
@@ -152,10 +113,12 @@ const (
|
||||
)
|
||||
|
||||
type blockIter struct {
|
||||
block *block
|
||||
cache, releaser util.Releaser
|
||||
key, value []byte
|
||||
offset int
|
||||
tr *Reader
|
||||
block *block
|
||||
blockReleaser util.Releaser
|
||||
releaser util.Releaser
|
||||
key, value []byte
|
||||
offset int
|
||||
// Previous offset, only filled by Next.
|
||||
prevOffset int
|
||||
prevNode []int
|
||||
@@ -252,7 +215,7 @@ func (i *blockIter) Seek(key []byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
ri, offset, err := i.block.seek(i.riStart, i.riLimit, key)
|
||||
ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
|
||||
if err != nil {
|
||||
i.sErr(err)
|
||||
return false
|
||||
@@ -263,7 +226,7 @@ func (i *blockIter) Seek(key []byte) bool {
|
||||
i.dir = dirForward
|
||||
}
|
||||
for i.Next() {
|
||||
if i.block.tr.cmp.Compare(i.key, key) >= 0 {
|
||||
if i.tr.cmp.Compare(i.key, key) >= 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -440,15 +403,16 @@ func (i *blockIter) Value() []byte {
|
||||
|
||||
func (i *blockIter) Release() {
|
||||
if i.dir != dirReleased {
|
||||
i.tr = nil
|
||||
i.block = nil
|
||||
i.prevNode = nil
|
||||
i.prevKeys = nil
|
||||
i.key = nil
|
||||
i.value = nil
|
||||
i.dir = dirReleased
|
||||
if i.cache != nil {
|
||||
i.cache.Release()
|
||||
i.cache = nil
|
||||
if i.blockReleaser != nil {
|
||||
i.blockReleaser.Release()
|
||||
i.blockReleaser = nil
|
||||
}
|
||||
if i.releaser != nil {
|
||||
i.releaser.Release()
|
||||
@@ -476,21 +440,21 @@ func (i *blockIter) Error() error {
|
||||
}
|
||||
|
||||
type filterBlock struct {
|
||||
tr *Reader
|
||||
bpool *util.BufferPool
|
||||
data []byte
|
||||
oOffset int
|
||||
baseLg uint
|
||||
filtersNum int
|
||||
}
|
||||
|
||||
func (b *filterBlock) contains(offset uint64, key []byte) bool {
|
||||
func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
|
||||
i := int(offset >> b.baseLg)
|
||||
if i < b.filtersNum {
|
||||
o := b.data[b.oOffset+i*4:]
|
||||
n := int(binary.LittleEndian.Uint32(o))
|
||||
m := int(binary.LittleEndian.Uint32(o[4:]))
|
||||
if n < m && m <= b.oOffset {
|
||||
return b.tr.filter.Contains(b.data[n:m], key)
|
||||
return filter.Contains(b.data[n:m], key)
|
||||
} else if n == m {
|
||||
return false
|
||||
}
|
||||
@@ -499,13 +463,14 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
|
||||
}
|
||||
|
||||
func (b *filterBlock) Release() {
|
||||
b.tr.bpool.Put(b.data)
|
||||
b.tr = nil
|
||||
b.bpool.Put(b.data)
|
||||
b.bpool = nil
|
||||
b.data = nil
|
||||
}
|
||||
|
||||
type indexIter struct {
|
||||
*blockIter
|
||||
tr *Reader
|
||||
slice *util.Range
|
||||
// Options
|
||||
checksum bool
|
||||
@@ -526,7 +491,7 @@ func (i *indexIter) Get() iterator.Iterator {
|
||||
if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
|
||||
slice = i.slice
|
||||
}
|
||||
return i.blockIter.block.tr.getDataIterErr(dataBH, slice, i.checksum, i.fillCache)
|
||||
return i.tr.getDataIterErr(dataBH, slice, i.checksum, i.fillCache)
|
||||
}
|
||||
|
||||
// Reader is a table reader.
|
||||
@@ -594,7 +559,7 @@ func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
|
||||
}
|
||||
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
|
||||
b := &block{
|
||||
tr: r,
|
||||
bpool: r.bpool,
|
||||
data: data,
|
||||
restartsLen: restartsLen,
|
||||
restartsOffset: len(data) - (restartsLen+1)*4,
|
||||
@@ -655,7 +620,7 @@ func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
|
||||
return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)")
|
||||
}
|
||||
b := &filterBlock{
|
||||
tr: r,
|
||||
bpool: r.bpool,
|
||||
data: data,
|
||||
oOffset: oOffset,
|
||||
baseLg: uint(data[n-1]),
|
||||
@@ -713,7 +678,7 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
|
||||
if err != nil {
|
||||
return iterator.NewEmptyIterator(err)
|
||||
}
|
||||
return b.newIterator(slice, false, rel)
|
||||
return r.newBlockIter(b, rel, slice, false)
|
||||
}
|
||||
|
||||
func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
|
||||
@@ -727,6 +692,46 @@ func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, checksum,
|
||||
return r.getDataIter(dataBH, slice, checksum, fillCache)
|
||||
}
|
||||
|
||||
func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
|
||||
bi := &blockIter{
|
||||
tr: r,
|
||||
block: b,
|
||||
blockReleaser: bReleaser,
|
||||
// Valid key should never be nil.
|
||||
key: make([]byte, 0),
|
||||
dir: dirSOI,
|
||||
riStart: 0,
|
||||
riLimit: b.restartsLen,
|
||||
offsetStart: 0,
|
||||
offsetRealStart: 0,
|
||||
offsetLimit: b.restartsOffset,
|
||||
}
|
||||
if slice != nil {
|
||||
if slice.Start != nil {
|
||||
if bi.Seek(slice.Start) {
|
||||
bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
|
||||
bi.offsetStart = b.restartOffset(bi.riStart)
|
||||
bi.offsetRealStart = bi.prevOffset
|
||||
} else {
|
||||
bi.riStart = b.restartsLen
|
||||
bi.offsetStart = b.restartsOffset
|
||||
bi.offsetRealStart = b.restartsOffset
|
||||
}
|
||||
}
|
||||
if slice.Limit != nil {
|
||||
if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
|
||||
bi.offsetLimit = bi.prevOffset
|
||||
bi.riLimit = bi.restartIndex + 1
|
||||
}
|
||||
}
|
||||
bi.reset()
|
||||
if bi.offsetStart > bi.offsetLimit {
|
||||
bi.sErr(errors.New("leveldb/table: Reader: invalid slice range"))
|
||||
}
|
||||
}
|
||||
return bi
|
||||
}
|
||||
|
||||
// NewIterator creates an iterator from the table.
|
||||
//
|
||||
// Slice allows slicing the iterator to only contains keys in the given
|
||||
@@ -752,7 +757,8 @@ func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
|
||||
return iterator.NewEmptyIterator(err)
|
||||
}
|
||||
index := &indexIter{
|
||||
blockIter: indexBlock.newIterator(slice, true, rel),
|
||||
blockIter: r.newBlockIter(indexBlock, rel, slice, true),
|
||||
tr: r,
|
||||
slice: slice,
|
||||
checksum: ro.GetStrict(opt.StrictBlockChecksum),
|
||||
fillCache: !ro.GetDontFillCache(),
|
||||
@@ -781,7 +787,7 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
|
||||
}
|
||||
defer rel.Release()
|
||||
|
||||
index := indexBlock.newIterator(nil, true, nil)
|
||||
index := r.newBlockIter(indexBlock, nil, nil, true)
|
||||
defer index.Release()
|
||||
if !index.Seek(key) {
|
||||
err = index.Error()
|
||||
@@ -798,7 +804,7 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
|
||||
if r.filter != nil {
|
||||
filterBlock, rel, ferr := r.getFilterBlock(true)
|
||||
if ferr == nil {
|
||||
if !filterBlock.contains(dataBH.offset, key) {
|
||||
if !filterBlock.contains(r.filter, dataBH.offset, key) {
|
||||
rel.Release()
|
||||
return nil, nil, ErrNotFound
|
||||
}
|
||||
@@ -866,7 +872,7 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
|
||||
}
|
||||
defer rel.Release()
|
||||
|
||||
index := indexBlock.newIterator(nil, true, nil)
|
||||
index := r.newBlockIter(indexBlock, nil, nil, true)
|
||||
defer index.Release()
|
||||
if index.Seek(key) {
|
||||
dataBH, n := decodeBlockHandle(index.Value())
|
||||
@@ -956,7 +962,7 @@ func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.Buf
|
||||
}
|
||||
// Set data end.
|
||||
r.dataEnd = int64(metaBH.offset)
|
||||
metaIter := metaBlock.newIterator(nil, false, nil)
|
||||
metaIter := r.newBlockIter(metaBlock, nil, nil, false)
|
||||
for metaIter.Next() {
|
||||
key := string(metaIter.Key())
|
||||
if !strings.HasPrefix(key, "filter.") {
|
||||
|
||||
3
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
generated
vendored
3
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
generated
vendored
@@ -156,6 +156,9 @@ func (p *BufferPool) Put(b []byte) {
|
||||
atomic.AddUint32(&p.put, 1)
|
||||
|
||||
pool := p.pool[p.poolNum(cap(b))]
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
select {
|
||||
case pool <- b:
|
||||
default:
|
||||
|
||||
3
build.sh
3
build.sh
@@ -57,6 +57,9 @@ case "${1:-default}" in
|
||||
go run build.go -goos freebsd -goarch amd64 tar
|
||||
go run build.go -goos freebsd -goarch 386 tar
|
||||
|
||||
go run build.go -goos openbsd -goarch amd64 tar
|
||||
go run build.go -goos openbsd -goarch 386 tar
|
||||
|
||||
go run build.go -goos darwin -goarch amd64 tar
|
||||
|
||||
go run build.go -goos windows -goarch amd64 zip
|
||||
|
||||
@@ -26,6 +26,19 @@ print-missing-copyright() {
|
||||
find . -name \*.go | xargs grep -L 'Copyright (C)' | grep -v Godeps
|
||||
}
|
||||
|
||||
print-line-blame() {
|
||||
for f in $(find . -name \*.go | grep -v Godep) gui/app.js gui/index.html ; do
|
||||
git blame --line-porcelain $f | grep author-mail
|
||||
done | sort | uniq -c | sort -n
|
||||
}
|
||||
echo Author emails missing in CONTRIBUTORS:
|
||||
print-missing-contribs
|
||||
print-missing-copyright
|
||||
echo
|
||||
|
||||
echo Files missing copyright notice:
|
||||
print-missing-copyright
|
||||
echo
|
||||
|
||||
echo Blame lines per author:
|
||||
print-line-blame
|
||||
|
||||
|
||||
@@ -32,11 +32,11 @@ import (
|
||||
"time"
|
||||
|
||||
"code.google.com/p/go.crypto/bcrypt"
|
||||
"github.com/calmh/logger"
|
||||
"github.com/syncthing/syncthing/internal/auto"
|
||||
"github.com/syncthing/syncthing/internal/config"
|
||||
"github.com/syncthing/syncthing/internal/discover"
|
||||
"github.com/syncthing/syncthing/internal/events"
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/syncthing/syncthing/internal/model"
|
||||
"github.com/syncthing/syncthing/internal/osutil"
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
@@ -453,13 +453,15 @@ func restPostDiscoveryHint(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func restGetDiscovery(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
registry := discoverer.All()
|
||||
devices := map[string][]discover.CacheEntry{}
|
||||
|
||||
// Device ids can't be marshalled as keys so we need to manually
|
||||
// rebuild this map using strings.
|
||||
devices := make(map[string][]discover.CacheEntry, len(registry))
|
||||
for device, _ := range registry {
|
||||
devices[device.String()] = registry[device]
|
||||
if discoverer != nil {
|
||||
// Device ids can't be marshalled as keys so we need to manually
|
||||
// rebuild this map using strings. Discoverer may be nil if discovery
|
||||
// has not started yet.
|
||||
for device, entries := range discoverer.All() {
|
||||
devices[device.String()] = entries
|
||||
}
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(devices)
|
||||
|
||||
@@ -37,12 +37,12 @@ import (
|
||||
"time"
|
||||
|
||||
"code.google.com/p/go.crypto/bcrypt"
|
||||
"github.com/calmh/logger"
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/syncthing/internal/config"
|
||||
"github.com/syncthing/syncthing/internal/discover"
|
||||
"github.com/syncthing/syncthing/internal/events"
|
||||
"github.com/syncthing/syncthing/internal/files"
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/syncthing/syncthing/internal/model"
|
||||
"github.com/syncthing/syncthing/internal/osutil"
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
@@ -59,6 +59,8 @@ var (
|
||||
BuildDate time.Time
|
||||
BuildHost = "unknown"
|
||||
BuildUser = "unknown"
|
||||
IsRelease bool
|
||||
IsBeta bool
|
||||
LongVersion string
|
||||
GoArchExtra string // "", "v5", "v6", "v7"
|
||||
)
|
||||
@@ -82,6 +84,13 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a clean release build.
|
||||
exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-beta[\d\.]+)?$`)
|
||||
IsRelease = exp.MatchString(Version)
|
||||
|
||||
// Check for a beta build
|
||||
IsBeta = strings.Contains(Version, "-beta")
|
||||
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
@@ -178,6 +187,7 @@ var (
|
||||
doUpgradeCheck bool
|
||||
noBrowser bool
|
||||
generateDir string
|
||||
logFile string
|
||||
noRestart = os.Getenv("STNORESTART") != ""
|
||||
guiAddress = os.Getenv("STGUIADDRESS") // legacy
|
||||
guiAuthentication = os.Getenv("STGUIAUTH") // legacy
|
||||
@@ -194,6 +204,15 @@ func main() {
|
||||
if err != nil {
|
||||
l.Fatalln("home:", err)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// On Windows, we use a log file by default. Setting the -logfile flag
|
||||
// to the empty string disables this behavior.
|
||||
|
||||
logFile = filepath.Join(defConfDir, "syncthing.log")
|
||||
flag.StringVar(&logFile, "logfile", logFile, "Log file name (blank for stdout)")
|
||||
}
|
||||
|
||||
flag.StringVar(&generateDir, "generate", "", "Generate key and config in specified dir, then exit")
|
||||
flag.StringVar(&guiAddress, "gui-address", guiAddress, "Override GUI address")
|
||||
flag.StringVar(&guiAuthentication, "gui-authentication", guiAuthentication, "Override GUI authentication; username:password")
|
||||
@@ -215,6 +234,13 @@ func main() {
|
||||
confDir = defConfDir
|
||||
}
|
||||
|
||||
if confDir != defConfDir && filepath.Dir(logFile) == defConfDir {
|
||||
// The user changed the config dir with -home, but not the log file
|
||||
// location. In this case we assume they meant for the logfile to
|
||||
// still live in it's default location *relative to the config dir*.
|
||||
logFile = filepath.Join(confDir, "syncthing.log")
|
||||
}
|
||||
|
||||
if showVersion {
|
||||
fmt.Println(LongVersion)
|
||||
return
|
||||
@@ -281,7 +307,7 @@ func main() {
|
||||
ensureDir(confDir, 0700)
|
||||
|
||||
if doUpgrade || doUpgradeCheck {
|
||||
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
|
||||
rel, err := upgrade.LatestRelease(IsBeta)
|
||||
if err != nil {
|
||||
l.Fatalln("Upgrade:", err) // exits 1
|
||||
}
|
||||
@@ -537,7 +563,11 @@ func syncthingMain() {
|
||||
}
|
||||
|
||||
if opts.AutoUpgradeIntervalH > 0 {
|
||||
go autoUpgrade()
|
||||
if IsRelease {
|
||||
go autoUpgrade()
|
||||
} else {
|
||||
l.Infof("No automatic upgrades; %s is not a relase version.", Version)
|
||||
}
|
||||
}
|
||||
|
||||
events.Default.Log(events.StartupComplete, nil)
|
||||
@@ -618,7 +648,7 @@ nextFolder:
|
||||
// doesn't exist, try creating it.
|
||||
err = os.MkdirAll(folder.Path, 0700)
|
||||
if err != nil {
|
||||
l.Warnf("Stopping folder %q - %v", err)
|
||||
l.Warnf("Stopping folder %q - %v", folder.ID, err)
|
||||
cfg.InvalidateFolder(id, err.Error())
|
||||
continue nextFolder
|
||||
}
|
||||
@@ -632,7 +662,7 @@ nextFolder:
|
||||
if err != nil {
|
||||
// If there was another error or we could not create the
|
||||
// path, the folder is invalid.
|
||||
l.Warnf("Stopping folder %q - %v", err)
|
||||
l.Warnf("Stopping folder %q - %v", folder.ID, err)
|
||||
cfg.InvalidateFolder(id, err.Error())
|
||||
continue nextFolder
|
||||
}
|
||||
@@ -702,11 +732,11 @@ func setupUPnP() {
|
||||
l.Warnln("Failed to create UPnP port mapping")
|
||||
} else {
|
||||
l.Infof("Created UPnP port mapping for external port %d on UPnP device %s.", externalPort, igd.FriendlyIdentifier())
|
||||
}
|
||||
}
|
||||
|
||||
if opts.UPnPRenewal > 0 {
|
||||
go renewUPnP(port)
|
||||
if opts.UPnPRenewal > 0 {
|
||||
go renewUPnP(port)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -739,14 +769,18 @@ func renewUPnP(port int) {
|
||||
|
||||
// Make sure our IGD reference isn't nil
|
||||
if igd == nil {
|
||||
l.Infoln("Undefined IGD during UPnP port renewal. Re-discovering...")
|
||||
if debugNet {
|
||||
l.Debugln("Undefined IGD during UPnP port renewal. Re-discovering...")
|
||||
}
|
||||
igds := upnp.Discover()
|
||||
if len(igds) > 0 {
|
||||
// Configure the first discovered IGD only. This is a work-around until we have a better mechanism
|
||||
// for handling multiple IGDs, which will require changes to the global discovery service
|
||||
igd = igds[0]
|
||||
} else {
|
||||
l.Infof("Failed to re-discover IGD during UPnP port mapping renewal.")
|
||||
if debugNet {
|
||||
l.Debugln("Failed to discover IGD during UPnP port mapping renewal.")
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -754,10 +788,10 @@ func renewUPnP(port int) {
|
||||
// Just renew the same port that we already have
|
||||
if externalPort != 0 {
|
||||
err := igd.AddPortMapping(upnp.TCP, externalPort, port, "syncthing", opts.UPnPLease*60)
|
||||
if err == nil {
|
||||
l.Infof("Renewed UPnP port mapping for external port %d on device %s.", externalPort, igd.FriendlyIdentifier())
|
||||
} else {
|
||||
if err != nil {
|
||||
l.Warnf("Error renewing UPnP port mapping for external port %d on device %s: %s", externalPort, igd.FriendlyIdentifier(), err.Error())
|
||||
} else if debugNet {
|
||||
l.Debugf("Renewed UPnP port mapping for external port %d on device %s.", externalPort, igd.FriendlyIdentifier())
|
||||
}
|
||||
|
||||
continue
|
||||
@@ -766,14 +800,18 @@ func renewUPnP(port int) {
|
||||
// Something strange has happened. We didn't have an external port before?
|
||||
// Or perhaps the gateway has changed?
|
||||
// Retry the same port sequence from the beginning.
|
||||
l.Infoln("No UPnP port mapping defined, updating...")
|
||||
if debugNet {
|
||||
l.Debugln("No UPnP port mapping defined, updating...")
|
||||
}
|
||||
|
||||
r := setupExternalPort(igd, port)
|
||||
if r != 0 {
|
||||
externalPort = r
|
||||
l.Infof("Updated UPnP port mapping for external port %d on device %s.", externalPort, igd.FriendlyIdentifier())
|
||||
forwardedPort := setupExternalPort(igd, port)
|
||||
if forwardedPort != 0 {
|
||||
externalPort = forwardedPort
|
||||
discoverer.StopGlobal()
|
||||
discoverer.StartGlobal(opts.GlobalAnnServer, uint16(r))
|
||||
discoverer.StartGlobal(opts.GlobalAnnServer, uint16(forwardedPort))
|
||||
if debugNet {
|
||||
l.Debugf("Updated UPnP port mapping for external port %d on device %s.", forwardedPort, igd.FriendlyIdentifier())
|
||||
}
|
||||
} else {
|
||||
l.Warnf("Failed to update UPnP port mapping for external port on device " + igd.FriendlyIdentifier() + ".")
|
||||
}
|
||||
@@ -1174,7 +1212,7 @@ func autoUpgrade() {
|
||||
skipped = true
|
||||
}
|
||||
|
||||
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
|
||||
rel, err := upgrade.LatestRelease(IsBeta)
|
||||
if err == upgrade.ErrUpgradeUnsupported {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build freebsd
|
||||
// +build freebsd openbsd
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -22,10 +22,13 @@ import (
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/osutil"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -44,6 +47,32 @@ func monitorMain() {
|
||||
os.Setenv("STMONITORED", "yes")
|
||||
l.SetPrefix("[monitor] ")
|
||||
|
||||
var err error
|
||||
var dst io.Writer = os.Stdout
|
||||
|
||||
if logFile != "" {
|
||||
var fileDst io.Writer
|
||||
|
||||
fileDst, err = os.Create(logFile)
|
||||
if err != nil {
|
||||
l.Fatalln("log file:", err)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// Translate line breaks to Windows standard
|
||||
fileDst = osutil.ReplacingWriter{
|
||||
Writer: fileDst,
|
||||
From: '\n',
|
||||
To: []byte{'\r', '\n'},
|
||||
}
|
||||
}
|
||||
|
||||
// Log to both stdout and file.
|
||||
dst = io.MultiWriter(dst, fileDst)
|
||||
|
||||
l.Infof(`Log output saved to file "%s"`, logFile)
|
||||
}
|
||||
|
||||
args := os.Args
|
||||
var restarts [countRestarts]time.Time
|
||||
|
||||
@@ -64,12 +93,12 @@ func monitorMain() {
|
||||
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
l.Fatalln(err)
|
||||
l.Fatalln("stderr:", err)
|
||||
}
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
l.Fatalln(err)
|
||||
l.Fatalln("stdout:", err)
|
||||
}
|
||||
|
||||
l.Infoln("Starting syncthing")
|
||||
@@ -87,8 +116,8 @@ func monitorMain() {
|
||||
stdoutLastLines = make([]string, 0, 50)
|
||||
stdoutMut.Unlock()
|
||||
|
||||
go copyStderr(stderr)
|
||||
go copyStdout(stdout)
|
||||
go copyStderr(stderr, dst)
|
||||
go copyStdout(stdout, dst)
|
||||
|
||||
exit := make(chan error)
|
||||
|
||||
@@ -130,7 +159,7 @@ func monitorMain() {
|
||||
}
|
||||
}
|
||||
|
||||
func copyStderr(stderr io.ReadCloser) {
|
||||
func copyStderr(stderr io.ReadCloser, dst io.Writer) {
|
||||
br := bufio.NewReader(stderr)
|
||||
|
||||
var panicFd *os.File
|
||||
@@ -141,7 +170,7 @@ func copyStderr(stderr io.ReadCloser) {
|
||||
}
|
||||
|
||||
if panicFd == nil {
|
||||
os.Stderr.WriteString(line)
|
||||
dst.Write([]byte(line))
|
||||
|
||||
if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
|
||||
panicFd, err = os.Create(filepath.Join(confDir, time.Now().Format("panic-20060102-150405.log")))
|
||||
@@ -173,8 +202,8 @@ func copyStderr(stderr io.ReadCloser) {
|
||||
}
|
||||
}
|
||||
|
||||
func copyStdout(stderr io.ReadCloser) {
|
||||
br := bufio.NewReader(stderr)
|
||||
func copyStdout(stdout io.ReadCloser, dst io.Writer) {
|
||||
br := bufio.NewReader(stdout)
|
||||
for {
|
||||
line, err := br.ReadString('\n')
|
||||
if err != nil {
|
||||
@@ -192,6 +221,6 @@ func copyStdout(stderr io.ReadCloser) {
|
||||
}
|
||||
stdoutMut.Unlock()
|
||||
|
||||
os.Stdout.WriteString(line)
|
||||
dst.Write([]byte(line))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -879,7 +879,9 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
|
||||
$scope.sharesFolder = function (folderCfg) {
|
||||
var names = [];
|
||||
folderCfg.Devices.forEach(function (device) {
|
||||
names.push($scope.deviceName($scope.findDevice(device.DeviceID)));
|
||||
if (device.DeviceID != $scope.myID) {
|
||||
names.push($scope.deviceName($scope.findDevice(device.DeviceID)));
|
||||
}
|
||||
});
|
||||
names.sort();
|
||||
return names.join(", ");
|
||||
|
||||
@@ -466,9 +466,9 @@
|
||||
</div>
|
||||
<div class="form-group" ng-class="{'has-error': folderEditor.rescanIntervalS.$invalid && folderEditor.rescanIntervalS.$dirty}">
|
||||
<label for="rescanIntervalS"><span translate>Rescan Interval</span> (s)</label>
|
||||
<input name="rescanIntervalS" id="rescanIntervalS" class="form-control" type="number" ng-model="currentFolder.RescanIntervalS" required min="5"></input>
|
||||
<input name="rescanIntervalS" id="rescanIntervalS" class="form-control" type="number" ng-model="currentFolder.RescanIntervalS" required min="0"></input>
|
||||
<p class="help-block">
|
||||
<span translate ng-if="!folderEditor.rescanIntervalS.$valid && folderEditor.rescanIntervalS.$dirty">The rescan interval must be at least 5 seconds.</span>
|
||||
<span translate ng-if="!folderEditor.rescanIntervalS.$valid && folderEditor.rescanIntervalS.$dirty">The rescan interval must be a non-negative number of seconds.</span>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
@@ -789,11 +789,11 @@
|
||||
<li>Daniel Martí</li>
|
||||
<li>Felix Ableitner</li>
|
||||
<li>Felix Unterpaintner</li>
|
||||
<li>Gilli Sigurdsson</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<ul>
|
||||
<li>Gilli Sigurdsson</li>
|
||||
<li>James Patterson</li>
|
||||
<li>Jens Diemer</li>
|
||||
<li>Jochen Voss</li>
|
||||
@@ -805,6 +805,7 @@
|
||||
<li>Ryan Sullivan</li>
|
||||
<li>Tully Robinson</li>
|
||||
<li>Veeti Paananen</li>
|
||||
<li>Vil Brekin</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
140
gui/lang/lang-be.json
Normal file
140
gui/lang/lang-be.json
Normal file
@@ -0,0 +1,140 @@
|
||||
{
|
||||
"API Key": "Ключ API",
|
||||
"About": "Аб праграме",
|
||||
"Add Device": "Дадаць прыладу",
|
||||
"Add Folder": "Дадаць каталёг",
|
||||
"Address": "Адрас",
|
||||
"Addresses": "Адрасы",
|
||||
"Allow Anonymous Usage Reporting?": "Allow Anonymous Usage Reporting?",
|
||||
"Anonymous Usage Reporting": "Anonymous Usage Reporting",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Any devices configured on an introducer device will be added to this device as well.",
|
||||
"Bugs": "Bugs",
|
||||
"CPU Utilization": "CPU Utilization",
|
||||
"Close": "Close",
|
||||
"Comment, when used at the start of a line": "Comment, when used at the start of a line",
|
||||
"Compression is recommended in most setups.": "Compression is recommended in most setups.",
|
||||
"Connection Error": "Connection Error",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg and the following Contributors:",
|
||||
"Delete": "Delete",
|
||||
"Device ID": "Device ID",
|
||||
"Device Identification": "Device Identification",
|
||||
"Device Name": "Device Name",
|
||||
"Disconnected": "Disconnected",
|
||||
"Documentation": "Documentation",
|
||||
"Download Rate": "Download Rate",
|
||||
"Edit": "Edit",
|
||||
"Edit Device": "Edit Device",
|
||||
"Edit Folder": "Edit Folder",
|
||||
"Editing": "Editing",
|
||||
"Enable UPnP": "Enable UPnP",
|
||||
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.",
|
||||
"Enter ignore patterns, one per line.": "Enter ignore patterns, one per line.",
|
||||
"Error": "Error",
|
||||
"File Versioning": "File Versioning",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "File permission bits are ignored when looking for changes. Use on FAT filesystems.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.",
|
||||
"Folder ID": "Folder ID",
|
||||
"Folder Master": "Folder Master",
|
||||
"Folder Path": "Folder Path",
|
||||
"GUI Authentication Password": "GUI Authentication Password",
|
||||
"GUI Authentication User": "GUI Authentication User",
|
||||
"GUI Listen Addresses": "GUI Listen Addresses",
|
||||
"Generate": "Generate",
|
||||
"Global Discovery": "Global Discovery",
|
||||
"Global Discovery Server": "Global Discovery Server",
|
||||
"Global State": "Global State",
|
||||
"Idle": "Idle",
|
||||
"Ignore Patterns": "Ignore Patterns",
|
||||
"Ignore Permissions": "Ignore Permissions",
|
||||
"Incoming Rate Limit (KiB/s)": "Incoming Rate Limit (KiB/s)",
|
||||
"Introducer": "Introducer",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversion of the given condition (i.e. do not exclude)",
|
||||
"Keep Versions": "Keep Versions",
|
||||
"Last seen": "Last seen",
|
||||
"Latest Release": "Latest Release",
|
||||
"Local Discovery": "Local Discovery",
|
||||
"Local State": "Local State",
|
||||
"Maximum Age": "Maximum Age",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Multi level wildcard (matches multiple directory levels)",
|
||||
"Never": "Never",
|
||||
"No": "No",
|
||||
"No File Versioning": "No File Versioning",
|
||||
"Notice": "Notice",
|
||||
"OK": "OK",
|
||||
"Offline": "Offline",
|
||||
"Online": "Online",
|
||||
"Out Of Sync": "Out Of Sync",
|
||||
"Outgoing Rate Limit (KiB/s)": "Outgoing Rate Limit (KiB/s)",
|
||||
"Override Changes": "Override Changes",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Path where versions should be stored (leave empty for the default .stversions folder in the folder).",
|
||||
"Please wait": "Please wait",
|
||||
"Preview": "Preview",
|
||||
"Preview Usage Report": "Preview Usage Report",
|
||||
"Quick guide to supported patterns": "Quick guide to supported patterns",
|
||||
"RAM Utilization": "RAM Utilization",
|
||||
"Rescan": "Rescan",
|
||||
"Rescan Interval": "Rescan Interval",
|
||||
"Restart": "Restart",
|
||||
"Restart Needed": "Restart Needed",
|
||||
"Restarting": "Restarting",
|
||||
"Save": "Save",
|
||||
"Scanning": "Scanning",
|
||||
"Select the devices to share this folder with.": "Select the devices to share this folder with.",
|
||||
"Settings": "Settings",
|
||||
"Share With Devices": "Share With Devices",
|
||||
"Shared With": "Shared With",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Short identifier for the folder. Must be the same on all cluster devices.",
|
||||
"Show ID": "Show ID",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.",
|
||||
"Shutdown": "Shutdown",
|
||||
"Simple File Versioning": "Simple File Versioning",
|
||||
"Single level wildcard (matches within a directory only)": "Single level wildcard (matches within a directory only)",
|
||||
"Source Code": "Source Code",
|
||||
"Staggered File Versioning": "Staggered File Versioning",
|
||||
"Start Browser": "Start Browser",
|
||||
"Stopped": "Stopped",
|
||||
"Support / Forum": "Support / Forum",
|
||||
"Sync Protocol Listen Addresses": "Sync Protocol Listen Addresses",
|
||||
"Synchronization": "Synchronization",
|
||||
"Syncing": "Syncing",
|
||||
"Syncthing has been shut down.": "Syncthing has been shut down.",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing includes the following software or portions thereof:",
|
||||
"Syncthing is restarting.": "Syncthing is restarting.",
|
||||
"Syncthing is upgrading.": "Syncthing is upgrading.",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…",
|
||||
"The aggregated statistics are publicly available at {%url%}.": "The aggregated statistics are publicly available at {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.",
|
||||
"The device ID cannot be blank.": "The device ID cannot be blank.",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.",
|
||||
"The folder ID cannot be blank.": "The folder ID cannot be blank.",
|
||||
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.",
|
||||
"The folder ID must be unique.": "The folder ID must be unique.",
|
||||
"The folder path cannot be blank.": "The folder path cannot be blank.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.",
|
||||
"The maximum age must be a number and cannot be blank.": "The maximum age must be a number and cannot be blank.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "The maximum time to keep a version (in days, set to 0 to keep versions forever).",
|
||||
"The number of old versions to keep, per file.": "The number of old versions to keep, per file.",
|
||||
"The number of versions must be a number and cannot be blank.": "The number of versions must be a number and cannot be blank.",
|
||||
"The rescan interval must be at least 5 seconds.": "The rescan interval must be at least 5 seconds.",
|
||||
"Unknown": "Unknown",
|
||||
"Up to Date": "Up to Date",
|
||||
"Upgrade To {%version%}": "Upgrade To {{version}}",
|
||||
"Upgrading": "Upgrading",
|
||||
"Upload Rate": "Upload Rate",
|
||||
"Use Compression": "Use Compression",
|
||||
"Use HTTPS for GUI": "Use HTTPS for GUI",
|
||||
"Version": "Version",
|
||||
"Versions Path": "Versions Path",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "When adding a new device, keep in mind that this device must be added on the other side too.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.",
|
||||
"Yes": "Yes",
|
||||
"You must keep at least one version.": "You must keep at least one version.",
|
||||
"full documentation": "full documentation",
|
||||
"items": "items"
|
||||
}
|
||||
@@ -7,12 +7,12 @@
|
||||
"Addresses": "Indirizzi",
|
||||
"Allow Anonymous Usage Reporting?": "Abilitare Statistiche Anonime di Utilizzo?",
|
||||
"Anonymous Usage Reporting": "Statistiche Anonime di Utilizzo",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Any devices configured on an introducer device will be added to this device as well.",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Qualsiasi dispositivo configurato in un introduttore verrà aggiunto anche a questo dispositivo.",
|
||||
"Bugs": "Bug",
|
||||
"CPU Utilization": "Utilizzo CPU",
|
||||
"Close": "Chiudi",
|
||||
"Comment, when used at the start of a line": "Comment, when used at the start of a line",
|
||||
"Compression is recommended in most setups.": "Compression is recommended in most setups.",
|
||||
"Comment, when used at the start of a line": "Per commentare, va inserito all'inizio di una riga",
|
||||
"Compression is recommended in most setups.": "La compressione è raccomandata nella maggior parte delle configurazioni.",
|
||||
"Connection Error": "Errore di Connessione",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg e i seguenti Collaboratori:",
|
||||
"Delete": "Elimina",
|
||||
@@ -25,10 +25,10 @@
|
||||
"Edit": "Modifica",
|
||||
"Edit Device": "Modifica Dispositivo",
|
||||
"Edit Folder": "Modifica Cartella",
|
||||
"Editing": "Editing",
|
||||
"Editing": "Modifica di",
|
||||
"Enable UPnP": "Attiva UPnP",
|
||||
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Inserisci gli indirizzi \"ip:porta\" separati da una virgola, altrimenti inserisci \"dynamic\" per effettuare il rilevamento automatico dell'indirizzo.",
|
||||
"Enter ignore patterns, one per line.": "Enter ignore patterns, one per line.",
|
||||
"Enter ignore patterns, one per line.": "Inserisci gli schemi di esclusione, uno per riga.",
|
||||
"Error": "Errore",
|
||||
"File Versioning": "Controllo Versione dei File",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Il software evita i bit dei permessi dei file durante il controllo delle modifiche. Utilizzato nei filesystem FAT.",
|
||||
@@ -43,20 +43,20 @@
|
||||
"Generate": "Genera",
|
||||
"Global Discovery": "Individuazione Globale",
|
||||
"Global Discovery Server": "Server di Ricerca Globale",
|
||||
"Global State": "Global State",
|
||||
"Global State": "Stato Globale",
|
||||
"Idle": "Inattivo",
|
||||
"Ignore Patterns": "Ignore Patterns",
|
||||
"Ignore Patterns": "Schemi Esclusione File",
|
||||
"Ignore Permissions": "Ignora Permessi",
|
||||
"Incoming Rate Limit (KiB/s)": "Incoming Rate Limit (KiB/s)",
|
||||
"Introducer": "Introducer",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversion of the given condition (i.e. do not exclude)",
|
||||
"Incoming Rate Limit (KiB/s)": "Limite Velocità in Ingresso (KiB/s)",
|
||||
"Introducer": "Introduttore",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversione della condizione indicata (ad es. non escludere)",
|
||||
"Keep Versions": "Versioni Mantenute",
|
||||
"Last seen": "Last seen",
|
||||
"Last seen": "Ultima connessione",
|
||||
"Latest Release": "Ultima Versione",
|
||||
"Local Discovery": "Individuazione Locale",
|
||||
"Local State": "Local State",
|
||||
"Local State": "Stato Locale",
|
||||
"Maximum Age": "Durata Massima",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Multi level wildcard (matches multiple directory levels)",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Metacarattere multi-livello (corrisponde alle cartelle e alle sotto-cartelle)",
|
||||
"Never": "Mai",
|
||||
"No": "No",
|
||||
"No File Versioning": "Nessun Controllo Versione",
|
||||
@@ -65,17 +65,17 @@
|
||||
"Offline": "Non in linea",
|
||||
"Online": "In linea",
|
||||
"Out Of Sync": "Non Sincronizzati",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limite di Velocità in Uscita (KiB/s)",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limite Velocità in Uscita (KiB/s)",
|
||||
"Override Changes": "Ignora Modifiche",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Path where versions should be stored (leave empty for the default .stversions folder in the folder).",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Percorso della cartella nel computer locale. Verrà creata se non esiste già. Il carattere tilde (~) può essere utilizzato come scorciatoia per",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Percorso di salvataggio delle versioni (lasciare vuoto per utilizzare la cartella predefinita .stversions in questa cartella).",
|
||||
"Please wait": "Attendere prego",
|
||||
"Preview": "Anteprima",
|
||||
"Preview Usage Report": "Anteprima Statistiche di Utilizzo",
|
||||
"Quick guide to supported patterns": "Quick guide to supported patterns",
|
||||
"Quick guide to supported patterns": "Guida veloce agli schemi supportati",
|
||||
"RAM Utilization": "Utilizzo RAM",
|
||||
"Rescan": "Riscansiona",
|
||||
"Rescan Interval": "Intervallo di Scansione",
|
||||
"Rescan Interval": "Intervallo Scansione",
|
||||
"Restart": "Riavvia",
|
||||
"Restart Needed": "Riavvio Necessario",
|
||||
"Restarting": "Riavvio",
|
||||
@@ -83,15 +83,15 @@
|
||||
"Scanning": "Scansione in corso",
|
||||
"Select the devices to share this folder with.": "Seleziona i dispositivi con i quali condividere questa cartella.",
|
||||
"Settings": "Impostazioni",
|
||||
"Share With Devices": "Condividi Con i Dispositivi",
|
||||
"Share With Devices": "Condividi con i Dispositivi",
|
||||
"Shared With": "Condiviso Con",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Short identifier for the folder. Must be the same on all cluster devices.",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Breve identificatore della cartella. Deve essere lo stesso su tutti i dispositivi del cluster.",
|
||||
"Show ID": "Mostra ID",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Visibile al posto dell'ID Dispositivo nello stato del cluster. Negli altri dispositivi verrà presentato come nome predefinito opzionale.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Visibile al posto dell'ID Dispositivo nello stato del cluster. Se viene lasciato vuoto, verrà utilizzato il nome proposto dal dispositivo.",
|
||||
"Shutdown": "Arresta",
|
||||
"Simple File Versioning": "Controllo Versione Semplice",
|
||||
"Single level wildcard (matches within a directory only)": "Single level wildcard (matches within a directory only)",
|
||||
"Single level wildcard (matches within a directory only)": "Metacarattere di singolo livello (corrisponde solo all'interno di una cartella)",
|
||||
"Source Code": "Codice Sorgente",
|
||||
"Staggered File Versioning": "Controllo Versione Cadenzato",
|
||||
"Start Browser": "Avvia Browser",
|
||||
@@ -108,14 +108,14 @@
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Le statistiche aggregate sono disponibili pubblicamente su {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configurazione è stata salvata ma non attivata. Devi riavviare Syncthing per attivare la nuova configurazione.",
|
||||
"The device ID cannot be blank.": "L'ID del dispositivo non può essere vuoto.",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Trova l'ID nella finestra di dialogo \"Modifica > Mostra ID\" dell'altro dispositivo, poi inseriscilo qui. Gli spazi e i trattini sono opzionali (ignorati).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Quotidianamente il software invia le statistiche di utilizzo in forma criptata. Questi dati riguardano i sistemi operativi utilizzati, le dimensioni delle cartelle e le versioni del software. Se i dati riportati sono cambiati, verrà mostrata di nuovo questa finestra di dialogo.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID del dispositivo inserito non sembra valido. Dovrebbe essere una stringa di 52 o 56 caratteri costituita da lettere e numeri, con spazi e trattini opzionali.",
|
||||
"The folder ID cannot be blank.": "L'ID della cartella non può essere vuoto.",
|
||||
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "L'ID della cartella dev'essere un identificatore breve (64 caratteri o meno) costituito solamente da lettere, numeri, punti (.), trattini (-) e trattini bassi (_).",
|
||||
"The folder ID must be unique.": "L'ID della cartella dev'essere unico.",
|
||||
"The folder path cannot be blank.": "Il percorso della cartella non può essere vuoto.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Vengono utilizzati i seguenti intervalli temporali: per la prima ora viene mantenuta una versione ogni 30 secondi, per il primo giorno viene mantenuta una versione ogni ora, per i primi 30 giorni viene mantenuta una versione al giorno, successivamente viene mantenuta una versione ogni settimana fino al periodo massimo impostato.",
|
||||
"The maximum age must be a number and cannot be blank.": "La durata massima dev'essere un numero e non può essere vuoto.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "La durata massima di una versione (in giorni, imposta a 0 per mantenere le versioni per sempre).",
|
||||
"The number of old versions to keep, per file.": "Il numero di vecchie versioni da mantenere, per file.",
|
||||
@@ -123,16 +123,16 @@
|
||||
"The rescan interval must be at least 5 seconds.": "L'intervallo di scansione non può essere inferiore a 5 secondi.",
|
||||
"Unknown": "Sconosciuto",
|
||||
"Up to Date": "Sincronizzato",
|
||||
"Upgrade To {%version%}": "Aggiorna Alla {{version}}",
|
||||
"Upgrade To {%version%}": "Aggiorna alla {{version}}",
|
||||
"Upgrading": "Aggiornamento",
|
||||
"Upload Rate": "Velocità Upload",
|
||||
"Use Compression": "Utilizza Compressione",
|
||||
"Use HTTPS for GUI": "Utilizza HTTPS per l'interfaccia grafica",
|
||||
"Version": "Versione",
|
||||
"Versions Path": "Percorso Cartella delle Versioni",
|
||||
"Versions Path": "Percorso Cartella Versioni",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Le versioni vengono eliminate automaticamente se superano la durata massima o il numero di file permessi in un determinato intervallo temporale.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "When adding a new device, keep in mind that this device must be added on the other side too.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Anche nel nuovo dispositivo devi aggiungere l'ID di questo, con la stessa procedura.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Quando aggiungi una nuova cartella, ricordati che gli ID vengono utilizzati per collegare le cartelle nei dispositivi. Distinguono maiuscole e minuscole e devono corrispondere esattamente su tutti i dispositivi.",
|
||||
"Yes": "Sì",
|
||||
"You must keep at least one version.": "È necessario mantenere almeno una versione.",
|
||||
"full documentation": "documentazione completa",
|
||||
|
||||
140
gui/lang/lang-nb.json
Normal file
140
gui/lang/lang-nb.json
Normal file
@@ -0,0 +1,140 @@
|
||||
{
|
||||
"API Key": "API-nøkkel",
|
||||
"About": "Om",
|
||||
"Add Device": "Legg Til Enhet",
|
||||
"Add Folder": "Legg Til Mappe",
|
||||
"Address": "Adresse",
|
||||
"Addresses": "Adresser",
|
||||
"Allow Anonymous Usage Reporting?": "Tillat Anonym Datainnsamling?",
|
||||
"Anonymous Usage Reporting": "Anonym Datainnsamling",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Enheter konfigurert på en introduksjonsenhet vil også bli lagt til denne enheten.",
|
||||
"Bugs": "Programfeil",
|
||||
"CPU Utilization": "CPU-utnyttelse",
|
||||
"Close": "Lukk",
|
||||
"Comment, when used at the start of a line": "Kommentar, når det blir brukt i starten av en linje.",
|
||||
"Compression is recommended in most setups.": "Komprimering er anbefalt i de fleste tilfeller.",
|
||||
"Connection Error": "Tilkoblingsfeil",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright @ 2014 Jakob Borg og følgende Bidragsytere:",
|
||||
"Delete": "Slett",
|
||||
"Device ID": "Enhet ID",
|
||||
"Device Identification": "Enhetskjennemerke",
|
||||
"Device Name": "Navn På Enhet",
|
||||
"Disconnected": "Frakoblet",
|
||||
"Documentation": "Dokumentasjon",
|
||||
"Download Rate": "Nedlastingsrate",
|
||||
"Edit": "Rediger",
|
||||
"Edit Device": "Rediger Enhet",
|
||||
"Edit Folder": "Rediger Mappe",
|
||||
"Editing": "Redigerer",
|
||||
"Enable UPnP": "Aktiver UPnP",
|
||||
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": " Skriv inn kommaseparerte \"ip:port\"-adresser, eller \"dynamic\" for å automatisk finne adressen.",
|
||||
"Enter ignore patterns, one per line.": "Skriv inn mønster som skal utelates, ett per linje.",
|
||||
"Error": "Feilmelding",
|
||||
"File Versioning": "Versjonskontroll",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Ignorer bit som styrer filtilgang når en ser etter endringer. Bruk på FAT-filsystem.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Filer blir flyttet til tidsstemplede versjoner i en .stversions-mappe når de blir erstattet eller slettet av syncthing.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Filer er beskyttet mot endringer som er gjort på andre enheter, men endringer som er gjort på denne enheten blir sendt til resten av gruppen.",
|
||||
"Folder ID": "Mappe ID",
|
||||
"Folder Master": "Styrende Mappe",
|
||||
"Folder Path": "Mappeplassering",
|
||||
"GUI Authentication Password": "GUI Passord",
|
||||
"GUI Authentication User": "GUI Bruker",
|
||||
"GUI Listen Addresses": "GUI Lytteadresse",
|
||||
"Generate": "Generer",
|
||||
"Global Discovery": "Global Søking",
|
||||
"Global Discovery Server": "Global Søkemotor",
|
||||
"Global State": "Global Tilstand",
|
||||
"Idle": "Pause",
|
||||
"Ignore Patterns": "Utelatelsesmønster",
|
||||
"Ignore Permissions": "Ignorer Tilgangsbit",
|
||||
"Incoming Rate Limit (KiB/s)": "Innkommende Hastighetsbegrensning (KiB/s)",
|
||||
"Introducer": "Introduktør",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Invers av den gitte tilstanden (t.d. ikke ekskluder)",
|
||||
"Keep Versions": "Behold Versjoner",
|
||||
"Last seen": "Sist sett",
|
||||
"Latest Release": "Nyeste Versjon",
|
||||
"Local Discovery": "Lokal Søking",
|
||||
"Local State": "Lokal Tilstand",
|
||||
"Maximum Age": "Maksimal Levetid",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Multinivåsøk (søker på flere mappenivå)",
|
||||
"Never": "Aldri",
|
||||
"No": "Nei",
|
||||
"No File Versioning": "Ingen Versjonskontroll",
|
||||
"Notice": "Notis",
|
||||
"OK": "OK",
|
||||
"Offline": "Frakobla",
|
||||
"Online": "Tilkobla",
|
||||
"Out Of Sync": "Ikke Synkronisert",
|
||||
"Outgoing Rate Limit (KiB/s)": "Utgående Hastighetsbegrensning (KiB/s)",
|
||||
"Override Changes": "Overstyr Endringer",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Plasseringen av mappen på datamaskinen. Blir opprettet om den ikke finnes. Krøllstrektegnet (~) kan brukes som forkortelse for",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Plasseringen for lagrede versjoner (la denne være tom for å bruke standard .stversions-mappen i mappen).",
|
||||
"Please wait": "Vennligst vent",
|
||||
"Preview": "Forhåndsvisning",
|
||||
"Preview Usage Report": "Forhåndsvisning Av Datainnsamling",
|
||||
"Quick guide to supported patterns": "Kjapp innføring i godkjente mønster",
|
||||
"RAM Utilization": "RAM-utnyttelse",
|
||||
"Rescan": "Skann På Ny",
|
||||
"Rescan Interval": "Skanneintervall",
|
||||
"Restart": "Omstart",
|
||||
"Restart Needed": "Omstart Kreves",
|
||||
"Restarting": "Starter På Ny",
|
||||
"Save": "Lagre",
|
||||
"Scanning": "Skanner",
|
||||
"Select the devices to share this folder with.": "Velg enhetene du vil dele denne mappen med.",
|
||||
"Settings": "Innstillinger",
|
||||
"Share With Devices": "Del Med Enheter",
|
||||
"Shared With": "Del Med",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Kort kjennemerke på mappen. Må være det samme på alle enheter i en gruppe.",
|
||||
"Show ID": "Vis ID",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Vis i stedet for Eining ID i gruppestatus. Vil bli kringkastet til andre enheter som et valgfritt standardnavn.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Vist i stedet for Mappe-ID i gruppestatus. Vil bli oppdatert til navnet enheten kringkaster dersom tomt.",
|
||||
"Shutdown": "Slå Av",
|
||||
"Simple File Versioning": "Enkel Versjonskontroll",
|
||||
"Single level wildcard (matches within a directory only)": "Enkeltnivåsøk (søker kun i en mappe)",
|
||||
"Source Code": "Kildekode",
|
||||
"Staggered File Versioning": "Forskjøvet Versjonskontroll",
|
||||
"Start Browser": "Start Nettleser",
|
||||
"Stopped": "Stoppa",
|
||||
"Support / Forum": "Brukerstøtte / Forum",
|
||||
"Sync Protocol Listen Addresses": "Lytteadresse For Synkroniseringsprotokoll",
|
||||
"Synchronization": "Synkronisering",
|
||||
"Syncing": "Synkroniserer",
|
||||
"Syncthing has been shut down.": "Syncthing har blitt slått av.",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing inkluderer helt eller delvis følgende programvare:",
|
||||
"Syncthing is restarting.": "Syncthing starter på ny.",
|
||||
"Syncthing is upgrading.": "Syncthing oppgraderer.",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing ser ut til å være nede, eller så er det et problem med nettforbindelsen din. Prøver på ny …",
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Samlet statistikk er åpent tilgjengelig på {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Innstillingene har blitt lagret men ikke aktivert. Syncthing må starte på ny for å aktivere de nye innstillingene.",
|
||||
"The device ID cannot be blank.": "Enhets-ID kan ikke være tom.",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Enhet-IDen lagt til her kan hentes fram via \"Rediger > Vis ID\"-dialogboksen på den andre enheten. Mellomrom og bindestrek er valgfritt (blir ignorert).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Kryptert informasjon om bruken av programmet blir gjort daglig. Dette blir brukt til å følge med på vanlig brukte systemoppsett, størrelser på mapper, og versjoner av programmet. Om datasettet endrer seg vil denne dialogboksen dukke opp og du vil bli bedt om å godkjenne dette.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "IDen for denne enheten er ikke godkjent. Det bør være 52 eller 56 tegn bestående av bokstaver og tall, valgfritt med mellomrom og bindestrek.",
|
||||
"The folder ID cannot be blank.": "Mappe-ID kan ikke være tom.",
|
||||
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "Mappe-IDen må være et kort kjennemerke (64 tegn eller mindre) bestående kun av bokstaver, tall og tegnene punktum (.), mellomrom (-) og strek (_).",
|
||||
"The folder ID must be unique.": "Mappe-ID må være unik.",
|
||||
"The folder path cannot be blank.": "Mappeplasseringen kan ikke være tom.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Følgende intervall blir brukt: den første timen blir en versjon lagret hvert 30. sekund, den første dagen blir en versjon lagret hver time, de første 30 dagene blir en versjon lagret hver dag, og inntil maksimal levetid blir en versjon lagret hver uke.",
|
||||
"The maximum age must be a number and cannot be blank.": "Maksimal levetid må være et tall og kan ikke være tomt.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maksimal tid å beholde en versjon (i dager, sett til 0 for å beholde versjoner på ubegrenset tid).",
|
||||
"The number of old versions to keep, per file.": "Antall gamle versjoner å beholde, per fil.",
|
||||
"The number of versions must be a number and cannot be blank.": "Antall versjoner må være et tall og kan ikke være tomt.",
|
||||
"The rescan interval must be at least 5 seconds.": "Skanneintervallet må være minst 5 sekund.",
|
||||
"Unknown": "Ukjent",
|
||||
"Up to Date": "Oppdatert",
|
||||
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
|
||||
"Upgrading": "Oppgraderer",
|
||||
"Upload Rate": "Opplastingsrate",
|
||||
"Use Compression": "Bruk Komprimering",
|
||||
"Use HTTPS for GUI": "Bruk HTTPS for GUI",
|
||||
"Version": "Versjon",
|
||||
"Versions Path": "Plassering Av Versjoner",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Versjoner blir automatisk slettet når maksimal levetid er nådd eller når antall filer er oversteget.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Merk at når en ny enhet blir lagt til må denne også legges til på andre siden.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Når en ny mappe blir lagt til, husk at Mappe-ID blir brukt til å binde sammen mapper mellom enheter. Det er forskjell på store og små bokstaver, så IDene må være identiske på alle enhetene.",
|
||||
"Yes": "Ja",
|
||||
"You must keep at least one version.": "Du må beholde minst én versjon",
|
||||
"full documentation": "all dokumentasjon",
|
||||
"items": "element"
|
||||
}
|
||||
140
gui/lang/lang-nn.json
Normal file
140
gui/lang/lang-nn.json
Normal file
@@ -0,0 +1,140 @@
|
||||
{
|
||||
"API Key": "API-nøkkel",
|
||||
"About": "Om",
|
||||
"Add Device": "Legg Til Eining",
|
||||
"Add Folder": "Legg Til Mappe",
|
||||
"Address": "Adresse",
|
||||
"Addresses": "Adresser",
|
||||
"Allow Anonymous Usage Reporting?": "Tillat Anonym Datainnsamling?",
|
||||
"Anonymous Usage Reporting": "Tillat Anonym Datainnsamling",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Einingar konfigurert på ein introduksjonseining vil òg verte lagt til denne eininga.",
|
||||
"Bugs": "Programfeil",
|
||||
"CPU Utilization": "CPU-utnytting",
|
||||
"Close": "Lukk",
|
||||
"Comment, when used at the start of a line": "Kommentar, når det vert brukt i starten av ei linje.",
|
||||
"Compression is recommended in most setups.": "Komprimering er tilrådd i dei fleste høve.",
|
||||
"Connection Error": "Tilkoplingsfeil",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright @ 2014 Jakob Borg og følgjande Bidragsytarar:",
|
||||
"Delete": "Slett",
|
||||
"Device ID": "Eining ID",
|
||||
"Device Identification": "Einingskjennemerkje",
|
||||
"Device Name": "Namn På Eining",
|
||||
"Disconnected": "Fråkopla",
|
||||
"Documentation": "Dokumentasjon",
|
||||
"Download Rate": "Nedlastingsrate",
|
||||
"Edit": "Rediger",
|
||||
"Edit Device": "Rediger Eining",
|
||||
"Edit Folder": "Rediger Mappe",
|
||||
"Editing": "Redigerer",
|
||||
"Enable UPnP": "Aktiver UPnP",
|
||||
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Skriv inn \"ip:port\"-adresser med komma mellom kvar adresse, eller \"dynamisk\" for å automatisk søkja opp adressa.",
|
||||
"Enter ignore patterns, one per line.": "Skriv inn mønster som skal utelatast, eitt per linje.",
|
||||
"Error": "Feilmelding",
|
||||
"File Versioning": "Versjonskontroll",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Ignorer bit som styrer filtilgang når ein ser etter endringar. Bruk på FAT-filsystem.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Filer vert flytta til tidsstempla versjonar i ei .stversions-mappe når dei vert erstatta eller sletta av syncthing.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Filer er beskytta mot endringar gjort på andre einingar, men endringar gjort på denne eininga vert sendt til resten av gruppa.",
|
||||
"Folder ID": "Mappe ID",
|
||||
"Folder Master": "Styrande Mappe",
|
||||
"Folder Path": "Mappeplassering",
|
||||
"GUI Authentication Password": "GUI Passord",
|
||||
"GUI Authentication User": "GUI Brukar",
|
||||
"GUI Listen Addresses": "GUI Lytteadresse",
|
||||
"Generate": "Generer",
|
||||
"Global Discovery": "Global Søking",
|
||||
"Global Discovery Server": "Global Søkjemotor",
|
||||
"Global State": "Global Tilstand",
|
||||
"Idle": "Pause",
|
||||
"Ignore Patterns": "Utelatingsmønster",
|
||||
"Ignore Permissions": "Ignorer Tilgangsbit",
|
||||
"Incoming Rate Limit (KiB/s)": "Innkomande Hastigheitsgrense (KiB/s)",
|
||||
"Introducer": "Introduktør",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Invers av den gitte tilstanden (t.d. ikkje ekskluder)",
|
||||
"Keep Versions": "Behald Versjonar",
|
||||
"Last seen": "Sist sett",
|
||||
"Latest Release": "Nyaste Versjon",
|
||||
"Local Discovery": "Lokal Søking",
|
||||
"Local State": "Lokal Tilstand",
|
||||
"Maximum Age": "Maksimal Levetid",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Multinivåsøk (søkjer på fleire mappenivå)",
|
||||
"Never": "Aldri",
|
||||
"No": "Nei",
|
||||
"No File Versioning": "Ingen Versjonskontroll",
|
||||
"Notice": "Notis",
|
||||
"OK": "OK",
|
||||
"Offline": "Fråkopla",
|
||||
"Online": "Tilkopla",
|
||||
"Out Of Sync": "Ikkje Synkronisert",
|
||||
"Outgoing Rate Limit (KiB/s)": "Utgåande Hastigheitsgrense (KiB/s)",
|
||||
"Override Changes": "Overstyr Endringar",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Plasseringen av mappa på datamaskinen. Vert oppretta om han ikkje finst. Krøllstrekteiknet (~) kan brukast som forkorting for",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Plasseringa for lagra versjonar (la denne vera tom for å bruka standard .stversions-mappa i mappa).",
|
||||
"Please wait": "Ver venleg og vent",
|
||||
"Preview": "Førehandsvisning",
|
||||
"Preview Usage Report": "Førehandsvis Datainnsamling",
|
||||
"Quick guide to supported patterns": "Kjapp innføring i godkjente mønster",
|
||||
"RAM Utilization": "RAM-utnytting",
|
||||
"Rescan": "Skann På Ny",
|
||||
"Rescan Interval": "Skanneintervall",
|
||||
"Restart": "Omstart",
|
||||
"Restart Needed": "Omstart Trengs",
|
||||
"Restarting": "Startar På Ny",
|
||||
"Save": "Lagre",
|
||||
"Scanning": "Skannar",
|
||||
"Select the devices to share this folder with.": "Vel einingane du vil dela denne mappa med.",
|
||||
"Settings": "Innstillingar",
|
||||
"Share With Devices": "Del Med Einingar",
|
||||
"Shared With": "Delt Med",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Kort kjennemerke på mappa. Må vera det same på alle einingar i ei gruppe.",
|
||||
"Show ID": "Vis ID",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Vis i staden for Eining ID i gruppestatus. Vil verta kringkasta til andre einingar som eit valfritt standardnamn.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Vist i staden for Mappe-ID i gruppestatus. Vil verta oppdatert til namnet eininga kringkastar dersom tomt.",
|
||||
"Shutdown": "Slå Av",
|
||||
"Simple File Versioning": "Enkel Versjonskontroll",
|
||||
"Single level wildcard (matches within a directory only)": "Enkeltnivåsøk (søkjer kun i ei mappe)",
|
||||
"Source Code": "Kildekode",
|
||||
"Staggered File Versioning": "Forskyvd Versjonskontroll",
|
||||
"Start Browser": "Start Nettlesar",
|
||||
"Stopped": "Stoppa",
|
||||
"Support / Forum": "Brukarstøtte / Forum",
|
||||
"Sync Protocol Listen Addresses": "Lytteadresse For Synkroniseringsprotokoll",
|
||||
"Synchronization": "Synkronisering",
|
||||
"Syncing": "Synkroniserer",
|
||||
"Syncthing has been shut down.": "Syncthing har blitt slått av.",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing inkluderer føljande programvare heilt eller delvis:",
|
||||
"Syncthing is restarting.": "Syncthing startar på ny.",
|
||||
"Syncthing is upgrading.": "Syncthing oppgraderer.",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing ser ut til å vera nede, eller så er det eit problem med nettilkoplinga di. Prøvar på ny …",
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Samla statistikk er opent tilgjengeleg på {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Instillingane har blitt lagra men ikkje aktivert. Syncthing må starta på ny for å aktivera dei nye instillingane.",
|
||||
"The device ID cannot be blank.": "Eining ID kan ikkje vera tom.",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Eining-IDen lagt til her kan hentast fram via \"Rediger > Vis ID\"-dialogboksen på den andre eininga. Mellomrom og bindestrek er valfritt (vert ignorert).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Kryptert informasjon om bruken av programmet vert gjort dagleg. Dette vert brukt til å følgja med på vanleg brukte systemoppsett, storleikar på mapper, og versjonar av programmet. Om datasettet endrar seg vil denne dialogboksen dukka opp og du vil bli bedt om å godkjenna dette.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "IDen for denne eininga er ikkje godkjend. Det bør vera 52 eller 56 teikn samansett av bokstavar og tal, valfritt med mellomrom og bindestrek.",
|
||||
"The folder ID cannot be blank.": "Mappe ID kan ikkje vera tom.",
|
||||
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "Mappe-IDen må vera eit kort kjennemerke (64 teikn eller mindre) samansett kun av bokstavar, tal og teikna punktum (.), mellomrom (-) og strek (_).",
|
||||
"The folder ID must be unique.": "Mappe ID må vera unik.",
|
||||
"The folder path cannot be blank.": "Mappeplasseringa kan ikkje vera tom.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Fylgjande intervall vert brukt: den fyrste timen vert ein versjon lagra kvart 30. sekund, den fyrste dagen vert ein versjon lagra kvar time, dei fyrste 30 dagane vert ein versjon lagra kvar dag, og inntil høgaste levetid vert ein versjon lagra kvar uke.",
|
||||
"The maximum age must be a number and cannot be blank.": "Maksimal levetid må vera eit tal og kan ikkje vera blankt.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maksimalt tidsrom å behalda ein versjon (i dagar, set til 0 for å behalda versjonar for ubegrensa tid.)",
|
||||
"The number of old versions to keep, per file.": "Tal på gamle versjonar ein skal behalda, per fil.",
|
||||
"The number of versions must be a number and cannot be blank.": "Tal på versjonar må vera eit tal og kan ikkje vera tomt.",
|
||||
"The rescan interval must be at least 5 seconds.": "Skanneintervallet må vera minst 5 sekund.",
|
||||
"Unknown": "Ukjent",
|
||||
"Up to Date": "Oppdatert",
|
||||
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
|
||||
"Upgrading": "Oppgraderer",
|
||||
"Upload Rate": "Opplastingsrate",
|
||||
"Use Compression": "Bruk Komprimering",
|
||||
"Use HTTPS for GUI": "Bruk HTTPS for GUI",
|
||||
"Version": "Versjon",
|
||||
"Versions Path": "Plassering Av Versjonar",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Versjonar vert automatisk sletta når maksimal levetid er nådd eller når det tillatte talet på filer er overstige.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Merk at når ein ny eining vert lagt til må denne òg leggast til på andre sida.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Når ei ny mappe vert lagt til, hugs at Mappe-ID vert brukt til å binda saman mapper mellom einingar. Det er skilnad på store og små bokstavar, så IDane må vera identiske på alle einingane.",
|
||||
"Yes": "Ja",
|
||||
"You must keep at least one version.": "Du må behalda minst ein versjon.",
|
||||
"full documentation": "all dokumentasjon",
|
||||
"items": "element"
|
||||
}
|
||||
@@ -39,7 +39,7 @@
|
||||
"Folder Path": "Ścieżka folderu",
|
||||
"GUI Authentication Password": "Hasło",
|
||||
"GUI Authentication User": "Użytkownik",
|
||||
"GUI Listen Addresses": "Adres nasłuchu",
|
||||
"GUI Listen Addresses": "Adres nasłuchiwania",
|
||||
"Generate": "Generuj",
|
||||
"Global Discovery": "Globalne odnajdywanie",
|
||||
"Global Discovery Server": "Globalny serwer rozgłoszeniowy",
|
||||
@@ -82,7 +82,7 @@
|
||||
"Save": "Zapisz",
|
||||
"Scanning": "Skanowanie",
|
||||
"Select the devices to share this folder with.": "Wybierz urządzenie, któremu udostępnić folder.",
|
||||
"Settings": "Ustrawienia",
|
||||
"Settings": "Ustawienia",
|
||||
"Share With Devices": "Udostępnij dla urządzenia",
|
||||
"Shared With": "Współdzielony z",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Krótki identyfikator folderu. Musi być taki sam na wszystkich urządzeniach.",
|
||||
@@ -136,5 +136,5 @@
|
||||
"Yes": "Tak",
|
||||
"You must keep at least one version.": "Musisz posiadać przynajmniej jedną wersję",
|
||||
"full documentation": "pełna dokumentacja",
|
||||
"items": "pozycji."
|
||||
"items": "pozycji"
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
var validLangs = ["bg","de","en","fr","hu","it","lt","pl","pt-PT","sv","zh-CN","zh-TW"]
|
||||
var validLangs = ["be","bg","de","en","fr","hu","it","lt","nb","nn","pl","pt-PT","sv","zh-CN","zh-TW"]
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"code.google.com/p/go.crypto/bcrypt"
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
"github.com/syncthing/syncthing/internal/osutil"
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -203,7 +203,7 @@ func (d *Discoverer) announcementPkt() []byte {
|
||||
Magic: AnnouncementMagic,
|
||||
This: Device{d.myID[:], addrs},
|
||||
}
|
||||
return pkt.MarshalXDR()
|
||||
return pkt.MustMarshalXDR()
|
||||
}
|
||||
|
||||
func (d *Discoverer) sendLocalAnnouncements() {
|
||||
@@ -213,7 +213,7 @@ func (d *Discoverer) sendLocalAnnouncements() {
|
||||
Magic: AnnouncementMagic,
|
||||
This: Device{d.myID[:], addrs},
|
||||
}
|
||||
msg := pkt.MarshalXDR()
|
||||
msg := pkt.MustMarshalXDR()
|
||||
|
||||
for {
|
||||
if d.multicastBeacon != nil {
|
||||
@@ -253,7 +253,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
|
||||
Magic: AnnouncementMagic,
|
||||
This: Device{d.myID[:], []Address{{Port: d.extPort}}},
|
||||
}
|
||||
buf = pkt.MarshalXDR()
|
||||
buf = pkt.MustMarshalXDR()
|
||||
} else {
|
||||
buf = d.announcementPkt()
|
||||
}
|
||||
@@ -425,7 +425,7 @@ func (d *Discoverer) externalLookup(device protocol.DeviceID) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
buf := Query{QueryMagic, device[:]}.MarshalXDR()
|
||||
buf := Query{QueryMagic, device[:]}.MustMarshalXDR()
|
||||
_, err = conn.Write(buf)
|
||||
if err != nil {
|
||||
if debug {
|
||||
|
||||
@@ -40,21 +40,29 @@ func (o Query) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o Query) MarshalXDR() []byte {
|
||||
func (o Query) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o Query) AppendXDR(bs []byte) []byte {
|
||||
func (o Query) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o Query) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Query) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint32(o.Magic)
|
||||
if len(o.DeviceID) > 32 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.DeviceID); l > 32 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("DeviceID", l, 32)
|
||||
}
|
||||
xw.WriteBytes(o.DeviceID)
|
||||
return xw.Tot(), xw.Error()
|
||||
@@ -109,15 +117,23 @@ func (o Announce) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o Announce) MarshalXDR() []byte {
|
||||
func (o Announce) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o Announce) AppendXDR(bs []byte) []byte {
|
||||
func (o Announce) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o Announce) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Announce) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
@@ -126,8 +142,8 @@ func (o Announce) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
if len(o.Extra) > 16 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Extra); l > 16 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Extra", l, 16)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Extra)))
|
||||
for i := range o.Extra {
|
||||
@@ -155,7 +171,7 @@ func (o *Announce) decodeXDR(xr *xdr.Reader) error {
|
||||
(&o.This).decodeXDR(xr)
|
||||
_ExtraSize := int(xr.ReadUint32())
|
||||
if _ExtraSize > 16 {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
return xdr.ElementSizeExceeded("Extra", _ExtraSize, 16)
|
||||
}
|
||||
o.Extra = make([]Device, _ExtraSize)
|
||||
for i := range o.Extra {
|
||||
@@ -197,24 +213,32 @@ func (o Device) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o Device) MarshalXDR() []byte {
|
||||
func (o Device) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o Device) AppendXDR(bs []byte) []byte {
|
||||
func (o Device) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o Device) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.ID) > 32 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.ID); l > 32 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 32)
|
||||
}
|
||||
xw.WriteBytes(o.ID)
|
||||
if len(o.Addresses) > 16 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Addresses); l > 16 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Addresses", l, 16)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Addresses)))
|
||||
for i := range o.Addresses {
|
||||
@@ -241,7 +265,7 @@ func (o *Device) decodeXDR(xr *xdr.Reader) error {
|
||||
o.ID = xr.ReadBytesMax(32)
|
||||
_AddressesSize := int(xr.ReadUint32())
|
||||
if _AddressesSize > 16 {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
return xdr.ElementSizeExceeded("Addresses", _AddressesSize, 16)
|
||||
}
|
||||
o.Addresses = make([]Address, _AddressesSize)
|
||||
for i := range o.Addresses {
|
||||
@@ -279,20 +303,28 @@ func (o Address) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o Address) MarshalXDR() []byte {
|
||||
func (o Address) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o Address) AppendXDR(bs []byte) []byte {
|
||||
func (o Address) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o Address) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Address) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.IP) > 16 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.IP); l > 16 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("IP", l, 16)
|
||||
}
|
||||
xw.WriteBytes(o.IP)
|
||||
xw.WriteUint16(o.Port)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
1
internal/files/.gitignore
vendored
Normal file
1
internal/files/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
testdata/*.db
|
||||
@@ -90,6 +90,17 @@ func (m *BlockMap) Update(files []protocol.FileInfo) error {
|
||||
return m.db.Write(batch, nil)
|
||||
}
|
||||
|
||||
// Discard block map state, removing the given files
|
||||
func (m *BlockMap) Discard(files []protocol.FileInfo) error {
|
||||
batch := new(leveldb.Batch)
|
||||
for _, file := range files {
|
||||
for _, block := range file.Blocks {
|
||||
batch.Delete(m.blockKey(block.Hash, file.Name))
|
||||
}
|
||||
}
|
||||
return m.db.Write(batch, nil)
|
||||
}
|
||||
|
||||
// Drop block map, removing all entries related to this block map from the db.
|
||||
func (m *BlockMap) Drop() error {
|
||||
batch := new(leveldb.Batch)
|
||||
@@ -168,6 +179,18 @@ func (f *BlockFinder) Iterate(hash []byte, iterFn func(string, string, uint32) b
|
||||
return false
|
||||
}
|
||||
|
||||
// A method for repairing incorrect blockmap entries, removes the old entry
|
||||
// and replaces it with a new entry for the given block
|
||||
func (f *BlockFinder) Fix(folder, file string, index uint32, oldHash, newHash []byte) error {
|
||||
buf := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(buf, uint32(index))
|
||||
|
||||
batch := new(leveldb.Batch)
|
||||
batch.Delete(toBlockKey(oldHash, folder, file))
|
||||
batch.Put(toBlockKey(newHash, folder, file), buf)
|
||||
return f.db.Write(batch, nil)
|
||||
}
|
||||
|
||||
// m.blockKey returns a byte slice encoding the following information:
|
||||
// keyTypeBlock (1 byte)
|
||||
// folder (64 bytes)
|
||||
|
||||
@@ -173,7 +173,7 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
|
||||
f3.Flags = 0
|
||||
}
|
||||
|
||||
func TestBlockMapFinderLookup(t *testing.T) {
|
||||
func TestBlockFinderLookup(t *testing.T) {
|
||||
db, f := setup()
|
||||
|
||||
m1 := NewBlockMap(db, "folder1")
|
||||
@@ -232,4 +232,37 @@ func TestBlockMapFinderLookup(t *testing.T) {
|
||||
if counter != 1 {
|
||||
t.Fatal("Incorrect count")
|
||||
}
|
||||
|
||||
f1.Flags = 0
|
||||
}
|
||||
|
||||
func TestBlockFinderFix(t *testing.T) {
|
||||
db, f := setup()
|
||||
|
||||
iterFn := func(folder, file string, index uint32) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
m := NewBlockMap(db, "folder1")
|
||||
err := m.Add([]protocol.FileInfo{f1})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !f.Iterate(f1.Blocks[0].Hash, iterFn) {
|
||||
t.Fatal("Block not found")
|
||||
}
|
||||
|
||||
err = f.Fix("folder1", f1.Name, 0, f1.Blocks[0].Hash, f2.Blocks[0].Hash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if f.Iterate(f1.Blocks[0].Hash, iterFn) {
|
||||
t.Fatal("Unexpected block")
|
||||
}
|
||||
|
||||
if !f.Iterate(f2.Blocks[0].Hash, iterFn) {
|
||||
t.Fatal("Block not found")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package files_test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
@@ -10,63 +9,40 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var items map[string][]byte
|
||||
var keys map[string]string
|
||||
var keys [][]byte
|
||||
|
||||
func init() {
|
||||
for i := 0; i < nItems; i++ {
|
||||
keys = append(keys, randomData(1))
|
||||
}
|
||||
}
|
||||
|
||||
const nItems = 10000
|
||||
|
||||
func setupMaps() {
|
||||
|
||||
// Set up two simple maps, one "key" => data and one "indirect key" =>
|
||||
// "key".
|
||||
|
||||
items = make(map[string][]byte, nItems)
|
||||
keys = make(map[string]string, nItems)
|
||||
|
||||
for i := 0; i < nItems; i++ {
|
||||
k1 := fmt.Sprintf("key%d", i)
|
||||
data := make([]byte, 87)
|
||||
_, err := rand.Reader.Read(data)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
items[k1] = data
|
||||
|
||||
k2 := fmt.Sprintf("indirect%d", i)
|
||||
keys[k2] = k1
|
||||
func randomData(prefix byte) []byte {
|
||||
data := make([]byte, 1+32+64+32)
|
||||
_, err := rand.Reader.Read(data)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func makeK1(s string) []byte {
|
||||
k1 := make([]byte, 1+len(s))
|
||||
k1[0] = 1
|
||||
copy(k1[1:], []byte(s))
|
||||
return k1
|
||||
}
|
||||
|
||||
func makeK2(s string) []byte {
|
||||
k2 := make([]byte, 1+len(s))
|
||||
k2[0] = 2 // Only difference from makeK1
|
||||
copy(k2[1:], []byte(s))
|
||||
return k2
|
||||
return append([]byte{prefix}, data...)
|
||||
}
|
||||
|
||||
func setItems(db *leveldb.DB) error {
|
||||
snap, err := db.GetSnapshot()
|
||||
if err != nil {
|
||||
return err
|
||||
batch := new(leveldb.Batch)
|
||||
for _, k1 := range keys {
|
||||
k2 := randomData(2)
|
||||
// k2 -> data
|
||||
batch.Put(k2, randomData(42))
|
||||
// k1 -> k2
|
||||
batch.Put(k1, k2)
|
||||
}
|
||||
defer snap.Release()
|
||||
|
||||
batch := &leveldb.Batch{}
|
||||
for k2, k1 := range keys {
|
||||
// Create k1 => item mapping first
|
||||
batch.Put(makeK1(k1), items[k1])
|
||||
// Then the k2 => k1 mapping
|
||||
batch.Put(makeK2(k2), makeK1(k1))
|
||||
if testing.Verbose() {
|
||||
log.Printf("batch write (set) %p", batch)
|
||||
}
|
||||
return db.Write(batch, nil)
|
||||
}
|
||||
@@ -78,77 +54,100 @@ func clearItems(db *leveldb.DB) error {
|
||||
}
|
||||
defer snap.Release()
|
||||
|
||||
// Iterate from the start of k2 space to the end
|
||||
it := snap.NewIterator(&util.Range{Start: []byte{2}, Limit: []byte{2, 0xff, 0xff, 0xff, 0xff}}, nil)
|
||||
// Iterate over k2
|
||||
|
||||
it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
|
||||
defer it.Release()
|
||||
|
||||
batch := &leveldb.Batch{}
|
||||
batch := new(leveldb.Batch)
|
||||
for it.Next() {
|
||||
k2 := it.Key()
|
||||
k1 := it.Value()
|
||||
k1 := it.Key()
|
||||
k2 := it.Value()
|
||||
|
||||
// k1 should exist
|
||||
_, err := snap.Get(k1, nil)
|
||||
// k2 should exist
|
||||
_, err := snap.Get(k2, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the k2 => k1 mapping first
|
||||
batch.Delete(k2)
|
||||
// Then the k1 => key mapping
|
||||
// Delete the k1 => k2 mapping first
|
||||
batch.Delete(k1)
|
||||
// Then the k2 => data mapping
|
||||
batch.Delete(k2)
|
||||
}
|
||||
if testing.Verbose() {
|
||||
log.Printf("batch write (clear) %p", batch)
|
||||
}
|
||||
return db.Write(batch, nil)
|
||||
}
|
||||
|
||||
func scanItems(db *leveldb.DB) error {
|
||||
snap, err := db.GetSnapshot()
|
||||
if testing.Verbose() {
|
||||
log.Printf("snap create %p", snap)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer snap.Release()
|
||||
defer func() {
|
||||
if testing.Verbose() {
|
||||
log.Printf("snap release %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
// Iterate from the start of k2 space to the end
|
||||
it := snap.NewIterator(&util.Range{Start: []byte{2}, Limit: []byte{2, 0xff, 0xff, 0xff, 0xff}}, nil)
|
||||
it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
|
||||
defer it.Release()
|
||||
|
||||
i := 0
|
||||
for it.Next() {
|
||||
// k2 => k1 => data
|
||||
k2 := it.Key()
|
||||
k1 := it.Value()
|
||||
_, err := snap.Get(k1, nil)
|
||||
k1 := it.Key()
|
||||
k2 := it.Value()
|
||||
_, err := snap.Get(k2, nil)
|
||||
if err != nil {
|
||||
log.Printf("k1: %q (%x)", k1, k1)
|
||||
log.Printf("k2: %q (%x)", k2, k2)
|
||||
log.Printf("k1: %x", k1)
|
||||
log.Printf("k2: %x (missing)", k2)
|
||||
return err
|
||||
}
|
||||
i++
|
||||
}
|
||||
if testing.Verbose() {
|
||||
log.Println("scanned", i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestConcurrent(t *testing.T) {
|
||||
setupMaps()
|
||||
func TestConcurrentSetClear(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
|
||||
dur := 2 * time.Second
|
||||
dur := 30 * time.Second
|
||||
t0 := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
|
||||
os.RemoveAll("testdata/global.db")
|
||||
db, err := leveldb.OpenFile("testdata/global.db", nil)
|
||||
os.RemoveAll("testdata/concurrent-set-clear.db")
|
||||
db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{CachedOpenFiles: 10})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll("testdata/global.db")
|
||||
defer os.RemoveAll("testdata/concurrent-set-clear.db")
|
||||
|
||||
errChan := make(chan error, 3)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for time.Since(t0) < dur {
|
||||
if err := setItems(db); err != nil {
|
||||
t.Fatal(err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if err := clearItems(db); err != nil {
|
||||
t.Fatal(err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -158,11 +157,71 @@ func TestConcurrent(t *testing.T) {
|
||||
defer wg.Done()
|
||||
for time.Since(t0) < dur {
|
||||
if err := scanItems(db); err != nil {
|
||||
t.Fatal(err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
go func() {
|
||||
wg.Wait()
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
err = <-errChan
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func TestConcurrentSetOnly(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
|
||||
dur := 30 * time.Second
|
||||
t0 := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
|
||||
os.RemoveAll("testdata/concurrent-set-only.db")
|
||||
db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{CachedOpenFiles: 10})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll("testdata/concurrent-set-only.db")
|
||||
|
||||
errChan := make(chan error, 3)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for time.Since(t0) < dur {
|
||||
if err := setItems(db); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for time.Since(t0) < dur {
|
||||
if err := scanItems(db); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
err = <-errChan
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,10 +19,11 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
debug = strings.Contains(os.Getenv("STTRACE"), "files") || os.Getenv("STTRACE") == "all"
|
||||
l = logger.DefaultLogger
|
||||
debug = strings.Contains(os.Getenv("STTRACE"), "files") || os.Getenv("STTRACE") == "all"
|
||||
debugDB = strings.Contains(os.Getenv("STTRACE"), "db") || os.Getenv("STTRACE") == "all"
|
||||
l = logger.DefaultLogger
|
||||
)
|
||||
|
||||
@@ -174,11 +174,23 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
|
||||
limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
|
||||
|
||||
batch := new(leveldb.Batch)
|
||||
if debugDB {
|
||||
l.Debugf("new batch %p", batch)
|
||||
}
|
||||
snap, err := db.GetSnapshot()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
|
||||
defer dbi.Release()
|
||||
|
||||
@@ -204,12 +216,15 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
|
||||
|
||||
cmp := bytes.Compare(newName, oldName)
|
||||
|
||||
if debug {
|
||||
if debugDB {
|
||||
l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
|
||||
}
|
||||
|
||||
switch {
|
||||
case moreFs && (!moreDb || cmp == -1):
|
||||
if debugDB {
|
||||
l.Debugln("generic replace; missing - insert")
|
||||
}
|
||||
// Database is missing this file. Insert it.
|
||||
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
|
||||
maxLocalVer = lv
|
||||
@@ -225,10 +240,16 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
|
||||
// File exists on both sides - compare versions. We might get an
|
||||
// update with the same version and different flags if a device has
|
||||
// marked a file as invalid, so handle that too.
|
||||
if debugDB {
|
||||
l.Debugln("generic replace; exists - compare")
|
||||
}
|
||||
var ef protocol.FileInfoTruncated
|
||||
ef.UnmarshalXDR(dbi.Value())
|
||||
if fs[fsi].Version > ef.Version ||
|
||||
(fs[fsi].Version == ef.Version && fs[fsi].Flags != ef.Flags) {
|
||||
if debugDB {
|
||||
l.Debugln("generic replace; differs - insert")
|
||||
}
|
||||
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
|
||||
maxLocalVer = lv
|
||||
}
|
||||
@@ -237,11 +258,17 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
|
||||
} else {
|
||||
ldbUpdateGlobal(snap, batch, folder, device, newName, fs[fsi].Version)
|
||||
}
|
||||
} else if debugDB {
|
||||
l.Debugln("generic replace; equal - ignore")
|
||||
}
|
||||
|
||||
fsi++
|
||||
moreDb = dbi.Next()
|
||||
|
||||
case moreDb && (!moreFs || cmp == 1):
|
||||
if debugDB {
|
||||
l.Debugln("generic replace; exists - remove")
|
||||
}
|
||||
if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
|
||||
maxLocalVer = lv
|
||||
}
|
||||
@@ -249,6 +276,9 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
|
||||
}
|
||||
}
|
||||
|
||||
if debugDB {
|
||||
l.Debugf("db.Write %p", batch)
|
||||
}
|
||||
err = db.Write(batch, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -261,10 +291,13 @@ func ldbReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) u
|
||||
// TODO: Return the remaining maxLocalVer?
|
||||
return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) uint64 {
|
||||
// Database has a file that we are missing. Remove it.
|
||||
if debug {
|
||||
if debugDB {
|
||||
l.Debugf("delete; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
|
||||
}
|
||||
ldbRemoveFromGlobal(db, batch, folder, device, name)
|
||||
if debugDB {
|
||||
l.Debugf("batch.Delete %p %x", batch, dbi.Key())
|
||||
}
|
||||
batch.Delete(dbi.Key())
|
||||
return 0
|
||||
})
|
||||
@@ -278,7 +311,7 @@ func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.F
|
||||
panic(err)
|
||||
}
|
||||
if !tf.IsDeleted() {
|
||||
if debug {
|
||||
if debugDB {
|
||||
l.Debugf("mark deleted; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
|
||||
}
|
||||
ts := clock(tf.LocalVersion)
|
||||
@@ -289,7 +322,11 @@ func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.F
|
||||
Flags: tf.Flags | protocol.FlagDeleted,
|
||||
Modified: tf.Modified,
|
||||
}
|
||||
batch.Put(dbi.Key(), f.MarshalXDR())
|
||||
bs, _ := f.MarshalXDR()
|
||||
if debugDB {
|
||||
l.Debugf("batch.Put %p %x", batch, dbi.Key())
|
||||
}
|
||||
batch.Put(dbi.Key(), bs)
|
||||
ldbUpdateGlobal(db, batch, folder, device, deviceKeyName(dbi.Key()), f.Version)
|
||||
return ts
|
||||
}
|
||||
@@ -301,16 +338,30 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) ui
|
||||
runtime.GC()
|
||||
|
||||
batch := new(leveldb.Batch)
|
||||
if debugDB {
|
||||
l.Debugf("new batch %p", batch)
|
||||
}
|
||||
snap, err := db.GetSnapshot()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
var maxLocalVer uint64
|
||||
for _, f := range fs {
|
||||
name := []byte(f.Name)
|
||||
fk := deviceKey(folder, device, name)
|
||||
if debugDB {
|
||||
l.Debugf("snap.Get %p %x", snap, fk)
|
||||
}
|
||||
bs, err := snap.Get(fk, nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
|
||||
@@ -343,6 +394,9 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) ui
|
||||
}
|
||||
}
|
||||
|
||||
if debugDB {
|
||||
l.Debugf("db.Write %p", batch)
|
||||
}
|
||||
err = db.Write(batch, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -352,7 +406,7 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) ui
|
||||
}
|
||||
|
||||
func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) uint64 {
|
||||
if debug {
|
||||
if debugDB {
|
||||
l.Debugf("insert; folder=%q device=%v %v", folder, protocol.DeviceIDFromBytes(device), file)
|
||||
}
|
||||
|
||||
@@ -362,7 +416,10 @@ func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) ui
|
||||
|
||||
name := []byte(file.Name)
|
||||
nk := deviceKey(folder, device, name)
|
||||
batch.Put(nk, file.MarshalXDR())
|
||||
if debugDB {
|
||||
l.Debugf("batch.Put %p %x", batch, nk)
|
||||
}
|
||||
batch.Put(nk, file.MustMarshalXDR())
|
||||
|
||||
return file.LocalVersion
|
||||
}
|
||||
@@ -371,7 +428,7 @@ func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) ui
|
||||
// file. If the device is already present in the list, the version is updated.
|
||||
// If the file does not have an entry in the global list, it is created.
|
||||
func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, version uint64) bool {
|
||||
if debug {
|
||||
if debugDB {
|
||||
l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file, version)
|
||||
}
|
||||
gk := globalKey(folder, file)
|
||||
@@ -416,7 +473,11 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, v
|
||||
fl.versions = append(fl.versions, nv)
|
||||
|
||||
done:
|
||||
batch.Put(gk, fl.MarshalXDR())
|
||||
if debugDB {
|
||||
l.Debugf("batch.Put %p %x", batch, gk)
|
||||
l.Debugf("new global after update: %v", fl)
|
||||
}
|
||||
batch.Put(gk, fl.MustMarshalXDR())
|
||||
|
||||
return true
|
||||
}
|
||||
@@ -425,7 +486,7 @@ done:
|
||||
// given file. If the version list is empty after this, the file entry is
|
||||
// removed entirely.
|
||||
func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byte) {
|
||||
if debug {
|
||||
if debugDB {
|
||||
l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file)
|
||||
}
|
||||
|
||||
@@ -451,9 +512,16 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byt
|
||||
}
|
||||
|
||||
if len(fl.versions) == 0 {
|
||||
if debugDB {
|
||||
l.Debugf("batch.Delete %p %x", batch, gk)
|
||||
}
|
||||
batch.Delete(gk)
|
||||
} else {
|
||||
batch.Put(gk, fl.MarshalXDR())
|
||||
if debugDB {
|
||||
l.Debugf("batch.Put %p %x", batch, gk)
|
||||
l.Debugf("new global after remove: %v", fl)
|
||||
}
|
||||
batch.Put(gk, fl.MustMarshalXDR())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -464,7 +532,16 @@ func ldbWithHave(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIt
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
|
||||
defer dbi.Release()
|
||||
|
||||
@@ -488,7 +565,16 @@ func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []b
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
|
||||
defer dbi.Release()
|
||||
|
||||
@@ -529,8 +615,19 @@ func ldbGetGlobal(db *leveldb.DB, folder, file []byte) protocol.FileInfo {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
if debugDB {
|
||||
l.Debugf("snap.Get %p %x", snap, k)
|
||||
}
|
||||
bs, err := snap.Get(k, nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
return protocol.FileInfo{}
|
||||
@@ -550,6 +647,9 @@ func ldbGetGlobal(db *leveldb.DB, folder, file []byte) protocol.FileInfo {
|
||||
}
|
||||
|
||||
k = deviceKey(folder, vl.versions[0].device, file)
|
||||
if debugDB {
|
||||
l.Debugf("snap.Get %p %x", snap, k)
|
||||
}
|
||||
bs, err = snap.Get(k, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -572,7 +672,16 @@ func ldbWithGlobal(db *leveldb.DB, folder []byte, truncate bool, fn fileIterator
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
|
||||
defer dbi.Release()
|
||||
|
||||
@@ -588,13 +697,18 @@ func ldbWithGlobal(db *leveldb.DB, folder []byte, truncate bool, fn fileIterator
|
||||
}
|
||||
name := globalKeyName(dbi.Key())
|
||||
fk := deviceKey(folder, vl.versions[0].device, name)
|
||||
if debugDB {
|
||||
l.Debugf("snap.Get %p %x", snap, fk)
|
||||
}
|
||||
bs, err := snap.Get(fk, nil)
|
||||
if err != nil {
|
||||
l.Debugf("folder: %q (%x)", folder, folder)
|
||||
l.Debugf("key: %q (%x)", dbi.Key(), dbi.Key())
|
||||
l.Debugf("vl: %v", vl)
|
||||
l.Debugf("vl.versions[0].device: %x", vl.versions[0].device)
|
||||
l.Debugf("name: %q (%x)", name, name)
|
||||
l.Debugf("fk: %q (%x)", fk, fk)
|
||||
l.Debugf("fk: %q", fk)
|
||||
l.Debugf("fk: %x %x %x", fk[1:1+64], fk[1+64:1+64+32], fk[1+64+32:])
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -646,7 +760,16 @@ func ldbWithNeed(db *leveldb.DB, folder, device []byte, truncate bool, fn fileIt
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
|
||||
defer dbi.Release()
|
||||
|
||||
@@ -684,6 +807,9 @@ outer:
|
||||
continue outer
|
||||
}
|
||||
fk := deviceKey(folder, vl.versions[i].device, name)
|
||||
if debugDB {
|
||||
l.Debugf("snap.Get %p %x", snap, fk)
|
||||
}
|
||||
bs, err := snap.Get(fk, nil)
|
||||
if err != nil {
|
||||
var id protocol.DeviceID
|
||||
@@ -713,7 +839,7 @@ outer:
|
||||
continue outer
|
||||
}
|
||||
|
||||
if debug {
|
||||
if debugDB {
|
||||
l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.versions[0].version)
|
||||
}
|
||||
|
||||
@@ -737,7 +863,16 @@ func ldbListFolders(db *leveldb.DB) []string {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
|
||||
defer dbi.Release()
|
||||
|
||||
@@ -765,7 +900,15 @@ func ldbDropFolder(db *leveldb.DB, folder []byte) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer snap.Release()
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
// Remove all items related to the given folder from the device->file bucket
|
||||
start := []byte{keyTypeDevice}
|
||||
@@ -803,3 +946,70 @@ func unmarshalTrunc(bs []byte, truncate bool) (protocol.FileIntf, error) {
|
||||
return tf, err
|
||||
}
|
||||
}
|
||||
|
||||
func ldbCheckGlobals(db *leveldb.DB, folder []byte) {
|
||||
defer runtime.GC()
|
||||
|
||||
snap, err := db.GetSnapshot()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if debugDB {
|
||||
l.Debugf("created snapshot %p", snap)
|
||||
}
|
||||
defer func() {
|
||||
if debugDB {
|
||||
l.Debugf("close snapshot %p", snap)
|
||||
}
|
||||
snap.Release()
|
||||
}()
|
||||
|
||||
start := globalKey(folder, nil)
|
||||
limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
|
||||
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
|
||||
defer dbi.Release()
|
||||
|
||||
batch := new(leveldb.Batch)
|
||||
if debugDB {
|
||||
l.Debugf("new batch %p", batch)
|
||||
}
|
||||
for dbi.Next() {
|
||||
gk := dbi.Key()
|
||||
var vl versionList
|
||||
err := vl.UnmarshalXDR(dbi.Value())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Check the global version list for consistency. An issue in previous
|
||||
// versions of goleveldb could result in reordered writes so that
|
||||
// there are global entries pointing to no longer existing files. Here
|
||||
// we find those and clear them out.
|
||||
|
||||
name := globalKeyName(gk)
|
||||
var newVL versionList
|
||||
for _, version := range vl.versions {
|
||||
fk := deviceKey(folder, version.device, name)
|
||||
if debugDB {
|
||||
l.Debugf("snap.Get %p %x", snap, fk)
|
||||
}
|
||||
_, err := snap.Get(fk, nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
newVL.versions = append(newVL.versions, version)
|
||||
}
|
||||
|
||||
if len(newVL.versions) != len(vl.versions) {
|
||||
l.Infof("db repair: rewriting global version list for %x %x", gk[1:1+64], gk[1+64:])
|
||||
batch.Put(dbi.Key(), newVL.MustMarshalXDR())
|
||||
}
|
||||
}
|
||||
if debugDB {
|
||||
l.Infoln("db check completed for %q", folder)
|
||||
}
|
||||
db.Write(batch, nil)
|
||||
}
|
||||
|
||||
@@ -42,15 +42,23 @@ func (o fileVersion) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o fileVersion) MarshalXDR() []byte {
|
||||
func (o fileVersion) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o fileVersion) AppendXDR(bs []byte) []byte {
|
||||
func (o fileVersion) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o fileVersion) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o fileVersion) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
@@ -102,15 +110,23 @@ func (o versionList) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o versionList) MarshalXDR() []byte {
|
||||
func (o versionList) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o versionList) AppendXDR(bs []byte) []byte {
|
||||
func (o versionList) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o versionList) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o versionList) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
@@ -53,6 +53,8 @@ func NewSet(folder string, db *leveldb.DB) *Set {
|
||||
blockmap: NewBlockMap(db, folder),
|
||||
}
|
||||
|
||||
ldbCheckGlobals(db, []byte(folder))
|
||||
|
||||
var deviceID protocol.DeviceID
|
||||
ldbWithAllFolderTruncated(db, []byte(folder), func(device []byte, f protocol.FileInfoTruncated) bool {
|
||||
copy(deviceID[:], device)
|
||||
@@ -111,12 +113,22 @@ func (s *Set) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
|
||||
normalizeFilenames(fs)
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
if device == protocol.LocalDeviceID {
|
||||
discards := make([]protocol.FileInfo, 0, len(fs))
|
||||
updates := make([]protocol.FileInfo, 0, len(fs))
|
||||
for _, newFile := range fs {
|
||||
existingFile := ldbGet(s.db, []byte(s.folder), device[:], []byte(newFile.Name))
|
||||
if existingFile.Version <= newFile.Version {
|
||||
discards = append(discards, existingFile)
|
||||
updates = append(updates, newFile)
|
||||
}
|
||||
}
|
||||
s.blockmap.Discard(discards)
|
||||
s.blockmap.Update(updates)
|
||||
}
|
||||
if lv := ldbUpdate(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
|
||||
s.localVersion[device] = lv
|
||||
}
|
||||
if device == protocol.LocalDeviceID {
|
||||
s.blockmap.Update(fs)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Set) WithNeed(device protocol.DeviceID, fn fileIterator) {
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -733,14 +733,13 @@ func (m *Model) ConnectedTo(deviceID protocol.DeviceID) bool {
|
||||
func (m *Model) GetIgnores(folder string) ([]string, error) {
|
||||
var lines []string
|
||||
|
||||
m.fmut.RLock()
|
||||
cfg, ok := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
if !ok {
|
||||
return lines, fmt.Errorf("Folder %s does not exist", folder)
|
||||
}
|
||||
|
||||
m.fmut.Lock()
|
||||
defer m.fmut.Unlock()
|
||||
|
||||
fd, err := os.Open(filepath.Join(cfg.Path, ".stignore"))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
@@ -1239,10 +1238,9 @@ func (m *Model) Override(folder string) {
|
||||
// This is guaranteed to increment if the contents of the local folder has
|
||||
// changed.
|
||||
func (m *Model) CurrentLocalVersion(folder string) uint64 {
|
||||
m.fmut.Lock()
|
||||
defer m.fmut.Unlock()
|
||||
|
||||
m.fmut.RLock()
|
||||
fs, ok := m.folderFiles[folder]
|
||||
m.fmut.RUnlock()
|
||||
if !ok {
|
||||
// The folder might not exist, since this can be called with a user
|
||||
// specified folder name from the REST interface.
|
||||
@@ -1256,12 +1254,14 @@ func (m *Model) CurrentLocalVersion(folder string) uint64 {
|
||||
// sent by remote peers. This is guaranteed to increment if the contents of
|
||||
// the remote or global folder has changed.
|
||||
func (m *Model) RemoteLocalVersion(folder string) uint64 {
|
||||
m.fmut.Lock()
|
||||
defer m.fmut.Unlock()
|
||||
m.fmut.RLock()
|
||||
defer m.fmut.RUnlock()
|
||||
|
||||
fs, ok := m.folderFiles[folder]
|
||||
if !ok {
|
||||
panic("bug: LocalVersion called for nonexistent folder " + folder)
|
||||
// The folder might not exist, since this can be called with a user
|
||||
// specified folder name from the REST interface.
|
||||
return 0
|
||||
}
|
||||
|
||||
var ver uint64
|
||||
@@ -1273,15 +1273,26 @@ func (m *Model) RemoteLocalVersion(folder string) uint64 {
|
||||
}
|
||||
|
||||
func (m *Model) availability(folder string, file string) []protocol.DeviceID {
|
||||
m.fmut.Lock()
|
||||
defer m.fmut.Unlock()
|
||||
// Acquire this lock first, as the value returned from foldersFiles can
|
||||
// gen heavily modified on Close()
|
||||
m.pmut.RLock()
|
||||
defer m.pmut.RUnlock()
|
||||
|
||||
m.fmut.RLock()
|
||||
fs, ok := m.folderFiles[folder]
|
||||
m.fmut.RUnlock()
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fs.Availability(file)
|
||||
availableDevices := []protocol.DeviceID{}
|
||||
for _, device := range fs.Availability(file) {
|
||||
_, ok := m.protoConn[device]
|
||||
if ok {
|
||||
availableDevices = append(availableDevices, device)
|
||||
}
|
||||
}
|
||||
return availableDevices
|
||||
}
|
||||
|
||||
func (m *Model) String() string {
|
||||
|
||||
@@ -17,6 +17,7 @@ package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -141,9 +142,16 @@ loop:
|
||||
}
|
||||
p.model.setState(p.folder, FolderSyncing)
|
||||
tries := 0
|
||||
checksum := false
|
||||
for {
|
||||
tries++
|
||||
changed := p.pullerIteration(copiersPerFolder, pullersPerFolder, finishersPerFolder)
|
||||
// Last resort mode, to get around corrupt/invalid block maps.
|
||||
if tries == 10 {
|
||||
l.Infoln("Desperation mode ON")
|
||||
checksum = true
|
||||
}
|
||||
|
||||
changed := p.pullerIteration(copiersPerFolder, pullersPerFolder, finishersPerFolder, checksum)
|
||||
if debug {
|
||||
l.Debugln(p, "changed", changed)
|
||||
}
|
||||
@@ -234,7 +242,7 @@ func (p *Puller) String() string {
|
||||
// finisher routines are used. It's seldom efficient to use more than one
|
||||
// copier routine, while multiple pullers are essential and multiple finishers
|
||||
// may be useful (they are primarily CPU bound due to hashing).
|
||||
func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
|
||||
func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int, checksum bool) int {
|
||||
pullChan := make(chan pullBlockState)
|
||||
copyChan := make(chan copyBlocksState)
|
||||
finisherChan := make(chan *sharedPullerState)
|
||||
@@ -247,7 +255,7 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
|
||||
copyWg.Add(1)
|
||||
go func() {
|
||||
// copierRoutine finishes when copyChan is closed
|
||||
p.copierRoutine(copyChan, pullChan, finisherChan)
|
||||
p.copierRoutine(copyChan, pullChan, finisherChan, checksum)
|
||||
copyWg.Done()
|
||||
}()
|
||||
}
|
||||
@@ -404,6 +412,16 @@ func (p *Puller) handleDir(file protocol.FileInfo) {
|
||||
// deleteDir attempts to delete the given directory
|
||||
func (p *Puller) deleteDir(file protocol.FileInfo) {
|
||||
realName := filepath.Join(p.dir, file.Name)
|
||||
// Delete any temporary files lying around in the directory
|
||||
dir, _ := os.Open(realName)
|
||||
if dir != nil {
|
||||
files, _ := dir.Readdirnames(-1)
|
||||
for _, file := range files {
|
||||
if defTempNamer.IsTemporary(file) {
|
||||
osutil.InWritableDir(os.Remove, filepath.Join(realName, file))
|
||||
}
|
||||
}
|
||||
}
|
||||
err := osutil.InWritableDir(os.Remove, realName)
|
||||
if err == nil || os.IsNotExist(err) {
|
||||
p.model.updateLocal(p.folder, file)
|
||||
@@ -549,7 +567,7 @@ func (p *Puller) shortcutFile(file protocol.FileInfo) {
|
||||
|
||||
// copierRoutine reads copierStates until the in channel closes and performs
|
||||
// the relevant copies when possible, or passes it to the puller routine.
|
||||
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
|
||||
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState, checksum bool) {
|
||||
buf := make([]byte, protocol.BlockSize)
|
||||
|
||||
nextFile:
|
||||
@@ -574,10 +592,10 @@ nextFile:
|
||||
}
|
||||
}()
|
||||
|
||||
hasher := sha256.New()
|
||||
for _, block := range state.blocks {
|
||||
buf = buf[:int(block.Size)]
|
||||
|
||||
success := p.model.finder.Iterate(block.Hash, func(folder, file string, index uint32) bool {
|
||||
found := p.model.finder.Iterate(block.Hash, func(folder, file string, index uint32) bool {
|
||||
path := filepath.Join(p.model.folderCfgs[folder].Path, file)
|
||||
|
||||
var fd *os.File
|
||||
@@ -598,6 +616,23 @@ nextFile:
|
||||
return false
|
||||
}
|
||||
|
||||
// Only done on second to last puller attempt
|
||||
if checksum {
|
||||
hasher.Write(buf)
|
||||
hash := hasher.Sum(nil)
|
||||
hasher.Reset()
|
||||
if !bytes.Equal(hash, block.Hash) {
|
||||
if debug {
|
||||
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
|
||||
}
|
||||
err = p.model.finder.Fix(folder, file, index, block.Hash, hash)
|
||||
if err != nil {
|
||||
l.Warnln("finder fix:", err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
_, err = dstFd.WriteAt(buf, block.Offset)
|
||||
if err != nil {
|
||||
state.earlyClose("dst write", err)
|
||||
@@ -612,7 +647,7 @@ nextFile:
|
||||
break
|
||||
}
|
||||
|
||||
if !success {
|
||||
if !found {
|
||||
state.pullStarted()
|
||||
ps := pullBlockState{
|
||||
sharedPullerState: state.sharedPullerState,
|
||||
@@ -636,7 +671,7 @@ nextBlock:
|
||||
continue nextBlock
|
||||
}
|
||||
|
||||
// Select the least busy device to pull the block frop.model. If we found no
|
||||
// Select the least busy device to pull the block from. If we found no
|
||||
// feasible device at all, fail the block (and in the long run, the
|
||||
// file).
|
||||
potentialDevices := p.model.availability(p.folder, state.file.Name)
|
||||
|
||||
@@ -210,7 +210,7 @@ func TestCopierFinder(t *testing.T) {
|
||||
finisherChan := make(chan *sharedPullerState, 1)
|
||||
|
||||
// Run a single fetcher routine
|
||||
go p.copierRoutine(copyChan, pullChan, finisherChan)
|
||||
go p.copierRoutine(copyChan, pullChan, finisherChan, false)
|
||||
|
||||
p.handleFile(requiredFile, copyChan, finisherChan)
|
||||
|
||||
@@ -250,3 +250,125 @@ func TestCopierFinder(t *testing.T) {
|
||||
|
||||
os.Remove(tempFile)
|
||||
}
|
||||
|
||||
// Test that updating a file removes it's old blocks from the blockmap
|
||||
func TestCopierCleanup(t *testing.T) {
|
||||
iterFn := func(folder, file string, index uint32) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"}
|
||||
cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}}
|
||||
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
// Create a file
|
||||
file := protocol.FileInfo{
|
||||
Name: "test",
|
||||
Flags: 0,
|
||||
Modified: 0,
|
||||
Blocks: []protocol.BlockInfo{blocks[0]},
|
||||
}
|
||||
|
||||
// Add file to index
|
||||
m.updateLocal("default", file)
|
||||
|
||||
if !m.finder.Iterate(blocks[0].Hash, iterFn) {
|
||||
t.Error("Expected block not found")
|
||||
}
|
||||
|
||||
file.Blocks = []protocol.BlockInfo{blocks[1]}
|
||||
file.Version++
|
||||
// Update index (removing old blocks)
|
||||
m.updateLocal("default", file)
|
||||
|
||||
if m.finder.Iterate(blocks[0].Hash, iterFn) {
|
||||
t.Error("Unexpected block found")
|
||||
}
|
||||
|
||||
if !m.finder.Iterate(blocks[1].Hash, iterFn) {
|
||||
t.Error("Expected block not found")
|
||||
}
|
||||
|
||||
file.Blocks = []protocol.BlockInfo{blocks[0]}
|
||||
file.Version++
|
||||
// Update index (removing old blocks)
|
||||
m.updateLocal("default", file)
|
||||
|
||||
if !m.finder.Iterate(blocks[0].Hash, iterFn) {
|
||||
t.Error("Unexpected block found")
|
||||
}
|
||||
|
||||
if m.finder.Iterate(blocks[1].Hash, iterFn) {
|
||||
t.Error("Expected block not found")
|
||||
}
|
||||
}
|
||||
|
||||
// On the 10th iteration, we start hashing the content which we receive by
|
||||
// following blockfinder's instructions. Make sure that the copier routine
|
||||
// hashes the content when asked, and pulls if it fails to find the block.
|
||||
func TestLastResortPulling(t *testing.T) {
|
||||
fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"}
|
||||
cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}}
|
||||
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
// Add a file to index (with the incorrect block representation, as content
|
||||
// doesn't actually match the block list)
|
||||
file := protocol.FileInfo{
|
||||
Name: "empty",
|
||||
Flags: 0,
|
||||
Modified: 0,
|
||||
Blocks: []protocol.BlockInfo{blocks[0]},
|
||||
}
|
||||
m.updateLocal("default", file)
|
||||
|
||||
// Pretend that we are handling a new file of the same content but
|
||||
// with a different name (causing to copy that particular block)
|
||||
file.Name = "newfile"
|
||||
|
||||
iterFn := func(folder, file string, index uint32) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check that that particular block is there
|
||||
if !m.finder.Iterate(blocks[0].Hash, iterFn) {
|
||||
t.Error("Expected block not found")
|
||||
}
|
||||
|
||||
p := Puller{
|
||||
folder: "default",
|
||||
dir: "testdata",
|
||||
model: m,
|
||||
}
|
||||
|
||||
copyChan := make(chan copyBlocksState)
|
||||
pullChan := make(chan pullBlockState, 1)
|
||||
finisherChan := make(chan *sharedPullerState, 1)
|
||||
|
||||
// Run a single copier routine with checksumming enabled
|
||||
go p.copierRoutine(copyChan, pullChan, finisherChan, true)
|
||||
|
||||
p.handleFile(file, copyChan, finisherChan)
|
||||
|
||||
// Copier should hash empty file, realise that the region it has read
|
||||
// doesn't match the hash which was advertised by the block map, fix it
|
||||
// and ask to pull the block.
|
||||
<-pullChan
|
||||
|
||||
// Verify that it did fix the incorrect hash.
|
||||
if m.finder.Iterate(blocks[0].Hash, iterFn) {
|
||||
t.Error("Found unexpected block")
|
||||
}
|
||||
|
||||
if !m.finder.Iterate(scanner.SHA256OfNothing, iterFn) {
|
||||
t.Error("Expected block not found")
|
||||
}
|
||||
|
||||
(<-finisherChan).fd.Close()
|
||||
os.Remove(filepath.Join("testdata", defTempNamer.TempName("newfile")))
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func (s *sharedPullerState) tempFile() (*os.File, error) {
|
||||
if info, err := os.Stat(dir); err != nil {
|
||||
s.earlyCloseLocked("dst stat dir", err)
|
||||
return nil, err
|
||||
} else if info.Mode()&04 == 0 {
|
||||
} else if info.Mode()&0200 == 0 {
|
||||
err := os.Chmod(dir, 0755)
|
||||
if err == nil {
|
||||
defer func() {
|
||||
@@ -136,7 +136,6 @@ func (s *sharedPullerState) earlyCloseLocked(context string, err error) {
|
||||
s.err = err
|
||||
if s.fd != nil {
|
||||
s.fd.Close()
|
||||
os.Remove(s.tempName)
|
||||
}
|
||||
s.closed = true
|
||||
}
|
||||
|
||||
@@ -15,7 +15,11 @@
|
||||
|
||||
package model
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSourceFileOK(t *testing.T) {
|
||||
s := sharedPullerState{
|
||||
@@ -61,3 +65,23 @@ func TestSourceFileBad(t *testing.T) {
|
||||
t.Fatal("Unexpected nil failed()")
|
||||
}
|
||||
}
|
||||
|
||||
// Test creating temporary file inside read-only directory
|
||||
func TestReadOnlyDir(t *testing.T) {
|
||||
s := sharedPullerState{
|
||||
tempName: "testdata/read_only_dir/.temp_name",
|
||||
}
|
||||
|
||||
// Ensure dir is read-only (git doesn't store full permissions)
|
||||
os.Chmod(filepath.Dir(s.tempName), 0555)
|
||||
|
||||
fd, err := s.tempFile()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if fd == nil {
|
||||
t.Fatal("Unexpected nil fd")
|
||||
}
|
||||
|
||||
s.earlyClose("Test done", nil)
|
||||
}
|
||||
|
||||
0
internal/model/testdata/read_only_dir/a
vendored
Normal file
0
internal/model/testdata/read_only_dir/a
vendored
Normal file
@@ -66,7 +66,7 @@ func Rename(from, to string) error {
|
||||
// containing `path` is writable for the duration of the call.
|
||||
func InWritableDir(fn func(string) error, path string) error {
|
||||
dir := filepath.Dir(path)
|
||||
if info, err := os.Stat(dir); err == nil && info.IsDir() && info.Mode()&04 == 0 {
|
||||
if info, err := os.Stat(dir); err == nil && info.IsDir() && info.Mode()&0200 == 0 {
|
||||
// A non-writeable directory (for this user; we assume that's the
|
||||
// relevant part). Temporarily change the mode so we can delete the
|
||||
// file or directory inside it.
|
||||
|
||||
57
internal/osutil/replacingwriter.go
Normal file
57
internal/osutil/replacingwriter.go
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package osutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
type ReplacingWriter struct {
|
||||
Writer io.Writer
|
||||
From byte
|
||||
To []byte
|
||||
}
|
||||
|
||||
func (w ReplacingWriter) Write(bs []byte) (int, error) {
|
||||
var n, written int
|
||||
var err error
|
||||
|
||||
newlineIdx := bytes.IndexByte(bs, w.From)
|
||||
for newlineIdx >= 0 {
|
||||
n, err = w.Writer.Write(bs[:newlineIdx])
|
||||
written += n
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(w.To) > 0 {
|
||||
n, err := w.Writer.Write(w.To)
|
||||
if n == len(w.To) {
|
||||
written++
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
bs = bs[newlineIdx+1:]
|
||||
newlineIdx = bytes.IndexByte(bs, w.From)
|
||||
}
|
||||
|
||||
n, err = w.Writer.Write(bs)
|
||||
written += n
|
||||
|
||||
return written, err
|
||||
}
|
||||
53
internal/osutil/replacingwriter_test.go
Normal file
53
internal/osutil/replacingwriter_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package osutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testcases = []struct {
|
||||
from byte
|
||||
to []byte
|
||||
a, b string
|
||||
}{
|
||||
{'\n', []byte{'\r', '\n'}, "", ""},
|
||||
{'\n', []byte{'\r', '\n'}, "foo", "foo"},
|
||||
{'\n', []byte{'\r', '\n'}, "foo\n", "foo\r\n"},
|
||||
{'\n', []byte{'\r', '\n'}, "foo\nbar", "foo\r\nbar"},
|
||||
{'\n', []byte{'\r', '\n'}, "foo\nbar\nbaz", "foo\r\nbar\r\nbaz"},
|
||||
{'\n', []byte{'\r', '\n'}, "\nbar", "\r\nbar"},
|
||||
{'o', []byte{'x', 'l', 'r'}, "\nfoo", "\nfxlrxlr"},
|
||||
{'o', nil, "\nfoo", "\nf"},
|
||||
{'f', []byte{}, "\nfoo", "\noo"},
|
||||
}
|
||||
|
||||
func TestReplacingWriter(t *testing.T) {
|
||||
for _, tc := range testcases {
|
||||
var buf bytes.Buffer
|
||||
w := ReplacingWriter{
|
||||
Writer: &buf,
|
||||
From: tc.from,
|
||||
To: tc.to,
|
||||
}
|
||||
fmt.Fprint(w, tc.a)
|
||||
if buf.String() != tc.b {
|
||||
t.Errorf("%q != %q", buf.String(), tc.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -137,8 +137,8 @@ func (o *ClusterConfigMessage) GetOption(key string) string {
|
||||
}
|
||||
|
||||
type Folder struct {
|
||||
ID string // max:64
|
||||
Devices []Device // max:64
|
||||
ID string // max:64
|
||||
Devices []Device
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
|
||||
@@ -44,20 +44,28 @@ func (o IndexMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o IndexMessage) MarshalXDR() []byte {
|
||||
func (o IndexMessage) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o IndexMessage) AppendXDR(bs []byte) []byte {
|
||||
func (o IndexMessage) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o IndexMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.Folder) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Folder); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 64)
|
||||
}
|
||||
xw.WriteString(o.Folder)
|
||||
xw.WriteUint32(uint32(len(o.Files)))
|
||||
@@ -142,20 +150,28 @@ func (o FileInfo) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o FileInfo) MarshalXDR() []byte {
|
||||
func (o FileInfo) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o FileInfo) AppendXDR(bs []byte) []byte {
|
||||
func (o FileInfo) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o FileInfo) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.Name) > 8192 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Name); l > 8192 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 8192)
|
||||
}
|
||||
xw.WriteString(o.Name)
|
||||
xw.WriteUint32(o.Flags)
|
||||
@@ -244,20 +260,28 @@ func (o FileInfoTruncated) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) MarshalXDR() []byte {
|
||||
func (o FileInfoTruncated) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) AppendXDR(bs []byte) []byte {
|
||||
func (o FileInfoTruncated) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.Name) > 8192 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Name); l > 8192 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 8192)
|
||||
}
|
||||
xw.WriteString(o.Name)
|
||||
xw.WriteUint32(o.Flags)
|
||||
@@ -318,21 +342,29 @@ func (o BlockInfo) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o BlockInfo) MarshalXDR() []byte {
|
||||
func (o BlockInfo) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o BlockInfo) AppendXDR(bs []byte) []byte {
|
||||
func (o BlockInfo) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o BlockInfo) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o BlockInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint32(o.Size)
|
||||
if len(o.Hash) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Hash); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Hash", l, 64)
|
||||
}
|
||||
xw.WriteBytes(o.Hash)
|
||||
return xw.Tot(), xw.Error()
|
||||
@@ -396,24 +428,32 @@ func (o RequestMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o RequestMessage) MarshalXDR() []byte {
|
||||
func (o RequestMessage) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o RequestMessage) AppendXDR(bs []byte) []byte {
|
||||
func (o RequestMessage) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o RequestMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.Folder) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Folder); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 64)
|
||||
}
|
||||
xw.WriteString(o.Folder)
|
||||
if len(o.Name) > 8192 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Name); l > 8192 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 8192)
|
||||
}
|
||||
xw.WriteString(o.Name)
|
||||
xw.WriteUint64(o.Offset)
|
||||
@@ -466,15 +506,23 @@ func (o ResponseMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o ResponseMessage) MarshalXDR() []byte {
|
||||
func (o ResponseMessage) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o ResponseMessage) AppendXDR(bs []byte) []byte {
|
||||
func (o ResponseMessage) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o ResponseMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o ResponseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
@@ -545,28 +593,36 @@ func (o ClusterConfigMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o ClusterConfigMessage) MarshalXDR() []byte {
|
||||
func (o ClusterConfigMessage) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o ClusterConfigMessage) AppendXDR(bs []byte) []byte {
|
||||
func (o ClusterConfigMessage) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o ClusterConfigMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.ClientName) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.ClientName); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ClientName", l, 64)
|
||||
}
|
||||
xw.WriteString(o.ClientName)
|
||||
if len(o.ClientVersion) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.ClientVersion); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ClientVersion", l, 64)
|
||||
}
|
||||
xw.WriteString(o.ClientVersion)
|
||||
if len(o.Folders) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Folders); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folders", l, 64)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Folders)))
|
||||
for i := range o.Folders {
|
||||
@@ -575,8 +631,8 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
}
|
||||
if len(o.Options) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Options); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Options", l, 64)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
@@ -604,7 +660,7 @@ func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
o.ClientVersion = xr.ReadStringMax(64)
|
||||
_FoldersSize := int(xr.ReadUint32())
|
||||
if _FoldersSize > 64 {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
return xdr.ElementSizeExceeded("Folders", _FoldersSize, 64)
|
||||
}
|
||||
o.Folders = make([]Folder, _FoldersSize)
|
||||
for i := range o.Folders {
|
||||
@@ -612,7 +668,7 @@ func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
}
|
||||
_OptionsSize := int(xr.ReadUint32())
|
||||
if _OptionsSize > 64 {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
@@ -644,7 +700,7 @@ Folder Structure:
|
||||
|
||||
struct Folder {
|
||||
string ID<64>;
|
||||
Device Devices<64>;
|
||||
Device Devices<>;
|
||||
}
|
||||
|
||||
*/
|
||||
@@ -654,25 +710,30 @@ func (o Folder) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o Folder) MarshalXDR() []byte {
|
||||
func (o Folder) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o Folder) AppendXDR(bs []byte) []byte {
|
||||
func (o Folder) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o Folder) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.ID) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.ID); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 64)
|
||||
}
|
||||
xw.WriteString(o.ID)
|
||||
if len(o.Devices) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Devices)))
|
||||
for i := range o.Devices {
|
||||
_, err := o.Devices[i].encodeXDR(xw)
|
||||
@@ -697,9 +758,6 @@ func (o *Folder) UnmarshalXDR(bs []byte) error {
|
||||
func (o *Folder) decodeXDR(xr *xdr.Reader) error {
|
||||
o.ID = xr.ReadStringMax(64)
|
||||
_DevicesSize := int(xr.ReadUint32())
|
||||
if _DevicesSize > 64 {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
}
|
||||
o.Devices = make([]Device, _DevicesSize)
|
||||
for i := range o.Devices {
|
||||
(&o.Devices[i]).decodeXDR(xr)
|
||||
@@ -741,20 +799,28 @@ func (o Device) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o Device) MarshalXDR() []byte {
|
||||
func (o Device) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o Device) AppendXDR(bs []byte) []byte {
|
||||
func (o Device) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o Device) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.ID) > 32 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.ID); l > 32 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 32)
|
||||
}
|
||||
xw.WriteBytes(o.ID)
|
||||
xw.WriteUint32(o.Flags)
|
||||
@@ -813,24 +879,32 @@ func (o Option) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o Option) MarshalXDR() []byte {
|
||||
func (o Option) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o Option) AppendXDR(bs []byte) []byte {
|
||||
func (o Option) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o Option) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Option) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.Key) > 64 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Key); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Key", l, 64)
|
||||
}
|
||||
xw.WriteString(o.Key)
|
||||
if len(o.Value) > 1024 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Value); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Value", l, 1024)
|
||||
}
|
||||
xw.WriteString(o.Value)
|
||||
return xw.Tot(), xw.Error()
|
||||
@@ -879,20 +953,28 @@ func (o CloseMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o CloseMessage) MarshalXDR() []byte {
|
||||
func (o CloseMessage) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o CloseMessage) AppendXDR(bs []byte) []byte {
|
||||
func (o CloseMessage) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o CloseMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o CloseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if len(o.Reason) > 1024 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.Reason); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Reason", l, 1024)
|
||||
}
|
||||
xw.WriteString(o.Reason)
|
||||
return xw.Tot(), xw.Error()
|
||||
@@ -933,15 +1015,23 @@ func (o EmptyMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o EmptyMessage) MarshalXDR() []byte {
|
||||
func (o EmptyMessage) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o EmptyMessage) AppendXDR(bs []byte) []byte {
|
||||
func (o EmptyMessage) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o EmptyMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o EmptyMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
@@ -126,7 +126,7 @@ type hdrMsg struct {
|
||||
}
|
||||
|
||||
type encodable interface {
|
||||
AppendXDR([]byte) []byte
|
||||
AppendXDR([]byte) ([]byte, error)
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -483,7 +483,11 @@ func (c *rawConnection) writerLoop() {
|
||||
case hm := <-c.outbox:
|
||||
if hm.msg != nil {
|
||||
// Uncompressed message in uncBuf
|
||||
uncBuf = hm.msg.AppendXDR(uncBuf[:0])
|
||||
uncBuf, err = hm.msg.AppendXDR(uncBuf[:0])
|
||||
if err != nil {
|
||||
c.close(err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(uncBuf) >= c.compressionThreshold {
|
||||
// Use compression for large messages
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
@@ -369,7 +370,7 @@ func testMarshal(t *testing.T, prefix string, m1, m2 message) bool {
|
||||
}
|
||||
|
||||
_, err := m1.EncodeXDR(&buf)
|
||||
if err == xdr.ErrElementSizeExceeded {
|
||||
if err != nil && strings.Contains(err.Error(), "exceeds size") {
|
||||
return true
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
)
|
||||
|
||||
var sha256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}
|
||||
var SHA256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}
|
||||
|
||||
// Blocks returns the blockwise hash of the reader.
|
||||
func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, error) {
|
||||
@@ -61,7 +61,7 @@ func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, e
|
||||
blocks = append(blocks, protocol.BlockInfo{
|
||||
Offset: 0,
|
||||
Size: 0,
|
||||
Hash: sha256OfNothing,
|
||||
Hash: SHA256OfNothing,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"code.google.com/p/go.text/unicode/norm"
|
||||
|
||||
@@ -113,8 +114,8 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
|
||||
return nil
|
||||
}
|
||||
|
||||
if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stversions" ||
|
||||
sn == ".stfolder" || (w.Matcher != nil && w.Matcher.Match(rn)) {
|
||||
if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stfolder" ||
|
||||
strings.HasPrefix(rn, ".stversions") || (w.Matcher != nil && w.Matcher.Match(rn)) {
|
||||
// An ignored file
|
||||
if debug {
|
||||
l.Debugln("ignored:", rn)
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -44,12 +44,37 @@ type IGD struct {
|
||||
localIPAddress string
|
||||
}
|
||||
|
||||
// The InternetGatewayDevice's UUID.
|
||||
func (n *IGD) UUID() string {
|
||||
return n.uuid
|
||||
}
|
||||
|
||||
// The InternetGatewayDevice's friendly name.
|
||||
func (n *IGD) FriendlyName() string {
|
||||
return n.friendlyName
|
||||
}
|
||||
|
||||
// The InternetGatewayDevice's friendly identifier (friendly name + IP address).
|
||||
func (n *IGD) FriendlyIdentifier() string {
|
||||
return "'" + n.FriendlyName() + "' (" + strings.Split(n.URL().Host, ":")[0] + ")"
|
||||
}
|
||||
|
||||
// The URL of the InternetGatewayDevice's root device description.
|
||||
func (n *IGD) URL() *url.URL {
|
||||
return n.url
|
||||
}
|
||||
|
||||
// A container for relevant properties of a UPnP service of an IGD.
|
||||
type IGDService struct {
|
||||
serviceID string
|
||||
serviceURL string
|
||||
serviceURN string
|
||||
}
|
||||
|
||||
func (s *IGDService) ID() string {
|
||||
return s.serviceID
|
||||
}
|
||||
|
||||
type Protocol string
|
||||
|
||||
const (
|
||||
@@ -58,6 +83,7 @@ const (
|
||||
)
|
||||
|
||||
type upnpService struct {
|
||||
ServiceID string `xml:"serviceId"`
|
||||
ServiceType string `xml:"serviceType"`
|
||||
ControlURL string `xml:"controlURL"`
|
||||
}
|
||||
@@ -94,7 +120,7 @@ func Discover() []*IGD {
|
||||
l.Debugln("[" + resultDevice.uuid + "]")
|
||||
|
||||
for _, resultService := range resultDevice.services {
|
||||
l.Debugln("* " + resultService.serviceURL)
|
||||
l.Debugln("* [" + resultService.serviceID + "] " + resultService.serviceURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -184,6 +210,17 @@ Mx: %d
|
||||
|
||||
// Collect our results from the result handlers using the result channel
|
||||
for result := range resultChannel {
|
||||
// Check for existing results (some routers send multiple response packets)
|
||||
for _, existingResult := range results {
|
||||
if existingResult.uuid == result.uuid {
|
||||
if debug {
|
||||
l.Debugln("Already processed device with UUID", existingResult.uuid, "continuing...")
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// No existing results, okay to append
|
||||
results = append(results, result)
|
||||
}
|
||||
|
||||
@@ -236,8 +273,7 @@ func handleSearchResponse(deviceType string, knownDevices []*IGD, resp []byte, l
|
||||
deviceUUID := strings.TrimLeft(strings.Split(deviceUSN, "::")[0], "uuid:")
|
||||
matched, err := regexp.MatchString("[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}", deviceUUID)
|
||||
if !matched {
|
||||
l.Infoln("Invalid IGD response: invalid device UUID " + deviceUUID)
|
||||
return
|
||||
l.Infoln("Invalid IGD response: invalid device UUID", deviceUUID, "(continuing anyway)")
|
||||
}
|
||||
|
||||
// Don't re-add devices that are already known
|
||||
@@ -399,7 +435,7 @@ func getIGDServices(rootURL string, device upnpDevice, wanDeviceURN string, wanC
|
||||
l.Debugln("[" + rootURL + "] Found " + service.ServiceType + " with URL " + u.String())
|
||||
}
|
||||
|
||||
service := IGDService{serviceURL: u.String(), serviceURN: service.ServiceType}
|
||||
service := IGDService{serviceID: service.ServiceID, serviceURL: u.String(), serviceURN: service.ServiceType}
|
||||
|
||||
result = append(result, service)
|
||||
}
|
||||
@@ -427,8 +463,8 @@ func replaceRawPath(u *url.URL, rp string) {
|
||||
u.RawQuery = q
|
||||
}
|
||||
|
||||
func soapRequest(url, device, function, message string) ([]byte, error) {
|
||||
tpl := ` <?xml version="1.0" ?>
|
||||
func soapRequest(url, service, function, message string) ([]byte, error) {
|
||||
tpl := `<?xml version="1.0" ?>
|
||||
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
|
||||
<s:Body>%s</s:Body>
|
||||
</s:Envelope>
|
||||
@@ -443,13 +479,14 @@ func soapRequest(url, device, function, message string) ([]byte, error) {
|
||||
}
|
||||
req.Header.Set("Content-Type", `text/xml; charset="utf-8"`)
|
||||
req.Header.Set("User-Agent", "syncthing/1.0")
|
||||
req.Header.Set("SOAPAction", fmt.Sprintf(`"%s#%s"`, device, function))
|
||||
req.Header.Set("SOAPAction", fmt.Sprintf(`"%s#%s"`, service, function))
|
||||
req.Header.Set("Connection", "Close")
|
||||
req.Header.Set("Cache-Control", "no-cache")
|
||||
req.Header.Set("Pragma", "no-cache")
|
||||
|
||||
if debug {
|
||||
l.Debugln(req.Header.Get("SOAPAction"))
|
||||
l.Debugln("SOAP Request URL: " + url)
|
||||
l.Debugln("SOAP Action: " + req.Header.Get("SOAPAction"))
|
||||
l.Debugln("SOAP Request:\n\n" + body)
|
||||
}
|
||||
|
||||
@@ -474,7 +511,7 @@ func soapRequest(url, device, function, message string) ([]byte, error) {
|
||||
|
||||
// Add a port mapping to all relevant services on the specified InternetGatewayDevice.
|
||||
// Port mapping will fail and return an error if action is fails for _any_ of the relevant services.
|
||||
// For this reason, it is generally better to configure port mapping for each individual service instead.
|
||||
// For this reason, it is generally better to configure port mapping for each individual service instead.
|
||||
func (n *IGD) AddPortMapping(protocol Protocol, externalPort, internalPort int, description string, timeout int) error {
|
||||
for _, service := range n.services {
|
||||
err := service.AddPortMapping(n.localIPAddress, protocol, externalPort, internalPort, description, timeout)
|
||||
@@ -487,7 +524,7 @@ func (n *IGD) AddPortMapping(protocol Protocol, externalPort, internalPort int,
|
||||
|
||||
// Delete a port mapping from all relevant services on the specified InternetGatewayDevice.
|
||||
// Port mapping will fail and return an error if action is fails for _any_ of the relevant services.
|
||||
// For this reason, it is generally better to configure port mapping for each individual service instead.
|
||||
// For this reason, it is generally better to configure port mapping for each individual service instead.
|
||||
func (n *IGD) DeletePortMapping(protocol Protocol, externalPort int) error {
|
||||
for _, service := range n.services {
|
||||
err := service.DeletePortMapping(protocol, externalPort)
|
||||
@@ -498,26 +535,6 @@ func (n *IGD) DeletePortMapping(protocol Protocol, externalPort int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The InternetGatewayDevice's UUID.
|
||||
func (n *IGD) UUID() string {
|
||||
return n.uuid
|
||||
}
|
||||
|
||||
// The InternetGatewayDevice's friendly name.
|
||||
func (n *IGD) FriendlyName() string {
|
||||
return n.friendlyName
|
||||
}
|
||||
|
||||
// The InternetGatewayDevice's friendly identifier (friendly name + IP address).
|
||||
func (n *IGD) FriendlyIdentifier() string {
|
||||
return "'" + n.FriendlyName() + "' (" + strings.Split(n.URL().Host, ":")[0] + ")"
|
||||
}
|
||||
|
||||
// The URL of the InternetGatewayDevice's root device description.
|
||||
func (n *IGD) URL() *url.URL {
|
||||
return n.url
|
||||
}
|
||||
|
||||
type soapGetExternalIPAddressResponseEnvelope struct {
|
||||
XMLName xml.Name
|
||||
Body soapGetExternalIPAddressResponseBody `xml:"Body"`
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/logger"
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -44,7 +44,7 @@ const (
|
||||
|
||||
var env = []string{
|
||||
"HOME=.",
|
||||
"STTRACE=model",
|
||||
"STTRACE=model,protocol",
|
||||
"STGUIAPIKEY=" + apiKey,
|
||||
"STNORESTART=1",
|
||||
"STPERFSTATS=1",
|
||||
@@ -121,6 +121,32 @@ func (p *syncthingProcess) get(path string) (*http.Response, error) {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (p *syncthingProcess) post(path string, data io.Reader) (*http.Response, error) {
|
||||
client := &http.Client{
|
||||
Timeout: 2 * time.Second,
|
||||
Transport: &http.Transport{
|
||||
DisableKeepAlives: true,
|
||||
},
|
||||
}
|
||||
req, err := http.NewRequest("POST", fmt.Sprintf("http://127.0.0.1:%d%s", p.port, path), data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.apiKey != "" {
|
||||
req.Header.Add("X-API-Key", p.apiKey)
|
||||
}
|
||||
if p.csrfToken != "" {
|
||||
req.Header.Add("X-CSRF-Token", p.csrfToken)
|
||||
}
|
||||
req.Header.Add("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (p *syncthingProcess) peerCompletion() (map[string]int, error) {
|
||||
resp, err := p.get("/rest/debug/peerCompletion")
|
||||
if err != nil {
|
||||
|
||||
@@ -1,8 +1,105 @@
|
||||
<configuration version="6">
|
||||
<folder id="default" path="s2" ro="false" rescanIntervalS="15" ignorePerms="false">
|
||||
<device id="AF2HXQA-DOKKIMI-PKOG4RE-E25UTJ7-PSGQ7T5-WEY7YT5-SG6N7W5-NNA4BQM"></device>
|
||||
<device id="AND7GW6-DZN66F2-TKYJSTC-ACI7MYT-75X4T63-SE5S4MQ-GBSDHDL-4CLHJA6"></device>
|
||||
<device id="ATFGYHM-ZTA5Z7B-NYPY4OK-VJWDD6Y-SLCDKED-L2VDESD-TUBLMLH-CHAJEAI"></device>
|
||||
<device id="AUI2JXT-PXK3PMB-YATDUNI-RIGBTKQ-VBUJLUH-JLWZEEZ-3UV5X2G-AWKOVQH"></device>
|
||||
<device id="AZCVBDG-6GATBA3-XX7HOH3-LY5R3L2-SPCQOIT-KFVLUCC-GQ3KJTJ-ZSP3RAQ"></device>
|
||||
<device id="A4W7JO6-IM2ZWBX-AYYLCEP-NFTJKQV-2QRGZJ4-QGSP3HI-R7UHNYF-GAKDLAS"></device>
|
||||
<device id="BDZ662J-Q5IXRE3-CEPHNIM-V5D76GA-U26VZRI-UHTDERX-UABQEQ3-CKE5CQD"></device>
|
||||
<device id="BP6X6FF-2SEA3QR-RW2Y55O-B4X7LR6-5EXGA6D-KM725CC-CIDD7ZZ-7VNKXAT"></device>
|
||||
<device id="CDUKUIK-56DOHM3-M7VHCL6-5HGTZHB-QULOOBK-3DK6QXC-4DDDXKS-H5OOZAQ"></device>
|
||||
<device id="CJRFL4X-PBD3H6K-2GCPPYS-JCZ73GF-SN63HMU-WGYMZQZ-W2AWEDG-AZ623QB"></device>
|
||||
<device id="DDGR3XN-CKDGXS4-VR6G2T3-GQJVO7E-XDZGWUJ-733J3G7-NMGQFS5-ETFWKA6"></device>
|
||||
<device id="DFAWRIU-3MSLR65-L7DZE53-UNR6KEX-FTJZJ2B-NVYEPQH-ZYDYU6D-ROAKFAJ"></device>
|
||||
<device id="DMFDJGV-UDOMXZU-FKKYXCT-BEGE7K7-OVAFMQW-NBLQ46C-J6VORML-27T3SA2"></device>
|
||||
<device id="DM4ZS3J-WF2WJGP-YNPTRRZ-MKUEDUC-7N5Q6EX-IHLR7DM-VHMP3FE-KXGHDAG"></device>
|
||||
<device id="DPJWHYQ-S7N7D4C-P4CY65M-ZJ6MFOD-OR5CF7N-2TYSQ6T-IZQDK67-NYDMCQR"></device>
|
||||
<device id="DYVI4HB-DO3WULH-YC4CVUV-KOHXVYH-MSRQH5O-P4JX5T4-WCVDJK2-QPWZPQQ"></device>
|
||||
<device id="EAELU3R-WFMEOHV-VUF3ZQU-T6QLRUQ-S7HFGGG-447PNIB-Z32VNM6-3XRG2QM"></device>
|
||||
<device id="EHYW5K4-WLONYO4-LVJFY7T-7Y6W7EA-N3O5ADS-NPXYFBA-TOSC3X2-J4BYNAV"></device>
|
||||
<device id="E3KQXYD-ST7GNHR-EQVEBUK-I7X4NFR-J6JEGSL-XSU3236-PXDEOYF-SIGVJQD"></device>
|
||||
<device id="FDN276E-J7AQ32B-Y4CBY3Q-TWCQ2RV-QQTQ5NJ-NLLFCGR-UOXSYQN-VLHSHAK"></device>
|
||||
<device id="FOUTNQ4-M3X4ZL6-VPG3ZD6-3EFUKTT-XVNS6N6-G2E56OA-LYBDCXY-S52OLQJ"></device>
|
||||
<device id="F3CJABZ-VOJPUJJ-A6V6J2O-PQF32IV-5VZY45B-ASPFXOE-OMZOBYX-IIVGJQR"></device>
|
||||
<device id="F722I3J-JNESQSZ-NWHMLX7-OX5YQPY-Q4P7AKS-RAVRYFO-LUFHTBF-MV5ZEAW"></device>
|
||||
<device id="GRCZC6E-PRZ5EPJ-5XZK2WL-K7PBC4D-EU7J5K3-YHME4GD-AKHMR3U-HTGE3AJ"></device>
|
||||
<device id="GTZORAO-4U2BMRT-NJ7VGJV-DEFE7X4-GK7GOKP-56NADQB-DKRY64G-GK4JSAN"></device>
|
||||
<device id="HHKXJNI-2FOFGMU-KT55UTA-J7CDGBX-KE3RMHK-NDVQYCB-DPUAFTN-C62LTAJ"></device>
|
||||
<device id="HOA63TU-7NASFTP-7JKRNDD-CEBCBTL-3UB6GEH-LE3TBF6-UHBJUX2-B53JYQO"></device>
|
||||
<device id="H4IJPSS-SGRGAHF-4HVWJXK-AQ2EV4C-EMWQWG4-63ORWGX-CTBONEL-3IO2VA2"></device>
|
||||
<device id="IEDANYN-UEK237R-Z6J75BD-3SK2RPX-66FJ2F2-T347HN3-KYY2PW5-47ZAIAE"></device>
|
||||
<device id="IHFBE2Q-VJKGLJN-PYEX3SL-ORFUM6B-GUDQVUN-32TZFZO-GMYDUFI-VBICSA3"></device>
|
||||
<device id="ITFMT23-LPJ4LN3-Y6HQUYG-GO47ZU7-PO6SUJ6-7UO2JNG-SKI5CTG-WJCK2AP"></device>
|
||||
<device id="IWWUWMQ-3KMUGCT-JJII2HN-J6NN6OW-43AM5TT-MHJXRCR-D7MJVWE-HLAD4QV"></device>
|
||||
<device id="IW5I76B-NKCHGJ5-HAQNOBW-LRHKYSH-54VEQQC-HQAXHVC-DX4ME3O-C7XGUAQ"></device>
|
||||
<device id="I2VEKOT-P63JCHG-LKHJAOJ-XL4VNH2-Y7WQGAW-JKQQQQY-QHIOB77-Q7WLYAW"></device>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
|
||||
<device id="JWPMLRX-U5Q3LSU-LZJCM2Z-GIZLQO2-UBSZKUO-INZXYTZ-7E6PDK2-PPDQWA4"></device>
|
||||
<device id="KBBKAQ6-VINPVTG-LXSDXXF-T67N5DU-ONRJI3P-WIAZBY7-UCMAJJ4-274ZBA5"></device>
|
||||
<device id="KNOUKFP-JEJTAZO-FUI7KMO-4PQJOMB-CX255W3-U3SHR2I-LJ4LBHL-AY6BCA5"></device>
|
||||
<device id="KOUXI7Y-6PWCH2V-552D2Y4-6D3HQJZ-CEKC5IY-XVUV2KC-452GVYG-AJN4TQB"></device>
|
||||
<device id="K3Z2ESL-SKLK2UF-CLA2RLL-DWQVXMU-7PQQ5MV-2BWWSCI-MAOF7YR-277YSAL"></device>
|
||||
<device id="LW7D6SQ-3UIMICX-EEPGTNA-P6NNR2M-35WLLB5-QKW5KWV-3PGTWA3-QM65DA4"></device>
|
||||
<device id="LYYJOWF-WZWZ3Z6-O4JRBOM-44LPK33-YDB5EFZ-CTX2FNI-C5BNMOH-ZBREKAK"></device>
|
||||
<device id="L5GRUQX-3FPGQG6-CWJ3CQN-QJO7BFV-FDXYBXG-AKAZ465-3D37XCR-SHPHDQS"></device>
|
||||
<device id="L7L37HS-3STS6WA-G76FKTT-FAIFH4G-46EORMF-6PA5JFZ-RW4QJDM-SDSBIAM"></device>
|
||||
<device id="MNLYDAL-BTM5JJL-PYOD2C6-MWKXW5N-I7KGYQJ-TAJ2NQE-K2MI7C5-RIM7CA7"></device>
|
||||
<device id="NBO5E2S-OIKGPUN-AXQSTWG-YNSZTQD-YSQ3JGC-BAPD72J-5BUYGBC-4U75XQK"></device>
|
||||
<device id="NWKOEM4-T62TE6V-H4X75L2-GX75Q3C-XCA3OS7-X7MHSVV-IOS5V3U-6Y4IAAZ"></device>
|
||||
<device id="NZKXQ5V-GXMQEY4-77E6DUN-UJK5O3O-633AEIQ-2ZB6FJL-ZPT3AFY-IFMTVAS"></device>
|
||||
<device id="OYRDGZZ-BBBYTOG-PPOEKNF-RFOU5YJ-LOJXGDC-PZFHUMW-EHKAEGM-ZI5O6QX"></device>
|
||||
<device id="O5BUAKV-5PXZUBI-TGCAIHE-646RUCT-5KSKVSY-NMVEZNC-XWIDH4M-N4FWJQK"></device>
|
||||
<device id="PR7FK7P-O57IUUQ-L7NVOBM-BJ6B6HV-A4Q2ZM7-3ZB7YPB-2CEI225-VNE27QZ"></device>
|
||||
<device id="P74TJEI-WJADRBZ-J5ZWR3D-WNGTH7F-QNTKVBW-QJSYCKN-VSXWFFY-BFZXZQH"></device>
|
||||
<device id="QLQ4VDH-DCRQOCW-45ZU3NH-KGVNZ4K-RY5VH5M-E54B35V-3GDPECV-FATMQQN"></device>
|
||||
<device id="QTSMMK3-7LDPTPP-NFQJBKI-MXPNRLC-RBJ46YH-EUOIHQX-X5OBHY5-DSEHQAB"></device>
|
||||
<device id="RNEPSQ3-XBYBJX2-DC63XIJ-KOV6OWX-3RBQ245-SDO5NPG-7DNYUIJ-V3BVBAR"></device>
|
||||
<device id="RSPOEAQ-WEOFVAO-7F5OHVV-FPEK2XA-KG23ACJ-ISANINA-EVNGPFM-6GUGUAY"></device>
|
||||
<device id="SMFBW3X-ZG7GEVQ-Q5EY2HV-QTTJ2AK-CMKRW7K-4ZYN5KF-D5LHXBA-J2WJPQA"></device>
|
||||
<device id="SRC4EBU-57YZZF2-U72JRON-LWMXERT-NL3R4YY-7T3H6FI-VPK7KMQ-Q7LGBQE"></device>
|
||||
<device id="SVUEDXK-GOM6UEO-BK725GI-4R4GRA2-SL6EJQ7-2TQZDF4-IQEM34P-P32HLAN"></device>
|
||||
<device id="S257JIU-2LMK7R6-J4EPHQE-DHBSA2V-5S45UV5-NQNI6G4-UUPXTWK-5L645AB"></device>
|
||||
<device id="S475AJS-UF2H3WA-ZKSFH7J-YJC6Q4P-L6O7NDM-Q7LJKVE-CQ2DTVS-N6TY6QJ"></device>
|
||||
<device id="TE3RAAB-2F65ZF2-MHDELWZ-YJHF72E-5S7HGWF-NJ6CKZK-3LJHQ72-YGOPOAR"></device>
|
||||
<device id="TJEEOEJ-ADKANZG-T3RYCWO-FYJW3GQ-TX7FDLE-I3X3QLX-TQ37HW4-DQQZEAK"></device>
|
||||
<device id="TN64K4J-D7S33RD-55TMUBA-D2VKVRW-FAPQRGY-2VXAMMZ-F2RBYC7-DJAXFAI"></device>
|
||||
<device id="TP4ORAM-C2K6L3Z-3SFLGQF-NILNVZQ-3VSHEGT-LLKQU7K-BV66VQZ-TP65EAS"></device>
|
||||
<device id="T345SQH-CZNXG3Q-WVMQDHY-S524RKJ-C3AP767-QTYGY3X-GASFDCK-LURRWAC"></device>
|
||||
<device id="UMMN7QY-46JHZ4X-UJ5TWWV-EU6OKDA-4GM76QK-UUJJLPH-27H6P22-XQALWQ6"></device>
|
||||
<device id="UZPBG42-GSUBWXD-ALOS27M-AQAWWWJ-XP5DA46-3GWEZ4U-I5I2JAZ-5VPHYQI"></device>
|
||||
<device id="VU764LI-72YK4KV-APLQAXJ-4Q46MTT-FCQFB33-JTCH4E5-G7OJRFI-YGPJYAJ"></device>
|
||||
<device id="WDGE4EI-CO3MSS6-JM5ASRP-YHBCKBN-EWAT5KQ-GGQQTZE-OSDTGH2-P2BE3AA"></device>
|
||||
<device id="WGURZJA-JLZYF3P-KRQDIAL-6RGUYSZ-UZEJ3DO-U6JOGTO-PYQRA7K-NOEFSQ2"></device>
|
||||
<device id="WLTEXSS-RUQSDSJ-FWYVE4T-S25G37F-4XPT7GI-VLBSHCS-SLHXBEF-QSKCQAK"></device>
|
||||
<device id="WNK25O3-CDDKOM6-VDJKB2C-GZ6L6GA-HI7WHXU-G2SCLLF-EEDJX6N-AYY5OQM"></device>
|
||||
<device id="WTA46HO-WMIMK2N-GMNLWSQ-UOZI2O7-JVG3YY2-FWXQ3NJ-NWG5PLX-RN26AAG"></device>
|
||||
<device id="XG6FGXG-CCB54FX-AVREF7W-YMR66ED-66H4QQD-KXK5K7C-DVVYKU2-GF77LQK"></device>
|
||||
<device id="XSWE7HJ-AHTZGEZ-UNH6ZSK-WZHRS2C-55LQZDR-7GLSE6Z-VWEDFWT-ZS4DCAK"></device>
|
||||
<device id="XXXMH6A-3J53GPW-7BVBR76-5UJNW3E-6H2ACDZ-DY46Z6T-FD7GRPE-RGIQFAE"></device>
|
||||
<device id="X2J2EIR-6MCQUSZ-DZU37TI-Y5S4QUN-JPTDKHT-ANBGXU5-YRD3EGF-6VOEHQG"></device>
|
||||
<device id="X3WHUR4-N5BSQ4T-YFC3A3S-KI6KXQ2-6GL66YV-G2YGVKY-LEKVIXY-M62C6A7"></device>
|
||||
<device id="X4EX7JM-CQSNCZ5-H5UNVUW-RNWT4U7-VPFKPB4-P65IWZC-N74SCAM-RLLFUAT"></device>
|
||||
<device id="YEKECF3-5YAC2EZ-ADYLARL-THNGWI3-ZFU6UNK-3KCFZWB-IRYFH6X-LO5ZHQC"></device>
|
||||
<device id="YP66HGG-CIWZ6N2-A3T4PXT-KP47YWK-3ZVW5UX-MGTZRHL-YNB22IK-IEEWNAY"></device>
|
||||
<device id="YRG374E-AB2PK5W-UKR2MOA-E43VSQF-ICO72FN-FDZDX7F-Q75UE2H-EUCIFQJ"></device>
|
||||
<device id="ZCZPFDT-GHJKTPJ-22WKCWP-2UO7X33-NUD2YAK-P6MDHN6-2JVAETG-JPXXLAR"></device>
|
||||
<device id="2MDVGTI-EGVOHIO-SPFMLV6-NRFP2EC-HWMLWVO-QVLLTC3-DSQPHMY-JDEIQQ5"></device>
|
||||
<device id="22UBH4D-HGCP3EW-EHVJTZ4-6PB2HOB-LDPOC3X-ONY6PA6-2DFNBIX-D3X5UQT"></device>
|
||||
<device id="3OQ6R42-O76KQJM-JYN7EGL-PWKZUPG-276FWG2-CP3CTV3-SM545L3-KTTH3AG"></device>
|
||||
<device id="3QW353X-J52M5LW-3DUY2Z5-SG5JU7Z-6NSJFAM-74YPGMA-VHL2W3N-5PX2JQO"></device>
|
||||
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
|
||||
<device id="4IB56JJ-IXYAHRI-6W75NTN-TEIMPRE-25NAG6F-MYGBJBG-JB33DXY-L2WINQK"></device>
|
||||
<device id="4JSTGIZ-Q2UJXIG-V6QT62A-FSRBKLF-J6FWGIY-PFIAFC3-7GP7DOY-RVNGKQU"></device>
|
||||
<device id="5BKVB65-I2E5UCX-DWWPLNP-IQ7VRZY-I3P42KF-ZKAF7XO-TQTCLVU-TH45GA3"></device>
|
||||
<device id="5OOL3EO-A5DTJ5N-CZHPYVP-YH74NL4-DE3O7AU-ZDFSM6D-KG5IUWA-WJJXBQL"></device>
|
||||
<device id="5RXIYVZ-NOEGGYD-TY4WQTH-U6FQ5IR-LJTW3P3-HFQBN6C-RC4AB5V-MO44PQV"></device>
|
||||
<device id="5WGZMYP-FNTWGLG-PQKEB5V-PJ4LEPL-53SRHBR-HHSHHLD-5KFL3P2-W6MTRQY"></device>
|
||||
<device id="6I5KLVE-VVRXZ55-Z3IQIZH-3FBHKTZ-6MIPAYO-V4QX6K7-7J432VD-T5TY6AU"></device>
|
||||
<device id="6R2BAIE-VRYZJXA-ZKJT4ZK-BQTXA46-VBWMLNY-PINTWYG-TS4Y6GZ-7Z2KVQG"></device>
|
||||
<device id="7BT3IUI-RYS757D-YJPBP6U-WPL2ZI6-CMICBLW-UDYDSVT-NG62TB5-PM2O3A6"></device>
|
||||
<device id="7TVCUYX-MLZH6GF-GDLVMJ6-REPXXUD-DCLPXP2-67HS7VR-QTTABOA-4ZXJOQD"></device>
|
||||
<versioning></versioning>
|
||||
<lenientMtimes>false</lenientMtimes>
|
||||
</folder>
|
||||
@@ -18,15 +115,306 @@
|
||||
<versioning></versioning>
|
||||
<lenientMtimes>false</lenientMtimes>
|
||||
</folder>
|
||||
<device id="AF2HXQA-DOKKIMI-PKOG4RE-E25UTJ7-PSGQ7T5-WEY7YT5-SG6N7W5-NNA4BQM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="AND7GW6-DZN66F2-TKYJSTC-ACI7MYT-75X4T63-SE5S4MQ-GBSDHDL-4CLHJA6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="ATFGYHM-ZTA5Z7B-NYPY4OK-VJWDD6Y-SLCDKED-L2VDESD-TUBLMLH-CHAJEAI" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="AUI2JXT-PXK3PMB-YATDUNI-RIGBTKQ-VBUJLUH-JLWZEEZ-3UV5X2G-AWKOVQH" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="AZCVBDG-6GATBA3-XX7HOH3-LY5R3L2-SPCQOIT-KFVLUCC-GQ3KJTJ-ZSP3RAQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="A4W7JO6-IM2ZWBX-AYYLCEP-NFTJKQV-2QRGZJ4-QGSP3HI-R7UHNYF-GAKDLAS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="BDZ662J-Q5IXRE3-CEPHNIM-V5D76GA-U26VZRI-UHTDERX-UABQEQ3-CKE5CQD" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="BP6X6FF-2SEA3QR-RW2Y55O-B4X7LR6-5EXGA6D-KM725CC-CIDD7ZZ-7VNKXAT" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="CDUKUIK-56DOHM3-M7VHCL6-5HGTZHB-QULOOBK-3DK6QXC-4DDDXKS-H5OOZAQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="CJRFL4X-PBD3H6K-2GCPPYS-JCZ73GF-SN63HMU-WGYMZQZ-W2AWEDG-AZ623QB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DDGR3XN-CKDGXS4-VR6G2T3-GQJVO7E-XDZGWUJ-733J3G7-NMGQFS5-ETFWKA6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DFAWRIU-3MSLR65-L7DZE53-UNR6KEX-FTJZJ2B-NVYEPQH-ZYDYU6D-ROAKFAJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DMFDJGV-UDOMXZU-FKKYXCT-BEGE7K7-OVAFMQW-NBLQ46C-J6VORML-27T3SA2" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DM4ZS3J-WF2WJGP-YNPTRRZ-MKUEDUC-7N5Q6EX-IHLR7DM-VHMP3FE-KXGHDAG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DPJWHYQ-S7N7D4C-P4CY65M-ZJ6MFOD-OR5CF7N-2TYSQ6T-IZQDK67-NYDMCQR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DYVI4HB-DO3WULH-YC4CVUV-KOHXVYH-MSRQH5O-P4JX5T4-WCVDJK2-QPWZPQQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="EAELU3R-WFMEOHV-VUF3ZQU-T6QLRUQ-S7HFGGG-447PNIB-Z32VNM6-3XRG2QM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="EHYW5K4-WLONYO4-LVJFY7T-7Y6W7EA-N3O5ADS-NPXYFBA-TOSC3X2-J4BYNAV" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="E3KQXYD-ST7GNHR-EQVEBUK-I7X4NFR-J6JEGSL-XSU3236-PXDEOYF-SIGVJQD" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="FDN276E-J7AQ32B-Y4CBY3Q-TWCQ2RV-QQTQ5NJ-NLLFCGR-UOXSYQN-VLHSHAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="FOUTNQ4-M3X4ZL6-VPG3ZD6-3EFUKTT-XVNS6N6-G2E56OA-LYBDCXY-S52OLQJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="F3CJABZ-VOJPUJJ-A6V6J2O-PQF32IV-5VZY45B-ASPFXOE-OMZOBYX-IIVGJQR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="F722I3J-JNESQSZ-NWHMLX7-OX5YQPY-Q4P7AKS-RAVRYFO-LUFHTBF-MV5ZEAW" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="GRCZC6E-PRZ5EPJ-5XZK2WL-K7PBC4D-EU7J5K3-YHME4GD-AKHMR3U-HTGE3AJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="GTZORAO-4U2BMRT-NJ7VGJV-DEFE7X4-GK7GOKP-56NADQB-DKRY64G-GK4JSAN" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="HHKXJNI-2FOFGMU-KT55UTA-J7CDGBX-KE3RMHK-NDVQYCB-DPUAFTN-C62LTAJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="HOA63TU-7NASFTP-7JKRNDD-CEBCBTL-3UB6GEH-LE3TBF6-UHBJUX2-B53JYQO" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="H4IJPSS-SGRGAHF-4HVWJXK-AQ2EV4C-EMWQWG4-63ORWGX-CTBONEL-3IO2VA2" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IEDANYN-UEK237R-Z6J75BD-3SK2RPX-66FJ2F2-T347HN3-KYY2PW5-47ZAIAE" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IHFBE2Q-VJKGLJN-PYEX3SL-ORFUM6B-GUDQVUN-32TZFZO-GMYDUFI-VBICSA3" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="ITFMT23-LPJ4LN3-Y6HQUYG-GO47ZU7-PO6SUJ6-7UO2JNG-SKI5CTG-WJCK2AP" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IWWUWMQ-3KMUGCT-JJII2HN-J6NN6OW-43AM5TT-MHJXRCR-D7MJVWE-HLAD4QV" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IW5I76B-NKCHGJ5-HAQNOBW-LRHKYSH-54VEQQC-HQAXHVC-DX4ME3O-C7XGUAQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="I2VEKOT-P63JCHG-LKHJAOJ-XL4VNH2-Y7WQGAW-JKQQQQY-QHIOB77-Q7WLYAW" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</device>
|
||||
<device id="JWPMLRX-U5Q3LSU-LZJCM2Z-GIZLQO2-UBSZKUO-INZXYTZ-7E6PDK2-PPDQWA4" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="KBBKAQ6-VINPVTG-LXSDXXF-T67N5DU-ONRJI3P-WIAZBY7-UCMAJJ4-274ZBA5" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="KNOUKFP-JEJTAZO-FUI7KMO-4PQJOMB-CX255W3-U3SHR2I-LJ4LBHL-AY6BCA5" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="KOUXI7Y-6PWCH2V-552D2Y4-6D3HQJZ-CEKC5IY-XVUV2KC-452GVYG-AJN4TQB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="K3Z2ESL-SKLK2UF-CLA2RLL-DWQVXMU-7PQQ5MV-2BWWSCI-MAOF7YR-277YSAL" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="LW7D6SQ-3UIMICX-EEPGTNA-P6NNR2M-35WLLB5-QKW5KWV-3PGTWA3-QM65DA4" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="LYYJOWF-WZWZ3Z6-O4JRBOM-44LPK33-YDB5EFZ-CTX2FNI-C5BNMOH-ZBREKAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="L5GRUQX-3FPGQG6-CWJ3CQN-QJO7BFV-FDXYBXG-AKAZ465-3D37XCR-SHPHDQS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="L7L37HS-3STS6WA-G76FKTT-FAIFH4G-46EORMF-6PA5JFZ-RW4QJDM-SDSBIAM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="MNLYDAL-BTM5JJL-PYOD2C6-MWKXW5N-I7KGYQJ-TAJ2NQE-K2MI7C5-RIM7CA7" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="NBO5E2S-OIKGPUN-AXQSTWG-YNSZTQD-YSQ3JGC-BAPD72J-5BUYGBC-4U75XQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="NWKOEM4-T62TE6V-H4X75L2-GX75Q3C-XCA3OS7-X7MHSVV-IOS5V3U-6Y4IAAZ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="NZKXQ5V-GXMQEY4-77E6DUN-UJK5O3O-633AEIQ-2ZB6FJL-ZPT3AFY-IFMTVAS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="OYRDGZZ-BBBYTOG-PPOEKNF-RFOU5YJ-LOJXGDC-PZFHUMW-EHKAEGM-ZI5O6QX" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="O5BUAKV-5PXZUBI-TGCAIHE-646RUCT-5KSKVSY-NMVEZNC-XWIDH4M-N4FWJQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="PR7FK7P-O57IUUQ-L7NVOBM-BJ6B6HV-A4Q2ZM7-3ZB7YPB-2CEI225-VNE27QZ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="P74TJEI-WJADRBZ-J5ZWR3D-WNGTH7F-QNTKVBW-QJSYCKN-VSXWFFY-BFZXZQH" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="QLQ4VDH-DCRQOCW-45ZU3NH-KGVNZ4K-RY5VH5M-E54B35V-3GDPECV-FATMQQN" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="QTSMMK3-7LDPTPP-NFQJBKI-MXPNRLC-RBJ46YH-EUOIHQX-X5OBHY5-DSEHQAB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="RNEPSQ3-XBYBJX2-DC63XIJ-KOV6OWX-3RBQ245-SDO5NPG-7DNYUIJ-V3BVBAR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="RSPOEAQ-WEOFVAO-7F5OHVV-FPEK2XA-KG23ACJ-ISANINA-EVNGPFM-6GUGUAY" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="SMFBW3X-ZG7GEVQ-Q5EY2HV-QTTJ2AK-CMKRW7K-4ZYN5KF-D5LHXBA-J2WJPQA" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="SRC4EBU-57YZZF2-U72JRON-LWMXERT-NL3R4YY-7T3H6FI-VPK7KMQ-Q7LGBQE" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="SVUEDXK-GOM6UEO-BK725GI-4R4GRA2-SL6EJQ7-2TQZDF4-IQEM34P-P32HLAN" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="S257JIU-2LMK7R6-J4EPHQE-DHBSA2V-5S45UV5-NQNI6G4-UUPXTWK-5L645AB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="S475AJS-UF2H3WA-ZKSFH7J-YJC6Q4P-L6O7NDM-Q7LJKVE-CQ2DTVS-N6TY6QJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TE3RAAB-2F65ZF2-MHDELWZ-YJHF72E-5S7HGWF-NJ6CKZK-3LJHQ72-YGOPOAR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TJEEOEJ-ADKANZG-T3RYCWO-FYJW3GQ-TX7FDLE-I3X3QLX-TQ37HW4-DQQZEAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TN64K4J-D7S33RD-55TMUBA-D2VKVRW-FAPQRGY-2VXAMMZ-F2RBYC7-DJAXFAI" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TP4ORAM-C2K6L3Z-3SFLGQF-NILNVZQ-3VSHEGT-LLKQU7K-BV66VQZ-TP65EAS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="T345SQH-CZNXG3Q-WVMQDHY-S524RKJ-C3AP767-QTYGY3X-GASFDCK-LURRWAC" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="UMMN7QY-46JHZ4X-UJ5TWWV-EU6OKDA-4GM76QK-UUJJLPH-27H6P22-XQALWQ6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="UZPBG42-GSUBWXD-ALOS27M-AQAWWWJ-XP5DA46-3GWEZ4U-I5I2JAZ-5VPHYQI" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="VU764LI-72YK4KV-APLQAXJ-4Q46MTT-FCQFB33-JTCH4E5-G7OJRFI-YGPJYAJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WDGE4EI-CO3MSS6-JM5ASRP-YHBCKBN-EWAT5KQ-GGQQTZE-OSDTGH2-P2BE3AA" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WGURZJA-JLZYF3P-KRQDIAL-6RGUYSZ-UZEJ3DO-U6JOGTO-PYQRA7K-NOEFSQ2" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WLTEXSS-RUQSDSJ-FWYVE4T-S25G37F-4XPT7GI-VLBSHCS-SLHXBEF-QSKCQAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WNK25O3-CDDKOM6-VDJKB2C-GZ6L6GA-HI7WHXU-G2SCLLF-EEDJX6N-AYY5OQM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WTA46HO-WMIMK2N-GMNLWSQ-UOZI2O7-JVG3YY2-FWXQ3NJ-NWG5PLX-RN26AAG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="XG6FGXG-CCB54FX-AVREF7W-YMR66ED-66H4QQD-KXK5K7C-DVVYKU2-GF77LQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="XSWE7HJ-AHTZGEZ-UNH6ZSK-WZHRS2C-55LQZDR-7GLSE6Z-VWEDFWT-ZS4DCAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="XXXMH6A-3J53GPW-7BVBR76-5UJNW3E-6H2ACDZ-DY46Z6T-FD7GRPE-RGIQFAE" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="X2J2EIR-6MCQUSZ-DZU37TI-Y5S4QUN-JPTDKHT-ANBGXU5-YRD3EGF-6VOEHQG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="X3WHUR4-N5BSQ4T-YFC3A3S-KI6KXQ2-6GL66YV-G2YGVKY-LEKVIXY-M62C6A7" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="X4EX7JM-CQSNCZ5-H5UNVUW-RNWT4U7-VPFKPB4-P65IWZC-N74SCAM-RLLFUAT" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="YEKECF3-5YAC2EZ-ADYLARL-THNGWI3-ZFU6UNK-3KCFZWB-IRYFH6X-LO5ZHQC" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="YP66HGG-CIWZ6N2-A3T4PXT-KP47YWK-3ZVW5UX-MGTZRHL-YNB22IK-IEEWNAY" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="YRG374E-AB2PK5W-UKR2MOA-E43VSQF-ICO72FN-FDZDX7F-Q75UE2H-EUCIFQJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="ZCZPFDT-GHJKTPJ-22WKCWP-2UO7X33-NUD2YAK-P6MDHN6-2JVAETG-JPXXLAR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="2MDVGTI-EGVOHIO-SPFMLV6-NRFP2EC-HWMLWVO-QVLLTC3-DSQPHMY-JDEIQQ5" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="22UBH4D-HGCP3EW-EHVJTZ4-6PB2HOB-LDPOC3X-ONY6PA6-2DFNBIX-D3X5UQT" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="3OQ6R42-O76KQJM-JYN7EGL-PWKZUPG-276FWG2-CP3CTV3-SM545L3-KTTH3AG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="3QW353X-J52M5LW-3DUY2Z5-SG5JU7Z-6NSJFAM-74YPGMA-VHL2W3N-5PX2JQO" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</device>
|
||||
<device id="4IB56JJ-IXYAHRI-6W75NTN-TEIMPRE-25NAG6F-MYGBJBG-JB33DXY-L2WINQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="4JSTGIZ-Q2UJXIG-V6QT62A-FSRBKLF-J6FWGIY-PFIAFC3-7GP7DOY-RVNGKQU" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5BKVB65-I2E5UCX-DWWPLNP-IQ7VRZY-I3P42KF-ZKAF7XO-TQTCLVU-TH45GA3" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5OOL3EO-A5DTJ5N-CZHPYVP-YH74NL4-DE3O7AU-ZDFSM6D-KG5IUWA-WJJXBQL" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5RXIYVZ-NOEGGYD-TY4WQTH-U6FQ5IR-LJTW3P3-HFQBN6C-RC4AB5V-MO44PQV" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5WGZMYP-FNTWGLG-PQKEB5V-PJ4LEPL-53SRHBR-HHSHHLD-5KFL3P2-W6MTRQY" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="6I5KLVE-VVRXZ55-Z3IQIZH-3FBHKTZ-6MIPAYO-V4QX6K7-7J432VD-T5TY6AU" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="6R2BAIE-VRYZJXA-ZKJT4ZK-BQTXA46-VBWMLNY-PINTWYG-TS4Y6GZ-7Z2KVQG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="7BT3IUI-RYS757D-YJPBP6U-WPL2ZI6-CMICBLW-UDYDSVT-NG62TB5-PM2O3A6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="7TVCUYX-MLZH6GF-GDLVMJ6-REPXXUD-DCLPXP2-67HS7VR-QTTABOA-4ZXJOQD" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8082</address>
|
||||
<apikey>abc123</apikey>
|
||||
|
||||
@@ -51,7 +51,10 @@ func TestStressHTTP(t *testing.T) {
|
||||
|
||||
tc := &tls.Config{InsecureSkipVerify: true}
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: tc,
|
||||
DisableKeepAlives: true,
|
||||
ResponseHeaderTimeout: time.Second,
|
||||
TLSHandshakeTimeout: time.Second,
|
||||
}
|
||||
client := &http.Client{
|
||||
Transport: tr,
|
||||
|
||||
125
test/manypeers_test.go
Normal file
125
test/manypeers_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build integration
|
||||
|
||||
package integration_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/config"
|
||||
"github.com/syncthing/syncthing/internal/osutil"
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
)
|
||||
|
||||
func TestManyPeers(t *testing.T) {
|
||||
log.Println("Cleaning...")
|
||||
err := removeAll("s1", "s2", "h1/index", "h2/index")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
log.Println("Generating files...")
|
||||
err = generateFiles("s1", 200, 20, "../bin/syncthing")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
receiver := syncthingProcess{ // id2
|
||||
log: "2.out",
|
||||
argv: []string{"-home", "h2"},
|
||||
port: 8082,
|
||||
apiKey: apiKey,
|
||||
}
|
||||
err = receiver.start()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer receiver.stop()
|
||||
|
||||
resp, err := receiver.get("/rest/config")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
t.Fatalf("Code %d != 200", resp.StatusCode)
|
||||
}
|
||||
|
||||
var cfg config.Configuration
|
||||
json.NewDecoder(resp.Body).Decode(&cfg)
|
||||
resp.Body.Close()
|
||||
|
||||
for len(cfg.Devices) < 100 {
|
||||
bs := make([]byte, 16)
|
||||
ReadRand(bs)
|
||||
id := protocol.NewDeviceID(bs)
|
||||
cfg.Devices = append(cfg.Devices, config.DeviceConfiguration{DeviceID: id})
|
||||
cfg.Folders[0].Devices = append(cfg.Folders[0].Devices, config.FolderDeviceConfiguration{DeviceID: id})
|
||||
}
|
||||
|
||||
osutil.Rename("h2/config.xml", "h2/config.xml.orig")
|
||||
defer osutil.Rename("h2/config.xml.orig", "h2/config.xml")
|
||||
|
||||
var buf bytes.Buffer
|
||||
json.NewEncoder(&buf).Encode(cfg)
|
||||
resp, err = receiver.post("/rest/config", &buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
t.Fatalf("Code %d != 200", resp.StatusCode)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
log.Println("Starting up...")
|
||||
sender := syncthingProcess{ // id1
|
||||
log: "1.out",
|
||||
argv: []string{"-home", "h1"},
|
||||
port: 8081,
|
||||
apiKey: apiKey,
|
||||
}
|
||||
err = sender.start()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer sender.stop()
|
||||
|
||||
for {
|
||||
comp, err := sender.peerCompletion()
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
if comp[id2] == 100 {
|
||||
return
|
||||
}
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
log.Println("Comparing directories...")
|
||||
err = compareDirectories("s1", "s2")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user