Compare commits
v0.11.0-be...v0.11.1 (97 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 15b87ae297 | |
| | 02fdf59839 | |
| | d9da02b7a8 | |
| | 8f2ad6418d | |
| | ff984425a3 | |
| | ac1058359f | |
| | 9afbca3001 | |
| | ec3f17cb9c | |
| | 73b9d5c5f9 | |
| | ecc8591c95 | |
| | 696b67e4b1 | |
| | 266a5116a1 | |
| | 131f2be857 | |
| | be7b3a9952 | |
| | bb31b1785b | |
| | 2a60f4b1e9 | |
| | 33a4fb5a1a | |
| | aece6e8b6c | |
| | 7bf55dd14f | |
| | e158f17c2b | |
| | c5027d9478 | |
| | 36c1d82146 | |
| | bd4f404d45 | |
| | 43d39844f7 | |
| | e041a4d212 | |
| | 433b923ea7 | |
| | f8f1c72b44 | |
| | 542716e216 | |
| | b35958d024 | |
| | 9ee3541655 | |
| | bf7d84c12a | |
| | 34c691087e | |
| | 08c383012f | |
| | e2420495f3 | |
| | d530c5eda7 | |
| | ef7420ecf6 | |
| | c905a41e2a | |
| | 42ff4b5bf0 | |
| | 4fb74a32cc | |
| | c741465328 | |
| | fbca537a40 | |
| | 83420b0199 | |
| | 33d3ba1b45 | |
| | 497f85a236 | |
| | a624c302ab | |
| | cebe21a3af | |
| | 9eb679d70a | |
| | 6d84443db8 | |
| | da8a1f242c | |
| | 946d98b71f | |
| | dff51fc707 | |
| | 7d954dd5d1 | |
| | c6300a5da8 | |
| | 9359daa0d9 | |
| | 2322e9cff7 | |
| | a876e1e348 | |
| | 6a863c8f71 | |
| | 392b006b06 | |
| | 96289f42b7 | |
| | 1b69c2441c | |
| | 8ca85a4918 | |
| | 2a31031cbc | |
| | d148cd8ccc | |
| | d1cc1828b8 | |
| | 069e8cf122 | |
| | 45cbcaca6d | |
| | 102a2db1f3 | |
| | 9f81c85ca7 | |
| | ba4a6fc0c5 | |
| | aa803ce2ff | |
| | a027a60f5d | |
| | 270649535e | |
| | cf80ba71f4 | |
| | b74df18a4a | |
| | 5cd2906a39 | |
| | bc37b69d17 | |
| | 94f6e400ad | |
| | b95a6ccf80 | |
| | 7df9c1b6e4 | |
| | 75348c0158 | |
| | 75fb14acaf | |
| | 5350315b68 | |
| | 658e39c270 | |
| | ef7ce6c7e1 | |
| | 509e2411bf | |
| | 65c906f951 | |
| | 1f159e8233 | |
| | 936c76119d | |
| | f45865606a | |
| | cfc9776bae | |
| | 0cb7ed9e4e | |
| | 4b07609458 | |
| | e41e58e781 | |
| | f5030f1c2c | |
| | 2a48fb8e87 | |
| | df6dbc5fa4 | |
| | 4b1d2839e8 | |
.gitattributes (vendored, new file, 9 additions)
@@ -0,0 +1,9 @@
+# Text files use LF line endings in this repository
+* text=auto
+
+# Except the dependencies, which we leave alone
+Godeps/** -text=auto
+
+# Diffs on these files are meaningless
+gui.files.go -diff
+*.svg -diff
AUTHORS (3 changes)
@@ -11,16 +11,19 @@ Ben Sidhom <bsidhom@gmail.com>
 Brandon Philips <brandon@ifup.org>
 Brendan Long <self@brendanlong.com>
 Caleb Callaway <enlightened.despot@gmail.com>
+Carsten Hagemann <moter8@gmail.com>
 Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
 Chris Joel <chris@scriptolo.gy>
 Colin Kennedy <moshen.colin@gmail.com>
 Daniel Martí <mvdan@mvdan.cc>
 Dennis Wilson <dw@risu.io>
 Dominik Heidler <dominik@heidler.eu>
 Elias Jarlebring <jarlebring@gmail.com>
 Emil Hessman <emil@hessman.se>
 Federico Castagnini <federico.castagnini@gmail.com>
 Felix Ableitner <me@nutomic.com>
 Felix Unterpaintner <bigbear2nd@gmail.com>
+Francois-Xavier Gsell <fxgsell@gmail.com>
 Gilli Sigurdsson <gilli@vx.is>
 Jakob Borg <jakob@nym.se>
+James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Godeps/Godeps.json (generated, 4 changes)
@@ -19,7 +19,7 @@
 		},
 		{
 			"ImportPath": "github.com/calmh/xdr",
-			"Rev": "bccf335c34c01760bdc89f98c952fcda696e27d2"
+			"Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
 		},
 		{
 			"ImportPath": "github.com/juju/ratelimit",
@@ -31,7 +31,7 @@
 		},
 		{
 			"ImportPath": "github.com/syncthing/protocol",
-			"Rev": "6277c0595c18d42e9db75dfe900463ef093a82d2"
+			"Rev": "e7db2648034fb71b051902a02bc25d4468ed492e"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go (generated, vendored, 4 changes)
@@ -67,7 +67,7 @@ func BenchmarkThisEncode(b *testing.B) {
 func BenchmarkThisEncoder(b *testing.B) {
 	w := xdr.NewWriter(ioutil.Discard)
 	for i := 0; i < b.N; i++ {
-		_, err := s.encodeXDR(w)
+		_, err := s.EncodeXDRInto(w)
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -108,7 +108,7 @@ func BenchmarkThisDecoder(b *testing.B) {
 	r := xdr.NewReader(rr)
 	var t XDRBenchStruct
 	for i := 0; i < b.N; i++ {
-		err := t.decodeXDR(r)
+		err := t.DecodeXDRFrom(r)
 		if err != nil {
 			b.Fatal(err)
 		}
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go (generated, vendored, 28 changes)
@@ -26,7 +26,9 @@ XDRBenchStruct Structure:
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | 0x0000 | I3 |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | uint8 |
 / /
 \ uint8 Structure \
 / /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Length of Bs0 |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -69,7 +71,7 @@ struct XDRBenchStruct {

 func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
 	var xw = xdr.NewWriter(w)
-	return o.encodeXDR(xw)
+	return o.EncodeXDRInto(xw)
 }

 func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
@@ -87,11 +89,11 @@ func (o XDRBenchStruct) MustMarshalXDR() []byte {
 func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
 	var aw = xdr.AppendWriter(bs)
 	var xw = xdr.NewWriter(&aw)
-	_, err := o.encodeXDR(xw)
+	_, err := o.EncodeXDRInto(xw)
 	return []byte(aw), err
 }

-func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
+func (o XDRBenchStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
 	xw.WriteUint64(o.I1)
 	xw.WriteUint32(o.I2)
 	xw.WriteUint16(o.I3)
@@ -111,16 +113,16 @@ func (o XDRBenchStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {

 func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
 	xr := xdr.NewReader(r)
-	return o.decodeXDR(xr)
+	return o.DecodeXDRFrom(xr)
 }

 func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
 	var br = bytes.NewReader(bs)
 	var xr = xdr.NewReader(br)
-	return o.decodeXDR(xr)
+	return o.DecodeXDRFrom(xr)
 }

-func (o *XDRBenchStruct) decodeXDR(xr *xdr.Reader) error {
+func (o *XDRBenchStruct) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.I1 = xr.ReadUint64()
 	o.I2 = xr.ReadUint32()
 	o.I3 = xr.ReadUint16()
@@ -155,7 +157,7 @@ struct repeatReader {

 func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
 	var xw = xdr.NewWriter(w)
-	return o.encodeXDR(xw)
+	return o.EncodeXDRInto(xw)
 }

 func (o repeatReader) MarshalXDR() ([]byte, error) {
@@ -173,27 +175,27 @@ func (o repeatReader) MustMarshalXDR() []byte {
 func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
 	var aw = xdr.AppendWriter(bs)
 	var xw = xdr.NewWriter(&aw)
-	_, err := o.encodeXDR(xw)
+	_, err := o.EncodeXDRInto(xw)
 	return []byte(aw), err
 }

-func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
+func (o repeatReader) EncodeXDRInto(xw *xdr.Writer) (int, error) {
 	xw.WriteBytes(o.data)
 	return xw.Tot(), xw.Error()
 }

 func (o *repeatReader) DecodeXDR(r io.Reader) error {
 	xr := xdr.NewReader(r)
-	return o.decodeXDR(xr)
+	return o.DecodeXDRFrom(xr)
 }

 func (o *repeatReader) UnmarshalXDR(bs []byte) error {
 	var br = bytes.NewReader(bs)
 	var xr = xdr.NewReader(br)
-	return o.decodeXDR(xr)
+	return o.DecodeXDRFrom(xr)
 }

-func (o *repeatReader) decodeXDR(xr *xdr.Reader) error {
+func (o *repeatReader) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.data = xr.ReadBytes()
 	return xr.Error()
 }
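The substantive change running through these regenerated files is that the previously unexported encodeXDR and decodeXDR methods are now exported as EncodeXDRInto and DecodeXDRFrom, taking an *xdr.Writer or *xdr.Reader directly. A minimal sketch of what that enables for a caller, assuming the generated XDRBenchStruct type from the test file above:

```go
package main

import (
	"bytes"

	"github.com/calmh/xdr"
)

// roundTrip encodes one value and decodes it back using the newly exported
// streaming methods, working with explicit Writer/Reader values instead of
// the io.Writer-based EncodeXDR/DecodeXDR wrappers. Sketch only.
func roundTrip(in, out *XDRBenchStruct) error {
	var buf bytes.Buffer

	// A *xdr.Writer can be reused across many messages; EncodeXDRInto
	// appends one encoded struct to it.
	w := xdr.NewWriter(&buf)
	if _, err := in.EncodeXDRInto(w); err != nil {
		return err
	}

	// DecodeXDRFrom is the symmetric read side.
	r := xdr.NewReader(&buf)
	return out.DecodeXDRFrom(r)
}
```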
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go (generated, vendored, 3 changes)
@@ -143,6 +143,9 @@ func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
 	{{end}}
 	{{else}}
 	_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
+	if _{{$fieldInfo.Name}}Size < 0 {
+		return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
+	}
 	{{if ge $fieldInfo.Max 1}}
 	if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
 		return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go (generated, vendored, 4 changes)
@@ -32,11 +32,11 @@ type TestStruct struct {

 type Opaque [32]byte

-func (u *Opaque) encodeXDR(w *xdr.Writer) (int, error) {
+func (u *Opaque) EncodeXDRInto(w *xdr.Writer) (int, error) {
 	return w.WriteRaw(u[:])
 }

-func (u *Opaque) decodeXDR(r *xdr.Reader) (int, error) {
+func (u *Opaque) DecodeXDRFrom(r *xdr.Reader) (int, error) {
 	return r.ReadRaw(u[:])
 }
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go (generated, vendored, 43 changes)
@@ -18,17 +18,23 @@ TestStruct Structure:
  0                   1                   2                   3
  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | int |
 / /
 \ int Structure \
 / /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | int8 |
 / /
 \ int8 Structure \
 / /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | uint8 |
 / /
 \ uint8 Structure \
 / /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | int16 |
 | 0x0000 | I16 |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | 0x0000 | UI16 |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | int32 |
 | I32 |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | UI32 |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -52,7 +58,9 @@ TestStruct Structure:
 \ S (variable length) \
 / /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Opaque |
 / /
 \ Opaque Structure \
 / /
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 | Number of SS |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -68,9 +76,9 @@ struct TestStruct {
 	int I;
 	int8 I8;
 	uint8 UI8;
-	int16 I16;
+	int I16;
 	unsigned int UI16;
-	int32 I32;
+	int I32;
 	unsigned int UI32;
 	hyper I64;
 	unsigned hyper UI64;
@@ -84,7 +92,7 @@ struct TestStruct {

 func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
 	var xw = xdr.NewWriter(w)
-	return o.encodeXDR(xw)
+	return o.EncodeXDRInto(xw)
 }

 func (o TestStruct) MarshalXDR() ([]byte, error) {
@@ -102,11 +110,11 @@ func (o TestStruct) MustMarshalXDR() []byte {
 func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
 	var aw = xdr.AppendWriter(bs)
 	var xw = xdr.NewWriter(&aw)
-	_, err := o.encodeXDR(xw)
+	_, err := o.EncodeXDRInto(xw)
 	return []byte(aw), err
 }

-func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
+func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
 	xw.WriteUint64(uint64(o.I))
 	xw.WriteUint8(uint8(o.I8))
 	xw.WriteUint8(o.UI8)
@@ -124,7 +132,7 @@ func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
 		return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
 	}
 	xw.WriteString(o.S)
-	_, err := o.C.encodeXDR(xw)
+	_, err := o.C.EncodeXDRInto(xw)
 	if err != nil {
 		return xw.Tot(), err
 	}
@@ -140,16 +148,16 @@ func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {

 func (o *TestStruct) DecodeXDR(r io.Reader) error {
 	xr := xdr.NewReader(r)
-	return o.decodeXDR(xr)
+	return o.DecodeXDRFrom(xr)
 }

 func (o *TestStruct) UnmarshalXDR(bs []byte) error {
 	var br = bytes.NewReader(bs)
 	var xr = xdr.NewReader(br)
-	return o.decodeXDR(xr)
+	return o.DecodeXDRFrom(xr)
 }

-func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
+func (o *TestStruct) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.I = int(xr.ReadUint64())
 	o.I8 = int8(xr.ReadUint8())
 	o.UI8 = xr.ReadUint8()
@@ -161,8 +169,11 @@ func (o *TestStruct) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.UI64 = xr.ReadUint64()
 	o.BS = xr.ReadBytesMax(1024)
 	o.S = xr.ReadStringMax(1024)
-	(&o.C).decodeXDR(xr)
+	(&o.C).DecodeXDRFrom(xr)
 	_SSSize := int(xr.ReadUint32())
+	if _SSSize < 0 {
+		return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
+	}
 	if _SSSize > 1024 {
 		return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
 	}
Godeps/_workspace/src/github.com/calmh/xdr/reader.go (generated, vendored, 3 changes)
@@ -68,7 +68,8 @@ func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
 	if r.err != nil {
 		return nil
 	}
-	if max > 0 && l > max {
+	if l < 0 || max > 0 && l > max {
+		// l may be negative on 32 bit builds
 		r.err = ElementSizeExceeded("bytes field", l, max)
 		return nil
 	}
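The new l < 0 guard exists because the length prefix is read as a uint32 and converted to int; on a 32-bit build int is 32 bits wide, so a length of 2^31 or more wraps negative and would slip past the l > max comparison while still feeding a later allocation. A small self-contained demonstration of the wrap, using int32 to stand in for a 32-bit int (the values are hypothetical):

```go
package main

import "fmt"

func main() {
	// A hostile length field read from the wire.
	wire := uint32(0x80000000)

	// On a 32-bit build, int(wire) behaves like this int32 conversion:
	// the value wraps negative.
	l := int32(wire)
	max := int32(1024)

	// The old check only rejected l > max, so a negative l passed through.
	fmt.Println(l, l > max) // -2147483648 false
}
```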
Godeps/_workspace/src/github.com/syncthing/protocol/message_xdr.go (generated, vendored, 27 changes)
@@ -110,12 +110,18 @@ func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
 func (o *IndexMessage) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.Folder = xr.ReadString()
 	_FilesSize := int(xr.ReadUint32())
+	if _FilesSize < 0 {
+		return xdr.ElementSizeExceeded("Files", _FilesSize, 0)
+	}
 	o.Files = make([]FileInfo, _FilesSize)
 	for i := range o.Files {
 		(&o.Files[i]).DecodeXDRFrom(xr)
 	}
 	o.Flags = xr.ReadUint32()
 	_OptionsSize := int(xr.ReadUint32())
+	if _OptionsSize < 0 {
+		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
+	}
 	if _OptionsSize > 64 {
 		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
 	}
@@ -236,6 +242,9 @@ func (o *FileInfo) DecodeXDRFrom(xr *xdr.Reader) error {
 	(&o.Version).DecodeXDRFrom(xr)
 	o.LocalVersion = int64(xr.ReadUint64())
 	_BlocksSize := int(xr.ReadUint32())
+	if _BlocksSize < 0 {
+		return xdr.ElementSizeExceeded("Blocks", _BlocksSize, 0)
+	}
 	o.Blocks = make([]BlockInfo, _BlocksSize)
 	for i := range o.Blocks {
 		(&o.Blocks[i]).DecodeXDRFrom(xr)
@@ -442,6 +451,9 @@ func (o *RequestMessage) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.Hash = xr.ReadBytesMax(64)
 	o.Flags = xr.ReadUint32()
 	_OptionsSize := int(xr.ReadUint32())
+	if _OptionsSize < 0 {
+		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
+	}
 	if _OptionsSize > 64 {
 		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
 	}
@@ -633,11 +645,17 @@ func (o *ClusterConfigMessage) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.ClientName = xr.ReadStringMax(64)
 	o.ClientVersion = xr.ReadStringMax(64)
 	_FoldersSize := int(xr.ReadUint32())
+	if _FoldersSize < 0 {
+		return xdr.ElementSizeExceeded("Folders", _FoldersSize, 0)
+	}
 	o.Folders = make([]Folder, _FoldersSize)
 	for i := range o.Folders {
 		(&o.Folders[i]).DecodeXDRFrom(xr)
 	}
 	_OptionsSize := int(xr.ReadUint32())
+	if _OptionsSize < 0 {
+		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
+	}
 	if _OptionsSize > 64 {
 		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
 	}
@@ -750,12 +768,18 @@ func (o *Folder) UnmarshalXDR(bs []byte) error {
 func (o *Folder) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.ID = xr.ReadStringMax(64)
 	_DevicesSize := int(xr.ReadUint32())
+	if _DevicesSize < 0 {
+		return xdr.ElementSizeExceeded("Devices", _DevicesSize, 0)
+	}
 	o.Devices = make([]Device, _DevicesSize)
 	for i := range o.Devices {
 		(&o.Devices[i]).DecodeXDRFrom(xr)
 	}
 	o.Flags = xr.ReadUint32()
 	_OptionsSize := int(xr.ReadUint32())
+	if _OptionsSize < 0 {
+		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
+	}
 	if _OptionsSize > 64 {
 		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
 	}
@@ -862,6 +886,9 @@ func (o *Device) DecodeXDRFrom(xr *xdr.Reader) error {
 	o.MaxLocalVersion = int64(xr.ReadUint64())
 	o.Flags = xr.ReadUint32()
 	_OptionsSize := int(xr.ReadUint32())
+	if _OptionsSize < 0 {
+		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
+	}
 	if _OptionsSize > 64 {
 		return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
 	}
Godeps/_workspace/src/github.com/syncthing/protocol/vector.go (generated, vendored, 10 changes)
@@ -103,3 +103,13 @@ func (a Vector) Concurrent(b Vector) bool {
 	comp := a.Compare(b)
 	return comp == ConcurrentGreater || comp == ConcurrentLesser
 }
+
+// Counter returns the current value of the given counter ID.
+func (v Vector) Counter(id uint64) uint64 {
+	for _, c := range v {
+		if c.ID == id {
+			return c.Value
+		}
+	}
+	return 0
+}
Godeps/_workspace/src/github.com/syncthing/protocol/vector_test.go (generated, vendored, 14 changes)
@@ -118,5 +118,17 @@ func TestMerge(t *testing.T) {
 			t.Errorf("%d: %+v.Merge(%+v) == %+v (expected %+v)", i, tc.a, tc.b, m, tc.m)
 		}
 	}
-
 }
+
+func TestCounterValue(t *testing.T) {
+	v0 := Vector{Counter{42, 1}, Counter{64, 5}}
+	if v0.Counter(42) != 1 {
+		t.Errorf("Counter error, %d != %d", v0.Counter(42), 1)
+	}
+	if v0.Counter(64) != 5 {
+		t.Errorf("Counter error, %d != %d", v0.Counter(64), 5)
+	}
+	if v0.Counter(72) != 0 {
+		t.Errorf("Counter error, %d != %d", v0.Counter(72), 0)
+	}
+}
Godeps/_workspace/src/github.com/thejerf/suture/pre-commit (generated, vendored, 1 change)
@@ -9,3 +9,4 @@ if [ ! -z "$GOLINTOUT" -o "$?" != 0 ]; then
 fi

+go test
NICKS (3 changes)
@@ -3,6 +3,7 @@
 AudriusButkevicius <audrius.butkevicius@gmail.com>
 Cathryne <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
 KayoticSully <kayoticsully@gmail.com>
+Moter8 <moter8@gmail.com>
 Nutomic <me@nutomic.com>
 Rewt0r <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
 Vilbrekin <vilbrekin@gmail.com>
@@ -23,6 +24,7 @@ facastagnini <federico.castagnini@gmail.com>
 filoozoom <philippe@schommers.be>
 frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
 gillisig <gilli@vx.is>
 jarlebring <jarlebring@gmail.com>
 jedie <github.com@jensdiemer.de> <git@jensdiemer.de>
+jpjp <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
 kamadak <kamada@nanohz.org>
@@ -50,3 +52,4 @@ tnn2 <tnn@nygren.pp.se>
 tojrobinson <tully@tojr.org>
 uok <ueomkail@gmail.com> <uok@users.noreply.github.com>
 veeti <veeti.paananen@rojekti.fi>
+zukoo <fxgsell@gmail.com>
README.md (19 changes)
@@ -5,18 +5,17 @@ syncthing
 [](http://godoc.org/github.com/syncthing/syncthing)
 [](https://www.mozilla.org/MPL/2.0/)

-This is the `syncthing` project. The following are the project goals:
+This is the `syncthing` project which pursues the following goals:

 1. Define a protocol for synchronization of a folder between a number of
-   collaborating devices. The protocol should be well defined, unambiguous,
+   collaborating devices. This protocol should be well defined, unambiguous,
    easily understood, free to use, efficient, secure and language neutral.
-   This is the [Block Exchange
+   This is called the [Block Exchange
    Protocol](https://github.com/syncthing/specs/blob/master/BEPv1.md).

 2. Provide the reference implementation to demonstrate the usability of
-   said protocol. This is the `syncthing` utility. It is the hope that
-   alternative, compatible implementations of the protocol will come to
-   exist.
+   said protocol. This is the `syncthing` utility. We hope that
+   alternative, compatible implementations of the protocol will arise.

 The two are evolving together; the protocol is not to be considered
 stable until syncthing 1.0 is released, at which point it is locked down
@@ -32,20 +31,20 @@ There are a few examples for keeping syncthing running in the background
 on your system in [the etc directory](https://github.com/syncthing/syncthing/blob/master/etc).

 There is an IRC channel, `#syncthing` on Freenode, for talking directly
-to developers and users (when awake and present, etc.).
+to developers and users.

 Building
 --------

 Building Syncthing from source is easy, and there's a
 [guide](https://github.com/syncthing/syncthing/wiki/Building)
-that describes it for both Unix and Windows.
+that describes it for both Unix and Windows systems.

 Signed Releases
 ---------------

 As of v0.10.15 and onwards, git tags and release binaries are GPG signed
-with the key D26E6ED000654A3E (see http://syncthing.net/security.html).
+with the key D26E6ED000654A3E (see https://syncthing.net/security.html).
 For release binaries, MD5 and SHA1 checksums are calculated and signed,
 available in the md5sum.txt.asc and sha1sum.txt.asc files.
@@ -57,4 +56,4 @@ documentation](https://github.com/syncthing/syncthing/wiki/) is on the
 Github wiki.

 All code is licensed under the
-[MPLv2](https://github.com/syncthing/syncthing/blob/master/LICENSE).
+[MPLv2 License](https://github.com/syncthing/syncthing/blob/master/LICENSE).
Three image assets changed (binary, sizes only):

| Before | After |
|---|---|
| 3.7 KiB | 3.6 KiB |
| 1.8 KiB | 1.7 KiB |
| 3.8 KiB | 3.7 KiB |
@@ -4,6 +4,8 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this file,
 // You can obtain one at http://mozilla.org/MPL/2.0/.

+// +build ignore
+
 package main

 import (
cmd/syncthing/audit.go (new file, 69 additions)
@@ -0,0 +1,69 @@
+// Copyright (C) 2015 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package main
+
+import (
+	"encoding/json"
+	"io"
+
+	"github.com/syncthing/syncthing/internal/events"
+)
+
+// The auditSvc subscribes to events and writes these in JSON format, one
+// event per line, to the specified writer.
+type auditSvc struct {
+	w       io.Writer     // audit destination
+	stop    chan struct{} // signals time to stop
+	started chan struct{} // signals startup complete
+	stopped chan struct{} // signals stop complete
+}
+
+func newAuditSvc(w io.Writer) *auditSvc {
+	return &auditSvc{
+		w:       w,
+		stop:    make(chan struct{}),
+		started: make(chan struct{}),
+		stopped: make(chan struct{}),
+	}
+}
+
+// Serve runs the audit service.
+func (s *auditSvc) Serve() {
+	defer close(s.stopped)
+	sub := events.Default.Subscribe(events.AllEvents)
+	defer events.Default.Unsubscribe(sub)
+	enc := json.NewEncoder(s.w)
+
+	// We're ready to start processing events.
+	close(s.started)
+
+	for {
+		select {
+		case ev := <-sub.C():
+			enc.Encode(ev)
+		case <-s.stop:
+			return
+		}
+	}
+}
+
+// Stop stops the audit service.
+func (s *auditSvc) Stop() {
+	close(s.stop)
+}
+
+// WaitForStart returns once the audit service is ready to receive events, or
+// immediately if it's already running.
+func (s *auditSvc) WaitForStart() {
+	<-s.started
+}
+
+// WaitForStop returns once the audit service has stopped.
+// (Needed by the tests.)
+func (s *auditSvc) WaitForStop() {
+	<-s.stopped
+}
cmd/syncthing/auditsvc_test.go (new file, 54 additions)
@@ -0,0 +1,54 @@
+// Copyright (C) 2015 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package main
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/syncthing/syncthing/internal/events"
+)
+
+func TestAuditService(t *testing.T) {
+	buf := new(bytes.Buffer)
+	svc := newAuditSvc(buf)
+
+	// Event sent before start, will not be logged
+	events.Default.Log(events.Ping, "the first event")
+
+	go svc.Serve()
+	svc.WaitForStart()
+
+	// Event that should end up in the audit log
+	events.Default.Log(events.Ping, "the second event")
+
+	// We need to give the events time to arrive, since the channels are buffered etc.
+	time.Sleep(10 * time.Millisecond)
+
+	svc.Stop()
+	svc.WaitForStop()
+
+	// This event should not be logged, since we have stopped.
+	events.Default.Log(events.Ping, "the third event")
+
+	result := string(buf.Bytes())
+	t.Log(result)
+
+	if strings.Contains(result, "first event") {
+		t.Error("Unexpected first event")
+	}
+
+	if !strings.Contains(result, "second event") {
+		t.Error("Missing second event")
+	}
+
+	if strings.Contains(result, "third event") {
+		t.Error("Unexpected third event")
+	}
+}
@@ -15,23 +15,84 @@ import (
 	"time"

 	"github.com/syncthing/protocol"
+	"github.com/syncthing/syncthing/internal/config"
 	"github.com/syncthing/syncthing/internal/events"
 	"github.com/syncthing/syncthing/internal/model"
+	"github.com/thejerf/suture"
 )

-func listenConnect(myID protocol.DeviceID, m *model.Model, tlsCfg *tls.Config) {
-	var conns = make(chan *tls.Conn)
+// The connection service listens on TLS and dials configured unconnected
+// devices. Successful connections are handed to the model.
+type connectionSvc struct {
+	*suture.Supervisor
+	cfg    *config.Wrapper
+	myID   protocol.DeviceID
+	model  *model.Model
+	tlsCfg *tls.Config
+	conns  chan *tls.Conn
+}

-	// Listen
-	for _, addr := range cfg.Options().ListenAddress {
-		go listenTLS(conns, addr, tlsCfg)
+func newConnectionSvc(cfg *config.Wrapper, myID protocol.DeviceID, model *model.Model, tlsCfg *tls.Config) *connectionSvc {
+	svc := &connectionSvc{
+		Supervisor: suture.NewSimple("connectionSvc"),
+		cfg:        cfg,
+		myID:       myID,
+		model:      model,
+		tlsCfg:     tlsCfg,
+		conns:      make(chan *tls.Conn),
+	}

-	// Connect
-	go dialTLS(m, conns, tlsCfg)
+	// There are several moving parts here; one routine per listening address
+	// to handle incoming connections, one routine to periodically attempt
+	// outgoing connections, and lastly one routine to do the common handling
+	// regardless of whether the connection was incoming or outgoing. It ends
+	// up as in the diagram below. We embed a Supervisor to manage the
+	// routines (i.e. log and restart if they crash or exit, etc).
+	//
+	//                 +-----------------+
+	//    Incoming     | +---------------+-+      +-----------------+
+	//   Connections   | |               | |      |                 |    Outgoing
+	// --------------->| |  svc.listen   | |      |                 |  Connections
+	//                 | | (1 per listen | |      |   svc.connect   |-------------->
+	//                 | |   address)    | |      |                 |
+	//                 +-+               | |      |                 |
+	//                   +---------------+-+      +-----------------+
+	//                           v                        v
+	//                           |                        |
+	//                           |                        |
+	//                           +-----------+------------+
+	//                                       |
+	//                                       | svc.conns
+	//                                       v
+	//                              +-----------------+
+	//                              |                 |
+	//                              |                 |
+	//                              |   svc.handle    |------> model.AddConnection()
+	//                              |                 |
+	//                              |                 |
+	//                              +-----------------+
+	//
+	// TODO: Clean shutdown, and/or handling config changes on the fly. We
+	// partly do this now - new devices and addresses will be picked up, but
+	// not new listen addresses and we don't support disconnecting devices
+	// that are removed and so on...

+	svc.Add(serviceFunc(svc.connect))
+	for _, addr := range svc.cfg.Options().ListenAddress {
+		addr := addr
+		listener := serviceFunc(func() {
+			svc.listen(addr)
+		})
+		svc.Add(listener)
 	}
+	svc.Add(serviceFunc(svc.handle))
+
+	return svc
+}

+func (s *connectionSvc) handle() {
 next:
-	for conn := range conns {
+	for conn := range s.conns {
 		cs := conn.ConnectionState()

 		// We should have negotiated the next level protocol "bep/1.0" as part
@@ -69,13 +130,13 @@ next:
 		// this one. But in case we are two devices connecting to each other
 		// in parallel we don't want to do that or we end up with no
 		// connections still established...
-		if m.ConnectedTo(remoteID) {
+		if s.model.ConnectedTo(remoteID) {
 			l.Infof("Connected to already connected device (%s)", remoteID)
 			conn.Close()
 			continue
 		}

-		for deviceID, deviceCfg := range cfg.Devices() {
+		for deviceID, deviceCfg := range s.cfg.Devices() {
 			if deviceID == remoteID {
 				// Verify the name on the certificate. By default we set it to
 				// "syncthing" when generating, but the user may have replaced
@@ -97,7 +158,7 @@ next:
 				// If rate limiting is set, and based on the address we should
 				// limit the connection, then we wrap it in a limiter.

-				limit := shouldLimit(conn.RemoteAddr())
+				limit := s.shouldLimit(conn.RemoteAddr())

 				wr := io.Writer(conn)
 				if limit && writeRateLimit != nil {
@@ -110,7 +171,7 @@ next:
 				}

 				name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
-				protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, deviceCfg.Compression)
+				protoConn := protocol.NewConnection(remoteID, rd, wr, s.model, name, deviceCfg.Compression)

 				l.Infof("Established secure connection to %s at %s", remoteID, name)
 				if debugNet {
@@ -121,12 +182,12 @@ next:
 					"addr": conn.RemoteAddr().String(),
 				})

-				m.AddConnection(conn, protoConn)
+				s.model.AddConnection(conn, protoConn)
 				continue next
 			}
 		}

-		if !cfg.IgnoredDevice(remoteID) {
+		if !s.cfg.IgnoredDevice(remoteID) {
 			events.Default.Log(events.DeviceRejected, map[string]string{
 				"device":  remoteID.String(),
 				"address": conn.RemoteAddr().String(),
@@ -140,7 +201,7 @@ next:
 	}
 }

-func listenTLS(conns chan *tls.Conn, addr string, tlsCfg *tls.Config) {
+func (s *connectionSvc) listen(addr string) {
 	if debugNet {
 		l.Debugln("listening on", addr)
 	}
@@ -166,9 +227,9 @@ func (s *connectionSvc) listen(addr string) {
 		}

 		tcpConn := conn.(*net.TCPConn)
-		setTCPOptions(tcpConn)
+		s.setTCPOptions(tcpConn)

-		tc := tls.Server(conn, tlsCfg)
+		tc := tls.Server(conn, s.tlsCfg)
 		err = tc.Handshake()
 		if err != nil {
 			l.Infoln("TLS handshake:", err)
@@ -176,21 +237,20 @@ func (s *connectionSvc) listen(addr string) {
 			continue
 		}

-		conns <- tc
+		s.conns <- tc
 	}
-
 }

-func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
+func (s *connectionSvc) connect() {
 	delay := time.Second
 	for {
 	nextDevice:
-		for deviceID, deviceCfg := range cfg.Devices() {
+		for deviceID, deviceCfg := range s.cfg.Devices() {
 			if deviceID == myID {
 				continue
 			}

-			if m.ConnectedTo(deviceID) {
+			if s.model.ConnectedTo(deviceID) {
 				continue
 			}
@@ -238,9 +298,9 @@ func (s *connectionSvc) connect() {
 				continue
 			}

-			setTCPOptions(conn)
+			s.setTCPOptions(conn)

-			tc := tls.Client(conn, tlsCfg)
+			tc := tls.Client(conn, s.tlsCfg)
 			err = tc.Handshake()
 			if err != nil {
 				l.Infoln("TLS handshake:", err)
@@ -248,20 +308,20 @@ func (s *connectionSvc) connect() {
 				continue
 			}

-				conns <- tc
+				s.conns <- tc
 				continue nextDevice
 			}
 		}

 		time.Sleep(delay)
 		delay *= 2
-		if maxD := time.Duration(cfg.Options().ReconnectIntervalS) * time.Second; delay > maxD {
+		if maxD := time.Duration(s.cfg.Options().ReconnectIntervalS) * time.Second; delay > maxD {
 			delay = maxD
 		}
 	}
 }

-func setTCPOptions(conn *net.TCPConn) {
+func (*connectionSvc) setTCPOptions(conn *net.TCPConn) {
 	var err error
 	if err = conn.SetLinger(0); err != nil {
 		l.Infoln(err)
@@ -277,8 +337,8 @@ func setTCPOptions(conn *net.TCPConn) {
 	}
 }

-func shouldLimit(addr net.Addr) bool {
-	if cfg.Options().LimitBandwidthInLan {
+func (s *connectionSvc) shouldLimit(addr net.Addr) bool {
+	if s.cfg.Options().LimitBandwidthInLan {
 		return true
 	}
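newConnectionSvc hands each routine to the embedded Supervisor via serviceFunc, whose definition is not part of this diff. It is presumably a small adapter that lets a bare function satisfy suture's Service interface, along these lines:

```go
// Assumed shape of the serviceFunc adapter used above; not shown in the diff.
type serviceFunc func()

// Serve runs the wrapped function; when it returns, the supervisor treats the
// service as stopped and may restart it.
func (f serviceFunc) Serve() { f() }

// Stop is a no-op in this sketch; the wrapped routines above run until their
// channels close rather than reacting to supervisor-driven shutdown.
func (f serviceFunc) Stop() {}
```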
@@ -22,7 +22,6 @@ import (
 	"runtime"
 	"strconv"
 	"strings"
-	"sync"
 	"time"

 	"github.com/calmh/logger"
@@ -34,6 +33,7 @@ import (
 	"github.com/syncthing/syncthing/internal/events"
 	"github.com/syncthing/syncthing/internal/model"
 	"github.com/syncthing/syncthing/internal/osutil"
+	"github.com/syncthing/syncthing/internal/sync"
 	"github.com/syncthing/syncthing/internal/upgrade"
 	"github.com/vitrun/qart/qr"
 	"golang.org/x/crypto/bcrypt"
@@ -45,27 +45,25 @@ type guiError struct {
 }

 var (
-	configInSync = true
-	guiErrors    = []guiError{}
-	guiErrorsMut sync.Mutex
-	startTime    = time.Now()
+	configInSync = true
+	guiErrors    = []guiError{}
+	guiErrorsMut sync.Mutex = sync.NewMutex()
+	startTime    = time.Now()
 	eventSub     *events.BufferedSubscription
 )

 var (
 	lastEventRequest    time.Time
-	lastEventRequestMut sync.Mutex
+	lastEventRequestMut sync.Mutex = sync.NewMutex()
 )

-func init() {
-	l.AddHandler(logger.LevelWarn, showGuiError)
-	sub := events.Default.Subscribe(events.AllEvents)
-	eventSub = events.NewBufferedSubscription(sub, 1000)
-}
-
 func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error {
 	var err error

+	l.AddHandler(logger.LevelWarn, showGuiError)
+	sub := events.Default.Subscribe(events.AllEvents)
+	eventSub = events.NewBufferedSubscription(sub, 1000)
+
 	cert, err := tls.LoadX509KeyPair(locations[locHTTPSCertFile], locations[locHTTPSKeyFile])
 	if err != nil {
 		l.Infoln("Loading HTTPS certificate:", err)
@@ -111,9 +109,9 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error
 	// The GET handlers
 	getRestMux := http.NewServeMux()
 	getRestMux.HandleFunc("/rest/db/completion", withModel(m, restGetDBCompletion)) // device folder
-	getRestMux.HandleFunc("/rest/db/file", withModel(m, restGetDBFile))             // folder file [blocks]
+	getRestMux.HandleFunc("/rest/db/file", withModel(m, restGetDBFile))             // folder file
 	getRestMux.HandleFunc("/rest/db/ignores", withModel(m, restGetDBIgnores))       // folder
-	getRestMux.HandleFunc("/rest/db/need", withModel(m, restGetDBNeed))             // folder
+	getRestMux.HandleFunc("/rest/db/need", withModel(m, restGetDBNeed))             // folder [perpage] [page]
 	getRestMux.HandleFunc("/rest/db/status", withModel(m, restGetDBStatus))         // folder
 	getRestMux.HandleFunc("/rest/db/browse", withModel(m, restGetDBBrowse))         // folder [prefix] [dirsonly] [levels]
 	getRestMux.HandleFunc("/rest/events", restGetEvents)                            // since [limit]
@@ -135,7 +133,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error

 	// The POST handlers
 	postRestMux := http.NewServeMux()
-	postRestMux.HandleFunc("/rest/db/prio", withModel(m, restPostDBPrio))         // folder file
+	postRestMux.HandleFunc("/rest/db/prio", withModel(m, restPostDBPrio))         // folder file [perpage] [page]
 	postRestMux.HandleFunc("/rest/db/ignores", withModel(m, restPostDBIgnores))   // folder
 	postRestMux.HandleFunc("/rest/db/override", withModel(m, restPostDBOverride)) // folder
 	postRestMux.HandleFunc("/rest/db/scan", withModel(m, restPostDBScan))         // folder [sub...]
@@ -354,7 +352,12 @@ func folderSummary(m *model.Model, folder string) map[string]interface{} {

 	res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes

-	res["state"], res["stateChanged"] = m.State(folder)
+	var err error
+	res["state"], res["stateChanged"], err = m.State(folder)
+	if err != nil {
+		res["error"] = err.Error()
+	}
+
 	res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)

 	ignorePatterns, _, _ := m.GetIgnores(folder)
@@ -376,15 +379,29 @@ func restPostDBOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
 }

 func restGetDBNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
-	var qs = r.URL.Query()
-	var folder = qs.Get("folder")
+	qs := r.URL.Query()
+
+	folder := qs.Get("folder")
+
+	page, err := strconv.Atoi(qs.Get("page"))
+	if err != nil || page < 1 {
+		page = 1
+	}
+	perpage, err := strconv.Atoi(qs.Get("perpage"))
+	if err != nil || perpage < 1 {
+		perpage = 1 << 16
+	}
+
+	progress, queued, rest, total := m.NeedFolderFiles(folder, page, perpage)

-	progress, queued, rest := m.NeedFolderFiles(folder, 100)
 	// Convert the struct to a more loose structure, and inject the size.
-	output := map[string][]map[string]interface{}{
+	output := map[string]interface{}{
 		"progress": toNeedSlice(progress),
 		"queued":   toNeedSlice(queued),
 		"rest":     toNeedSlice(rest),
+		"total":    total,
+		"page":     page,
+		"perpage":  perpage,
 	}

 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
@@ -413,19 +430,13 @@ func restGetDBFile(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	qs := r.URL.Query()
 	folder := qs.Get("folder")
 	file := qs.Get("file")
-	withBlocks := qs.Get("blocks") != ""
 	gf, _ := m.CurrentGlobalFile(folder, file)
 	lf, _ := m.CurrentFolderFile(folder, file)

-	if !withBlocks {
-		gf.Blocks = nil
-		lf.Blocks = nil
-	}
-
 	av := m.Availability(folder, file)
 	json.NewEncoder(w).Encode(map[string]interface{}{
-		"global":       gf,
-		"local":        lf,
+		"global":       jsonFileInfo(gf),
+		"local":        jsonFileInfo(lf),
 		"availability": av,
 	})
 }
@@ -525,7 +536,7 @@ func flushResponse(s string, w http.ResponseWriter) {
 }

 var cpuUsagePercent [10]float64 // The last ten seconds
-var cpuUsageLock sync.RWMutex
+var cpuUsageLock sync.RWMutex = sync.NewRWMutex()

 func restGetSystemStatus(w http.ResponseWriter, r *http.Request) {
 	var m runtime.MemStats
@@ -681,7 +692,7 @@ func restGetSystemUpgrade(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), 500)
 		return
 	}
-	rel, err := upgrade.LatestGithubRelease(Version)
+	rel, err := upgrade.LatestRelease(Version)
 	if err != nil {
 		http.Error(w, err.Error(), 500)
 		return
@@ -689,7 +700,8 @@ func restGetSystemUpgrade(w http.ResponseWriter, r *http.Request) {
 	res := make(map[string]interface{})
 	res["running"] = Version
 	res["latest"] = rel.Tag
-	res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == 1
+	res["newer"] = upgrade.CompareVersions(rel.Tag, Version) == upgrade.Newer
+	res["majorNewer"] = upgrade.CompareVersions(rel.Tag, Version) == upgrade.MajorNewer

 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	json.NewEncoder(w).Encode(res)
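The rewritten checks compare against named constants instead of the bare integer 1. Those constants live in the upgrade package, outside this diff; for the ordered `> upgrade.Equal` test below and the two equality tests above to work as written, they plausibly form an ordered scale like this (assumed values; only the ordering is load-bearing):

```go
// Assumed result scale for upgrade.CompareVersions; not part of the diff.
const (
	MajorOlder = -2 // the candidate is at least a major version behind
	Older      = -1
	Equal      = 0
	Newer      = 1
	MajorNewer = 2 // upgrading would cross a major version boundary
)
```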
@@ -723,14 +735,14 @@ func restGetLang(w http.ResponseWriter, r *http.Request) {
 }

 func restPostSystemUpgrade(w http.ResponseWriter, r *http.Request) {
-	rel, err := upgrade.LatestGithubRelease(Version)
+	rel, err := upgrade.LatestRelease(Version)
 	if err != nil {
 		l.Warnln("getting latest release:", err)
 		http.Error(w, err.Error(), 500)
 		return
 	}

-	if upgrade.CompareVersions(rel.Tag, Version) == 1 {
+	if upgrade.CompareVersions(rel.Tag, Version) > upgrade.Equal {
 		err = upgrade.To(rel)
 		if err != nil {
 			l.Warnln("upgrading:", err)
@@ -907,17 +919,49 @@ func mimeTypeForFile(file string) string {
 	}
 }

-func toNeedSlice(fs []db.FileInfoTruncated) []map[string]interface{} {
-	output := make([]map[string]interface{}, len(fs))
-	for i, file := range fs {
-		output[i] = map[string]interface{}{
-			"name":         file.Name,
-			"flags":        file.Flags,
-			"modified":     file.Modified,
-			"version":      file.Version,
-			"localVersion": file.LocalVersion,
-			"size":         file.Size(),
-		}
+func toNeedSlice(fs []db.FileInfoTruncated) []jsonDBFileInfo {
+	res := make([]jsonDBFileInfo, len(fs))
+	for i, f := range fs {
+		res[i] = jsonDBFileInfo(f)
 	}
-	return output
+	return res
 }
+
+// Type wrappers for nice JSON serialization
+
+type jsonFileInfo protocol.FileInfo
+
+func (f jsonFileInfo) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"name":         f.Name,
+		"size":         protocol.FileInfo(f).Size(),
+		"flags":        fmt.Sprintf("%#o", f.Flags),
+		"modified":     time.Unix(f.Modified, 0),
+		"localVersion": f.LocalVersion,
+		"numBlocks":    len(f.Blocks),
+		"version":      jsonVersionVector(f.Version),
+	})
+}
+
+type jsonDBFileInfo db.FileInfoTruncated
+
+func (f jsonDBFileInfo) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"name":         f.Name,
+		"size":         db.FileInfoTruncated(f).Size(),
+		"flags":        fmt.Sprintf("%#o", f.Flags),
+		"modified":     time.Unix(f.Modified, 0),
+		"localVersion": f.LocalVersion,
+		"version":      jsonVersionVector(f.Version),
+	})
+}
+
+type jsonVersionVector protocol.Vector
+
+func (v jsonVersionVector) MarshalJSON() ([]byte, error) {
+	res := make([]string, len(v))
+	for i, c := range v {
+		res[i] = fmt.Sprintf("%d:%d", c.ID, c.Value)
+	}
+	return json.Marshal(res)
+}
@@ -12,16 +12,16 @@ import (
 	"math/rand"
 	"net/http"
 	"strings"
-	"sync"
 	"time"

 	"github.com/syncthing/syncthing/internal/config"
+	"github.com/syncthing/syncthing/internal/sync"
 	"golang.org/x/crypto/bcrypt"
 )

 var (
-	sessions    = make(map[string]bool)
-	sessionsMut sync.Mutex
+	sessions    = make(map[string]bool)
+	sessionsMut sync.Mutex = sync.NewMutex()
 )

 func basicAuthAndSessionMiddleware(cfg config.GUIConfiguration, next http.Handler) http.Handler {
@@ -42,6 +42,10 @@ func basicAuthAndSessionMiddleware(cfg config.GUIConfiguration, next http.Handler) http.Handler {
 		}
 	}

+	if debugHTTP {
+		l.Debugln("Sessionless HTTP request with authentication; this is expensive.")
+	}
+
 	error := func() {
 		time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
 		w.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
@@ -12,14 +12,14 @@ import (
 	"net/http"
 	"os"
 	"strings"
-	"sync"
 	"time"

 	"github.com/syncthing/syncthing/internal/osutil"
+	"github.com/syncthing/syncthing/internal/sync"
 )

 var csrfTokens []string
-var csrfMut sync.Mutex
+var csrfMut sync.Mutex = sync.NewMutex()

 // Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
 // the request with 403. For / and /index.html, set a new CSRF cookie if none
@@ -11,6 +11,7 @@ import (
 	"path/filepath"
 	"runtime"
 	"strings"
+	"time"

 	"github.com/syncthing/syncthing/internal/osutil"
 )
@@ -29,6 +30,7 @@ const (
 	locLogFile    = "logFile"
 	locCsrfTokens = "csrfTokens"
 	locPanicLog   = "panicLog"
+	locAuditLog   = "auditLog"
 	locDefFolder  = "defFolder"
 )
@@ -48,7 +50,8 @@ var locations = map[locationEnum]string{
 	locDatabase:   "${config}/index-v0.11.0.db",
 	locLogFile:    "${config}/syncthing.log", // -logfile on Windows
 	locCsrfTokens: "${config}/csrftokens.txt",
-	locPanicLog:   "${config}/panic-20060102-150405.log", // passed through time.Format()
+	locPanicLog:   "${config}/panic-${timestamp}.log",
+	locAuditLog:   "${config}/audit-${timestamp}.log",
 	locDefFolder:  "${home}/Sync",
 }
@@ -107,3 +110,14 @@ func homeDir() string {
 	}
 	return home
 }
+
+func timestampedLoc(key locationEnum) string {
+	// We take the roundtrip via "${timestamp}" instead of passing the path
+	// directly through time.Format() to avoid issues when the path we are
+	// expanding contains numbers; otherwise for example
+	// /home/user2006/.../panic-20060102-150405.log would get both instances of
+	// 2006 replaced by 2015...
+	tpl := locations[key]
+	now := time.Now().Format("20060102-150405")
+	return strings.Replace(tpl, "${timestamp}", now, -1)
+}
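The comment in timestampedLoc is worth making concrete: Go's reference layout is the literal digit string 20060102-150405, so running a whole path through time.Format would also rewrite unrelated digits that happen to match layout elements. A runnable demonstration of the failure mode the ${timestamp} placeholder avoids (the path is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	ts := time.Date(2015, 4, 25, 12, 0, 0, 0, time.UTC)

	// Formatting the whole path treats the "2006" inside "user2006" as the
	// year element of the layout and rewrites it too.
	fmt.Println(ts.Format("/home/user2006/panic-20060102-150405.log"))
	// Output: /home/user2015/panic-20150425-120000.log

	// The diff's approach: format only the timestamp, then substitute it
	// into the template, leaving the rest of the path untouched.
	tpl := "/home/user2006/panic-${timestamp}.log"
	now := ts.Format("20060102-150405")
	fmt.Println(strings.Replace(tpl, "${timestamp}", now, -1))
	// Output: /home/user2006/panic-20150425-120000.log
}
```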
@@ -39,6 +39,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/thejerf/suture"
 	"golang.org/x/crypto/bcrypt"
 )
@@ -50,6 +51,7 @@ var (
 	BuildHost   = "unknown"
 	BuildUser   = "unknown"
 	IsRelease   bool
+	IsBeta      bool
 	LongVersion string
 )
@@ -77,9 +79,15 @@ func init() {
 		}
 	}

-	// Check for a clean release build.
-	exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-beta[\d\.]+)?$`)
+	// Check for a clean release build. A release is something like "v0.1.2",
+	// with an optional suffix of letters and dot separated numbers like
+	// "-beta3.47". If there's more stuff, like a plus sign and a commit hash
+	// and so on, then it's not a release. If there's a dash anywhere in
+	// there, it's some kind of beta or prerelease version.
+
+	exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z]+[\d\.]+)?$`)
 	IsRelease = exp.MatchString(Version)
+	IsBeta = strings.Contains(Version, "-")

 	stamp, _ := strconv.Atoi(BuildStamp)
 	BuildDate = time.Unix(int64(stamp), 0)
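To see what the widened expression accepts and rejects, here is a short runnable check against a few illustrative version strings (the strings are examples, not taken from the diff):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z]+[\d\.]+)?$`)

	for _, v := range []string{
		"v0.11.1",             // plain release: matches
		"v0.11.0-beta3.47",    // beta suffix: matched before and now
		"v0.11.0-rc1",         // any letter suffix now matches, not just -beta
		"v0.11.1+13-g6e6b7c8", // commit-hash build metadata: not a release
	} {
		fmt.Printf("%-20s %v\n", v, exp.MatchString(v))
	}
}
```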
@@ -144,6 +152,7 @@ are mostly useful for developers. Use with care.
|
||||
- "events" (the events package)
|
||||
- "files" (the files package)
|
||||
- "http" (the main package; HTTP requests)
|
||||
- "locks" (the sync package; trace long held locks)
|
||||
- "net" (the main package; connections & network messages)
|
||||
- "model" (the model package)
|
||||
- "scanner" (the scanner package)
|
||||
@@ -187,6 +196,7 @@ var (
|
||||
noConsole bool
|
||||
generateDir string
|
||||
logFile string
|
||||
auditEnabled bool
|
||||
noRestart = os.Getenv("STNORESTART") != ""
|
||||
noUpgrade = os.Getenv("STNOUPGRADE") != ""
|
||||
guiAddress = os.Getenv("STGUIADDRESS") // legacy
|
||||
@@ -223,6 +233,7 @@ func main() {
|
||||
flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
|
||||
flag.BoolVar(&showVersion, "version", false, "Show version")
|
||||
flag.StringVar(&upgradeTo, "upgrade-to", upgradeTo, "Force upgrade directly from specified URL")
|
||||
flag.BoolVar(&auditEnabled, "audit", false, "Write events to audit file")
|
||||
|
||||
flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, baseDirs["config"]))
|
||||
flag.Parse()
|
||||
@@ -323,7 +334,7 @@ func main() {
|
||||
}
|
||||
|
||||
if doUpgrade || doUpgradeCheck {
|
||||
rel, err := upgrade.LatestGithubRelease(Version)
|
||||
rel, err := upgrade.LatestRelease(Version)
|
||||
if err != nil {
|
||||
l.Fatalln("Upgrade:", err) // exits 1
|
||||
}
|
||||
@@ -365,7 +376,23 @@ func main() {
|
||||
}
|
||||
|
||||
func syncthingMain() {
|
||||
var err error
|
||||
// Create a main service manager. We'll add things to this as we go along.
|
||||
// We want any logging it does to go through our log system, with INFO
|
||||
// severity.
|
||||
mainSvc := suture.New("main", suture.Spec{
|
||||
Log: func(line string) {
|
||||
l.Infoln(line)
|
||||
},
|
||||
})
|
||||
mainSvc.ServeBackground()
|
||||
|
||||
// Set a log prefix similar to the ID we will have later on, or early log
|
||||
// lines look ugly.
|
||||
l.SetPrefix("[start] ")
|
||||
|
||||
if auditEnabled {
|
||||
startAuditing(mainSvc)
|
||||
}
|
||||
|
||||
if len(os.Getenv("GOMAXPROCS")) == 0 {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
@@ -374,7 +401,7 @@ func syncthingMain() {
|
||||
events.Default.Log(events.Starting, map[string]string{"home": baseDirs["config"]})
|
||||
|
||||
// Ensure that that we have a certificate and key.
|
||||
cert, err = tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
|
||||
cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
|
||||
if err != nil {
|
||||
cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
|
||||
if err != nil {
|
||||
@@ -433,6 +460,10 @@ func syncthingMain() {
|
||||
cfg.Save()
|
||||
}
|
||||
|
||||
if err := checkShortIDs(cfg); err != nil {
|
||||
l.Fatalln("Short device IDs are in conflict. Unlucky!\n Regenerate the device ID of one if the following:\n ", err)
|
||||
}
|
||||
|
||||
if len(profiler) > 0 {
|
||||
go func() {
|
||||
l.Debugln("Starting profiler on", profiler)
|
||||
@@ -510,6 +541,15 @@ func syncthingMain() {
|
||||
|
||||
m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
|
||||
|
||||
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
|
||||
it, err := strconv.Atoi(t)
|
||||
if err == nil {
|
||||
m.StartDeadlockDetector(time.Duration(it) * time.Second)
|
||||
}
|
||||
} else if !IsRelease || IsBeta {
|
||||
m.StartDeadlockDetector(20 * 60 * time.Second)
|
||||
}
|
||||
|
||||
// GUI
|
||||
|
||||
setupGUI(cfg, m)
|
||||
@@ -544,7 +584,9 @@ func syncthingMain() {
|
||||
|
||||
// Routine to connect out to configured devices
|
||||
discoverer = discovery(externalPort)
|
||||
go listenConnect(myID, m, tlsCfg)
|
||||
|
||||
connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
|
||||
mainSvc.Add(connectionSvc)
|
||||
|
||||
for _, folder := range cfg.Folders() {
|
||||
// Routine to pull blocks from other devices to synchronize the local
|
||||
@@ -618,10 +660,29 @@ func syncthingMain() {
|
||||
|
||||
code := <-stop
|
||||
|
||||
mainSvc.Stop()
|
||||
|
||||
l.Okln("Exiting")
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func startAuditing(mainSvc *suture.Supervisor) {
|
||||
auditFile := timestampedLoc(locAuditLog)
|
||||
fd, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
l.Fatalln("Audit:", err)
|
||||
}
|
||||
|
||||
auditSvc := newAuditSvc(fd)
|
||||
mainSvc.Add(auditSvc)
|
||||
|
||||
// We wait for the audit service to fully start before we return, to
|
||||
// ensure we capture all events from the start.
|
||||
auditSvc.WaitForStart()
|
||||
|
||||
l.Infoln("Audit log in", auditFile)
|
||||
}

func setupGUI(cfg *config.Wrapper, m *model.Model) {
opts := cfg.Options()
guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey)
@@ -712,7 +773,7 @@ func setupUPnP() {
} else {
// Set up incoming port forwarding, if necessary and possible
port, _ := strconv.Atoi(portStr)
-igds := upnp.Discover()
+igds := upnp.Discover(time.Duration(cfg.Options().UPnPTimeoutS) * time.Second)
if len(igds) > 0 {
// Configure the first discovered IGD only. This is a work-around until we have a better mechanism
// for handling multiple IGDs, which will require changes to the global discovery service
@@ -724,7 +785,7 @@ func setupUPnP() {
} else {
l.Infof("Created UPnP port mapping for external port %d on UPnP device %s.", externalPort, igd.FriendlyIdentifier())

-if opts.UPnPRenewal > 0 {
+if opts.UPnPRenewalM > 0 {
go renewUPnP(port)
}
}
@@ -742,7 +803,7 @@ func setupExternalPort(igd *upnp.IGD, port int) int {

for i := 0; i < 10; i++ {
r := 1024 + predictableRandom.Intn(65535-1024)
-err := igd.AddPortMapping(upnp.TCP, r, port, fmt.Sprintf("syncthing-%d", r), cfg.Options().UPnPLease*60)
+err := igd.AddPortMapping(upnp.TCP, r, port, fmt.Sprintf("syncthing-%d", r), cfg.Options().UPnPLeaseM*60)
if err == nil {
return r
}
@@ -753,14 +814,16 @@ func setupExternalPort(igd *upnp.IGD, port int) int {
func renewUPnP(port int) {
for {
opts := cfg.Options()
-time.Sleep(time.Duration(opts.UPnPRenewal) * time.Minute)
+time.Sleep(time.Duration(opts.UPnPRenewalM) * time.Minute)
// Some values might have changed while we were sleeping
opts = cfg.Options()

// Make sure our IGD reference isn't nil
if igd == nil {
if debugNet {
l.Debugln("Undefined IGD during UPnP port renewal. Re-discovering...")
}
-igds := upnp.Discover()
+igds := upnp.Discover(time.Duration(opts.UPnPTimeoutS) * time.Second)
if len(igds) > 0 {
// Configure the first discovered IGD only. This is a work-around until we have a better mechanism
// for handling multiple IGDs, which will require changes to the global discovery service
@@ -775,7 +838,7 @@ func renewUPnP(port int) {

// Just renew the same port that we already have
if externalPort != 0 {
-err := igd.AddPortMapping(upnp.TCP, externalPort, port, "syncthing", opts.UPnPLease*60)
+err := igd.AddPortMapping(upnp.TCP, externalPort, port, "syncthing", opts.UPnPLeaseM*60)
if err != nil {
l.Warnf("Error renewing UPnP port mapping for external port %d on device %s: %s", externalPort, igd.FriendlyIdentifier(), err.Error())
} else if debugNet {
@@ -950,7 +1013,7 @@ func autoUpgrade() {
case <-timer.C:
}

-rel, err := upgrade.LatestGithubRelease(Version)
+rel, err := upgrade.LatestRelease(Version)
if err == upgrade.ErrUpgradeUnsupported {
events.Default.Unsubscribe(sub)
return
@@ -989,6 +1052,7 @@ func autoUpgrade() {
func cleanConfigDirectory() {
patterns := map[string]time.Duration{
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
+"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
@@ -1004,7 +1068,7 @@ func cleanConfigDirectory() {
}

for _, file := range files {
-info, err := os.Lstat(file)
+info, err := osutil.Lstat(file)
if err != nil {
l.Infoln("Cleaning:", err)
continue
@@ -1020,3 +1084,18 @@ func cleanConfigDirectory() {
}
}
}
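The patterns map above pairs a glob with a maximum age. A hedged sketch of the removal loop this drives, assuming each pattern is resolved against the config directory and matches older than the age limit are deleted (the helper name and error handling are illustrative):

import (
    "os"
    "path/filepath"
    "time"
)

func cleanOlderThan(configDir, pattern string, maxAge time.Duration) {
    matches, err := filepath.Glob(filepath.Join(configDir, pattern))
    if err != nil {
        return // malformed pattern; nothing to clean
    }
    for _, file := range matches {
        info, err := os.Lstat(file)
        if err != nil {
            continue // racing deletion or permission problem; skip it
        }
        if time.Since(info.ModTime()) > maxAge {
            os.Remove(file) // old enough: best-effort removal
        }
    }
}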

+// checkShortIDs verifies that the configuration won't result in duplicate
+// short ID:s; that is, that the devices in the cluster all have unique
+// initial 64 bits.
+func checkShortIDs(cfg *config.Wrapper) error {
+exists := make(map[uint64]protocol.DeviceID)
+for deviceID := range cfg.Devices() {
+shortID := deviceID.Short()
+if otherID, ok := exists[shortID]; ok {
+return fmt.Errorf("%v in conflict with %v", deviceID, otherID)
+}
+exists[shortID] = deviceID
+}
+return nil
+}
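For orientation: the short ID is the leading 64 bits of the full device ID, which is why the test below forces a collision by keeping only the first eight bytes equal. A sketch of that extraction, assuming big-endian packing as the test data suggests:

import "encoding/binary"

// shortIDOf returns the first 64 bits of a device ID; two IDs collide
// exactly when their leading eight bytes are equal.
func shortIDOf(id [32]byte) uint64 {
    return binary.BigEndian.Uint64(id[:8])
}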

@@ -20,6 +20,10 @@ import (
)

func TestFolderErrors(t *testing.T) {
+// This test intentionally avoids starting the folders. If they are
+// started, they will perform an initial scan, which will create missing
+// folder markers and race with the stuff we do in the test.

fcfg := config.FolderConfiguration{
ID: "folder",
RawPath: "testdata/testfolder",
@@ -29,10 +33,8 @@ func TestFolderErrors(t *testing.T) {
})

for _, file := range []string{".stfolder", "testfolder/.stfolder", "testfolder"} {
-os.Remove("testdata/" + file)
-_, err := os.Stat("testdata/" + file)
-if err == nil {
-t.Error("Found unexpected file")
+if err := os.Remove("testdata/" + file); err != nil && !os.IsNotExist(err) {
+t.Fatal(err)
}
}

@@ -57,8 +59,12 @@ func TestFolderErrors(t *testing.T) {
t.Error(err)
}

-os.Remove("testdata/testfolder/.stfolder")
-os.Remove("testdata/testfolder/")
+if err := os.Remove("testdata/testfolder/.stfolder"); err != nil {
+t.Fatal(err)
+}
+if err := os.Remove("testdata/testfolder/"); err != nil {
+t.Fatal(err)
+}

// Case 2 - new folder, marker created

@@ -79,7 +85,9 @@ func TestFolderErrors(t *testing.T) {
t.Error(err)
}

-os.Remove("testdata/.stfolder")
+if err := os.Remove("testdata/.stfolder"); err != nil {
+t.Fatal(err)
+}

// Case 3 - Folder marker missing

@@ -91,7 +99,7 @@ func TestFolderErrors(t *testing.T) {
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
m.AddFolder(fcfg)

-if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "Folder marker missing" {
+if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder marker missing" {
t.Error("Incorrect error: Folder marker missing !=", m.CheckFolderHealth("folder"))
}

@@ -107,8 +115,12 @@ func TestFolderErrors(t *testing.T) {

// Case 4 - Folder path missing

-os.Remove("testdata/testfolder/.stfolder")
-os.Remove("testdata/testfolder/")
+if err := os.Remove("testdata/testfolder/.stfolder"); err != nil && !os.IsNotExist(err) {
+t.Fatal(err)
+}
+if err := os.Remove("testdata/testfolder"); err != nil && !os.IsNotExist(err) {
+t.Fatal(err)
+}

fcfg.RawPath = "testdata/testfolder"
cfg = config.Wrap("testdata/subfolder", config.Configuration{
@@ -118,15 +130,17 @@ func TestFolderErrors(t *testing.T) {
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
m.AddFolder(fcfg)

-if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "Folder path missing" {
+if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder path missing" {
t.Error("Incorrect error: Folder path missing !=", m.CheckFolderHealth("folder"))
}

// Case 4.1 - recover after folder path missing

-os.Mkdir("testdata/testfolder", 0700)
+if err := os.Mkdir("testdata/testfolder", 0700); err != nil {
+t.Fatal(err)
+}

-if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "Folder marker missing" {
+if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder marker missing" {
t.Error("Incorrect error: Folder marker missing !=", m.CheckFolderHealth("folder"))
}

@@ -140,3 +154,27 @@ func TestFolderErrors(t *testing.T) {
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
}
}

+func TestShortIDCheck(t *testing.T) {
+cfg := config.Wrap("/tmp/test", config.Configuration{
+Devices: []config.DeviceConfiguration{
+{DeviceID: protocol.DeviceID{8, 16, 24, 32, 40, 48, 56, 0, 0}},
+{DeviceID: protocol.DeviceID{8, 16, 24, 32, 40, 48, 56, 1, 1}}, // first 56 bits same, differ in the first 64 bits
+},
+})
+
+if err := checkShortIDs(cfg); err != nil {
+t.Error("Unexpected error:", err)
+}
+
+cfg = config.Wrap("/tmp/test", config.Configuration{
+Devices: []config.DeviceConfiguration{
+{DeviceID: protocol.DeviceID{8, 16, 24, 32, 40, 48, 56, 64, 0}},
+{DeviceID: protocol.DeviceID{8, 16, 24, 32, 40, 48, 56, 64, 1}}, // first 64 bits same
+},
+})
+
+if err := checkShortIDs(cfg); err == nil {
+t.Error("Should have gotten an error")
+}
+}

@@ -14,17 +14,17 @@ import (
"os/signal"
"runtime"
"strings"
-"sync"
"syscall"
"time"

"github.com/syncthing/syncthing/internal/osutil"
+"github.com/syncthing/syncthing/internal/sync"
)

var (
-stdoutFirstLines []string // The first 10 lines of stdout
-stdoutLastLines []string // The last 50 lines of stdout
-stdoutMut sync.Mutex
+stdoutFirstLines []string // The first 10 lines of stdout
+stdoutLastLines []string // The last 50 lines of stdout
+stdoutMut sync.Mutex = sync.NewMutex()
)

const (
@@ -163,7 +163,7 @@ func copyStderr(stderr io.ReadCloser, dst io.Writer) {
dst.Write([]byte(line))

if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
-panicFd, err = os.Create(time.Now().Format(locations[locPanicLog]))
+panicFd, err = os.Create(timestampedLoc(locPanicLog))
if err != nil {
l.Warnln("Create panic log:", err)
continue

@@ -7,11 +7,11 @@
package main

import (
-"sync"
"time"

"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/model"
+"github.com/syncthing/syncthing/internal/sync"
"github.com/thejerf/suture"
)

@@ -37,6 +37,7 @@ func (c *folderSummarySvc) Serve() {
c.stop = make(chan struct{})
c.folders = make(map[string]struct{})
c.srv = srv
+c.foldersMut = sync.NewMutex()

srv.Serve()
}
@@ -66,21 +67,25 @@ func (c *folderSummarySvc) listenForUpdates() {
data := ev.Data.(map[string]interface{})
folder := data["folder"].(string)

-if ev.Type == events.StateChanged && data["to"].(string) == "idle" && data["from"].(string) == "syncing" {
-// The folder changed to idle from syncing. We should do an
-// immediate refresh to update the GUI. The send to
-// c.immediate must be nonblocking so that we can continue
-// handling events.
+switch ev.Type {
+case events.StateChanged:
+if data["to"].(string) == "idle" && data["from"].(string) == "syncing" {
+// The folder changed to idle from syncing. We should do an
+// immediate refresh to update the GUI. The send to
+// c.immediate must be nonblocking so that we can continue
+// handling events.

-select {
-case c.immediate <- folder:
-c.foldersMut.Lock()
-delete(c.folders, folder)
-c.foldersMut.Unlock()
+select {
+case c.immediate <- folder:
+c.foldersMut.Lock()
+delete(c.folders, folder)
+c.foldersMut.Unlock()

-default:
+default:
}
+}
-} else {

+default:
// This folder needs to be refreshed whenever we do the next
// refresh.
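The refactor above keeps the nonblocking send that the comment describes. As a self-contained sketch of that pattern:

// notifyImmediate hands the folder name to a receiver if one is ready;
// otherwise the default case falls through, so event handling never
// blocks behind a slow consumer.
func notifyImmediate(immediate chan<- string, folder string) bool {
    select {
    case immediate <- folder:
        return true // picked up for an immediate refresh
    default:
        return false // left for the next periodic refresh
    }
}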
@@ -9,7 +9,7 @@
"Addresses": "Adresy",
"All Data": "Všechna data",
"Allow Anonymous Usage Reporting?": "Povolit anonymní hlášení o používání?",
-"An external command handles the versioning. It has to remove the file from the synced folder.": "An external command handles the versioning. It has to remove the file from the synced folder.",
+"An external command handles the versioning. It has to remove the file from the synced folder.": "Verzování obstarává externí příkaz. Musí odstranit soubor ze sdíleného adresáře.",
"Anonymous Usage Reporting": "Anonymní hlášení o používání",
"Any devices configured on an introducer device will be added to this device as well.": "Jakékoliv přístroje nakonfigurované na zavaděči budou přidány také na tento přístroj.",
"Automatic upgrades": "Automatický upgrade",
@@ -23,7 +23,7 @@
"Connection Error": "Chyba připojení",
"Copied from elsewhere": "Zkopírováno odjinud",
"Copied from original": "Zkopírováno z originálu",
-"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
+"Copyright © 2015 the following Contributors:": "Copyright © 2015 následující přispěvatelé:",
"Delete": "Smazat",
"Device ID": "ID přístroje",
"Device Identification": "Identifikace přístroje",
@@ -45,8 +45,8 @@
"Error": "Chyba",
"External File Versioning": "Externí verzování souborů",
"File Versioning": "Verze souborů",
-"File permission bits are ignored when looking for changes. Use on FAT file systems.": "File permission bits are ignored when looking for changes. Use on FAT file systems.",
-"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.",
+"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Bity označující práva souborů jsou při hledání změn ignorovány. Použít pro souborové systémy FAT.",
+"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Po nahrazení nebo smazání aplikací Syncthing jsou soubory přesunuty do verzí označených daty v adresáři .stversions.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Soubory jsou chráněny před změnami na ostatních přístrojích, ale změny provedené z tohoto přístroje budou rozeslány na zbytek clusteru.",
"Folder ID": "ID adresáře",
"Folder Master": "Master adresář",
@@ -132,7 +132,7 @@
"Syncthing is restarting.": "Syncthing se restartuje.",
"Syncthing is upgrading.": "Syncthing se aktualizuje.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing se zdá být nefunkční, nebo je problém s připojením k Internetu. Opakuji...",
-"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.",
+"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing má nejspíše problém s provedením vašeho požadavku. Pokud problém přetrvává, obnovte stránku v prohlížeči nebo restartujte Syncthing.",
"The aggregated statistics are publicly available at {%url%}.": "Souhrnné statistiky jsou veřejně dostupné na {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Konfigurace byla uložena, ale není aktivována. Pro aktivaci nové konfigurace je třeba restartovat Syncthing.",
"The device ID cannot be blank.": "ID přístroje nemůže být prázdné.",

@@ -9,21 +9,21 @@
"Addresses": "Adressen",
"All Data": "Alle Daten",
"Allow Anonymous Usage Reporting?": "Übertragung von anonymen Nutzungsberichten erlauben?",
-"An external command handles the versioning. It has to remove the file from the synced folder.": "An external command handles the versioning. It has to remove the file from the synced folder.",
+"An external command handles the versioning. It has to remove the file from the synced folder.": "Ein externer Programmaufruf handhabt die Versionierung. Es muss die Datei aus dem zu synchronisierendem Ordner entfernen.",
"Anonymous Usage Reporting": "Anonymer Nutzungsbericht",
"Any devices configured on an introducer device will be added to this device as well.": "Alle Geräte, die beim Verteiler eingetragen sind, werden auch bei diesem Gerät eingetragen",
"Automatic upgrades": "automatische Updates",
"Bugs": "Fehler",
"CPU Utilization": "Prozessorauslastung",
-"Changelog": "Versionsinfo",
+"Changelog": "Änderungsprotokoll",
"Close": "Schließen",
-"Command": "Command",
+"Command": "Kommando",
"Comment, when used at the start of a line": "Kommentar, wenn am Anfang der Zeile benutzt.",
"Compression": "Komprimierung",
"Connection Error": "Verbindungsfehler",
"Copied from elsewhere": "Von woanders kopiert",
"Copied from original": "Vom Original kopiert",
-"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
+"Copyright © 2015 the following Contributors:": "Copyright © 2015 die folgenden Unterstützer:",
"Delete": "Löschen",
"Device ID": "Geräte ID",
"Device Identification": "Gerät Identifikation",
@@ -43,10 +43,10 @@
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Trage durch ein Komma getrennte \"IP:Port\" Adressen oder \"dynamic\" ein um automatische Adresserkennung durchzuführen.",
"Enter ignore patterns, one per line.": "Geben Sie Ignoriermuster ein, eines pro Zeile.",
"Error": "Fehler",
-"External File Versioning": "External File Versioning",
+"External File Versioning": "Externe Dateiversionierung",
"File Versioning": "Dateiversionierung",
-"File permission bits are ignored when looking for changes. Use on FAT file systems.": "File permission bits are ignored when looking for changes. Use on FAT file systems.",
-"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.",
+"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Dateizugriffsrechte beim Suchen nach Veränderungen ignorieren. Bei FAT-Dateisystemen zu verwenden.",
+"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, als datierte Versionen in einen Ordner namens .stversions verschoben.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Dateien sind vor Veränderung durch andere Geräte geschützt, auf diesem Gerät durchgeführte Veränderungen werden aber auf den Rest des Verbunds übertragen.",
"Folder ID": "Verzeichnis ID",
"Folder Master": "Keine Veränderungen zulassen",
@@ -132,14 +132,14 @@
"Syncthing is restarting.": "Syncthing wird neu gestartet",
"Syncthing is upgrading.": "Syncthing wird aktualisiert",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing scheint nicht erreichbar zu sein oder es gibt ein Problem mit Deiner Internetverbindung. Versuche erneut...",
-"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.",
+"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Es scheint als ob Syncthing ein Problem mit der Verarbeitung ihrer Eingabe hat. Bitte laden sie die Seite neu oder führen sie einen Neustart von Syncthing durch, falls das Problem weiterhin besteht.",
"The aggregated statistics are publicly available at {%url%}.": "Die gesammelten Statistiken sind öffentlich verfügbar unter {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Die Konfiguration wurde gespeichert, aber nicht aktiviert. Syncthing muss neugestartet werden um die neue Konfiguration zu aktivieren.",
"The device ID cannot be blank.": "Die Geräte ID darf nicht leer sein.",
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Die hier einzutragende Geräte ID kann im \"Bearbeiten > Zeige ID\"-Dialog auf dem anderen Gerät gefunden werden. Leerzeichen und Bindestriche sind optional (werden ignoriert).",
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Der verschlüsselte Nutzungsbericht wird täglich gesendet. Er wird benutzt um Statistiken über verwendete Betriebssysteme, Verzeichnis-Größen und Programm-Versionen zu erstellen. Sollte der Bericht in Zukunft weitere Daten erfassen, wird dieses Fenster erneut angezeigt.",
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Die eingegebene Geräte ID scheint nicht gültig zu sein. Es sollte eine 52 oder 56 stellige Zeichenkette aus Buchstaben und Nummern sein. Leerzeichen und Bindestriche sind optional.",
-"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "The first command line parameter is the folder path and the second parameter is the relative path in the folder.",
+"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Der erste Kommandozeilenparameter ist der Verzeichnis-Pfad und der zweite Parameter ist der relative Pfad in diesem Ordner.",
"The folder ID cannot be blank.": "Die Verzeichnis ID darf nicht leer sein.",
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "Die Verzeichnis ID muss eine kurze Kennung (64 Zeichen oder weniger) sein. Sie kann nur aus Buchstaben, Zahlen und dem Punkt- (.), Bindestrich- (-), und Unterstrich- (_) Zeichen bestehen.",
"The folder ID must be unique.": "Die Verzeichnis ID muss eindeutig sein.",
@@ -149,7 +149,7 @@
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Die längste Zeit, die alte Versionen vorgehalten werden (in Tagen, 0 bedeutet, alte Versionen für immer zu behalten).",
"The number of old versions to keep, per file.": "Anzahl der alten Versionen, die von jeder Datei gespeichert werden sollen.",
"The number of versions must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Zahl und darf nicht leer sein.",
-"The path cannot be blank.": "The path cannot be blank.",
+"The path cannot be blank.": "Der Pfad darf nicht leer sein",
"The rescan interval must be a non-negative number of seconds.": "Das Suchintervall muss eine nicht negative Anzahl von Sekunden sein.",
"Unknown": "Unbekannt",
"Unshared": "Ungeteilt",

@@ -1,4 +1,5 @@
{
+"A new major version may not be compatible with previous versions.": "A new major version may not be compatible with previous versions.",
"API Key": "API Key",
"About": "About",
"Add": "Add",
@@ -9,6 +10,7 @@
"Addresses": "Addresses",
"All Data": "All Data",
"Allow Anonymous Usage Reporting?": "Allow Anonymous Usage Reporting?",
+"Alphabetic": "Alphabetic",
"An external command handles the versioning. It has to remove the file from the synced folder.": "An external command handles the versioning. It has to remove the file from the synced folder.",
"Anonymous Usage Reporting": "Anonymous Usage Reporting",
"Any devices configured on an introducer device will be added to this device as well.": "Any devices configured on an introducer device will be added to this device as well.",
@@ -44,6 +46,7 @@
"Enter ignore patterns, one per line.": "Enter ignore patterns, one per line.",
"Error": "Error",
"External File Versioning": "External File Versioning",
+"File Pull Order": "File Pull Order",
"File Versioning": "File Versioning",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "File permission bits are ignored when looking for changes. Use on FAT file systems.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.",
@@ -66,11 +69,13 @@
"Introducer": "Introducer",
"Inversion of the given condition (i.e. do not exclude)": "Inversion of the given condition (i.e. do not exclude)",
"Keep Versions": "Keep Versions",
+"Largest First": "Largest First",
"Last File Received": "Last File Received",
"Last seen": "Last seen",
"Later": "Later",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
+"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maximum Age",
"Metadata Only": "Metadata Only",
"Move to top of queue": "Move to top of queue",
@@ -78,22 +83,27 @@
"Never": "Never",
"New Device": "New Device",
"New Folder": "New Folder",
+"Newest First": "Newest First",
"No": "No",
"No File Versioning": "No File Versioning",
"Notice": "Notice",
"OK": "OK",
"Off": "Off",
+"Oldest First": "Oldest First",
"Out Of Sync": "Out Of Sync",
"Out of Sync Items": "Out of Sync Items",
"Outgoing Rate Limit (KiB/s)": "Outgoing Rate Limit (KiB/s)",
"Override Changes": "Override Changes",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Path where versions should be stored (leave empty for the default .stversions folder in the folder).",
+"Please consult the release notes before performing a major upgrade.": "Please consult the release notes before performing a major upgrade.",
"Please wait": "Please wait",
"Preview": "Preview",
"Preview Usage Report": "Preview Usage Report",
"Quick guide to supported patterns": "Quick guide to supported patterns",
"RAM Utilization": "RAM Utilization",
+"Random": "Random",
+"Release Notes": "Release Notes",
"Rescan": "Rescan",
"Rescan All": "Rescan All",
"Rescan Interval": "Rescan Interval",
@@ -120,6 +130,7 @@
"Shutdown Complete": "Shutdown Complete",
"Simple File Versioning": "Simple File Versioning",
"Single level wildcard (matches within a directory only)": "Single level wildcard (matches within a directory only)",
+"Smallest First": "Smallest First",
"Source Code": "Source Code",
"Staggered File Versioning": "Staggered File Versioning",
"Start Browser": "Start Browser",
@@ -151,10 +162,12 @@
"The number of versions must be a number and cannot be blank.": "The number of versions must be a number and cannot be blank.",
"The path cannot be blank.": "The path cannot be blank.",
"The rescan interval must be a non-negative number of seconds.": "The rescan interval must be a non-negative number of seconds.",
+"This is a major version upgrade.": "This is a major version upgrade.",
"Unknown": "Unknown",
"Unshared": "Unshared",
"Unused": "Unused",
"Up to Date": "Up to Date",
+"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Upgrade To {{version}}",
"Upgrading": "Upgrading",
"Upload Rate": "Upload Rate",

@@ -23,7 +23,7 @@
"Connection Error": "Errore di Connessione",
"Copied from elsewhere": "Copiato da qualche altra parte",
"Copied from original": "Copiato dall'originale",
-"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
+"Copyright © 2015 the following Contributors:": "Copyright © 2015 i seguenti Collaboratori:",
"Delete": "Elimina",
"Device ID": "ID Dispositivo",
"Device Identification": "Identificazione Dispositivo",
@@ -45,8 +45,8 @@
"Error": "Errore",
"External File Versioning": "Controllo Versione Esterno",
"File Versioning": "Controllo Versione dei File",
-"File permission bits are ignored when looking for changes. Use on FAT file systems.": "File permission bits are ignored when looking for changes. Use on FAT file systems.",
-"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.",
+"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Il software evita i bit dei permessi dei file durante il controllo delle modifiche. Utilizzato nei filesystem FAT.",
+"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "I file sostituiti o eliminati da Syncthing vengono datati e spostati in una cartella .stversions.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "I file sono protetti dalle modifiche effettuate negli altri dispositivi, ma le modifiche effettuate in questo dispositivo verranno inviate anche al resto del cluster.",
"Folder ID": "ID Cartella",
"Folder Master": "Cartella Principale",
@@ -132,7 +132,7 @@
"Syncthing is restarting.": "Riavvio di Syncthing in corso.",
"Syncthing is upgrading.": "Aggiornamento di Syncthing in corso.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing sembra inattivo, oppure c'è un problema con la tua connessione a Internet. Nuovo tentativo…",
-"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.",
+"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Sembra che Syncthing non sia in grado di elaborare il tuo comando. Se il problema persiste prova a ricaricare la pagina nel tuo navigatore oppure prova a riavviare Syncthing.",
"The aggregated statistics are publicly available at {%url%}.": "Le statistiche aggregate sono disponibili pubblicamente su {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configurazione è stata salvata ma non attivata. Devi riavviare Syncthing per attivare la nuova configurazione.",
"The device ID cannot be blank.": "L'ID del dispositivo non può essere vuoto.",

@@ -23,7 +23,7 @@
"Connection Error": "Bağlantı hatası",
"Copied from elsewhere": "Başka bir yerden kopyalanmış",
"Copied from original": "Aslından kopyalanmış",
-"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
+"Copyright © 2015 the following Contributors:": "Telif Hakkı © 2015 Katkıda bulunanlar:",
"Delete": "Sil",
"Device ID": "Cihaz ID",
"Device Identification": "Cihaz Kimliği",
@@ -43,10 +43,10 @@
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "IP adresleri eklemek için virgül ile ayırarak \"ip:port\" yazın, ya da \"dynamic\" yazarak otomatik bulma işlemini seçin.",
"Enter ignore patterns, one per line.": "Yoksayılacak kalıp dizilerini her satıra bir tane olacak şekilde girin.",
"Error": "Hata",
-"External File Versioning": "External File Versioning",
+"External File Versioning": "Harici Dosya Sürümlendirme",
"File Versioning": "Dosya Sürümlendirme",
-"File permission bits are ignored when looking for changes. Use on FAT file systems.": "File permission bits are ignored when looking for changes. Use on FAT file systems.",
-"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.",
+"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Değişimleri yoklarken dosya izin bilgilerini ihmal et. FAT dosya sistemlerinde kullanın.",
+"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dosyalar Syncthing tarafından değiştirildiğinde ya da silindiğinde, tarih damgalı sürümleri .stversions dizinine taşınır.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Dosyalar diğer cihazlarda yapılan değişikliklerden korunur, ancak bu cihazdaki değişiklikler kümedeki diğer cihazlara gönderilir.",
"Folder ID": "Klasör ID",
"Folder Master": "Ana Klasör",
@@ -132,7 +132,7 @@
"Syncthing is restarting.": "Syncthing yeniden başlatılıyor.",
"Syncthing is upgrading.": "Syncthing yükseltiliyor.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing görünüşe durdu veya internetin bağlantınızda problem var. Tekrar deniyor....",
-"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.",
+"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing isteminizi işleme alırken bir sorunla karşılaştı. Lütfen sayfanızı yenileyin veya sorun devam ediyorsa Syncthing'i yeniden başlatın.",
"The aggregated statistics are publicly available at {%url%}.": "Toplanan halka açık istatistiklere ulaşabileceğiniz adres {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Ayarlar kaydedildi ancak aktifleştirilmedi. Aktifleştirmek için Syncthing yeniden başlatılmalı.",
"The device ID cannot be blank.": "Cihaz ID boş olamaz.",
@@ -149,7 +149,7 @@
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Bir sürümün tutulması için belirlenen azami süre (sürümleri daimi olarak tutabilmek için 0 değeri atayın)",
"The number of old versions to keep, per file.": "Dosya başına saklanacak eski sürüm.",
"The number of versions must be a number and cannot be blank.": "Sürümlerin sayısı sayı olmalı ve boş bırakılamaz.",
-"The path cannot be blank.": "The path cannot be blank.",
+"The path cannot be blank.": "Dizin yolu boş bırakılamaz.",
"The rescan interval must be a non-negative number of seconds.": "Tarama zaman aralığı, saniye cinsinden negatif olmayan bir sayı olmalıdır.",
"Unknown": "Bilinmiyor",
"Unshared": "Paylaşılmayan",

@@ -9,7 +9,7 @@
"Addresses": "Адреси",
"All Data": "Усі дані",
"Allow Anonymous Usage Reporting?": "Дозволити програмі збирати анонімну статистику використання?",
-"An external command handles the versioning. It has to remove the file from the synced folder.": "An external command handles the versioning. It has to remove the file from the synced folder.",
+"An external command handles the versioning. It has to remove the file from the synced folder.": "Зовнішня команда керування версіями. Вона має видалити файл із директорії, що синхронізується.",
"Anonymous Usage Reporting": "Анонімна статистика використання",
"Any devices configured on an introducer device will be added to this device as well.": "Усі пристрої, налаштовані на пристрої-рекомендувачі, будуть додані до поточного пристрою.",
"Automatic upgrades": "Автоматичні оновлення",
@@ -23,7 +23,7 @@
"Connection Error": "Помилка з’єднання",
"Copied from elsewhere": "Скопійовано з іншого місця",
"Copied from original": "Скопійовано з оригіналу",
-"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
+"Copyright © 2015 the following Contributors:": "Copyright © 2015 наступних контриб’юторів:",
"Delete": "Видалити",
"Device ID": "ID пристрою",
"Device Identification": "Ідентифікатор пристрою",
@@ -45,8 +45,8 @@
"Error": "Помилка",
"External File Versioning": "Зовнішне керування версіями",
"File Versioning": "Керування версіями",
-"File permission bits are ignored when looking for changes. Use on FAT file systems.": "File permission bits are ignored when looking for changes. Use on FAT file systems.",
-"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.",
+"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Біти прав доступу до файлів будуть проігноровані під час пошуку змін. Використовуйте на файлових системах FAT.",
+"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Файли будуть поміщатися у директорію .stversions із відповідною позначкою часу, коли вони будуть замінятися або видалятися програмою.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Файли захищено від змін зроблених на інших пристроях, але зміни зроблені на цьому пристрої будуть надіслані решті кластеру.",
"Folder ID": "ID директорії",
"Folder Master": "Центральна директорія",
@@ -132,7 +132,7 @@
"Syncthing is restarting.": "Syncthing перезавантажується.",
"Syncthing is upgrading.": "Syncthing оновлюється.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Схоже на те, що Syncthing закритий, або виникла проблема із Інтернет-з’єднанням. Проводиться повторна спроба з’єднання…",
-"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.",
+"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Схоже на те, що Syncthing стикнувся з проблемою оброблюючи ваш запит. Будь ласка перезавантажте сторінку в браузері або перезапустіть Syncthing.",
"The aggregated statistics are publicly available at {%url%}.": "Зібрана статистика публічно доступна за посиланням {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Конфігурацію збережено, але не активовано. Необхідно перезапустити Syncthing для того, щоби активувати нову конфігурацію.",
"The device ID cannot be blank.": "ID пристрою не може бути порожнім.",
120
gui/index.html
@@ -38,6 +38,12 @@
<span translate translate-value-version="{{upgradeInfo.latest}}">Upgrade To {%version%}</span>
</button>
</li>
+<li ng-if="upgradeInfo && upgradeInfo.majorNewer">
+<button type="button" class="btn navbar-btn btn-danger btn-sm" href="" ng-click="upgradeMajor()">
+<span class="glyphicon glyphicon-chevron-up"></span> 
+<span translate translate-value-version="{{upgradeInfo.latest}}">Upgrade To {%version%}</span>
+</button>
+</li>
<li class="dropdown" language-select></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><span class="glyphicon glyphicon-cog" aria-label="Edit"></span></a>
@@ -193,9 +199,9 @@
<th><span class="glyphicon glyphicon-folder-open"></span> <span translate>Folder Path</span></th>
<td class="text-right">{{folder.path}}</td>
</tr>
-<tr ng-if="model[folder.id].invalid">
+<tr ng-if="model[folder.id].invalid || model[folder.id].error">
<th><span class="glyphicon glyphicon-warning-sign"></span> <span translate>Error</span></th>
-<td class="text-right">{{model[folder.id].invalid}}</td>
+<td class="text-right">{{model[folder.id].invalid || model[folder.id].error}}</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-globe"></span> <span translate>Global State</span></th>
@@ -233,6 +239,17 @@
<th><span class="glyphicon glyphicon-refresh"></span> <span translate>Rescan Interval</span></th>
<td class="text-right">{{folder.rescanIntervalS}} s</td>
</tr>
+<tr ng-if="folder.order != 'random'">
+<th><span class="glyphicon glyphicon-sort"></span> <span translate>File Pull Order</span></th>
+<td class="text-right" ng-switch="folder.order">
+<span ng-switch-when="random" translate>Random</span>
+<span ng-switch-when="alphabetic" translate>Alphabetic</span>
+<span ng-switch-when="smallestFirst" translate>Smallest First</span>
+<span ng-switch-when="largestFirst" translate>Largest First</span>
+<span ng-switch-when="oldestFirst" translate>Oldest First</span>
+<span ng-switch-when="newestFirst" translate>Newest First</span>
+</td>
+</tr>
<tr ng-if="folder.versioning.type">
<th><span class="glyphicon glyphicon-tags"></span> <span translate>File Versioning</span></th>
<td class="text-right" ng-switch="folder.versioning.type">
@@ -293,11 +310,11 @@
<tbody>
<tr>
<th><span class="glyphicon glyphicon-cloud-download"></span> <span translate>Download Rate</span></th>
-<td class="text-right">{{connections_total.inbps | binary}}B/s ({{connections_total.inBytesTotal | binary}}B)</td>
+<td class="text-right">{{connectionsTotal.inbps | binary}}B/s ({{connectionsTotal.inBytesTotal | binary}}B)</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-cloud-upload"></span> <span translate>Upload Rate</span></th>
-<td class="text-right">{{connections_total.outbps | binary}}B/s ({{connections_total.outBytesTotal | binary}}B)</td>
+<td class="text-right">{{connectionsTotal.outbps | binary}}B/s ({{connectionsTotal.outBytesTotal | binary}}B)</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-th"></span> <span translate>RAM Utilization</span></th>
@@ -326,7 +343,7 @@
</tr>
<tr>
<th><span class="glyphicon glyphicon-tag"></span> <span translate>Version</span></th>
-<td class="text-right">{{version}}</td>
+<td class="text-right">{{versionString()}}</td>
</tr>
</tbody>
</table>
@@ -463,9 +480,38 @@

<modal id="idqr" large="yes" status="info" close="yes" icon="qrcode" title="{{'Device Identification' | translate}} — {{deviceName(thisDevice())}}">
<div class="well well-sm text-monospace text-center">{{myID}}</div>
-<img ng-if="myID" class="center-block img-thumbnail" src="qr/?text={{myID}}"/>
+<img ng-if="myID" class="center-block img-thumbnail" ng-src="qr/?text={{myID}}"/>
</modal>

+<!-- Major upgrade modal -->

+<div id="majorUpgrade" class="modal fade" tabindex="-1" data-backdrop="true" data-keyboard="true">
+<div class="modal-dialog">
+<div class="modal-content">
+<div class="modal-header alert alert-danger">
+<h4 class="modal-title">
+<span ng-if="icon" class="glyphicon glyphicon-chevron-up"></span>
+<span translate>Major Upgrade</span>
+</h4>
+</div>
+<div class="modal-body">
+<p>
+<span translate>This is a major version upgrade.</span>
+<span translate>A new major version may not be compatible with previous versions.</span>
+<span translate>Please consult the release notes before performing a major upgrade.</span>
+</p>
+<p>
+<a href="https://github.com/syncthing/syncthing/releases/latest" target="_blank" translate>Release Notes</a>
+</p>
+</div>
+<div class="modal-footer">
+<button type="button" class="btn btn-primary btn-sm" ng-click="upgrade()"><span class="glyphicon glyphicon-ok"></span> <span translate>Upgrade</span></button>
+<button type="button" class="btn btn-default btn-sm" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> <span translate>Close</span></button>
+</div>
+</div>
+</div>
+</div>

<!-- Device editor modal -->

<div id="editDevice" class="modal fade" tabindex="-1">
@@ -590,6 +636,7 @@
</div>
</div>
<div class="row">
+<!-- Left column -->
<div class="col-md-6">
<div class="form-group">
<div class="checkbox">
@@ -608,7 +655,20 @@
<p translate class="help-block">File permission bits are ignored when looking for changes. Use on FAT file systems.</p>
</div>
</div>

+<!-- Right column-->
+<div class="col-md-6">
+<div class="form-group">
+<label translate>File Pull Order</label>
+<select class="form-control" ng-model="currentFolder.order">
+<option value="random" translate>Random</option>
+<option value="alphabetic" translate>Alphabetic</option>
+<option value="smallestFirst" translate>Smallest First</option>
+<option value="largestFirst" translate>Largest First</option>
+<option value="oldestFirst" translate>Oldest First</option>
+<option value="newestFirst" translate>Newest First</option>
+</select>
+</div>
<div class="form-group">
<label translate>File Versioning</label>
<div class="radio">
@@ -907,10 +967,22 @@
<hr/>

<table class="table table-striped table-condensed">
-<tr ng-repeat="f in needed.progress" ng-init="a = needAction(f)">
-<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
-<td title="{{f.name}}">{{f.name | basename}}</td>
-<td ng-if="a == 'sync' && progress[neededFolder] && progress[neededFolder][f.name]">

+<tr dir-paginate="f in needed | itemsPerPage: neededPageSize" current-page="neededCurrentPage" total-items="neededTotal">
+<!-- Icon -->
+<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[f.action]}}"></span> {{needActions[f.action]}}</td>

+<!-- Name -->
+<td ng-if="f.type != 'queued'" title="{{f.name}}">{{f.name | basename}}</td>
+<td ng-if="f.type == 'queued'">
+<a href="" ng-click="bumpFile(neededFolder, f.name)" title="{{'Move to top of queue' | translate}}">
+<span class="glyphicon glyphicon-eject"></span>
+</a>
+<span title="{{f.name}}"> {{f.name | basename}}</span>
+</td>

+<!-- Size/Progress -->
+<td ng-if="f.type == 'progress' && f.action == 'sync' && progress[neededFolder] && progress[neededFolder][f.name]">
<div class="progress">
<div class="progress-bar progress-bar-success" style="width: {{progress[neededFolder][f.name].reused}}%"></div>
<div class="progress-bar" style="width: {{progress[neededFolder][f.name].copiedFromOrigin}}%"></div>
@@ -922,30 +994,26 @@
</span>
</div>
</td>
-<td class="text-right small-data" ng-if="a != 'sync' || !progress[neededFolder] || !progress[neededFolder][f.name]">
+<td class="text-right small-data" ng-if="f.type != 'progress' || f.action != 'sync' || !progress[neededFolder] || !progress[neededFolder][f.name]">
<span ng-if="f.size > 0">{{f.size | binary}}B</span>
</td>
</tr>
-<tr ng-repeat="f in needed.queued" ng-init="a = needAction(f)">
-<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
-<td><a href="" ng-if="$index != 0" ng-click="bumpFile(neededFolder, f.name)" title="{{'Move to top of queue' | translate}}"><span class="glyphicon glyphicon-eject"></span></a><span ng-if="$index != 0"> </span><span title="{{f.name}}">{{f.name | basename}}</span></td>
-<td class="text-right small-data">
-<span ng-if="f.size > 0">{{f.size | binary}}B</span>
-</td>
-</tr>
-<tr ng-repeat="f in needed.rest" ng-init="a = needAction(f)">
-<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
-<td title="{{f.name}}">{{f.name | basename}}</td>
-<td class="text-right small-data"><span ng-if="f.size > 0">{{f.size | binary}}B</span></td>

</tr>
</table>

+<dir-pagination-controls on-page-change="neededPageChanged(newPageNumber)"></dir-pagination-controls>
+<ul class="pagination pull-right">
+<li ng-repeat="option in [10, 20, 30, 50, 100]" ng-class="{ active: neededPageSize == option }">
+<a href="#" ng-click="neededChangePageSize(option)">{{option}}</a>
+<li>
+</ul>
+<div class="clearfix">
</modal>

<!-- About modal -->

<modal id="about" large="yes" close="yes" status="info" title="{{'About' | translate}}">
-<h1 class="text-center"><img alt="Syncthing" title="Syncthing" src="assets/img/logo-horizontal.svg" style="vertical-align: -16px" height="100" width="366"/><br/><small>{{version}}</small></h1>
+<h1 class="text-center"><img alt="Syncthing" title="Syncthing" src="assets/img/logo-horizontal.svg" style="vertical-align: -16px" height="100" width="366"/><br/><small>{{versionString()}}</small></h1>
<hr/>

<p translate>Copyright © 2015 the following Contributors:</p>
@@ -963,16 +1031,19 @@
<li class="auto-generated">Brandon Philips</li>
<li class="auto-generated">Brendan Long</li>
<li class="auto-generated">Caleb Callaway</li>
+<li class="auto-generated">Carsten Hagemann</li>
<li class="auto-generated">Cathryne Linenweaver</li>
<li class="auto-generated">Chris Joel</li>
<li class="auto-generated">Colin Kennedy</li>
<li class="auto-generated">Daniel Martí</li>
<li class="auto-generated">Dennis Wilson</li>
+<li class="auto-generated">Dominik Heidler</li>
<li class="auto-generated">Elias Jarlebring</li>
<li class="auto-generated">Emil Hessman</li>
<li class="auto-generated">Federico Castagnini</li>
<li class="auto-generated">Felix Ableitner</li>
<li class="auto-generated">Felix Unterpaintner</li>
+<li class="auto-generated">Francois-Xavier Gsell</li>
<li class="auto-generated">Gilli Sigurdsson</li>
<li class="auto-generated">Jakob Borg</li>
<li class="auto-generated">James Patterson</li>
@@ -1025,6 +1096,7 @@
<script src="vendor/angular/angular.min.js"></script>
<script src="vendor/angular/angular-translate.min.js"></script>
<script src="vendor/angular/angular-translate-loader.min.js"></script>
+<script src="vendor/angular/angular-dirPagination.js"></script>
<script src="vendor/jquery/jquery-2.0.3.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.min.js"></script>
<!-- / vendor scripts -->

@@ -9,6 +9,7 @@
/*global $: false, angular: false, console: false, validLangs: false */

var syncthing = angular.module('syncthing', [
+'angularUtils.directives.dirPagination',
'pascalprecht.translate',

'syncthing.core'
@@ -49,22 +50,22 @@ syncthing.config(function ($httpProvider, $translateProvider, LocaleServiceProvi
// @TODO: extract global level functions into seperate service(s)

function deviceCompare(a, b) {
-if (typeof a.Name !== 'undefined' && typeof b.Name !== 'undefined') {
-if (a.Name < b.Name)
+if (typeof a.name !== 'undefined' && typeof b.name !== 'undefined') {
+if (a.name < b.name)
return -1;
-return a.Name > b.Name;
+return a.name > b.name;
}
-if (a.DeviceID < b.DeviceID) {
+if (a.deviceID < b.deviceID) {
return -1;
}
-return a.DeviceID > b.DeviceID;
+return a.deviceID > b.deviceID;
}

function folderCompare(a, b) {
-if (a.ID < b.ID) {
+if (a.id < b.id) {
return -1;
}
-return a.ID > b.ID;
+return a.id > b.id;
}

function folderMap(l) {

@@ -24,7 +24,6 @@ angular.module('syncthing.core')
$scope.config = {};
$scope.configInSync = true;
$scope.connections = {};
-$scope.connections_total = {};
$scope.errors = [];
$scope.model = {};
$scope.myID = '';
@@ -40,6 +39,11 @@ angular.module('syncthing.core')
$scope.deviceStats = {};
$scope.folderStats = {};
$scope.progress = {};
+$scope.version = {};
+$scope.needed = [];
+$scope.neededTotal = 0;
+$scope.neededCurrentPage = 1;
+$scope.neededPageSize = 10;

$(window).bind('beforeunload', function () {
navigatingAway = true;
@@ -76,7 +80,7 @@ angular.module('syncthing.core')
refreshFolderStats();

$http.get(urlbase + '/system/version').success(function (data) {
-$scope.version = data.version;
+$scope.version = data;
}).error($scope.emitHTTPError);

$http.get(urlbase + '/svc/report').success(function (data) {
@@ -368,7 +372,16 @@ angular.module('syncthing.core')
id;

prevDate = now;
-$scope.connections_total = data['total'];

+try {
+data.total.inbps = Math.max(0, (data.total.inBytesTotal - $scope.connectionsTotal.inBytesTotal) / td);
+data.total.outbps = Math.max(0, (data.total.outBytesTotal - $scope.connectionsTotal.outBytesTotal) / td);
+} catch (e) {
+data.total.inbps = 0;
+data.total.outbps = 0;
+}
+$scope.connectionsTotal = data.total;

data = data.connections;
for (id in data) {
if (!data.hasOwnProperty(id)) {
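The try/catch added above derives transfer rates from monotonically growing byte counters. The underlying arithmetic, sketched in Go (td in the JavaScript is presumably the elapsed time between polls; it isn't shown in this hunk, and the clamp guards against a counter reset producing a negative rate):

import (
    "math"
    "time"
)

// rate returns bytes per second given two samples of a byte counter,
// clamped at zero so a reset counter never yields a negative rate.
func rate(prevBytes, curBytes int64, elapsed time.Duration) float64 {
    return math.Max(0, float64(curBytes-prevBytes)/elapsed.Seconds())
}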
@@ -406,14 +419,63 @@ angular.module('syncthing.core')
}

function refreshNeed(folder) {
-$http.get(urlbase + "/db/need?folder=" + encodeURIComponent(folder)).success(function (data) {
+var url = urlbase + "/db/need?folder=" + encodeURIComponent(folder);
+url += "&page=" + $scope.neededCurrentPage;
+url += "&perpage=" + $scope.neededPageSize;
+$http.get(url).success(function (data) {
if ($scope.neededFolder == folder) {
console.log("refreshNeed", folder, data);
-$scope.needed = data;
+parseNeeded(data);
}
}).error($scope.emitHTTPError);
}
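refreshNeed now sends page and perpage to the REST endpoint. A hedged sketch of the slicing semantics this implies on the server side (1-based pages clamped to the list bounds; the endpoint's actual implementation may differ):

func paginate(items []string, page, perPage int) []string {
    start := (page - 1) * perPage
    if start < 0 || start >= len(items) {
        return nil // past the end: an empty page
    }
    end := start + perPage
    if end > len(items) {
        end = len(items)
    }
    return items[start:end]
}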

+function needAction(file) {
+var fDelete = 4096;
+var fDirectory = 16384;
+
+if ((file.flags & (fDelete + fDirectory)) === fDelete + fDirectory) {
+return 'rmdir';
+} else if ((file.flags & fDelete) === fDelete) {
+return 'rm';
+} else if ((file.flags & fDirectory) === fDirectory) {
+return 'touch';
+} else {
+return 'sync';
+}
+};
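needAction tests two bit flags: 4096 and 16384 are single bits (1<<12 and 1<<14), so masking cleanly separates deleted files, deleted directories, new directories, and plain content pulls. The same decision table in Go terms:

const (
    fDelete    = 1 << 12 // 4096
    fDirectory = 1 << 14 // 16384
)

func needActionOf(flags uint32) string {
    switch {
    case flags&(fDelete|fDirectory) == fDelete|fDirectory:
        return "rmdir" // a deleted directory
    case flags&fDelete != 0:
        return "rm" // a deleted file
    case flags&fDirectory != 0:
        return "touch" // a directory to create
    default:
        return "sync" // file content to pull
    }
}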
|
||||
|
||||
function parseNeeded(data) {
|
||||
var merged = [];
|
||||
data.progress.forEach(function (item) {
|
||||
item.type = "progress";
|
||||
item.action = needAction(item);
|
||||
merged.push(item);
|
||||
});
|
||||
data.queued.forEach(function (item) {
|
||||
item.type = "queued";
|
||||
item.action = needAction(item);
|
||||
merged.push(item);
|
||||
});
|
||||
data.rest.forEach(function (item) {
|
||||
item.type = "rest";
|
||||
item.action = needAction(item);
|
||||
merged.push(item);
|
||||
});
|
||||
$scope.needed = merged;
|
||||
$scope.neededTotal = data.total;
|
||||
}
|
||||
|
||||
$scope.neededPageChanged = function (page) {
|
||||
$scope.neededCurrentPage = page;
|
||||
refreshNeed($scope.neededFolder);
|
||||
};
|
||||
|
||||
$scope.neededChangePageSize = function (perpage) {
|
||||
$scope.neededPageSize = perpage;
|
||||
refreshNeed($scope.neededFolder);
|
||||
}

var refreshDeviceStats = debounce(function () {
    $http.get(urlbase + "/stats/device").success(function (data) {
        $scope.deviceStats = data;

@@ -452,10 +514,14 @@ angular.module('syncthing.core')
        return 'unshared';
    }

    if ($scope.model[folderCfg.id].invalid !== '') {
    if ($scope.model[folderCfg.id].invalid) {
        return 'stopped';
    }

    if ($scope.model[folderCfg.id].state == 'error') {
        return 'stopped'; // legacy, the state is called "stopped" in the GUI
    }

    return '' + $scope.model[folderCfg.id].state;
};

@@ -485,6 +551,9 @@ angular.module('syncthing.core')
    if (state == 'scanning') {
        return 'primary';
    }
    if (state == 'error') {
        return 'danger';
    }
    return 'info';
};

@@ -682,6 +751,7 @@ angular.module('syncthing.core')

$scope.upgrade = function () {
    restarting = true;
    $('#majorUpgrade').modal('hide');
    $('#upgrading').modal();
    $http.post(urlbase + '/system/upgrade').success(function () {
        $('#restarting').modal();
@@ -691,6 +761,10 @@ angular.module('syncthing.core')
    });
};

$scope.upgradeMajor = function () {
    $('#majorUpgrade').modal();
};

$scope.shutdown = function () {
    restarting = true;
    $http.post(urlbase + '/system/shutdown').success(function () {
@@ -892,6 +966,9 @@ angular.module('syncthing.core')
    $scope.directoryList = [];

    $scope.$watch('currentFolder.path', function (newvalue) {
        if (newvalue && newvalue.trim().charAt(0) == '~') {
            $scope.currentFolder.path = $scope.system.tilde + newvalue.trim().substring(1)
        }
        $http.get(urlbase + '/system/browse', {
            params: { current: newvalue }
        }).success(function (data) {
@@ -962,7 +1039,7 @@ angular.module('syncthing.core')
$scope.addFolderAndShare = function (folder, device) {
    $scope.dismissFolderRejection(folder, device);
    $scope.currentFolder = {
        ID: folder,
        id: folder,
        selectedDevices: {}
    };
    $scope.currentFolder.selectedDevices[device] = true;
@@ -1157,24 +1234,11 @@ angular.module('syncthing.core')
    $('#needed').modal().on('hidden.bs.modal', function () {
        $scope.neededFolder = undefined;
        $scope.needed = undefined;
        $scope.neededTotal = 0;
        $scope.neededCurrentPage = 1;
    });
};

$scope.needAction = function (file) {
    var fDelete = 4096;
    var fDirectory = 16384;

    if ((file.flags & (fDelete + fDirectory)) === fDelete + fDirectory) {
        return 'rmdir';
    } else if ((file.flags & fDelete) === fDelete) {
        return 'rm';
    } else if ((file.flags & fDirectory) === fDirectory) {
        return 'touch';
    } else {
        return 'sync';
    }
};

$scope.override = function (folder) {
    $http.post(urlbase + "/db/override?folder=" + encodeURIComponent(folder));
};
@@ -1196,14 +1260,43 @@ angular.module('syncthing.core')
};

$scope.bumpFile = function (folder, file) {
    $http.post(urlbase + "/db/prio?folder=" + encodeURIComponent(folder) + "&file=" + encodeURIComponent(file)).success(function (data) {
    var url = urlbase + "/db/prio?folder=" + encodeURIComponent(folder) + "&file=" + encodeURIComponent(file);
    // Pass the current paging parameters so the response matches the view being shown.
    url += "&page=" + $scope.neededCurrentPage;
    url += "&perpage=" + $scope.neededPageSize;
    $http.post(url).success(function (data) {
        if ($scope.neededFolder == folder) {
            console.log("bumpFile", folder, data);
            $scope.needed = data;
            parseNeeded(data);
        }
    }).error($scope.emitHTTPError);
};

$scope.versionString = function () {
    if (!$scope.version.version) {
        return '';
    }

    var os = {
        'darwin': 'Mac OS X',
        'dragonfly': 'DragonFly BSD',
        'freebsd': 'FreeBSD',
        'openbsd': 'OpenBSD',
        'netbsd': 'NetBSD',
        'linux': 'Linux',
        'windows': 'Windows',
        'solaris': 'Solaris',
    }[$scope.version.os] || $scope.version.os;

    var arch = {
        '386': '32 bit',
        'amd64': '64 bit',
        'arm': 'ARM',
    }[$scope.version.arch] || $scope.version.arch;

    return $scope.version.version + ', ' + os + ' (' + arch + ')';
};

// pseudo main. called on all definitions assigned
initController();
});

@@ -2,8 +2,23 @@ angular.module('syncthing.core')
.provider('LocaleService', function () {
    'use strict';

    function detectLocalStorage() {
        // Feature detect localStorage; https://mathiasbynens.be/notes/localstorage-pattern
        try {
            var uid = new Date();
            var storage = window.localStorage;
            storage.setItem(uid, uid);
            var success = storage.getItem(uid) == uid;
            storage.removeItem(uid);
            return storage;
        } catch (exception) {
            return undefined;
        }
    }

    var _defaultLocale,
        _availableLocales;
        _availableLocales,
        _localStorage = detectLocalStorage();

    var _SYNLANG = "SYN_LANG"; // const key for localStorage

@@ -18,6 +33,7 @@ angular.module('syncthing.core')
        _availableLocales = locales;
    };

    this.$get = ['$http', '$translate', '$location', function ($http, $translate, $location) {

        /**
@@ -33,7 +49,10 @@ angular.module('syncthing.core')

        function autoConfigLocale() {
            var params = $location.search();
            var savedLang = typeof(localStorage) != 'undefined' && localStorage[_SYNLANG];
            var savedLang;
            if (_localStorage) {
                savedLang = _localStorage[_SYNLANG];
            }

            if(params.lang) {
                useLocale(params.lang, true);
@@ -84,8 +103,8 @@ angular.module('syncthing.core')
        function useLocale(language, save2Storage) {
            if (language) {
                $translate.use(language).then(function () {
                    if (save2Storage && typeof(localStorage) != 'undefined')
                        localStorage[_SYNLANG] = language;
                    if (save2Storage && _localStorage)
                        _localStorage[_SYNLANG] = language;
                });
            }
        }

520 gui/vendor/angular/angular-dirPagination.js vendored Normal file
@@ -0,0 +1,520 @@
/**
 * dirPagination - AngularJS module for paginating (almost) anything.
 *
 *
 * Credits
 * =======
 *
 * Daniel Tabuenca: https://groups.google.com/d/msg/angular/an9QpzqIYiM/r8v-3W1X5vcJ
 * for the idea on how to dynamically invoke the ng-repeat directive.
 *
 * I borrowed a couple of lines and a few attribute names from the AngularUI Bootstrap project:
 * https://github.com/angular-ui/bootstrap/blob/master/src/pagination/pagination.js
 *
 * Copyright 2014 Michael Bromley <michael@michaelbromley.co.uk>
 */

(function() {

    /**
     * Config
     */
    var moduleName = 'angularUtils.directives.dirPagination';
    var DEFAULT_ID = '__default';

    /**
     * Module
     */
    var module;
    try {
        module = angular.module(moduleName);
    } catch(err) {
        // named module does not exist, so create one
        module = angular.module(moduleName, []);
    }

    module
        .directive('dirPaginate', ['$compile', '$parse', 'paginationService', dirPaginateDirective])
        .directive('dirPaginateNoCompile', noCompileDirective)
        .directive('dirPaginationControls', ['paginationService', 'paginationTemplate', dirPaginationControlsDirective])
        .filter('itemsPerPage', ['paginationService', itemsPerPageFilter])
        .service('paginationService', paginationService)
        .provider('paginationTemplate', paginationTemplateProvider)
        .run(['$templateCache',dirPaginationControlsTemplateInstaller]);

    function dirPaginateDirective($compile, $parse, paginationService) {

        return {
            terminal: true,
            multiElement: true,
            compile: dirPaginationCompileFn
        };

        function dirPaginationCompileFn(tElement, tAttrs){

            var expression = tAttrs.dirPaginate;
            // regex taken directly from https://github.com/angular/angular.js/blob/master/src/ng/directive/ngRepeat.js#L211
            var match = expression.match(/^\s*([\s\S]+?)\s+in\s+([\s\S]+?)(?:\s+track\s+by\s+([\s\S]+?))?\s*$/);

            var filterPattern = /\|\s*itemsPerPage\s*:[^|]*/;
            if (match[2].match(filterPattern) === null) {
                throw 'pagination directive: the \'itemsPerPage\' filter must be set.';
            }
            var itemsPerPageFilterRemoved = match[2].replace(filterPattern, '');
            var collectionGetter = $parse(itemsPerPageFilterRemoved);

            addNoCompileAttributes(tElement);

            // If any value is specified for paginationId, we register the un-evaluated expression at this stage for the benefit of any
            // dir-pagination-controls directives that may be looking for this ID.
            var rawId = tAttrs.paginationId || DEFAULT_ID;
            paginationService.registerInstance(rawId);

            return function dirPaginationLinkFn(scope, element, attrs){

                // Now that we have access to the `scope` we can interpolate any expression given in the paginationId attribute and
                // potentially register a new ID if it evaluates to a different value than the rawId.
                var paginationId = $parse(attrs.paginationId)(scope) || attrs.paginationId || DEFAULT_ID;
                paginationService.registerInstance(paginationId);

                var repeatExpression = getRepeatExpression(expression, paginationId);
                addNgRepeatToElement(element, attrs, repeatExpression);

                removeTemporaryAttributes(element);
                var compiled = $compile(element);

                var currentPageGetter = makeCurrentPageGetterFn(scope, attrs, paginationId);
                paginationService.setCurrentPageParser(paginationId, currentPageGetter, scope);

                if (typeof attrs.totalItems !== 'undefined') {
                    paginationService.setAsyncModeTrue(paginationId);
                    scope.$watch(function() {
                        return $parse(attrs.totalItems)(scope);
                    }, function (result) {
                        if (0 <= result) {
                            paginationService.setCollectionLength(paginationId, result);
                        }
                    });
                } else {
                    scope.$watchCollection(function() {
                        return collectionGetter(scope);
                    }, function(collection) {
                        if (collection) {
                            paginationService.setCollectionLength(paginationId, collection.length);
                        }
                    });
                }

                // Delegate to the link function returned by the new compilation of the ng-repeat
                compiled(scope);
            };
        }

        /**
         * If a pagination id has been specified, we need to check that it is present as the second argument passed to
         * the itemsPerPage filter. If it is not there, we add it and return the modified expression.
         *
         * @param expression
         * @param paginationId
         * @returns {*}
         */
        function getRepeatExpression(expression, paginationId) {
            var repeatExpression,
                idDefinedInFilter = !!expression.match(/(\|\s*itemsPerPage\s*:[^|]*:[^|]*)/);

            if (paginationId !== DEFAULT_ID && !idDefinedInFilter) {
                repeatExpression = expression.replace(/(\|\s*itemsPerPage\s*:[^|]*)/, "$1 : '" + paginationId + "'");
            } else {
                repeatExpression = expression;
            }

            return repeatExpression;
        }

        /**
         * Adds the ng-repeat directive to the element. In the case of multi-element (-start, -end) it adds the
         * appropriate multi-element ng-repeat to the first and last element in the range.
         * @param element
         * @param attrs
         * @param repeatExpression
         */
        function addNgRepeatToElement(element, attrs, repeatExpression) {
            if (element[0].hasAttribute('dir-paginate-start') || element[0].hasAttribute('data-dir-paginate-start')) {
                // using multiElement mode (dir-paginate-start, dir-paginate-end)
                attrs.$set('ngRepeatStart', repeatExpression);
                element.eq(element.length - 1).attr('ng-repeat-end', true);
            } else {
                attrs.$set('ngRepeat', repeatExpression);
            }
        }

        /**
         * Adds the dir-paginate-no-compile directive to each element in the tElement range.
         * @param tElement
         */
        function addNoCompileAttributes(tElement) {
            angular.forEach(tElement, function(el) {
                if (el.nodeType === Node.ELEMENT_NODE) {
                    angular.element(el).attr('dir-paginate-no-compile', true);
                }
            });
        }

        /**
         * Removes the variations on dir-paginate (data-, -start, -end) and the dir-paginate-no-compile directives.
         * @param element
         */
        function removeTemporaryAttributes(element) {
            angular.forEach(element, function(el) {
                if (el.nodeType === Node.ELEMENT_NODE) {
                    angular.element(el).removeAttr('dir-paginate-no-compile');
                }
            });
            element.eq(0).removeAttr('dir-paginate-start').removeAttr('dir-paginate').removeAttr('data-dir-paginate-start').removeAttr('data-dir-paginate');
            element.eq(element.length - 1).removeAttr('dir-paginate-end').removeAttr('data-dir-paginate-end');
        }

        /**
         * Creates a getter function for the current-page attribute, using the expression provided or a default value if
         * no current-page expression was specified.
         *
         * @param scope
         * @param attrs
         * @param paginationId
         * @returns {*}
         */
        function makeCurrentPageGetterFn(scope, attrs, paginationId) {
            var currentPageGetter;
            if (attrs.currentPage) {
                currentPageGetter = $parse(attrs.currentPage);
            } else {
                // if the current-page attribute was not set, we'll make our own
                var defaultCurrentPage = paginationId + '__currentPage';
                scope[defaultCurrentPage] = 1;
                currentPageGetter = $parse(defaultCurrentPage);
            }
            return currentPageGetter;
        }
    }

    /**
     * This is a helper directive that allows correct compilation when in multi-element mode (ie dir-paginate-start, dir-paginate-end).
     * It is dynamically added to all elements in the dir-paginate compile function, and it prevents further compilation of
     * any inner directives. It is then removed in the link function, and all inner directives are then manually compiled.
     */
    function noCompileDirective() {
        return {
            priority: 5000,
            terminal: true
        };
    }

    function dirPaginationControlsTemplateInstaller($templateCache) {
        $templateCache.put('angularUtils.directives.dirPagination.template', '<ul class="pagination" ng-if="1 < pages.length"><li ng-if="boundaryLinks" ng-class="{ disabled : pagination.current == 1 }"><a href="" ng-click="setCurrent(1)">«</a></li><li ng-if="directionLinks" ng-class="{ disabled : pagination.current == 1 }"><a href="" ng-click="setCurrent(pagination.current - 1)">‹</a></li><li ng-repeat="pageNumber in pages track by $index" ng-class="{ active : pagination.current == pageNumber, disabled : pageNumber == \'...\' }"><a href="" ng-click="setCurrent(pageNumber)">{{ pageNumber }}</a></li><li ng-if="directionLinks" ng-class="{ disabled : pagination.current == pagination.last }"><a href="" ng-click="setCurrent(pagination.current + 1)">›</a></li><li ng-if="boundaryLinks" ng-class="{ disabled : pagination.current == pagination.last }"><a href="" ng-click="setCurrent(pagination.last)">»</a></li></ul>');
    }

    function dirPaginationControlsDirective(paginationService, paginationTemplate) {

        var numberRegex = /^\d+$/;

        return {
            restrict: 'AE',
            templateUrl: function(elem, attrs) {
                return attrs.templateUrl || paginationTemplate.getPath();
            },
            scope: {
                maxSize: '=?',
                onPageChange: '&?',
                paginationId: '=?'
            },
            link: dirPaginationControlsLinkFn
        };

        function dirPaginationControlsLinkFn(scope, element, attrs) {

            // rawId is the un-interpolated value of the pagination-id attribute. This is only important when the corresponding dir-paginate directive has
            // not yet been linked (e.g. if it is inside an ng-if block), and in that case it prevents this controls directive from assuming that there is
            // no corresponding dir-paginate directive and wrongly throwing an exception.
            var rawId = attrs.paginationId || DEFAULT_ID;
            var paginationId = scope.paginationId || attrs.paginationId || DEFAULT_ID;

            if (!paginationService.isRegistered(paginationId) && !paginationService.isRegistered(rawId)) {
                var idMessage = (paginationId !== DEFAULT_ID) ? ' (id: ' + paginationId + ') ' : ' ';
                throw 'pagination directive: the pagination controls' + idMessage + 'cannot be used without the corresponding pagination directive.';
            }

            if (!scope.maxSize) { scope.maxSize = 9; }
            scope.directionLinks = angular.isDefined(attrs.directionLinks) ? scope.$parent.$eval(attrs.directionLinks) : true;
            scope.boundaryLinks = angular.isDefined(attrs.boundaryLinks) ? scope.$parent.$eval(attrs.boundaryLinks) : false;

            var paginationRange = Math.max(scope.maxSize, 5);
            scope.pages = [];
            scope.pagination = {
                last: 1,
                current: 1
            };
            scope.range = {
                lower: 1,
                upper: 1,
                total: 1
            };

            scope.$watch(function() {
                return (paginationService.getCollectionLength(paginationId) + 1) * paginationService.getItemsPerPage(paginationId);
            }, function(length) {
                if (0 < length) {
                    generatePagination();
                }
            });

            scope.$watch(function() {
                return (paginationService.getItemsPerPage(paginationId));
            }, function(current, previous) {
                if (current != previous && typeof previous !== 'undefined') {
                    goToPage(scope.pagination.current);
                }
            });

            scope.$watch(function() {
                return paginationService.getCurrentPage(paginationId);
            }, function(currentPage, previousPage) {
                if (currentPage != previousPage) {
                    goToPage(currentPage);
                }
            });

            scope.setCurrent = function(num) {
                if (isValidPageNumber(num)) {
                    num = parseInt(num, 10);
                    paginationService.setCurrentPage(paginationId, num);
                }
            };

            function goToPage(num) {
                if (isValidPageNumber(num)) {
                    scope.pages = generatePagesArray(num, paginationService.getCollectionLength(paginationId), paginationService.getItemsPerPage(paginationId), paginationRange);
                    scope.pagination.current = num;
                    updateRangeValues();

                    // if a callback has been set, then call it with the page number as an argument
                    if (scope.onPageChange) {
                        scope.onPageChange({ newPageNumber : num });
                    }
                }
            }

            function generatePagination() {
                var page = parseInt(paginationService.getCurrentPage(paginationId)) || 1;

                scope.pages = generatePagesArray(page, paginationService.getCollectionLength(paginationId), paginationService.getItemsPerPage(paginationId), paginationRange);
                scope.pagination.current = page;
                scope.pagination.last = scope.pages[scope.pages.length - 1];
                if (scope.pagination.last < scope.pagination.current) {
                    scope.setCurrent(scope.pagination.last);
                } else {
                    updateRangeValues();
                }
            }

            /**
             * This function updates the values (lower, upper, total) of the `scope.range` object, which can be used in the pagination
             * template to display the current page range, e.g. "showing 21 - 40 of 144 results";
             */
            function updateRangeValues() {
                var currentPage = paginationService.getCurrentPage(paginationId),
                    itemsPerPage = paginationService.getItemsPerPage(paginationId),
                    totalItems = paginationService.getCollectionLength(paginationId);

                scope.range.lower = (currentPage - 1) * itemsPerPage + 1;
                scope.range.upper = Math.min(currentPage * itemsPerPage, totalItems);
                scope.range.total = totalItems;
            }

            function isValidPageNumber(num) {
                return (numberRegex.test(num) && (0 < num && num <= scope.pagination.last));
            }
        }

        /**
         * Generate an array of page numbers (or the '...' string) which is used in an ng-repeat to generate the
         * links used in pagination
         *
         * @param currentPage
         * @param collectionLength
         * @param rowsPerPage
         * @param paginationRange
         * @returns {Array}
         */
        function generatePagesArray(currentPage, collectionLength, rowsPerPage, paginationRange) {
            var pages = [];
            var totalPages = Math.ceil(collectionLength / rowsPerPage);
            var halfWay = Math.ceil(paginationRange / 2);
            var position;

            if (currentPage <= halfWay) {
                position = 'start';
            } else if (totalPages - halfWay < currentPage) {
                position = 'end';
            } else {
                position = 'middle';
            }

            var ellipsesNeeded = paginationRange < totalPages;
            var i = 1;
            while (i <= totalPages && i <= paginationRange) {
                var pageNumber = calculatePageNumber(i, currentPage, paginationRange, totalPages);

                var openingEllipsesNeeded = (i === 2 && (position === 'middle' || position === 'end'));
                var closingEllipsesNeeded = (i === paginationRange - 1 && (position === 'middle' || position === 'start'));
                if (ellipsesNeeded && (openingEllipsesNeeded || closingEllipsesNeeded)) {
                    pages.push('...');
                } else {
                    pages.push(pageNumber);
                }
                i++;
            }
            return pages;
        }
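For reference, this windowing yields e.g. [1, '...', 23, 24, 25, 26, 27, '...', 50] for page 25 of 50 with a 9-slot pager. A self-contained Go transcription of the same logic (not part of the vendored file, just an illustration of the algorithm):

package main

import (
    "fmt"
    "math"
)

// generatePages mirrors generatePagesArray: at most paginationRange entries,
// with "..." placeholders near the window edges when more pages exist.
func generatePages(currentPage, totalItems, perPage, paginationRange int) []string {
    totalPages := int(math.Ceil(float64(totalItems) / float64(perPage)))
    halfWay := (paginationRange + 1) / 2 // integer Math.ceil(range/2)

    position := "middle"
    if currentPage <= halfWay {
        position = "start"
    } else if totalPages-halfWay < currentPage {
        position = "end"
    }

    ellipsesNeeded := paginationRange < totalPages
    var pages []string
    for i := 1; i <= totalPages && i <= paginationRange; i++ {
        n := pageNumber(i, currentPage, paginationRange, totalPages)
        opening := i == 2 && (position == "middle" || position == "end")
        closing := i == paginationRange-1 && (position == "middle" || position == "start")
        if ellipsesNeeded && (opening || closing) {
            pages = append(pages, "...")
        } else {
            pages = append(pages, fmt.Sprint(n))
        }
    }
    return pages
}

// pageNumber mirrors calculatePageNumber: maps slot i to a page number.
func pageNumber(i, currentPage, paginationRange, totalPages int) int {
    halfWay := (paginationRange + 1) / 2
    switch {
    case i == paginationRange:
        return totalPages
    case i == 1:
        return i
    case paginationRange < totalPages:
        if totalPages-halfWay < currentPage {
            return totalPages - paginationRange + i
        } else if halfWay < currentPage {
            return currentPage - halfWay + i
        }
        return i
    default:
        return i
    }
}

func main() {
    // 500 items at 10 per page, centred on page 25, 9 pager slots:
    fmt.Println(generatePages(25, 500, 10, 9)) // [1 ... 23 24 25 26 27 ... 50]
}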

        /**
         * Given the position in the sequence of pagination links [i], figure out what page number corresponds to that position.
         *
         * @param i
         * @param currentPage
         * @param paginationRange
         * @param totalPages
         * @returns {*}
         */
        function calculatePageNumber(i, currentPage, paginationRange, totalPages) {
            var halfWay = Math.ceil(paginationRange/2);
            if (i === paginationRange) {
                return totalPages;
            } else if (i === 1) {
                return i;
            } else if (paginationRange < totalPages) {
                if (totalPages - halfWay < currentPage) {
                    return totalPages - paginationRange + i;
                } else if (halfWay < currentPage) {
                    return currentPage - halfWay + i;
                } else {
                    return i;
                }
            } else {
                return i;
            }
        }
    }

    /**
     * This filter slices the collection into pages based on the current page number and number of items per page.
     * @param paginationService
     * @returns {Function}
     */
    function itemsPerPageFilter(paginationService) {

        return function(collection, itemsPerPage, paginationId) {
            if (typeof (paginationId) === 'undefined') {
                paginationId = DEFAULT_ID;
            }
            if (!paginationService.isRegistered(paginationId)) {
                throw 'pagination directive: the itemsPerPage id argument (id: ' + paginationId + ') does not match a registered pagination-id.';
            }
            var end;
            var start;
            if (collection instanceof Array) {
                itemsPerPage = parseInt(itemsPerPage) || 9999999999;
                if (paginationService.isAsyncMode(paginationId)) {
                    start = 0;
                } else {
                    start = (paginationService.getCurrentPage(paginationId) - 1) * itemsPerPage;
                }
                end = start + itemsPerPage;
                paginationService.setItemsPerPage(paginationId, itemsPerPage);

                return collection.slice(start, end);
            } else {
                return collection;
            }
        };
    }

    /**
     * This service allows the various parts of the module to communicate and stay in sync.
     */
    function paginationService() {

        var instances = {};
        var lastRegisteredInstance;

        this.registerInstance = function(instanceId) {
            if (typeof instances[instanceId] === 'undefined') {
                instances[instanceId] = {
                    asyncMode: false
                };
                lastRegisteredInstance = instanceId;
            }
        };

        this.isRegistered = function(instanceId) {
            return (typeof instances[instanceId] !== 'undefined');
        };

        this.getLastInstanceId = function() {
            return lastRegisteredInstance;
        };

        this.setCurrentPageParser = function(instanceId, val, scope) {
            instances[instanceId].currentPageParser = val;
            instances[instanceId].context = scope;
        };
        this.setCurrentPage = function(instanceId, val) {
            instances[instanceId].currentPageParser.assign(instances[instanceId].context, val);
        };
        this.getCurrentPage = function(instanceId) {
            var parser = instances[instanceId].currentPageParser;
            return parser ? parser(instances[instanceId].context) : 1;
        };

        this.setItemsPerPage = function(instanceId, val) {
            instances[instanceId].itemsPerPage = val;
        };
        this.getItemsPerPage = function(instanceId) {
            return instances[instanceId].itemsPerPage;
        };

        this.setCollectionLength = function(instanceId, val) {
            instances[instanceId].collectionLength = val;
        };
        this.getCollectionLength = function(instanceId) {
            return instances[instanceId].collectionLength;
        };

        this.setAsyncModeTrue = function(instanceId) {
            instances[instanceId].asyncMode = true;
        };

        this.isAsyncMode = function(instanceId) {
            return instances[instanceId].asyncMode;
        };
    }

    /**
     * This provider allows global configuration of the template path used by the dir-pagination-controls directive.
     */
    function paginationTemplateProvider() {

        var templatePath = 'angularUtils.directives.dirPagination.template';

        this.setPath = function(path) {
            templatePath = path;
        };

        this.$get = function() {
            return {
                getPath: function() {
                    return templatePath;
                }
            };
        };
    }
})();
@@ -82,6 +82,7 @@ type FolderConfiguration struct {
    Copiers int `xml:"copiers" json:"copiers"` // This defines how many files are handled concurrently.
    Pullers int `xml:"pullers" json:"pullers"` // Defines how many blocks are fetched at the same time, possibly between separate copier routines.
    Hashers int `xml:"hashers" json:"hashers"` // Less than one sets the value to the number of cores. These are CPU bound due to hashing.
    Order PullOrder `xml:"order" json:"order"`

    Invalid string `xml:"-" json:"invalid"` // Set at runtime when there is an error, not saved

@@ -227,8 +228,9 @@ type OptionsConfiguration struct {
    ReconnectIntervalS int `xml:"reconnectionIntervalS" json:"reconnectionIntervalS" default:"60"`
    StartBrowser bool `xml:"startBrowser" json:"startBrowser" default:"true"`
    UPnPEnabled bool `xml:"upnpEnabled" json:"upnpEnabled" default:"true"`
    UPnPLease int `xml:"upnpLeaseMinutes" json:"upnpLeaseMinutes" default:"0"`
    UPnPRenewal int `xml:"upnpRenewalMinutes" json:"upnpRenewalMinutes" default:"30"`
    UPnPLeaseM int `xml:"upnpLeaseMinutes" json:"upnpLeaseMinutes" default:"0"`
    UPnPRenewalM int `xml:"upnpRenewalMinutes" json:"upnpRenewalMinutes" default:"30"`
    UPnPTimeoutS int `xml:"upnpTimeoutSeconds" json:"upnpTimeoutSeconds" default:"3"`
    URAccepted int `xml:"urAccepted" json:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)
    URUniqueID string `xml:"urUniqueID" json:"urUniqueId"` // Unique ID for reporting purposes, regenerated when UR is turned on.
    RestartOnWakeup bool `xml:"restartOnWakeup" json:"restartOnWakeup" default:"true"`

@@ -677,3 +679,57 @@ func randomString(l int) string {
    }
    return string(bs)
}

type PullOrder int

const (
    OrderRandom PullOrder = iota // default is random
    OrderAlphabetic
    OrderSmallestFirst
    OrderLargestFirst
    OrderOldestFirst
    OrderNewestFirst
)

func (o PullOrder) String() string {
    switch o {
    case OrderRandom:
        return "random"
    case OrderAlphabetic:
        return "alphabetic"
    case OrderSmallestFirst:
        return "smallestFirst"
    case OrderLargestFirst:
        return "largestFirst"
    case OrderOldestFirst:
        return "oldestFirst"
    case OrderNewestFirst:
        return "newestFirst"
    default:
        return "unknown"
    }
}

func (o PullOrder) MarshalText() ([]byte, error) {
    return []byte(o.String()), nil
}

func (o *PullOrder) UnmarshalText(bs []byte) error {
    switch string(bs) {
    case "random":
        *o = OrderRandom
    case "alphabetic":
        *o = OrderAlphabetic
    case "smallestFirst":
        *o = OrderSmallestFirst
    case "largestFirst":
        *o = OrderLargestFirst
    case "oldestFirst":
        *o = OrderOldestFirst
    case "newestFirst":
        *o = OrderNewestFirst
    default:
        *o = OrderRandom
    }
    return nil
}
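The String/MarshalText/UnmarshalText trio is what lets encoding/xml and encoding/json serialize the new Order field as a readable string, with unknown values falling back to random. A minimal, self-contained Go sketch of the same pattern (the type and constant names here are illustrative, not the real config types):

package main

import (
    "encoding/xml"
    "fmt"
)

// Order is a stand-in for config.PullOrder: an int enum that crosses
// XML and JSON as a string via MarshalText/UnmarshalText.
type Order int

const (
    Random Order = iota
    Alphabetic
)

func (o Order) MarshalText() ([]byte, error) {
    if o == Alphabetic {
        return []byte("alphabetic"), nil
    }
    return []byte("random"), nil
}

func (o *Order) UnmarshalText(bs []byte) error {
    switch string(bs) {
    case "alphabetic":
        *o = Alphabetic
    default:
        *o = Random // unknown values fall back to the default, as in the diff
    }
    return nil
}

type folder struct {
    Order Order `xml:"order"`
}

func main() {
    var f folder
    xml.Unmarshal([]byte("<folder><order>alphabetic</order></folder>"), &f)
    out, _ := xml.Marshal(f)
    fmt.Println(f.Order == Alphabetic, string(out))
    // true <folder><order>alphabetic</order></folder>
}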

@@ -42,8 +42,9 @@ func TestDefaultValues(t *testing.T) {
    ReconnectIntervalS: 60,
    StartBrowser: true,
    UPnPEnabled: true,
    UPnPLease: 0,
    UPnPRenewal: 30,
    UPnPLeaseM: 0,
    UPnPRenewalM: 30,
    UPnPTimeoutS: 3,
    RestartOnWakeup: true,
    AutoUpgradeIntervalH: 12,
    KeepTemporariesH: 24,

@@ -147,8 +148,9 @@ func TestOverriddenValues(t *testing.T) {
    ReconnectIntervalS: 6000,
    StartBrowser: false,
    UPnPEnabled: false,
    UPnPLease: 60,
    UPnPRenewal: 15,
    UPnPLeaseM: 60,
    UPnPRenewalM: 15,
    UPnPTimeoutS: 15,
    RestartOnWakeup: false,
    AutoUpgradeIntervalH: 24,
    KeepTemporariesH: 48,

@@ -339,8 +341,8 @@ func TestWindowsPaths(t *testing.T) {
    folder.RawPath = `relative\path`
    expected = folder.RawPath
    actual = folder.Path()
    if actual != expected {
        t.Errorf("%q != %q", actual, expected)
    if actual == expected || !strings.HasPrefix(actual, "\\\\?\\") {
        t.Errorf("%q == %q, expected absolutification", actual, expected)
    }
}

@@ -526,3 +528,51 @@ func TestCopy(t *testing.T) {
        t.Error("Copy should be unchanged")
    }
}

func TestPullOrder(t *testing.T) {
    wrapper, err := Load("testdata/pullorder.xml", device1)
    if err != nil {
        t.Fatal(err)
    }
    folders := wrapper.Folders()

    expected := []struct {
        name string
        order PullOrder
    }{
        {"f1", OrderRandom}, // empty value, default
        {"f2", OrderRandom}, // explicit
        {"f3", OrderAlphabetic}, // explicit
        {"f4", OrderRandom}, // unknown value, default
        {"f5", OrderSmallestFirst}, // explicit
        {"f6", OrderLargestFirst}, // explicit
        {"f7", OrderOldestFirst}, // explicit
        {"f8", OrderNewestFirst}, // explicit
    }

    // Verify values are deserialized correctly

    for _, tc := range expected {
        if actual := folders[tc.name].Order; actual != tc.order {
            t.Errorf("Incorrect pull order for %q: %v != %v", tc.name, actual, tc.order)
        }
    }

    // Serialize and deserialize again to verify it survives the transformation

    buf := new(bytes.Buffer)
    cfg := wrapper.Raw()
    cfg.WriteXML(buf)

    t.Logf("%s", buf.Bytes())

    cfg, err = ReadXML(buf, device1)
    wrapper = Wrap("testdata/pullorder.xml", cfg)
    folders = wrapper.Folders()

    for _, tc := range expected {
        if actual := folders[tc.name].Order; actual != tc.order {
            t.Errorf("Incorrect pull order for %q: %v != %v", tc.name, actual, tc.order)
        }
    }
}
1 internal/config/testdata/overridenvalues.xml vendored
@@ -15,6 +15,7 @@
    <upnpEnabled>false</upnpEnabled>
    <upnpLeaseMinutes>60</upnpLeaseMinutes>
    <upnpRenewalMinutes>15</upnpRenewalMinutes>
    <upnpTimeoutSeconds>15</upnpTimeoutSeconds>
    <restartOnWakeup>false</restartOnWakeup>
    <autoUpgradeIntervalH>24</autoUpgradeIntervalH>
    <keepTemporariesH>48</keepTemporariesH>
25 internal/config/testdata/pullorder.xml vendored Normal file
@@ -0,0 +1,25 @@
<configuration version="10">
    <folder id="f1" directory="testdata/">
    </folder>
    <folder id="f2" directory="testdata/">
        <order>random</order>
    </folder>
    <folder id="f3" directory="testdata/">
        <order>alphabetic</order>
    </folder>
    <folder id="f4" directory="testdata/">
        <order>whatever</order>
    </folder>
    <folder id="f5" directory="testdata/">
        <order>smallestFirst</order>
    </folder>
    <folder id="f6" directory="testdata/">
        <order>largestFirst</order>
    </folder>
    <folder id="f7" directory="testdata/">
        <order>oldestFirst</order>
    </folder>
    <folder id="f8" directory="testdata/">
        <order>newestFirst</order>
    </folder>
</configuration>
@@ -10,11 +10,11 @@ import (
    "io/ioutil"
    "os"
    "path/filepath"
    "sync"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/events"
    "github.com/syncthing/syncthing/internal/osutil"
    "github.com/syncthing/syncthing/internal/sync"
)

// An interface to handle configuration changes, and a wrapper type à la
@@ -49,7 +49,12 @@ type Wrapper struct {
// Wrap wraps an existing Configuration structure and ties it to a file on
// disk.
func Wrap(path string, cfg Configuration) *Wrapper {
    w := &Wrapper{cfg: cfg, path: path}
    w := &Wrapper{
        cfg: cfg,
        path: path,
        mut: sync.NewMutex(),
        sMut: sync.NewMutex(),
    }
    w.replaces = make(chan Configuration)
    go w.Serve()
    return w
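This hunk is part of a sweep replacing zero-value stdlib mutexes with constructors from the project's internal sync wrapper, so locks can be swapped for instrumented implementations (the deadlock detector used elsewhere in this changeset relies on that). A rough sketch of the idea; the real internal/sync package differs in detail:

package main

import (
    "log"
    stdsync "sync"
    "time"
)

// Mutex is satisfied both by the stdlib mutex and the instrumented one.
type Mutex interface {
    Lock()
    Unlock()
}

type loggedMutex struct {
    stdsync.Mutex
    acquired time.Time
}

func (m *loggedMutex) Lock() {
    m.Mutex.Lock()
    m.acquired = time.Now()
}

func (m *loggedMutex) Unlock() {
    if d := time.Since(m.acquired); d > time.Second {
        log.Printf("mutex held for %v", d) // crude long-hold detection
    }
    m.Mutex.Unlock()
}

// NewMutex hands back an instrumented lock behind the common interface.
func NewMutex() Mutex {
    return &loggedMutex{}
}

func main() {
    mut := NewMutex() // instead of `var mut sync.Mutex`
    mut.Lock()
    mut.Unlock()
}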

@@ -215,29 +220,6 @@ func (w *Wrapper) SetGUI(gui GUIConfiguration) {
    w.replaces <- w.cfg.Copy()
}

// Sets the folder error state. Emits ConfigSaved to cause a GUI refresh.
func (w *Wrapper) SetFolderError(id string, err error) {
    w.mut.Lock()
    defer w.mut.Unlock()

    w.folderMap = nil

    for i := range w.cfg.Folders {
        if w.cfg.Folders[i].ID == id {
            errstr := ""
            if err != nil {
                errstr = err.Error()
            }
            if errstr != w.cfg.Folders[i].Invalid {
                w.cfg.Folders[i].Invalid = errstr
                events.Default.Log(events.ConfigSaved, w.cfg)
                w.replaces <- w.cfg.Copy()
            }
            return
        }
    }
}

// Returns whether or not connection attempts from the given device should be
// silently ignored.
func (w *Wrapper) IgnoredDevice(id protocol.DeviceID) bool {

@@ -17,11 +17,11 @@ import (
    "bytes"
    "encoding/binary"
    "sort"
    "sync"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/config"
    "github.com/syncthing/syncthing/internal/osutil"
    "github.com/syncthing/syncthing/internal/sync"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
@@ -123,7 +123,8 @@ func NewBlockFinder(db *leveldb.DB, cfg *config.Wrapper) *BlockFinder {
    }

    f := &BlockFinder{
        db: db,
        db: db,
        mut: sync.NewRWMutex(),
    }
    f.Changed(cfg.Raw())
    cfg.Subscribe(f)

@@ -10,10 +10,11 @@ import (
    "crypto/rand"
    "log"
    "os"
    "sync"
    "testing"
    "time"

    "github.com/syncthing/syncthing/internal/sync"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
    "github.com/syndtr/goleveldb/leveldb/util"
@@ -132,7 +133,7 @@ func TestConcurrentSetClear(t *testing.T) {

    dur := 30 * time.Second
    t0 := time.Now()
    var wg sync.WaitGroup
    wg := sync.NewWaitGroup()

    os.RemoveAll("testdata/concurrent-set-clear.db")
    db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10})
@@ -188,7 +189,7 @@ func TestConcurrentSetOnly(t *testing.T) {

    dur := 30 * time.Second
    t0 := time.Now()
    var wg sync.WaitGroup
    wg := sync.NewWaitGroup()

    os.RemoveAll("testdata/concurrent-set-only.db")
    db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{OpenFilesCacheCapacity: 10})

@@ -14,9 +14,9 @@ import (
    "fmt"
    "runtime"
    "sort"
    "sync"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/sync"
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/iterator"
    "github.com/syndtr/goleveldb/leveldb/opt"
@@ -25,7 +25,7 @@ import (

var (
    clockTick int64
    clockMut sync.Mutex
    clockMut sync.Mutex = sync.NewMutex()
)

func clock(v int64) int64 {

@@ -156,6 +156,9 @@ func (o *versionList) UnmarshalXDR(bs []byte) error {

func (o *versionList) DecodeXDRFrom(xr *xdr.Reader) error {
    _versionsSize := int(xr.ReadUint32())
    if _versionsSize < 0 {
        return xdr.ElementSizeExceeded("versions", _versionsSize, 0)
    }
    o.versions = make([]fileVersion, _versionsSize)
    for i := range o.versions {
        (&o.versions[i]).DecodeXDRFrom(xr)

@@ -13,10 +13,9 @@
package db

import (
    "sync"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/osutil"
    "github.com/syncthing/syncthing/internal/sync"
    "github.com/syndtr/goleveldb/leveldb"
)

@@ -50,6 +49,7 @@ func NewFileSet(folder string, db *leveldb.DB) *FileSet {
        folder: folder,
        db: db,
        blockmap: NewBlockMap(db, folder),
        mutex: sync.NewMutex(),
    }

    ldbCheckGlobals(db, []byte(folder))

@@ -13,15 +13,6 @@ type FileInfoTruncated struct {
    ActualSize int64
}

func ToTruncated(file protocol.FileInfo) FileInfoTruncated {
    t := FileInfoTruncated{
        FileInfo: file,
        ActualSize: file.Size(),
    }
    t.FileInfo.Blocks = nil
    return t
}

func (f *FileInfoTruncated) UnmarshalXDR(bs []byte) error {
    err := f.FileInfo.UnmarshalXDR(bs)
    f.ActualSize = f.FileInfo.Size()

@@ -9,12 +9,13 @@ package discover
import (
    "fmt"
    "net"
    "sync"
    "time"

    "testing"

    "github.com/syncthing/protocol"

    "github.com/syncthing/syncthing/internal/sync"
)

var device protocol.DeviceID
@@ -97,7 +98,7 @@ func TestUDP4Success(t *testing.T) {

    // Do a lookup in a separate routine
    addrs := []string{}
    wg := sync.WaitGroup{}
    wg := sync.NewWaitGroup()
    wg.Add(1)
    go func() {
        addrs = client.Lookup(device)
@@ -193,7 +194,7 @@ func TestUDP4Failure(t *testing.T) {

    // Do a lookup in a separate routine
    addrs := []string{}
    wg := sync.WaitGroup{}
    wg := sync.NewWaitGroup()
    wg.Add(1)
    go func() {
        addrs = client.Lookup(device)

@@ -12,16 +12,19 @@ import (
    "net"
    "net/url"
    "strconv"
    "sync"
    "time"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/sync"
)

func init() {
    for _, proto := range []string{"udp", "udp4", "udp6"} {
        Register(proto, func(uri *url.URL, pkt *Announce) (Client, error) {
            c := &UDPClient{}
            c := &UDPClient{
                wg: sync.NewWaitGroup(),
                mut: sync.NewRWMutex(),
            }
            err := c.Start(uri, pkt)
            if err != nil {
                return nil, err

@@ -13,12 +13,12 @@ import (
    "io"
    "net"
    "strconv"
    "sync"
    "time"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/beacon"
    "github.com/syncthing/syncthing/internal/events"
    "github.com/syncthing/syncthing/internal/sync"
)

type Discoverer struct {
@@ -59,6 +59,8 @@ func NewDiscoverer(id protocol.DeviceID, addresses []string) *Discoverer {
        negCacheCutoff: 3 * time.Minute,
        registry: make(map[protocol.DeviceID][]CacheEntry),
        lastLookup: make(map[protocol.DeviceID]time.Time),
        registryLock: sync.NewRWMutex(),
        mut: sync.NewRWMutex(),
    }
}

@@ -140,7 +142,7 @@ func (d *Discoverer) StartGlobal(servers []string, extPort uint16) {

    d.extPort = extPort
    pkt := d.announcementPkt()
    wg := sync.WaitGroup{}
    wg := sync.NewWaitGroup()
    clients := make(chan Client, len(servers))
    for _, address := range servers {
        wg.Add(1)
@@ -216,7 +218,7 @@ func (d *Discoverer) Lookup(device protocol.DeviceID) []string {
    // server client and one local announcement interval has passed. This is
    // to avoid finding local peers on their remote address at startup.
    results := make(chan []string, len(d.clients))
    wg := sync.WaitGroup{}
    wg := sync.NewWaitGroup()
    for _, client := range d.clients {
        wg.Add(1)
        go func(c Client) {

@@ -172,6 +172,9 @@ func (o *Announce) DecodeXDRFrom(xr *xdr.Reader) error {
    o.Magic = xr.ReadUint32()
    (&o.This).DecodeXDRFrom(xr)
    _ExtraSize := int(xr.ReadUint32())
    if _ExtraSize < 0 {
        return xdr.ElementSizeExceeded("Extra", _ExtraSize, 16)
    }
    if _ExtraSize > 16 {
        return xdr.ElementSizeExceeded("Extra", _ExtraSize, 16)
    }
@@ -266,6 +269,9 @@ func (o *Device) UnmarshalXDR(bs []byte) error {
func (o *Device) DecodeXDRFrom(xr *xdr.Reader) error {
    o.ID = xr.ReadBytesMax(32)
    _AddressesSize := int(xr.ReadUint32())
    if _AddressesSize < 0 {
        return xdr.ElementSizeExceeded("Addresses", _AddressesSize, 16)
    }
    if _AddressesSize > 16 {
        return xdr.ElementSizeExceeded("Addresses", _AddressesSize, 16)
    }

@@ -9,8 +9,10 @@ package events

import (
    "errors"
    "sync"
    stdsync "sync"
    "time"

    "github.com/syncthing/syncthing/internal/sync"
)

type EventType int
@@ -101,7 +103,6 @@ type Subscription struct {
    mask EventType
    id int
    events chan Event
    mutex sync.Mutex
}

var Default = NewLogger()
@@ -113,7 +114,8 @@ var (

func NewLogger() *Logger {
    return &Logger{
        subs: make(map[int]*Subscription),
        subs: make(map[int]*Subscription),
        mutex: sync.NewMutex(),
    }
}

@@ -168,9 +170,6 @@ func (l *Logger) Unsubscribe(s *Subscription) {
}

func (s *Subscription) Poll(timeout time.Duration) (Event, error) {
    s.mutex.Lock()
    defer s.mutex.Unlock()

    if debug {
        dl.Debugln("poll", timeout)
    }
@@ -197,15 +196,16 @@ type BufferedSubscription struct {
    next int
    cur int
    mut sync.Mutex
    cond *sync.Cond
    cond *stdsync.Cond
}

func NewBufferedSubscription(s *Subscription, size int) *BufferedSubscription {
    bs := &BufferedSubscription{
        sub: s,
        buf: make([]Event, size),
        mut: sync.NewMutex(),
    }
    bs.cond = sync.NewCond(&bs.mut)
    bs.cond = stdsync.NewCond(bs.mut)
    go bs.pollingLoop()
    return bs
}
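Note the switch from sync.NewCond(&bs.mut) to stdsync.NewCond(bs.mut): a Cond only needs a sync.Locker, so once the mutex comes from the wrapper's constructor (and is presumably already a pointer or interface), it can back a stdlib Cond directly. A small self-contained illustration of composing a Cond with a custom mutex:

package main

import (
    "fmt"
    stdsync "sync"
)

// myMutex stands in for an instrumented wrapper mutex; it satisfies
// sync.Locker via the embedded stdlib mutex.
type myMutex struct{ stdsync.Mutex }

func main() {
    mut := &myMutex{}
    cond := stdsync.NewCond(mut) // any sync.Locker works here

    done := false
    go func() {
        mut.Lock()
        done = true
        mut.Unlock()
        cond.Broadcast()
    }()

    mut.Lock()
    for !done {
        cond.Wait() // releases mut while waiting, reacquires on wake
    }
    mut.Unlock()
    fmt.Println("done")
}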

@@ -69,7 +69,9 @@ var testcases = []testcase{

func TestMatch(t *testing.T) {
    switch runtime.GOOS {
    case "windows", "darwin":
    case "windows":
        testcases = append(testcases, testcase{"foo.txt", "foo.TXT", 0, true})
    case "darwin":
        testcases = append(testcases, testcase{"foo.txt", "foo.TXT", 0, true})
        fallthrough
    default:

@@ -16,10 +16,10 @@ import (
    "path/filepath"
    "regexp"
    "strings"
    "sync"
    "time"

    "github.com/syncthing/syncthing/internal/fnmatch"
    "github.com/syncthing/syncthing/internal/sync"
)

type Pattern struct {
@@ -48,6 +48,7 @@ func New(withCache bool) *Matcher {
    m := &Matcher{
        withCache: withCache,
        stop: make(chan struct{}),
        mut: sync.NewMutex(),
    }
    if withCache {
        go m.clean(2 * time.Hour)

@@ -7,9 +7,8 @@
package model

import (
    "sync"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/sync"
)

// deviceActivity tracks the number of outstanding requests per device and can
@@ -23,6 +22,7 @@ type deviceActivity struct {
func newDeviceActivity() *deviceActivity {
    return &deviceActivity{
        act: make(map[protocol.DeviceID]int),
        mut: sync.NewMutex(),
    }
}

@@ -1,25 +1,16 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package model

import (
    "sync"
    "time"

    "github.com/syncthing/syncthing/internal/events"
    "github.com/syncthing/syncthing/internal/sync"
)

type folderState int
@@ -28,7 +19,7 @@ const (
    FolderIdle folderState = iota
    FolderScanning
    FolderSyncing
    FolderCleaning
    FolderError
)

func (s folderState) String() string {
@@ -37,10 +28,10 @@ func (s folderState) String() string {
        return "idle"
    case FolderScanning:
        return "scanning"
    case FolderCleaning:
        return "cleaning"
    case FolderSyncing:
        return "syncing"
    case FolderError:
        return "error"
    default:
        return "unknown"
    }
@@ -51,10 +42,16 @@ type stateTracker struct {

    mut sync.Mutex
    current folderState
    err error
    changed time.Time
}

// setState sets the new folder state, for states other than FolderError.
func (s *stateTracker) setState(newState folderState) {
    if newState == FolderError {
        panic("must use setError")
    }

    s.mut.Lock()
    if newState != s.current {
        /* This should hold later...
@@ -74,6 +71,7 @@ func (s *stateTracker) setState(newState folderState) {
    }

    s.current = newState
    s.err = nil
    s.changed = time.Now()

    events.Default.Log(events.StateChanged, eventData)
@@ -81,9 +79,35 @@ func (s *stateTracker) setState(newState folderState) {
    s.mut.Unlock()
}

func (s *stateTracker) getState() (current folderState, changed time.Time) {
// getState returns the current state, the time when it last changed, and the
// current error or nil.
func (s *stateTracker) getState() (current folderState, changed time.Time, err error) {
    s.mut.Lock()
    current, changed = s.current, s.changed
    current, changed, err = s.current, s.changed, s.err
    s.mut.Unlock()
    return
}

// setError sets the folder state to FolderError with the specified error.
func (s *stateTracker) setError(err error) {
    s.mut.Lock()
    if s.current != FolderError || s.err.Error() != err.Error() {
        eventData := map[string]interface{}{
            "folder": s.folder,
            "to": FolderError.String(),
            "from": s.current.String(),
            "error": err.Error(),
        }

        if !s.changed.IsZero() {
            eventData["duration"] = time.Since(s.changed).Seconds()
        }

        s.current = FolderError
        s.err = err
        s.changed = time.Now()

        events.Default.Log(events.StateChanged, eventData)
    }
    s.mut.Unlock()
}

@@ -17,9 +17,8 @@ import (
    "net"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    stdsync "sync"
    "time"

    "github.com/syncthing/protocol"
@@ -31,6 +30,7 @@ import (
    "github.com/syncthing/syncthing/internal/scanner"
    "github.com/syncthing/syncthing/internal/stats"
    "github.com/syncthing/syncthing/internal/symlinks"
    "github.com/syncthing/syncthing/internal/sync"
    "github.com/syncthing/syncthing/internal/versioner"
    "github.com/syndtr/goleveldb/leveldb"
)

@@ -49,8 +49,9 @@ type service interface {
    Jobs() ([]string, []string) // In progress, Queued
    BringToFront(string)

    setState(folderState)
    getState() (folderState, time.Time)
    setState(state folderState)
    setError(err error)
    getState() (folderState, time.Time, error)
}

type Model struct {
@@ -85,7 +86,7 @@ type Model struct {
}

var (
    SymlinkWarning = sync.Once{}
    SymlinkWarning = stdsync.Once{}
)

// NewModel creates and starts a new model. The model starts in read-only mode,
@@ -113,23 +114,25 @@ func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName,
        protoConn: make(map[protocol.DeviceID]protocol.Connection),
        rawConn: make(map[protocol.DeviceID]io.Closer),
        deviceVer: make(map[protocol.DeviceID]string),

        fmut: sync.NewRWMutex(),
        pmut: sync.NewRWMutex(),
    }
    if cfg.Options().ProgressUpdateIntervalS > -1 {
        go m.progressEmitter.Serve()
    }

    var timeout = 20 * 60 // seconds
    if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
        it, err := strconv.Atoi(t)
        if err == nil {
            timeout = it
        }
    }
    deadlockDetect(&m.fmut, time.Duration(timeout)*time.Second)
    deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)
    return m
}

// Starts deadlock detector on the models locks which causes panics in case
// the locks cannot be acquired in the given timeout period.
func (m *Model) StartDeadlockDetector(timeout time.Duration) {
    l.Infof("Starting deadlock detector with %v timeout", timeout)
    deadlockDetect(m.fmut, timeout)
    deadlockDetect(m.pmut, timeout)
}

// StartRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer devices.
@@ -144,7 +147,7 @@ func (m *Model) StartFolderRW(folder string) {
    if ok {
        panic("cannot start already running folder " + folder)
    }
    p := newRWFolder(m, cfg)
    p := newRWFolder(m, m.shortID, cfg)
    m.folderRunners[folder] = p
    m.fmut.Unlock()

@@ -372,53 +375,71 @@ func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
    return
}

// NeedFiles returns the list of currently needed files in progress, queued,
// and to be queued on next puller iteration. Also takes a soft cap which is
// only respected when adding files from the model rather than the runner queue.
func (m *Model) NeedFolderFiles(folder string, max int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
// NeedFiles returns a paginated list of currently needed files in progress, queued,
// and to be queued on next puller iteration, as well as the total number of
// files currently needed.
func (m *Model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int) {
    m.fmut.RLock()
    defer m.fmut.RUnlock()

    if rf, ok := m.folderFiles[folder]; ok {
    var progress, queued, rest []db.FileInfoTruncated
    var seen map[string]bool
    total := 0

    runner, ok := m.folderRunners[folder]
    if ok {
        progressNames, queuedNames := runner.Jobs()

        progress = make([]db.FileInfoTruncated, len(progressNames))
        queued = make([]db.FileInfoTruncated, len(queuedNames))
        seen = make(map[string]bool, len(progressNames)+len(queuedNames))

        for i, name := range progressNames {
            if f, ok := rf.GetGlobalTruncated(name); ok {
                progress[i] = f
                seen[name] = true
            }
        }

        for i, name := range queuedNames {
            if f, ok := rf.GetGlobalTruncated(name); ok {
                queued[i] = f
                seen[name] = true
            }
        }
    }
    left := max - len(progress) - len(queued)
    if max < 1 || left > 0 {
        rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
            left--
            ft := f.(db.FileInfoTruncated)
            if !seen[ft.Name] {
                rest = append(rest, ft)
            }
            return max < 1 || left > 0
        })
    }
    return progress, queued, rest
    rf, ok := m.folderFiles[folder]
    if !ok {
        return nil, nil, nil, 0
    }
    return nil, nil, nil

    var progress, queued, rest []db.FileInfoTruncated
    var seen map[string]struct{}

    skip := (page - 1) * perpage
    get := perpage

    runner, ok := m.folderRunners[folder]
    if ok {
        allProgressNames, allQueuedNames := runner.Jobs()

        var progressNames, queuedNames []string
        progressNames, skip, get = getChunk(allProgressNames, skip, get)
        queuedNames, skip, get = getChunk(allQueuedNames, skip, get)

        progress = make([]db.FileInfoTruncated, len(progressNames))
        queued = make([]db.FileInfoTruncated, len(queuedNames))
        seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))

        for i, name := range progressNames {
            if f, ok := rf.GetGlobalTruncated(name); ok {
                progress[i] = f
                seen[name] = struct{}{}
            }
        }

        for i, name := range queuedNames {
            if f, ok := rf.GetGlobalTruncated(name); ok {
                queued[i] = f
                seen[name] = struct{}{}
            }
        }
    }

    rest = make([]db.FileInfoTruncated, 0, perpage)
    rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
        total++
        if skip > 0 {
            skip--
            return true
        }
        if get > 0 {
            ft := f.(db.FileInfoTruncated)
            if _, ok := seen[ft.Name]; !ok {
                rest = append(rest, ft)
                get--
            }
        }
        return true
    })

    return progress, queued, rest, total
}
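getChunk itself is outside this hunk; from the call sites it evidently slices a name list against the remaining skip/get budget and hands the leftover budget to the next list. A plausible reconstruction, assumptions and all (the real helper may differ):

package main

import "fmt"

// getChunk returns the part of names selected by the current skip/get window,
// plus the skip and get budget left over for the next list. Hypothetical
// reconstruction of the helper used by NeedFolderFiles.
func getChunk(names []string, skip, get int) ([]string, int, int) {
    if skip >= len(names) {
        // This whole list is skipped past.
        return nil, skip - len(names), get
    }
    if len(names)-skip > get {
        // The page ends inside this list.
        return names[skip : skip+get], 0, 0
    }
    // The page covers the tail of this list and continues into the next.
    return names[skip:], 0, get - (len(names) - skip)
}

func main() {
    // Page 2 at 3 per page, over two concatenated lists:
    a := []string{"a", "b", "c", "d"}
    b := []string{"e", "f"}
    skip, get := 3, 3 // (page-1)*perpage, perpage
    first, skip, get := getChunk(a, skip, get)
    second, _, _ := getChunk(b, skip, get)
    fmt.Println(first, second) // [d] [e f]
}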
|

// Index is called when a new device is connected and we receive their full index.

@@ -770,15 +791,23 @@ func (m *Model) ReplaceLocal(folder string, fs []protocol.FileInfo) {

func (m *Model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool) {
    m.fmut.RLock()
    f, ok := m.folderFiles[folder].Get(protocol.LocalDeviceID, file)
    fs, ok := m.folderFiles[folder]
    m.fmut.RUnlock()
    if !ok {
        return protocol.FileInfo{}, false
    }
    f, ok := fs.Get(protocol.LocalDeviceID, file)
    return f, ok
}

func (m *Model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) {
    m.fmut.RLock()
    f, ok := m.folderFiles[folder].GetGlobal(file)
    fs, ok := m.folderFiles[folder]
    m.fmut.RUnlock()
    if !ok {
        return protocol.FileInfo{}, false
    }
    f, ok := fs.GetGlobal(file)
    return f, ok
}
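The reshuffled lookups above fix a latent nil dereference: indexing a Go map with a missing key returns the zero value, so the old one-liners would call Get or GetGlobal on a nil file set whenever the folder was unknown. A tiny self-contained illustration — fileSet here is a hypothetical stand-in for the real type:

package main

import "fmt"

type fileSet struct{ n int }

func (f *fileSet) Get() int { return f.n } // dereferences the receiver

func main() {
    m := map[string]*fileSet{}
    fs, ok := m["missing"]
    fmt.Println(fs, ok) // <nil> false
    // fs.Get() would panic here; checking ok first, as the diff does, avoids it.
}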

@@ -1036,8 +1065,8 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
    m.fmut.RUnlock()

    events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
        "folder":   folder,
        "numFiles": len(fs),
        "folder": folder,
        "items":  len(fs),
    })
}

@@ -1085,16 +1114,16 @@ func (m *Model) AddFolder(cfg config.FolderConfiguration) {

func (m *Model) ScanFolders() map[string]error {
    m.fmut.RLock()
    var folders = make([]string, 0, len(m.folderCfgs))
    folders := make([]string, 0, len(m.folderCfgs))
    for folder := range m.folderCfgs {
        folders = append(folders, folder)
    }
    m.fmut.RUnlock()

    var errors = make(map[string]error, len(m.folderCfgs))
    var errorsMut sync.Mutex
    errors := make(map[string]error, len(m.folderCfgs))
    errorsMut := sync.NewMutex()

    var wg sync.WaitGroup
    wg := sync.NewWaitGroup()
    wg.Add(len(folders))
    for _, folder := range folders {
        folder := folder
@@ -1104,11 +1133,15 @@ func (m *Model) ScanFolders() map[string]error {
            errorsMut.Lock()
            errors[folder] = err
            errorsMut.Unlock()

            // Potentially sets the error twice, once in the scanner just
            // by doing a check, and once here, if the error returned is
            // the same one as returned by CheckFolderHealth, though
            // duplicate set is handled by SetFolderError
            m.cfg.SetFolderError(folder, err)
            // duplicate set is handled by setError.
            m.fmut.RLock()
            srv := m.folderRunners[folder]
            m.fmut.RUnlock()
            srv.setError(err)
        }
        wg.Done()
    }()
@@ -1184,32 +1217,31 @@ nextSub:
    }

    runner.setState(FolderScanning)
    defer runner.setState(FolderIdle)
    fchan, err := w.Walk()

    fchan, err := w.Walk()
    if err != nil {
        m.cfg.SetFolderError(folder, err)
        runner.setError(err)
        return err
    }
    batchSize := 100
    batch := make([]protocol.FileInfo, 0, batchSize)

    batchSizeFiles := 100
    batchSizeBlocks := 2048 // about 256 MB

    batch := make([]protocol.FileInfo, 0, batchSizeFiles)
    blocksHandled := 0

    for f := range fchan {
        events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
            "folder":   folder,
            "name":     f.Name,
            "modified": time.Unix(f.Modified, 0),
            "flags":    fmt.Sprintf("0%o", f.Flags),
            "size":     f.Size(),
        })
        if len(batch) == batchSize {
        if len(batch) == batchSizeFiles || blocksHandled > batchSizeBlocks {
            if err := m.CheckFolderHealth(folder); err != nil {
                l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, err)
                return err
            }
            fs.Update(protocol.LocalDeviceID, batch)
            m.updateLocals(folder, batch)
            batch = batch[:0]
            blocksHandled = 0
        }
        batch = append(batch, f)
        blocksHandled += len(f.Blocks)
    }
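The scan loop above now flushes on whichever limit trips first: 100 files or 2048 blocks, which at Syncthing's standard 128 KiB block size works out to roughly 2048 × 128 KiB ≈ 256 MiB, matching the "about 256 MB" comment. A generic sketch of the dual-threshold batching pattern — all names and numbers here are illustrative, not the project's:

package main

import "fmt"

func main() {
    const maxItems = 100   // cf. batchSizeFiles
    const maxBlocks = 2048 // cf. batchSizeBlocks, ~256 MiB at 128 KiB per block

    batch := make([]int, 0, maxItems)
    blocks := 0
    flush := func() {
        fmt.Printf("flushing %d items, %d blocks\n", len(batch), blocks)
        batch, blocks = batch[:0], 0
    }

    for i := 0; i < 500; i++ {
        if len(batch) == maxItems || blocks > maxBlocks {
            flush() // flush on whichever threshold trips first
        }
        batch = append(batch, i)
        blocks += 16 // pretend each item carries 16 blocks
    }
    if len(batch) > 0 {
        flush() // final partial batch
    }
}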

    if err := m.CheckFolderHealth(folder); err != nil {
@@ -1244,8 +1276,8 @@ nextSub:
        return true
    }

    if len(batch) == batchSize {
        fs.Update(protocol.LocalDeviceID, batch)
    if len(batch) == batchSizeFiles {
        m.updateLocals(folder, batch)
        batch = batch[:0]
    }

@@ -1260,15 +1292,8 @@ nextSub:
        Modified: f.Modified,
        Version:  f.Version, // The file is still the same, so don't bump version
    }
    events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
        "folder":   folder,
        "name":     f.Name,
        "modified": time.Unix(f.Modified, 0),
        "flags":    fmt.Sprintf("0%o", f.Flags),
        "size":     f.Size(),
    })
    batch = append(batch, nf)
} else if _, err := os.Lstat(filepath.Join(folderCfg.Path(), f.Name)); err != nil {
} else if _, err := osutil.Lstat(filepath.Join(folderCfg.Path(), f.Name)); err != nil {
    // File has been deleted.

    // We don't specifically verify that the error is
@@ -1284,22 +1309,16 @@ nextSub:
        Modified: f.Modified,
        Version:  f.Version.Update(m.shortID),
    }
    events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
        "folder":   folder,
        "name":     f.Name,
        "modified": time.Unix(f.Modified, 0),
        "flags":    fmt.Sprintf("0%o", f.Flags),
        "size":     f.Size(),
    })
    batch = append(batch, nf)
        }
    }
    return true
})
if len(batch) > 0 {
    fs.Update(protocol.LocalDeviceID, batch)
    m.updateLocals(folder, batch)
}

runner.setState(FolderIdle)
return nil
}

@@ -1342,22 +1361,28 @@ func (m *Model) clusterConfig(device protocol.DeviceID) protocol.ClusterConfigMe
    return cm
}

func (m *Model) State(folder string) (string, time.Time) {
func (m *Model) State(folder string) (string, time.Time, error) {
    m.fmut.RLock()
    runner, ok := m.folderRunners[folder]
    m.fmut.RUnlock()
    if !ok {
        return "", time.Time{}
        // The returned error should be an actual folder error, so returning
        // errors.New("does not exist") or similar here would be
        // inappropriate.
        return "", time.Time{}, nil
    }
    state, changed := runner.getState()
    return state.String(), changed
    state, changed, err := runner.getState()
    return state.String(), changed, err
}

func (m *Model) Override(folder string) {
    m.fmut.RLock()
    fs := m.folderFiles[folder]
    fs, ok := m.folderFiles[folder]
    runner := m.folderRunners[folder]
    m.fmut.RUnlock()
    if !ok {
        return
    }

    runner.setState(FolderScanning)
    batch := make([]protocol.FileInfo, 0, indexBatchSize)
@@ -1479,8 +1504,8 @@ func (m *Model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly
    }

    if !dirsonly && base != "" {
        last[base] = []int64{
            f.Modified, f.Size(),
        last[base] = []interface{}{
            time.Unix(f.Modified, 0), f.Size(),
        }
    }

@@ -1530,7 +1555,7 @@ func (m *Model) BringToFront(folder, file string) {
func (m *Model) CheckFolderHealth(id string) error {
    folder, ok := m.cfg.Folders()[id]
    if !ok {
        return errors.New("Folder does not exist")
        return errors.New("folder does not exist")
    }

    fi, err := os.Stat(folder.Path())
@@ -1540,9 +1565,9 @@ func (m *Model) CheckFolderHealth(id string) error {
        // that all files have been deleted which might not be the case,
        // so mark it as invalid instead.
        if err != nil || !fi.IsDir() {
            err = errors.New("Folder path missing")
            err = errors.New("folder path missing")
        } else if !folder.HasMarker() {
            err = errors.New("Folder marker missing")
            err = errors.New("folder marker missing")
        }
    } else if os.IsNotExist(err) {
        // If we don't have any files in the index, and the directory
@@ -1557,35 +1582,29 @@ func (m *Model) CheckFolderHealth(id string) error {
        err = folder.CreateMarker()
    }

    if err == nil {
        if folder.Invalid != "" {
            l.Infof("Starting folder %q after error %q", folder.ID, folder.Invalid)
            m.cfg.SetFolderError(id, nil)
    m.fmut.RLock()
    runner, runnerExists := m.folderRunners[folder.ID]
    m.fmut.RUnlock()

    var oldErr error
    if runnerExists {
        _, _, oldErr = runner.getState()
    }

    if err != nil {
        if oldErr != nil && oldErr.Error() != err.Error() {
            l.Infof("Folder %q error changed: %q -> %q", folder.ID, oldErr, err)
        } else if oldErr == nil {
            l.Warnf("Stopping folder %q - %v", folder.ID, err)
        }

        if folder, ok := m.cfg.Folders()[id]; !ok || folder.Invalid != "" {
            panic("Unable to unset folder \"" + id + "\" error.")
        if runnerExists {
            runner.setError(err)
        }
    } else if oldErr != nil {
        l.Infof("Folder %q error is cleared, restarting", folder.ID)
        if runnerExists {
            runner.setState(FolderIdle)
        }

        return nil
    }

    if folder.Invalid == err.Error() {
        return err
    }

    // folder is a copy of the original struct, hence Invalid value is
    // preserved after the set.
    m.cfg.SetFolderError(id, err)

    if folder.Invalid == "" {
        l.Warnf("Stopping folder %q - %v", folder.ID, err)
    } else {
        l.Infof("Folder %q error changed: %q -> %q", folder.ID, folder.Invalid, err)
    }

    if folder, ok := m.cfg.Folders()[id]; !ok || folder.Invalid != err.Error() {
        panic("Unable to set folder \"" + id + "\" error.")
    }

    return err
@@ -1615,3 +1634,17 @@ func symlinkInvalid(isLink bool) bool {
    }
    return false
}

// Skips `skip` elements and retrieves up to `get` elements from a given slice.
// Returns the resulting slice, plus how many elements are left to skip or
// copy in order to satisfy the requested counts, should the slice not be
// big enough.
func getChunk(data []string, skip, get int) ([]string, int, int) {
    l := len(data)
    if l <= skip {
        return []string{}, skip - l, get
    } else if l < skip+get {
        return data[skip:l], 0, get - (l - skip)
    }
    return data[skip : skip+get], 0, 0
}
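To make the contract concrete, here is getChunk (copied verbatim from the diff) exercised across two successive name lists, the way the paginated lookup above chains it over progress and queued names. The data values are made up:

package main

import "fmt"

func getChunk(data []string, skip, get int) ([]string, int, int) {
    l := len(data)
    if l <= skip {
        return []string{}, skip - l, get
    } else if l < skip+get {
        return data[skip:l], 0, get - (l - skip)
    }
    return data[skip : skip+get], 0, 0
}

func main() {
    progress := []string{"a", "b", "c"}
    queued := []string{"d", "e", "f", "g", "h"}

    // A page translating to skip=4, get=2 spans both slices:
    names, skip, get := getChunk(progress, 4, 2)
    fmt.Println(names, skip, get) // [] 1 2 — progress fully skipped, 1 skip left over
    names, skip, get = getChunk(queued, skip, get)
    fmt.Println(names, skip, get) // [e f] 0 0 — the page is filled from queued
}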

@@ -14,7 +14,6 @@ import (
    "math/rand"
    "os"
    "path/filepath"
    "reflect"
    "strconv"
    "testing"
    "time"
@@ -621,21 +620,25 @@ func TestROScanRecovery(t *testing.T) {
        if time.Now().After(timeout) {
            return fmt.Errorf("Timed out waiting for status: %s, current status: %s", status, m.cfg.Folders()["default"].Invalid)
        }
        if m.cfg.Folders()["default"].Invalid == status {
        _, _, err := m.State("default")
        if err == nil && status == "" {
            return nil
        }
        if err != nil && err.Error() == status {
            return nil
        }
        time.Sleep(10 * time.Millisecond)
    }
}

if err := waitFor("Folder path missing"); err != nil {
if err := waitFor("folder path missing"); err != nil {
    t.Error(err)
    return
}

os.Mkdir(fcfg.RawPath, 0700)

if err := waitFor("Folder marker missing"); err != nil {
if err := waitFor("folder marker missing"); err != nil {
    t.Error(err)
    return
}
@@ -654,14 +657,14 @@ func TestROScanRecovery(t *testing.T) {

os.Remove(filepath.Join(fcfg.RawPath, ".stfolder"))

if err := waitFor("Folder marker missing"); err != nil {
if err := waitFor("folder marker missing"); err != nil {
    t.Error(err)
    return
}

os.Remove(fcfg.RawPath)

if err := waitFor("Folder path missing"); err != nil {
if err := waitFor("folder path missing"); err != nil {
    t.Error(err)
    return
}
@@ -701,21 +704,25 @@ func TestRWScanRecovery(t *testing.T) {
        if time.Now().After(timeout) {
            return fmt.Errorf("Timed out waiting for status: %s, current status: %s", status, m.cfg.Folders()["default"].Invalid)
        }
        if m.cfg.Folders()["default"].Invalid == status {
        _, _, err := m.State("default")
        if err == nil && status == "" {
            return nil
        }
        if err != nil && err.Error() == status {
            return nil
        }
        time.Sleep(10 * time.Millisecond)
    }
}

if err := waitFor("Folder path missing"); err != nil {
if err := waitFor("folder path missing"); err != nil {
    t.Error(err)
    return
}

os.Mkdir(fcfg.RawPath, 0700)

if err := waitFor("Folder marker missing"); err != nil {
if err := waitFor("folder marker missing"); err != nil {
    t.Error(err)
    return
}
@@ -734,14 +741,14 @@ func TestRWScanRecovery(t *testing.T) {

os.Remove(filepath.Join(fcfg.RawPath, ".stfolder"))

if err := waitFor("Folder marker missing"); err != nil {
if err := waitFor("folder marker missing"); err != nil {
    t.Error(err)
    return
}

os.Remove(fcfg.RawPath)

if err := waitFor("Folder path missing"); err != nil {
if err := waitFor("folder path missing"); err != nil {
    t.Error(err)
    return
}
@@ -767,7 +774,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        }
    }

    filedata := []int64{0x666, 0xa}
    filedata := []interface{}{time.Unix(0x666, 0), 0xa}

    testdata := []protocol.FileInfo{
        b(false, "another"),
@@ -839,13 +846,13 @@ func TestGlobalDirectoryTree(t *testing.T) {

    result := m.GlobalDirectoryTree("default", "", -1, false)

    if !reflect.DeepEqual(result, expectedResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(expectedResult))
    if mm(result) != mm(expectedResult) {
        t.Errorf("Does not match:\n%#v\n%#v", result, expectedResult)
    }

    result = m.GlobalDirectoryTree("default", "another", -1, false)

    if !reflect.DeepEqual(result, expectedResult["another"]) {
    if mm(result) != mm(expectedResult["another"]) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(expectedResult["another"]))
    }

@@ -857,7 +864,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        "rootfile": filedata,
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -878,7 +885,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        "rootfile": filedata,
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -908,7 +915,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        },
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -927,7 +934,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        },
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -937,7 +944,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        "file": filedata,
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -946,7 +953,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        "with": map[string]interface{}{},
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -957,7 +964,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        },
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -970,7 +977,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        },
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -983,7 +990,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
        },
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -991,7 +998,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
    result = m.GlobalDirectoryTree("default", "som", -1, false)
    currentResult = map[string]interface{}{}

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }
}
@@ -1016,7 +1023,7 @@ func TestGlobalDirectorySelfFixing(t *testing.T) {
        }
    }

    filedata := []int64{0x666, 0xa}
    filedata := []interface{}{time.Unix(0x666, 0).Format(time.RFC3339), 0xa}

    testdata := []protocol.FileInfo{
        b(true, "another", "directory", "afile"),
@@ -1097,7 +1104,7 @@ func TestGlobalDirectorySelfFixing(t *testing.T) {

    result := m.GlobalDirectoryTree("default", "", -1, false)

    if !reflect.DeepEqual(result, expectedResult) {
    if mm(result) != mm(expectedResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(expectedResult))
    }

@@ -1108,7 +1115,7 @@ func TestGlobalDirectorySelfFixing(t *testing.T) {
        },
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -1117,7 +1124,7 @@ func TestGlobalDirectorySelfFixing(t *testing.T) {
        "invalid": map[string]interface{}{},
    }

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }

@@ -1126,7 +1133,7 @@ func TestGlobalDirectorySelfFixing(t *testing.T) {
    result = m.GlobalDirectoryTree("default", "xthis", 1, false)
    currentResult = map[string]interface{}{}

    if !reflect.DeepEqual(result, currentResult) {
    if mm(result) != mm(currentResult) {
        t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
    }
}

@@ -9,11 +9,11 @@ package model
import (
    "path/filepath"
    "reflect"
    "sync"
    "time"

    "github.com/syncthing/syncthing/internal/config"
    "github.com/syncthing/syncthing/internal/events"
    "github.com/syncthing/syncthing/internal/sync"
)

type ProgressEmitter struct {
@@ -35,6 +35,7 @@ func NewProgressEmitter(cfg *config.Wrapper) *ProgressEmitter {
    registry: make(map[string]*sharedPullerState),
    last:     make(map[string]map[string]*pullerProgress),
    timer:    time.NewTimer(time.Millisecond),
    mut:      sync.NewMutex(),
}
t.Changed(cfg.Raw())
cfg.Subscribe(t)

@@ -12,6 +12,7 @@ import (

    "github.com/syncthing/syncthing/internal/config"
    "github.com/syncthing/syncthing/internal/events"
    "github.com/syncthing/syncthing/internal/sync"
)

var timeout = 10 * time.Millisecond
@@ -50,7 +51,9 @@ func TestProgressEmitter(t *testing.T) {

    expectTimeout(w, t)

    s := sharedPullerState{}
    s := sharedPullerState{
        mut: sync.NewMutex(),
    }
    p.Register(&s)

    expectEvent(w, t, 1)

@@ -6,21 +6,34 @@

package model

import "sync"
import (
    "math/rand"
    "sort"

    "github.com/syncthing/syncthing/internal/sync"
)

type jobQueue struct {
    progress []string
    queued   []string
    queued   []jobQueueEntry
    mut      sync.Mutex
}

func newJobQueue() *jobQueue {
    return &jobQueue{}
type jobQueueEntry struct {
    name     string
    size     int64
    modified int64
}

func (q *jobQueue) Push(file string) {
func newJobQueue() *jobQueue {
    return &jobQueue{
        mut: sync.NewMutex(),
    }
}

func (q *jobQueue) Push(file string, size, modified int64) {
    q.mut.Lock()
    q.queued = append(q.queued, file)
    q.queued = append(q.queued, jobQueueEntry{file, size, modified})
    q.mut.Unlock()
}

@@ -32,8 +45,7 @@ func (q *jobQueue) Pop() (string, bool) {
        return "", false
    }

    var f string
    f = q.queued[0]
    f := q.queued[0].name
    q.queued = q.queued[1:]
    q.progress = append(q.progress, f)

@@ -45,7 +57,7 @@ func (q *jobQueue) BringToFront(filename string) {
    defer q.mut.Unlock()

    for i, cur := range q.queued {
        if cur == filename {
        if cur.name == filename {
            if i > 0 {
                // Shift the elements before the selected element one step to
                // the right, overwriting the selected element
@@ -79,7 +91,62 @@ func (q *jobQueue) Jobs() ([]string, []string) {
    copy(progress, q.progress)

    queued := make([]string, len(q.queued))
    copy(queued, q.queued)
    for i := range q.queued {
        queued[i] = q.queued[i].name
    }

    return progress, queued
}

func (q *jobQueue) Shuffle() {
    q.mut.Lock()
    defer q.mut.Unlock()

    l := len(q.queued)
    for i := range q.queued {
        r := rand.Intn(l)
        q.queued[i], q.queued[r] = q.queued[r], q.queued[i]
    }
}
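One aside on Shuffle above: swapping every position with an index drawn from the whole slice is the "naive" shuffle, which is slightly biased toward some permutations. That is harmless for this use (the caller only needs some reordering), but the unbiased Fisher–Yates form is barely longer — a sketch, not the project's code:

package main

import (
    "fmt"
    "math/rand"
)

// shuffle is the unbiased Fisher–Yates variant: each position i is swapped
// with a random index drawn from the shrinking prefix [0, i].
func shuffle(s []string) {
    for i := len(s) - 1; i > 0; i-- {
        r := rand.Intn(i + 1)
        s[i], s[r] = s[r], s[i]
    }
}

func main() {
    s := []string{"f1", "f2", "f3", "f4"}
    shuffle(s)
    fmt.Println(s)
}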

func (q *jobQueue) SortSmallestFirst() {
    q.mut.Lock()
    defer q.mut.Unlock()

    sort.Sort(smallestFirst(q.queued))
}

func (q *jobQueue) SortLargestFirst() {
    q.mut.Lock()
    defer q.mut.Unlock()

    sort.Sort(sort.Reverse(smallestFirst(q.queued)))
}

func (q *jobQueue) SortOldestFirst() {
    q.mut.Lock()
    defer q.mut.Unlock()

    sort.Sort(oldestFirst(q.queued))
}

func (q *jobQueue) SortNewestFirst() {
    q.mut.Lock()
    defer q.mut.Unlock()

    sort.Sort(sort.Reverse(oldestFirst(q.queued)))
}

// The usual sort.Interface boilerplate

type smallestFirst []jobQueueEntry

func (q smallestFirst) Len() int           { return len(q) }
func (q smallestFirst) Less(a, b int) bool { return q[a].size < q[b].size }
func (q smallestFirst) Swap(a, b int)      { q[a], q[b] = q[b], q[a] }

type oldestFirst []jobQueueEntry

func (q oldestFirst) Len() int           { return len(q) }
func (q oldestFirst) Less(a, b int) bool { return q[a].modified < q[b].modified }
func (q oldestFirst) Swap(a, b int)      { q[a], q[b] = q[b], q[a] }
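Taken together, the queue's lifecycle as the puller drives it looks roughly like this. An in-package sketch — the names, sizes, and mtimes are made up, and Done is the retirement call exercised by the benchmarks below:

q := newJobQueue()
q.Push("big.iso", 4096, 100) // name, size, modified
q.Push("old.txt", 512, 10)
q.Push("new.txt", 512, 900)

q.SortSmallestFirst() // the 512-byte entries now precede the 4 KiB one

if name, ok := q.Pop(); ok { // moves the entry from queued to progress
    // ... transfer the file ...
    q.Done(name) // and finally retire it from progress
}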

@@ -15,10 +15,10 @@ import (
func TestJobQueue(t *testing.T) {
    // Some random actions
    q := newJobQueue()
    q.Push("f1")
    q.Push("f2")
    q.Push("f3")
    q.Push("f4")
    q.Push("f1", 0, 0)
    q.Push("f2", 0, 0)
    q.Push("f3", 0, 0)
    q.Push("f4", 0, 0)

    progress, queued := q.Jobs()
    if len(progress) != 0 || len(queued) != 4 {
@@ -43,7 +43,7 @@ func TestJobQueue(t *testing.T) {
        t.Fatal("Wrong length", len(progress), len(queued))
    }

    q.Push(n)
    q.Push(n, 0, 0)
    progress, queued = q.Jobs()
    if len(progress) != 0 || len(queued) != 4 {
        t.Fatal("Wrong length")
@@ -120,10 +120,10 @@ func TestJobQueue(t *testing.T) {

func TestBringToFront(t *testing.T) {
    q := newJobQueue()
    q.Push("f1")
    q.Push("f2")
    q.Push("f3")
    q.Push("f4")
    q.Push("f1", 0, 0)
    q.Push("f2", 0, 0)
    q.Push("f3", 0, 0)
    q.Push("f4", 0, 0)

    _, queued := q.Jobs()
    if !reflect.DeepEqual(queued, []string{"f1", "f2", "f3", "f4"}) {
@@ -159,12 +159,101 @@ func TestBringToFront(t *testing.T) {
    }
}

func TestShuffle(t *testing.T) {
    q := newJobQueue()
    q.Push("f1", 0, 0)
    q.Push("f2", 0, 0)
    q.Push("f3", 0, 0)
    q.Push("f4", 0, 0)

    // This test will fail about once in eight million runs (1 / (4!)^5 = 1 / 7,962,624) :)
    for i := 0; i < 5; i++ {
        q.Shuffle()
        _, queued := q.Jobs()
        if l := len(queued); l != 4 {
            t.Fatalf("Weird length %d returned from Jobs()", l)
        }

        t.Logf("%v", queued)
        if !reflect.DeepEqual(queued, []string{"f1", "f2", "f3", "f4"}) {
            // The queue was shuffled
            return
        }
    }

    t.Error("Queue was not shuffled after five attempts.")
}

func TestSortBySize(t *testing.T) {
    q := newJobQueue()
    q.Push("f1", 20, 0)
    q.Push("f2", 40, 0)
    q.Push("f3", 30, 0)
    q.Push("f4", 10, 0)

    q.SortSmallestFirst()

    _, actual := q.Jobs()
    if l := len(actual); l != 4 {
        t.Fatalf("Weird length %d returned from Jobs()", l)
    }
    expected := []string{"f4", "f1", "f3", "f2"}

    if !reflect.DeepEqual(actual, expected) {
        t.Errorf("SortSmallestFirst(): %#v != %#v", actual, expected)
    }

    q.SortLargestFirst()

    _, actual = q.Jobs()
    if l := len(actual); l != 4 {
        t.Fatalf("Weird length %d returned from Jobs()", l)
    }
    expected = []string{"f2", "f3", "f1", "f4"}

    if !reflect.DeepEqual(actual, expected) {
        t.Errorf("SortLargestFirst(): %#v != %#v", actual, expected)
    }
}

func TestSortByAge(t *testing.T) {
    q := newJobQueue()
    q.Push("f1", 0, 20)
    q.Push("f2", 0, 40)
    q.Push("f3", 0, 30)
    q.Push("f4", 0, 10)

    q.SortOldestFirst()

    _, actual := q.Jobs()
    if l := len(actual); l != 4 {
        t.Fatalf("Weird length %d returned from Jobs()", l)
    }
    expected := []string{"f4", "f1", "f3", "f2"}

    if !reflect.DeepEqual(actual, expected) {
        t.Errorf("SortOldestFirst(): %#v != %#v", actual, expected)
    }

    q.SortNewestFirst()

    _, actual = q.Jobs()
    if l := len(actual); l != 4 {
        t.Fatalf("Weird length %d returned from Jobs()", l)
    }
    expected = []string{"f2", "f3", "f1", "f4"}

    if !reflect.DeepEqual(actual, expected) {
        t.Errorf("SortNewestFirst(): %#v != %#v", actual, expected)
    }
}

func BenchmarkJobQueueBump(b *testing.B) {
    files := genFiles(b.N)

    q := newJobQueue()
    for _, f := range files {
        q.Push(f.Name)
        q.Push(f.Name, 0, 0)
    }

    b.ResetTimer()
@@ -180,7 +269,7 @@ func BenchmarkJobQueuePushPopDone10k(b *testing.B) {
    for i := 0; i < b.N; i++ {
        q := newJobQueue()
        for _, f := range files {
            q.Push(f.Name)
            q.Push(f.Name, 0, 0)
        }
        for _ = range files {
            n, _ := q.Pop()

@@ -10,6 +10,8 @@ import (
    "fmt"
    "math/rand"
    "time"

    "github.com/syncthing/syncthing/internal/sync"
)

type roFolder struct {
@@ -23,11 +25,14 @@ type roFolder struct {

func newROFolder(model *Model, folder string, interval time.Duration) *roFolder {
    return &roFolder{
        stateTracker: stateTracker{folder: folder},
        folder:       folder,
        intv:         interval,
        model:        model,
        stop:         make(chan struct{}),
        stateTracker: stateTracker{
            folder: folder,
            mut:    sync.NewMutex(),
        },
        folder: folder,
        intv:   interval,
        model:  model,
        stop:   make(chan struct{}),
    }
}

@@ -67,8 +72,8 @@ func (s *roFolder) Serve() {
    // Potentially sets the error twice, once in the scanner just
    // by doing a check, and once here, if the error returned is
    // the same one as returned by CheckFolderHealth, though
    // duplicate set is handled by SetFolderError
    s.model.cfg.SetFolderError(s.folder, err)
    // duplicate set is handled by setError.
    s.setError(err)
    reschedule()
    continue
}

@@ -13,7 +13,6 @@ import (
    "math/rand"
    "os"
    "path/filepath"
    "sync"
    "time"

    "github.com/syncthing/protocol"
@@ -24,6 +23,7 @@ import (
    "github.com/syncthing/syncthing/internal/osutil"
    "github.com/syncthing/syncthing/internal/scanner"
    "github.com/syncthing/syncthing/internal/symlinks"
    "github.com/syncthing/syncthing/internal/sync"
    "github.com/syncthing/syncthing/internal/versioner"
)

@@ -68,15 +68,20 @@ type rwFolder struct {
    lenientMtimes bool
    copiers       int
    pullers       int
    shortID       uint64
    order         config.PullOrder

    stop      chan struct{}
    queue     *jobQueue
    dbUpdates chan protocol.FileInfo
}

func newRWFolder(m *Model, cfg config.FolderConfiguration) *rwFolder {
func newRWFolder(m *Model, shortID uint64, cfg config.FolderConfiguration) *rwFolder {
    return &rwFolder{
        stateTracker: stateTracker{folder: cfg.ID},
        stateTracker: stateTracker{
            folder: cfg.ID,
            mut:    sync.NewMutex(),
        },

        model:           m,
        progressEmitter: m.progressEmitter,
@@ -88,6 +93,8 @@ func newRWFolder(m *Model, cfg config.FolderConfiguration) *rwFolder {
        lenientMtimes: cfg.LenientMtimes,
        copiers:       cfg.Copiers,
        pullers:       cfg.Pullers,
        shortID:       shortID,
        order:         cfg.Order,

        stop:  make(chan struct{}),
        queue: newJobQueue(),
@@ -116,6 +123,11 @@ func (p *rwFolder) Serve() {
    var prevIgnoreHash string

    rescheduleScan := func() {
        if p.scanIntv == 0 {
            // We should not run scans, so it should not be rescheduled.
            return
        }

        // Sleep a random time between 3/4 and 5/4 of the configured interval.
        sleepNanos := (p.scanIntv.Nanoseconds()*3 + rand.Int63n(2*p.scanIntv.Nanoseconds())) / 4
        intv := time.Duration(sleepNanos) * time.Nanosecond
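The bounds in that comment check out: with r drawn uniformly from [0, 2·intv), the expression (3·intv + r)/4 ranges over [0.75·intv, 1.25·intv). A standalone check, with an illustrative 60-second interval:

package main

import (
    "fmt"
    "math/rand"
    "time"
)

func main() {
    intv := 60 * time.Second
    for i := 0; i < 3; i++ {
        ns := (intv.Nanoseconds()*3 + rand.Int63n(2*intv.Nanoseconds())) / 4
        fmt.Println(time.Duration(ns)) // always within [45s, 75s)
    }
}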
@@ -243,8 +255,8 @@ func (p *rwFolder) Serve() {
    // Potentially sets the error twice, once in the scanner just
    // by doing a check, and once here, if the error returned is
    // the same one as returned by CheckFolderHealth, though
    // duplicate set is handled by SetFolderError
    p.model.cfg.SetFolderError(p.folder, err)
    // duplicate set is handled by setError.
    p.setError(err)
    rescheduleScan()
    continue
}
@@ -277,10 +289,10 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
    copyChan := make(chan copyBlocksState)
    finisherChan := make(chan *sharedPullerState)

    var updateWg sync.WaitGroup
    var copyWg sync.WaitGroup
    var pullWg sync.WaitGroup
    var doneWg sync.WaitGroup
    updateWg := sync.NewWaitGroup()
    copyWg := sync.NewWaitGroup()
    pullWg := sync.NewWaitGroup()
    doneWg := sync.NewWaitGroup()

    if debug {
        l.Debugln(p, "c", p.copiers, "p", p.pullers)
@@ -336,13 +348,9 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
    buckets := map[string][]protocol.FileInfo{}

    folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {

        // Needed items are delivered sorted lexicographically. This isn't
        // really optimal from a performance point of view - it would be
        // better if files were handled in random order, to spread the load
        // over the cluster. But it means that we can be sure that we fully
        // handle directories before the files that go inside them, which is
        // nice.
        // Needed items are delivered sorted lexicographically. We'll handle
        // directories as they come along, so parents before children. Files
        // are queued and the order may be changed later.

        file := intf.(protocol.FileInfo)

@@ -382,13 +390,32 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
        default:
            // A new or changed file or symlink. This is the only case where we
            // do stuff concurrently in the background
            p.queue.Push(file.Name)
            p.queue.Push(file.Name, file.Size(), file.Modified)
        }

        changed++
        return true
    })

    // Reorder the file queue according to configuration

    switch p.order {
    case config.OrderRandom:
        p.queue.Shuffle()
    case config.OrderAlphabetic:
        // The queue is already in alphabetic order.
    case config.OrderSmallestFirst:
        p.queue.SortSmallestFirst()
    case config.OrderLargestFirst:
        p.queue.SortLargestFirst()
    case config.OrderOldestFirst:
        p.queue.SortOldestFirst()
    case config.OrderNewestFirst:
        p.queue.SortNewestFirst()
    }

    // Process the file queue

nextFile:
    for {
        fileName, ok := p.queue.Pop()
@@ -474,15 +501,19 @@ nextFile:
func (p *rwFolder) handleDir(file protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
        "folder":  p.folder,
        "item":    file.Name,
        "details": db.ToTruncated(file),
        "folder": p.folder,
        "item":   file.Name,
        "type":   "dir",
        "action": "update",
    })

    defer func() {
        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item":   file.Name,
            "error":  err,
            "type":   "dir",
            "action": "update",
        })
    }()

@@ -497,13 +528,13 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
        l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
    }

    info, err := os.Lstat(realName)
    info, err := osutil.Lstat(realName)
    switch {
    // There is already something under that name, but it's a file/link.
    // Most likely a file/link is getting replaced with a directory.
    // Remove the file/link and fall through to directory creation.
    case err == nil && (!info.IsDir() || info.Mode()&os.ModeSymlink != 0):
        err = osutil.InWritableDir(os.Remove, realName)
        err = osutil.InWritableDir(osutil.Remove, realName)
        if err != nil {
            l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
            return
@@ -553,15 +584,18 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
func (p *rwFolder) deleteDir(file protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
        "folder":  p.folder,
        "item":    file.Name,
        "details": db.ToTruncated(file),
        "folder": p.folder,
        "item":   file.Name,
        "type":   "dir",
        "action": "delete",
    })
    defer func() {
        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item":   file.Name,
            "error":  err,
            "type":   "dir",
            "action": "delete",
        })
    }()

@@ -572,11 +606,11 @@ func (p *rwFolder) deleteDir(file protocol.FileInfo) {
    files, _ := dir.Readdirnames(-1)
    for _, file := range files {
        if defTempNamer.IsTemporary(file) {
            osutil.InWritableDir(os.Remove, filepath.Join(realName, file))
            osutil.InWritableDir(osutil.Remove, filepath.Join(realName, file))
        }
    }
}
err = osutil.InWritableDir(os.Remove, realName)
err = osutil.InWritableDir(osutil.Remove, realName)
if err == nil || os.IsNotExist(err) {
    p.dbUpdates <- file
} else {
@@ -588,28 +622,34 @@ func (p *rwFolder) deleteDir(file protocol.FileInfo) {
func (p *rwFolder) deleteFile(file protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
        "folder":  p.folder,
        "item":    file.Name,
        "details": db.ToTruncated(file),
        "folder": p.folder,
        "item":   file.Name,
        "type":   "file",
        "action": "delete",
    })
    defer func() {
        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item":   file.Name,
            "error":  err,
            "type":   "file",
            "action": "delete",
        })
    }()

    realName := filepath.Join(p.dir, file.Name)

    cur, ok := p.model.CurrentFolderFile(p.folder, file.Name)
    if ok && cur.Version.Concurrent(file.Version) {
        // There is a conflict here. Move the file to a conflict copy instead of deleting.
    if ok && p.inConflict(cur.Version, file.Version) {
        // There is a conflict here. Move the file to a conflict copy instead
        // of deleting. Also merge with the version vector we had, to indicate
        // we have resolved the conflict.
        file.Version = file.Version.Merge(cur.Version)
        err = osutil.InWritableDir(moveForConflict, realName)
    } else if p.versioner != nil {
        err = osutil.InWritableDir(p.versioner.Archive, realName)
    } else {
        err = osutil.InWritableDir(os.Remove, realName)
        err = osutil.InWritableDir(osutil.Remove, realName)
    }

    if err != nil && !os.IsNotExist(err) {
@@ -624,25 +664,31 @@ func (p *rwFolder) deleteFile(file protocol.FileInfo) {
func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
        "folder":  p.folder,
        "item":    source.Name,
        "details": db.ToTruncated(source),
        "folder": p.folder,
        "item":   source.Name,
        "type":   "file",
        "action": "delete",
    })
    events.Default.Log(events.ItemStarted, map[string]interface{}{
        "folder":  p.folder,
        "item":    target.Name,
        "details": db.ToTruncated(source),
        "folder": p.folder,
        "item":   target.Name,
        "type":   "file",
        "action": "update",
    })
    defer func() {
        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item":   source.Name,
            "error":  err,
            "type":   "file",
            "action": "delete",
        })
        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item":   target.Name,
            "error":  err,
            "type":   "file",
            "action": "update",
        })
    }()

@@ -679,7 +725,7 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
    // get rid of. Attempt to delete it instead so that we make *some*
    // progress. The target is unhandled.

    err = osutil.InWritableDir(os.Remove, from)
    err = osutil.InWritableDir(osutil.Remove, from)
    if err != nil {
        l.Infof("Puller (folder %q, file %q): delete %q after failed rename: %v", p.folder, target.Name, source.Name, err)
        return
@@ -693,9 +739,10 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
    events.Default.Log(events.ItemStarted, map[string]interface{}{
        "folder":  p.folder,
        "item":    file.Name,
        "details": db.ToTruncated(file),
        "folder": p.folder,
        "item":   file.Name,
        "type":   "file",
        "action": "update",
    })

    curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)
@@ -718,6 +765,8 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
    "folder": p.folder,
    "item":   file.Name,
    "error":  err,
    "type":   "file",
    "action": "update",
})
return
}
@@ -775,6 +824,7 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
    reused:      reused,
    ignorePerms: p.ignorePerms,
    version:     curFile.Version,
    mut:         sync.NewMutex(),
}

if debug {
@@ -816,6 +866,12 @@ func (p *rwFolder) shortcutFile(file protocol.FileInfo) (err error) {
        }
    }

    // This may have been a conflict. We should merge the version vectors so
    // that our clock doesn't move backwards.
    if cur, ok := p.model.CurrentFolderFile(p.folder, file.Name); ok {
        file.Version = file.Version.Merge(cur.Version)
    }

    p.dbUpdates <- file
    return
}
@@ -983,6 +1039,8 @@ func (p *rwFolder) performFinish(state *sharedPullerState) {
    "folder": p.folder,
    "item":   state.file.Name,
    "error":  err,
    "type":   "file",
    "action": "update",
})
}()

@@ -1011,10 +1069,12 @@ func (p *rwFolder) performFinish(state *sharedPullerState) {
    }
}

if state.version.Concurrent(state.file.Version) {
if p.inConflict(state.version, state.file.Version) {
    // The new file has been changed in conflict with the existing one. We
    // should file it away as a conflict instead of just removing or
    // archiving.
    // archiving. Also merge with the version vector we had, to indicate
    // we have resolved the conflict.
    state.file.Version = state.file.Version.Merge(state.version)
    err = osutil.InWritableDir(moveForConflict, state.realName)
} else if p.versioner != nil {
    // If we should use versioning, let the versioner archive the old
@@ -1031,9 +1091,9 @@ func (p *rwFolder) performFinish(state *sharedPullerState) {

    // If the target path is a symlink or a directory, we cannot copy
    // over it, hence remove it before proceeding.
    stat, err := os.Lstat(state.realName)
    stat, err := osutil.Lstat(state.realName)
    if err == nil && (stat.IsDir() || stat.Mode()&os.ModeSymlink != 0) {
        osutil.InWritableDir(os.Remove, state.realName)
        osutil.InWritableDir(osutil.Remove, state.realName)
    }
    // Replace the original content with the new one
    err = osutil.Rename(state.tempName, state.realName)
@@ -1084,6 +1144,8 @@ func (p *rwFolder) finisherRoutine(in <-chan *sharedPullerState) {
    "folder": p.folder,
    "item":   state.file.Name,
    "error":  state.failed(),
    "type":   "file",
    "action": "update",
})
}
p.model.receivedFile(p.folder, state.file.Name)
@@ -1144,6 +1206,22 @@ loop:
    }
}

func (p *rwFolder) inConflict(current, replacement protocol.Vector) bool {
    if current.Concurrent(replacement) {
        // Obvious case
        return true
    }
    if replacement.Counter(p.shortID) > current.Counter(p.shortID) {
        // The replacement file contains a higher version for ourselves than
        // what we have. This isn't supposed to be possible, since it's only
        // we who can increment that counter. We take it as a sign that
        // something is wrong (our index may have been corrupted or removed)
        // and flag it as a conflict.
        return true
    }
    return false
}
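inConflict builds on the version-vector operations already used in this diff (Update, Merge, Concurrent, Counter). A small in-package sketch of the obvious case, using hypothetical device IDs:

var a, b protocol.Vector
a = a.Update(1) // device 1 edits the file: {1: 1}
b = b.Update(2) // device 2 edits the same file independently: {2: 1}

// Neither vector dominates the other, so the edits are concurrent —
// exactly the "obvious case" that inConflict reports as a conflict.
conflict := a.Concurrent(b) // true

// Resolving merges both histories, so our clock never moves backwards.
merged := a.Merge(b) // {1: 1, 2: 1}
_, _ = conflict, merged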

func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
    for i := range cfg.Folders {
        folder := &cfg.Folders[i]

@@ -393,7 +393,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
    }

    // queue.Done should be called by the finisher routine
    p.queue.Push("filex")
    p.queue.Push("filex", 0, 0)
    p.queue.Pop()

    if len(p.queue.progress) != 1 {
@@ -480,7 +480,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
    }

    // queue.Done should be called by the finisher routine
    p.queue.Push("filex")
    p.queue.Push("filex", 0, 0)
    p.queue.Pop()

    if len(p.queue.progress) != 1 {

@@ -10,10 +10,10 @@ import (
    "io"
    "os"
    "path/filepath"
    "sync"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/db"
    "github.com/syncthing/syncthing/internal/sync"
)

// A sharedPullerState is kept for each file that is being synced and is kept
@@ -59,8 +59,8 @@ type lockedWriterAt struct {
}

func (w lockedWriterAt) WriteAt(p []byte, off int64) (n int, err error) {
    w.mut.Lock()
    defer w.mut.Unlock()
    (*w.mut).Lock()
    defer (*w.mut).Unlock()
    return w.wr.WriteAt(p, off)
}
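The extra parentheses above are forced by the type change: mut is now a pointer to the new package's Mutex interface rather than a concrete sync.Mutex, and Go does not auto-dereference a pointer to an interface for method calls. In miniature, with hypothetical names:

package main

import "sync"

type locker interface {
    Lock()
    Unlock()
}

type writer struct {
    mut *locker // pointer to an interface — it has no method set of its own
}

func main() {
    var l locker = &sync.Mutex{}
    w := writer{mut: &l}
    (*w.mut).Lock() // w.mut.Lock() would not compile
    (*w.mut).Unlock()
}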

@@ -9,11 +9,14 @@ package model
import (
    "os"
    "testing"

    "github.com/syncthing/syncthing/internal/sync"
)

func TestSourceFileOK(t *testing.T) {
    s := sharedPullerState{
        realName: "testdata/foo",
        mut:      sync.NewMutex(),
    }

    fd, err := s.sourceFile()
@@ -42,6 +45,7 @@ func TestSourceFileOK(t *testing.T) {
func TestSourceFileBad(t *testing.T) {
    s := sharedPullerState{
        realName: "nonexistent",
        mut:      sync.NewMutex(),
    }

    fd, err := s.sourceFile()
@@ -67,6 +71,7 @@ func TestReadOnlyDir(t *testing.T) {

    s := sharedPullerState{
        tempName: "testdata/read_only_dir/.temp_name",
        mut:      sync.NewMutex(),
    }

    fd, err := s.tempFile()
@@ -78,4 +83,5 @@
    }

    s.fail("Test done", nil)
    s.finalClose()
}

29
internal/osutil/lstat_broken.go
Normal file
@@ -0,0 +1,29 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build linux android

package osutil

import (
    "os"
    "syscall"
    "time"
)

// Lstat is like os.Lstat, except lobotomized for Android. See
// https://forum.syncthing.net/t/2395
func Lstat(name string) (fi os.FileInfo, err error) {
    for i := 0; i < 10; i++ { // We have to draw the line somewhere
        fi, err = os.Lstat(name)
        if err, ok := err.(*os.PathError); ok && err.Err == syscall.EINTR {
            time.Sleep(time.Duration(i+1) * time.Millisecond)
            continue
        }
        return
    }
    return
}
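These two build-tagged files give osutil.Lstat the same signature everywhere: the Linux/Android variant retries on EINTR with a short, growing sleep, while every other platform passes straight through to os.Lstat. That is what lets the scanner, puller, and symlink changes elsewhere in this diff swap os.Lstat for osutil.Lstat without further ceremony.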
15
internal/osutil/lstat_ok.go
Normal file
@@ -0,0 +1,15 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build !linux,!android

package osutil

import "os"

func Lstat(name string) (fi os.FileInfo, err error) {
    return os.Lstat(name)
}
@@ -15,14 +15,15 @@ import (
    "path/filepath"
    "runtime"
    "strings"
    "sync"

    "github.com/syncthing/syncthing/internal/sync"
)

var ErrNoHome = errors.New("No home directory found - set $HOME (or the platform equivalent).")

// Try to keep this entire operation atomic-like. We shouldn't be doing this
// often enough that there is any contention on this lock.
var renameLock sync.Mutex
var renameLock sync.Mutex = sync.NewMutex()

// TryRename renames a file, leaving source file intact in case of failure.
// Tries hard to succeed on various systems by temporarily tweaking directory
@@ -88,6 +89,20 @@ func InWritableDir(fn func(string) error, path string) error {
    return fn(path)
}

// On Windows, removes the read-only attribute from the target prior to deletion.
func Remove(path string) error {
    if runtime.GOOS == "windows" {
        info, err := os.Stat(path)
        if err != nil {
            return err
        }
        if info.Mode()&0200 == 0 {
            os.Chmod(path, 0700)
        }
    }
    return os.Remove(path)
}

func ExpandTilde(path string) (string, error) {
    if path == "~" {
        return getHomeDir()

@@ -8,6 +8,7 @@ package osutil_test

import (
    "os"
    "runtime"
    "testing"

    "github.com/syncthing/syncthing/internal/osutil"
@@ -68,3 +69,97 @@ func TestInWriteableDir(t *testing.T) {
        t.Error("testdata/file/foo returned nil error")
    }
}

func TestInWritableDirWindowsRemove(t *testing.T) {
    if runtime.GOOS != "windows" {
        t.Skipf("Tests not required")
        return
    }

    err := os.RemoveAll("testdata")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll("testdata")

    create := func(name string) error {
        fd, err := os.Create(name)
        if err != nil {
            return err
        }
        fd.Close()
        return nil
    }

    os.Mkdir("testdata", 0700)

    os.Mkdir("testdata/windows", 0500)
    os.Mkdir("testdata/windows/ro", 0500)
    create("testdata/windows/ro/readonly")
    os.Chmod("testdata/windows/ro/readonly", 0500)

    for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
        err := os.Remove(path)
        if err == nil {
            t.Errorf("Expected error %s", path)
        }
    }

    for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
        err := osutil.InWritableDir(osutil.Remove, path)
        if err != nil {
            t.Errorf("Unexpected error %s: %s", path, err)
        }
    }
}

func TestInWritableDirWindowsRename(t *testing.T) {
    if runtime.GOOS != "windows" {
        t.Skipf("Tests not required")
        return
    }

    err := os.RemoveAll("testdata")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll("testdata")

    create := func(name string) error {
        fd, err := os.Create(name)
        if err != nil {
            return err
        }
        fd.Close()
        return nil
    }

    os.Mkdir("testdata", 0700)

    os.Mkdir("testdata/windows", 0500)
    os.Mkdir("testdata/windows/ro", 0500)
    create("testdata/windows/ro/readonly")
    os.Chmod("testdata/windows/ro/readonly", 0500)

    for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
        err := os.Rename(path, path+"new")
        if err == nil {
            t.Errorf("Expected error %s", path)
        }
    }

    rename := func(path string) error {
        return osutil.Rename(path, path+"new")
    }

    for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
        err := osutil.InWritableDir(rename, path)
        if err != nil {
            t.Errorf("Unexpected error %s: %s", path, err)
        }
        _, err = os.Stat(path + "new")
        if err != nil {
            t.Errorf("Unexpected error %s: %s", path, err)
        }
    }
}

@@ -9,9 +9,9 @@ package scanner
import (
    "os"
    "path/filepath"
    "sync"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/sync"
)

// The parallel hasher reads FileInfo structures from the inbox, hashes the
@@ -20,7 +20,7 @@ import (
// is closed and all items handled.

func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo) {
    var wg sync.WaitGroup
    wg := sync.NewWaitGroup()
    wg.Add(workers)

    for i := 0; i < workers; i++ {

@@ -17,6 +17,7 @@ import (

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/ignore"
    "github.com/syncthing/syncthing/internal/osutil"
    "github.com/syncthing/syncthing/internal/symlinks"
    "golang.org/x/text/unicode/norm"
)
@@ -193,7 +194,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun

// We will attempt to normalize it.
normalizedPath := filepath.Join(w.Dir, normalizedRn)
if _, err := os.Lstat(normalizedPath); os.IsNotExist(err) {
if _, err := osutil.Lstat(normalizedPath); os.IsNotExist(err) {
    // Nothing exists with the normalized filename. Good.
    if err = os.Rename(p, normalizedPath); err != nil {
        l.Infof(`Error normalizing UTF8 encoding of file "%s": %v`, rn, err)
@@ -356,7 +357,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
}

func checkDir(dir string) error {
    if info, err := os.Lstat(dir); err != nil {
    if info, err := osutil.Lstat(dir); err != nil {
        return err
    } else if !info.IsDir() {
        return errors.New(dir + ": not a directory")

@@ -203,6 +203,14 @@ func TestNormalization(t *testing.T) {
    "5-\xCD\xE2", // EUC-CN "wài" (外) -- ignored (not UTF8)
}
numInvalid := 2

if runtime.GOOS == "windows" {
    // On Windows, in case 5 the character gets replaced with a
    // replacement character \xEF\xBF\xBD at the point it's written to disk,
    // which means it suddenly becomes valid (sort of).
    numInvalid--
}

numValid := len(tests) - numInvalid

for _, s1 := range tests {

@@ -60,7 +60,7 @@ func init() {
    return
}

stat, err := os.Lstat(path)
stat, err := osutil.Lstat(path)
if err != nil || stat.Mode()&os.ModeSymlink == 0 {
    return
}
31
internal/sync/debug.go
Normal file
@@ -0,0 +1,31 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package sync

import (
    "os"
    "strconv"
    "strings"
    "time"

    "github.com/calmh/logger"
)

var (
    debug     = strings.Contains(os.Getenv("STTRACE"), "locks") || os.Getenv("STTRACE") == "all"
    threshold = time.Duration(100 * time.Millisecond)
    l         = logger.DefaultLogger
)

func init() {
    if n, err := strconv.Atoi(os.Getenv("STLOCKTHRESHOLD")); debug && err == nil {
        threshold = time.Duration(n) * time.Millisecond
    }
    if debug {
        l.Debugf("Enabling lock logging at %v threshold", threshold)
    }
}
141
internal/sync/sync.go
Normal file
@@ -0,0 +1,141 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package sync

import (
    "fmt"
    "path/filepath"
    "runtime"
    "strings"
    "sync"
    "sync/atomic"
    "time"
)

type Mutex interface {
    Lock()
    Unlock()
}

type RWMutex interface {
    Mutex
    RLock()
    RUnlock()
}

type WaitGroup interface {
    Add(int)
    Done()
    Wait()
}

func NewMutex() Mutex {
    if debug {
        return &loggedMutex{}
    }
    return &sync.Mutex{}
}

func NewRWMutex() RWMutex {
    if debug {
        return &loggedRWMutex{
            unlockers: make([]string, 0),
        }
    }
    return &sync.RWMutex{}
}

func NewWaitGroup() WaitGroup {
    if debug {
        return &loggedWaitGroup{}
    }
    return &sync.WaitGroup{}
}
||||
|
||||
type loggedMutex struct {
|
||||
sync.Mutex
|
||||
start time.Time
|
||||
lockedAt string
|
||||
}
|
||||
|
||||
func (m *loggedMutex) Lock() {
|
||||
m.Mutex.Lock()
|
||||
m.start = time.Now()
|
||||
m.lockedAt = getCaller()
|
||||
}
|
||||
|
||||
func (m *loggedMutex) Unlock() {
|
||||
duration := time.Now().Sub(m.start)
|
||||
if duration >= threshold {
|
||||
l.Debugf("Mutex held for %v. Locked at %s unlocked at %s", duration, m.lockedAt, getCaller())
|
||||
}
|
||||
m.Mutex.Unlock()
|
||||
}
|
||||
|
||||
type loggedRWMutex struct {
|
||||
sync.RWMutex
|
||||
start time.Time
|
||||
lockedAt string
|
||||
|
||||
logUnlockers uint32
|
||||
|
||||
unlockers []string
|
||||
unlockersMut sync.Mutex
|
||||
}
|
||||
|
||||
func (m *loggedRWMutex) Lock() {
|
||||
start := time.Now()
|
||||
|
||||
atomic.StoreUint32(&m.logUnlockers, 1)
|
||||
m.RWMutex.Lock()
|
||||
m.logUnlockers = 0
|
||||
|
||||
m.start = time.Now()
|
||||
duration := m.start.Sub(start)
|
||||
|
||||
m.lockedAt = getCaller()
|
||||
if duration > threshold {
|
||||
l.Debugf("RWMutex took %v to lock. Locked at %s. RUnlockers while locking: %s", duration, m.lockedAt, strings.Join(m.unlockers, ", "))
|
||||
}
|
||||
m.unlockers = m.unlockers[0:]
|
||||
}
|
||||
|
||||
func (m *loggedRWMutex) Unlock() {
|
||||
duration := time.Now().Sub(m.start)
|
||||
if duration >= threshold {
|
||||
l.Debugf("RWMutex held for %v. Locked at %s: unlocked at %s", duration, m.lockedAt, getCaller())
|
||||
}
|
||||
m.RWMutex.Unlock()
|
||||
}
|
||||
|
||||
func (m *loggedRWMutex) RUnlock() {
|
||||
if atomic.LoadUint32(&m.logUnlockers) == 1 {
|
||||
m.unlockersMut.Lock()
|
||||
m.unlockers = append(m.unlockers, getCaller())
|
||||
m.unlockersMut.Unlock()
|
||||
}
|
||||
m.RWMutex.RUnlock()
|
||||
}
|
||||
|
||||
type loggedWaitGroup struct {
|
||||
sync.WaitGroup
|
||||
}
|
||||
|
||||
func (wg *loggedWaitGroup) Wait() {
|
||||
start := time.Now()
|
||||
wg.WaitGroup.Wait()
|
||||
duration := time.Now().Sub(start)
|
||||
if duration >= threshold {
|
||||
l.Debugf("WaitGroup took %v at %s", duration, getCaller())
|
||||
}
|
||||
}
|
||||
|
||||
func getCaller() string {
|
||||
_, file, line, _ := runtime.Caller(2)
|
||||
file = filepath.Join(filepath.Base(filepath.Dir(file)), filepath.Base(file))
|
||||
return fmt.Sprintf("%s:%d", file, line)
|
||||
}
|
||||
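Note: together with debug.go above, this package hands out instrumented primitives only when lock tracing is on. A minimal sketch of caller-side usage, assuming the process runs with STTRACE=locks (and optionally STLOCKTHRESHOLD, in milliseconds); the critical section is hypothetical:

package main

import (
	"time"

	"github.com/syncthing/syncthing/internal/sync"
)

func main() {
	// With STTRACE=locks set, NewMutex returns the logged variant; holds
	// longer than the threshold are reported with caller file:line info.
	mut := sync.NewMutex()

	mut.Lock()
	time.Sleep(200 * time.Millisecond) // hypothetical slow critical section
	mut.Unlock()                       // triggers a "Mutex held for ..." debug log
}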
internal/sync/sync_test.go (new file, 185 lines)
@@ -0,0 +1,185 @@
+// Copyright (C) 2015 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package sync
+
+import (
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/calmh/logger"
+)
+
+const (
+	logThreshold = 100 * time.Millisecond
+	shortWait    = 5 * time.Millisecond
+	longWait     = 125 * time.Millisecond
+)
+
+func TestTypes(t *testing.T) {
+	debug = false
+
+	if _, ok := NewMutex().(*sync.Mutex); !ok {
+		t.Error("Wrong type")
+	}
+
+	if _, ok := NewRWMutex().(*sync.RWMutex); !ok {
+		t.Error("Wrong type")
+	}
+
+	if _, ok := NewWaitGroup().(*sync.WaitGroup); !ok {
+		t.Error("Wrong type")
+	}
+
+	debug = true
+
+	if _, ok := NewMutex().(*loggedMutex); !ok {
+		t.Error("Wrong type")
+	}
+
+	if _, ok := NewRWMutex().(*loggedRWMutex); !ok {
+		t.Error("Wrong type")
+	}
+
+	if _, ok := NewWaitGroup().(*loggedWaitGroup); !ok {
+		t.Error("Wrong type")
+	}
+
+	debug = false
+}
+
+func TestMutex(t *testing.T) {
+	debug = true
+	threshold = logThreshold
+
+	msgmut := sync.Mutex{}
+	messages := make([]string, 0)
+
+	l.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {
+		msgmut.Lock()
+		messages = append(messages, message)
+		msgmut.Unlock()
+	})
+
+	mut := NewMutex()
+	mut.Lock()
+	time.Sleep(shortWait)
+	mut.Unlock()
+
+	if len(messages) > 0 {
+		t.Errorf("Unexpected message count")
+	}
+
+	mut.Lock()
+	time.Sleep(longWait)
+	mut.Unlock()
+
+	if len(messages) != 1 {
+		t.Errorf("Unexpected message count")
+	}
+
+	debug = false
+}
+
+func TestRWMutex(t *testing.T) {
+	debug = true
+	threshold = logThreshold
+
+	msgmut := sync.Mutex{}
+	messages := make([]string, 0)
+
+	l.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {
+		msgmut.Lock()
+		messages = append(messages, message)
+		msgmut.Unlock()
+	})
+
+	mut := NewRWMutex()
+	mut.Lock()
+	time.Sleep(shortWait)
+	mut.Unlock()
+
+	if len(messages) > 0 {
+		t.Errorf("Unexpected message count")
+	}
+
+	mut.Lock()
+	time.Sleep(longWait)
+	mut.Unlock()
+
+	if len(messages) != 1 {
+		t.Errorf("Unexpected message count")
+	}
+
+	// Testing rlocker logging
+	mut.RLock()
+	go func() {
+		time.Sleep(longWait)
+		mut.RUnlock()
+	}()
+
+	mut.Lock()
+	mut.Unlock()
+
+	if len(messages) != 2 {
+		t.Errorf("Unexpected message count")
+	}
+	if !strings.Contains(messages[1], "RUnlockers while locking: sync") || !strings.Contains(messages[1], "sync_test.go:") {
+		t.Error("Unexpected message")
+	}
+
+	// Testing multiple rlockers
+	mut.RLock()
+	mut.RLock()
+	mut.RLock()
+	mut.RUnlock()
+	mut.RUnlock()
+	mut.RUnlock()
+
+	debug = false
+}
+
+func TestWaitGroup(t *testing.T) {
+	debug = true
+	threshold = logThreshold
+
+	msgmut := sync.Mutex{}
+	messages := make([]string, 0)
+
+	l.AddHandler(logger.LevelDebug, func(_ logger.LogLevel, message string) {
+		msgmut.Lock()
+		messages = append(messages, message)
+		msgmut.Unlock()
+	})
+
+	wg := NewWaitGroup()
+	wg.Add(1)
+	go func() {
+		time.Sleep(shortWait)
+		wg.Done()
+	}()
+	wg.Wait()
+
+	if len(messages) > 0 {
+		t.Errorf("Unexpected message count")
+	}
+
+	wg = NewWaitGroup()
+	wg.Add(1)
+	go func() {
+		time.Sleep(longWait)
+		wg.Done()
+	}()
+	wg.Wait()
+
+	if len(messages) != 1 {
+		t.Errorf("Unexpected message count")
+	}
+
+	debug = false
+}
@@ -27,21 +27,21 @@ import (
 	"strings"
 )

-// Returns the latest release, including prereleases or not depending on the argument
-func LatestGithubRelease(version string) (Release, error) {
-	resp, err := http.Get("https://api.github.com/repos/syncthing/syncthing/releases?per_page=10")
+// Returns the latest releases, including prereleases or not depending on the argument
+func LatestGithubReleases(version string) ([]Release, error) {
+	resp, err := http.Get("https://api.github.com/repos/syncthing/syncthing/releases?per_page=30")
 	if err != nil {
-		return Release{}, err
+		return nil, err
 	}
 	if resp.StatusCode > 299 {
-		return Release{}, fmt.Errorf("API call returned HTTP error: %s", resp.Status)
+		return nil, fmt.Errorf("API call returned HTTP error: %s", resp.Status)
 	}

 	var rels []Release
 	json.NewDecoder(resp.Body).Decode(&rels)
 	resp.Body.Close()

-	return LatestRelease(version, rels)
+	return rels, nil
 }

 type SortByRelease []Release

@@ -56,7 +56,12 @@ func (s SortByRelease) Less(i, j int) bool {
 	return CompareVersions(s[i].Tag, s[j].Tag) > 0
 }

-func LatestRelease(version string, rels []Release) (Release, error) {
+func LatestRelease(version string) (Release, error) {
+	rels, _ := LatestGithubReleases(version)
+	return SelectLatestRelease(version, rels)
+}
+
+func SelectLatestRelease(version string, rels []Release) (Release, error) {
 	if len(rels) == 0 {
 		return Release{}, ErrVersionUnknown
 	}
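Note: the split above separates the network fetch (LatestGithubReleases) from the pure selection step (SelectLatestRelease), so selection can be exercised against canned data, as the test changes below do with testdata. A minimal hypothetical test in the same package, assuming selection simply picks the highest comparable tag here:

package upgrade

import "testing"

// Hypothetical sketch mirroring TestGithubRelease below, but with an
// inline release list instead of testdata.
func TestSelectFromCannedReleases(t *testing.T) {
	rels := []Release{
		{Tag: "v0.11.1"},
		{Tag: "v0.11.0"},
	}
	rel, err := SelectLatestRelease("v0.11.0", rels)
	if err != nil {
		t.Fatal(err)
	}
	if rel.Tag != "v0.11.1" {
		t.Errorf("expected v0.11.1, got %s", rel.Tag)
	}
}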
@@ -4,6 +4,8 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this file,
 // You can obtain one at http://mozilla.org/MPL/2.0/.

+// +build !noupgrade
+
 package upgrade

 import (

@@ -65,7 +67,7 @@ var upgrades = map[string]string{
 	"v0.11.0-beta0+40-g53cb66e-dirty": "v0.11.0-beta0",
 }

-func TestRelease(t *testing.T) {
+func TestGithubRelease(t *testing.T) {
 	fd, err := os.Open("testdata/github-releases.json")
 	if err != nil {
 		t.Errorf("Missing github-release test data")

@@ -76,7 +78,7 @@ func TestRelease(t *testing.T) {
 	json.NewDecoder(fd).Decode(&rels)

 	for old, target := range upgrades {
-		upgrade, err := LatestRelease(old, rels)
+		upgrade, err := SelectLatestRelease(old, rels)
 		if err != nil {
 			t.Error("Error retrieving latest version", err)
 		}

@@ -85,3 +87,10 @@ func TestRelease(t *testing.T) {
 		}
 	}
 }
+
+func TestErrorRelease(t *testing.T) {
+	_, err := SelectLatestRelease("v0.11.0-beta", nil)
+	if err == nil {
+		t.Error("Should return an error when no release were available")
+	}
+}
@@ -16,6 +16,6 @@ func upgradeToURL(binary, url string) error {
 	return ErrUpgradeUnsupported
 }

-func LatestRelease(prerelease bool) (Release, error) {
+func LatestRelease(version string) (Release, error) {
 	return Release{}, ErrUpgradeUnsupported
 }
@@ -22,8 +22,9 @@ import (
 	"net/url"
 	"regexp"
 	"strings"
-	"sync"
 	"time"
+
+	"github.com/syncthing/syncthing/internal/sync"
 )

 // A container for relevant properties of a UPnP InternetGatewayDevice.

@@ -91,44 +92,71 @@ type upnpRoot struct {
 }

 // Discover discovers UPnP InternetGatewayDevices.
-// The order in which the devices appear in the result list is not deterministic.
-func Discover() []IGD {
-	var result []IGD
+// The order in which the devices appear in the results list is not deterministic.
+func Discover(timeout time.Duration) []IGD {
+	var results []IGD
 	l.Infoln("Starting UPnP discovery...")

-	timeout := 3
-
-	// Search for InternetGatewayDevice:2 devices
-	result = append(result, discover("urn:schemas-upnp-org:device:InternetGatewayDevice:2", timeout, result)...)
-
-	// Search for InternetGatewayDevice:1 devices
-	// InternetGatewayDevice:2 devices that correctly respond to the IGD:1 request as well will not be re-added to the result list
-	result = append(result, discover("urn:schemas-upnp-org:device:InternetGatewayDevice:1", timeout, result)...)
-
-	if len(result) > 0 && debug {
-		l.Debugln("UPnP discovery result:")
-		for _, resultDevice := range result {
-			l.Debugln("[" + resultDevice.uuid + "]")
-
-			for _, resultService := range resultDevice.services {
-				l.Debugln("* [" + resultService.serviceID + "] " + resultService.serviceURL)
+	interfaces, err := net.Interfaces()
+	if err != nil {
+		l.Infoln("Listing network interfaces:", err)
+		return results
+	}
+
+	resultChan := make(chan IGD, 16)
+
+	// Aggregator
+	go func() {
+	next:
+		for result := range resultChan {
+			for _, existingResult := range results {
+				if existingResult.uuid == result.uuid {
+					if debug {
+						l.Debugf("Skipping duplicate result %s with services:", result.uuid)
+						for _, svc := range result.services {
+							l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL)
+						}
+					}
+					goto next
+				}
+			}
+			results = append(results, result)
+			if debug {
+				l.Debugf("UPnP discovery result %s with services:", result.uuid)
+				for _, svc := range result.services {
+					l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL)
+				}
 			}
 		}
-	}
+	}()
+
+	wg := sync.NewWaitGroup()
+	for _, intf := range interfaces {
+		for _, deviceType := range []string{"urn:schemas-upnp-org:device:InternetGatewayDevice:1", "urn:schemas-upnp-org:device:InternetGatewayDevice:2"} {
+			wg.Add(1)
+			go func(intf net.Interface, deviceType string) {
+				discover(&intf, deviceType, timeout, resultChan)
+				wg.Done()
+			}(intf, deviceType)
+		}
+	}
+
+	wg.Wait()
+	close(resultChan)

 	suffix := "devices"
-	if len(result) == 1 {
+	if len(results) == 1 {
 		suffix = "device"
 	}

-	l.Infof("UPnP discovery complete (found %d %s).", len(result), suffix)
+	l.Infof("UPnP discovery complete (found %d %s).", len(results), suffix)

-	return result
+	return results
 }

 // Search for UPnP InternetGatewayDevices for <timeout> seconds, ignoring responses from any devices listed in knownDevices.
 // The order in which the devices appear in the result list is not deterministic
-func discover(deviceType string, timeout int, knownDevices []IGD) []IGD {
+func discover(intf *net.Interface, deviceType string, timeout time.Duration, results chan<- IGD) {
 	ssdp := &net.UDPAddr{IP: []byte{239, 255, 255, 250}, Port: 1900}

 	tpl := `M-SEARCH * HTTP/1.1

@@ -138,44 +166,41 @@ Man: "ssdp:discover"
 Mx: %d

 `
-	searchStr := fmt.Sprintf(tpl, deviceType, timeout)
+	searchStr := fmt.Sprintf(tpl, deviceType, timeout/time.Second)

 	search := []byte(strings.Replace(searchStr, "\n", "\r\n", -1))

 	if debug {
-		l.Debugln("Starting discovery of device type " + deviceType + "...")
+		l.Debugln("Starting discovery of device type " + deviceType + " on " + intf.Name)
 	}

-	var results []IGD
-	resultChannel := make(chan IGD, 8)
-
-	socket, err := net.ListenMulticastUDP("udp4", nil, &net.UDPAddr{IP: ssdp.IP})
+	socket, err := net.ListenMulticastUDP("udp4", intf, &net.UDPAddr{IP: ssdp.IP})
 	if err != nil {
-		l.Infoln(err)
-		return results
+		if debug {
+			l.Debugln(err)
+		}
+		return
 	}
 	defer socket.Close() // Make sure our socket gets closed

-	err = socket.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Second))
+	err = socket.SetDeadline(time.Now().Add(timeout))
 	if err != nil {
 		l.Infoln(err)
-		return results
+		return
 	}

 	if debug {
-		l.Debugln("Sending search request for device type " + deviceType + "...")
+		l.Debugln("Sending search request for device type " + deviceType + " on " + intf.Name)
 	}

-	var resultWaitGroup sync.WaitGroup
-
 	_, err = socket.WriteTo(search, ssdp)
 	if err != nil {
 		l.Infoln(err)
-		return results
+		return
 	}

 	if debug {
-		l.Debugln("Listening for UPnP response for device type " + deviceType + "...")
+		l.Debugln("Listening for UPnP response for device type " + deviceType + " on " + intf.Name)
 	}

 	// Listen for responses until a timeout is reached

@@ -184,69 +209,42 @@ Mx: %d
 		n, _, err := socket.ReadFrom(resp)
 		if err != nil {
 			if e, ok := err.(net.Error); !ok || !e.Timeout() {
-				l.Infoln(err) //legitimate error, not a timeout.
+				l.Infoln("UPnP read:", err) //legitimate error, not a timeout.
 			}
 			break
-		} else {
-			// Process results in a separate go routine so we can immediately return to listening for more responses
-			resultWaitGroup.Add(1)
-			go handleSearchResponse(deviceType, knownDevices, resp, n, resultChannel, &resultWaitGroup)
 		}
-	}
-
-	// Wait for all result handlers to finish processing, then close result channel
-	resultWaitGroup.Wait()
-	close(resultChannel)
-
-	// Collect our results from the result handlers using the result channel
-	for result := range resultChannel {
-		// Check for existing results (some routers send multiple response packets)
-		for _, existingResult := range results {
-			if existingResult.uuid == result.uuid {
-				if debug {
-					l.Debugln("Already processed device with UUID", existingResult.uuid, "continuing...")
-				}
-				continue
-			}
-		}
-
-		// No existing results, okay to append
-		results = append(results, result)
+		igd, err := parseResponse(deviceType, resp[:n])
+		if err != nil {
+			l.Infoln("UPnP parse:", err)
+			continue
+		}
+		results <- igd
 	}

 	if debug {
-		l.Debugln("Discovery for device type " + deviceType + " finished.")
+		l.Debugln("Discovery for device type " + deviceType + " on " + intf.Name + " finished.")
 	}
-
-	return results
 }

-func handleSearchResponse(deviceType string, knownDevices []IGD, resp []byte, length int, resultChannel chan<- IGD, resultWaitGroup *sync.WaitGroup) {
-	defer resultWaitGroup.Done() // Signal when we've finished processing
-
+func parseResponse(deviceType string, resp []byte) (IGD, error) {
 	if debug {
-		l.Debugln("Handling UPnP response:\n\n" + string(resp[:length]))
+		l.Debugln("Handling UPnP response:\n\n" + string(resp))
 	}

-	reader := bufio.NewReader(bytes.NewBuffer(resp[:length]))
+	reader := bufio.NewReader(bytes.NewBuffer(resp))
 	request := &http.Request{}
 	response, err := http.ReadResponse(reader, request)
 	if err != nil {
-		l.Infoln(err)
-		return
+		return IGD{}, err
 	}

 	respondingDeviceType := response.Header.Get("St")
 	if respondingDeviceType != deviceType {
-		l.Infoln("Unrecognized UPnP device of type " + respondingDeviceType)
-		return
+		return IGD{}, errors.New("unrecognized UPnP device of type " + respondingDeviceType)
 	}

 	deviceDescriptionLocation := response.Header.Get("Location")
 	if deviceDescriptionLocation == "" {
-		l.Infoln("Invalid IGD response: no location specified.")
-		return
+		return IGD{}, errors.New("invalid IGD response: no location specified.")
 	}

 	deviceDescriptionURL, err := url.Parse(deviceDescriptionLocation)

@@ -257,8 +255,7 @@ func handleSearchResponse(deviceType string, knownDevices []IGD, resp []byte, le

 	deviceUSN := response.Header.Get("USN")
 	if deviceUSN == "" {
-		l.Infoln("Invalid IGD response: USN not specified.")
-		return
+		return IGD{}, errors.New("invalid IGD response: USN not specified.")
 	}

 	deviceUUID := strings.TrimLeft(strings.Split(deviceUSN, "::")[0], "uuid:")

@@ -267,39 +264,25 @@ func handleSearchResponse(deviceType string, knownDevices []IGD, resp []byte, le
 		l.Infoln("Invalid IGD response: invalid device UUID", deviceUUID, "(continuing anyway)")
 	}

-	// Don't re-add devices that are already known
-	for _, knownDevice := range knownDevices {
-		if deviceUUID == knownDevice.uuid {
-			if debug {
-				l.Debugln("Ignoring known device with UUID " + deviceUUID)
-			}
-			return
-		}
-	}
-
 	response, err = http.Get(deviceDescriptionLocation)
 	if err != nil {
-		l.Infoln(err)
-		return
+		return IGD{}, err
 	}
 	defer response.Body.Close()

 	if response.StatusCode >= 400 {
-		l.Infoln(errors.New(response.Status))
-		return
+		return IGD{}, errors.New("bad status code:" + response.Status)
 	}

 	var upnpRoot upnpRoot
 	err = xml.NewDecoder(response.Body).Decode(&upnpRoot)
 	if err != nil {
-		l.Infoln(err)
-		return
+		return IGD{}, err
 	}

 	services, err := getServiceDescriptions(deviceDescriptionLocation, upnpRoot.Device)
 	if err != nil {
-		l.Infoln(err)
-		return
+		return IGD{}, err
 	}

 	// Figure out our IP number, on the network used to reach the IGD.

@@ -308,23 +291,16 @@ func handleSearchResponse(deviceType string, knownDevices []IGD, resp []byte, le
 	// suggestions on a better way to do this...
 	localIPAddress, err := localIP(deviceDescriptionURL)
 	if err != nil {
-		l.Infoln(err)
-		return
+		return IGD{}, err
 	}

-	igd := IGD{
+	return IGD{
 		uuid:           deviceUUID,
 		friendlyName:   upnpRoot.Device.FriendlyName,
 		url:            deviceDescriptionURL,
 		services:       services,
 		localIPAddress: localIPAddress,
-	}
-
-	resultChannel <- igd
-
-	if debug {
-		l.Debugln("Finished handling of UPnP response.")
-	}
+	}, nil
 }

 func localIP(url *url.URL) (string, error) {
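Note: the rewritten Discover above fans out one discover goroutine per (interface, device type) pair, funnels hits into a buffered channel, and deduplicates by UUID in a single aggregator goroutine (some routers answer a probe more than once). A standalone sketch of that fan-out/fan-in shape; all names here are illustrative, not the package's API:

package main

import (
	"fmt"
	"sync"
)

type igd struct{ uuid string }

func main() {
	interfaces := []string{"eth0", "wlan0"} // illustrative
	deviceTypes := []string{"IGD:1", "IGD:2"}

	results := make(chan igd, 16)
	var seen []igd

	done := make(chan struct{})
	go func() { // aggregator: single reader, dedup by UUID
		defer close(done)
	next:
		for r := range results {
			for _, s := range seen {
				if s.uuid == r.uuid {
					continue next // duplicate response, skip
				}
			}
			seen = append(seen, r)
		}
	}()

	var wg sync.WaitGroup
	for _, intf := range interfaces {
		for _, dt := range deviceTypes {
			wg.Add(1)
			go func(intf, dt string) { // one probe per pair
				defer wg.Done()
				results <- igd{uuid: "uuid-" + intf} // stand-in for an SSDP probe
			}(intf, dt)
		}
	}

	wg.Wait()
	close(results)
	<-done
	fmt.Println("unique devices:", len(seen))
}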
@@ -478,7 +454,7 @@ func soapRequest(url, service, function, message string) ([]byte, error) {
 	}
 	req.Header.Set("Content-Type", `text/xml; charset="utf-8"`)
 	req.Header.Set("User-Agent", "syncthing/1.0")
-	req.Header.Set("SOAPAction", fmt.Sprintf(`"%s#%s"`, service, function))
+	req.Header["SOAPAction"] = []string{fmt.Sprintf(`"%s#%s"`, service, function)} // Enforce capitalization in header-entry for sensitive routers. See issue #1696
 	req.Header.Set("Connection", "Close")
 	req.Header.Set("Cache-Control", "no-cache")
 	req.Header.Set("Pragma", "no-cache")
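Note: the SOAPAction change above works around net/http's header canonicalization: Header.Set stores and sends the key as "Soapaction", which some routers reject, while writing the map entry directly preserves the exact capitalization on the wire. A quick demonstration of the standard-library behavior (not Syncthing code):

package main

import (
	"fmt"
	"net/http"
	"net/textproto"
)

func main() {
	h := http.Header{}
	h.Set("SOAPAction", `"urn:service#fn"`)

	// Set canonicalizes the key, so only "Soapaction" exists in the map.
	fmt.Println(textproto.CanonicalMIMEHeaderKey("SOAPAction")) // Soapaction
	fmt.Println(h["Soapaction"])                                // ["urn:service#fn"]

	// Direct map assignment preserves the exact capitalization on the wire.
	h["SOAPAction"] = []string{`"urn:service#fn"`}
}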
@@ -12,6 +12,8 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strings"
+
+	"github.com/syncthing/syncthing/internal/osutil"
 )

 func init() {

@@ -43,7 +45,7 @@ func NewExternal(folderID, folderPath string, params map[string]string) Versione
 // Move away the named file to a version archive. If this function returns
 // nil, the named file does not exist any more (has been archived).
 func (v External) Archive(filePath string) error {
-	_, err := os.Lstat(filePath)
+	_, err := osutil.Lstat(filePath)
 	if os.IsNotExist(err) {
 		if debug {
 			l.Debugln("not archiving nonexistent file", filePath)

@@ -82,7 +84,7 @@ func (v External) Archive(filePath string) error {
 	}

 	// return error if the file was not removed
-	if _, err = os.Lstat(filePath); os.IsNotExist(err) {
+	if _, err = osutil.Lstat(filePath); os.IsNotExist(err) {
 		return nil
 	}
 	return errors.New("Versioner: file was not removed by external script")

@@ -46,7 +46,7 @@ func NewSimple(folderID, folderPath string, params map[string]string) Versioner
 // Move away the named file to a version archive. If this function returns
 // nil, the named file does not exist any more (has been archived).
 func (v Simple) Archive(filePath string) error {
-	fileInfo, err := os.Lstat(filePath)
+	fileInfo, err := osutil.Lstat(filePath)
 	if os.IsNotExist(err) {
 		if debug {
 			l.Debugln("not archiving nonexistent file", filePath)
@@ -11,10 +11,10 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
-	"sync"
 	"time"

 	"github.com/syncthing/syncthing/internal/osutil"
+	"github.com/syncthing/syncthing/internal/sync"
 )

 func init() {

@@ -33,7 +33,7 @@ type Staggered struct {
 	cleanInterval int64
 	folderPath    string
 	interval      [4]Interval
-	mutex         *sync.Mutex
+	mutex         sync.Mutex
 }

 // Rename versions with old version format

@@ -87,7 +87,6 @@ func NewStaggered(folderID, folderPath string, params map[string]string) Version
 		versionsDir = params["versionsPath"]
 	}

-	var mutex sync.Mutex
 	s := Staggered{
 		versionsPath:  versionsDir,
 		cleanInterval: cleanInterval,

@@ -98,7 +97,7 @@ func NewStaggered(folderID, folderPath string, params map[string]string) Version
 			{86400, 592000},  // next 30 days -> 1 day between versions
 			{604800, maxAge}, // next year -> 1 week between versions
 		},
-		mutex: &mutex,
+		mutex: sync.NewMutex(),
 	}

 	if debug {

@@ -210,7 +209,7 @@ func (v Staggered) expire(versions []string) {
 	var prevAge int64
 	firstFile := true
 	for _, file := range versions {
-		fi, err := os.Lstat(file)
+		fi, err := osutil.Lstat(file)
 		if err != nil {
 			l.Warnln("versioner:", err)
 			continue

@@ -281,7 +280,7 @@ func (v Staggered) Archive(filePath string) error {
 	v.mutex.Lock()
 	defer v.mutex.Unlock()

-	_, err := os.Lstat(filePath)
+	_, err := osutil.Lstat(filePath)
 	if os.IsNotExist(err) {
 		if debug {
 			l.Debugln("not archiving nonexistent file", filePath)
test/.gitignore (vendored, +3 lines)
@@ -17,3 +17,6 @@ s4d
 http
 h*/index*
 *.syncthing-reset*
+panic-*.log
+audit-*.log
+h*/config.xml.v*
@@ -9,6 +9,7 @@
 package integration

 import (
+	"io/ioutil"
 	"log"
 	"os"
 	"path/filepath"

@@ -47,43 +48,8 @@ func TestConflict(t *testing.T) {
 		t.Fatal(err)
 	}

-	log.Println("Starting sender...")
-	sender := syncthingProcess{ // id1
-		instance: "1",
-		argv:     []string{"-home", "h1"},
-		port:     8081,
-		apiKey:   apiKey,
-	}
-	err = sender.start()
-	if err != nil {
-		t.Fatal(err)
-	}
+	sender, receiver := coSenderReceiver(t)
 	defer sender.stop()
-
-	// Wait for one scan to succeed, or up to 20 seconds... This is to let
-	// startup, UPnP etc complete and make sure the sender has the full index
-	// before they connect.
-	for i := 0; i < 20; i++ {
-		err := sender.rescan("default")
-		if err != nil {
-			time.Sleep(time.Second)
-			continue
-		}
-		break
-	}
-
-	log.Println("Starting receiver...")
-	receiver := syncthingProcess{ // id2
-		instance: "2",
-		argv:     []string{"-home", "h2"},
-		port:     8082,
-		apiKey:   apiKey,
-	}
-	err = receiver.start()
-	if err != nil {
-		sender.stop()
-		t.Fatal(err)
-	}
 	defer receiver.stop()

 	if err = coCompletion(sender, receiver); err != nil {

@@ -213,6 +179,294 @@ func TestConflict(t *testing.T) {
 	}
 }

+func TestInitialMergeConflicts(t *testing.T) {
+	log.Println("Cleaning...")
+	err := removeAll("s1", "s2", "h1/index*", "h2/index*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = os.Mkdir("s1", 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.Mkdir("s2", 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// File 1 is a conflict
+
+	err = ioutil.WriteFile("s1/file1", []byte("hello\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ioutil.WriteFile("s2/file1", []byte("goodbye\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// File 2 exists on s1 only
+
+	err = ioutil.WriteFile("s1/file2", []byte("hello\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// File 3 exists on s2 only
+
+	err = ioutil.WriteFile("s2/file3", []byte("goodbye\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Let them sync
+
+	sender, receiver := coSenderReceiver(t)
+	defer sender.stop()
+	defer receiver.stop()
+
+	log.Println("Syncing...")
+
+	if err = coCompletion(sender, receiver); err != nil {
+		t.Fatal(err)
+	}
+
+	sender.stop()
+	receiver.stop()
+
+	log.Println("Verifying...")
+
+	// s1 should have three-four files (there's a conflict from s2 which may or may not have synced yet)
+
+	files, err := filepath.Glob("s1/file*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) < 3 || len(files) > 4 {
+		t.Errorf("Expected 3-4 files in s1 instead of %d", len(files))
+	}
+
+	// s2 should have four files (there's a conflict)
+
+	files, err = filepath.Glob("s2/file*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 4 {
+		t.Errorf("Expected 4 files in s2 instead of %d", len(files))
+	}
+
+	// file1 is in conflict, so there's two versions of that one
+
+	files, err = filepath.Glob("s2/file1*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 2 {
+		t.Errorf("Expected 2 'file1' files in s2 instead of %d", len(files))
+	}
+}
+
+func TestResetConflicts(t *testing.T) {
+	log.Println("Cleaning...")
+	err := removeAll("s1", "s2", "h1/index*", "h2/index*")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = os.Mkdir("s1", 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.Mkdir("s2", 0755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Three files on s1
+
+	err = ioutil.WriteFile("s1/file1", []byte("hello\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = ioutil.WriteFile("s1/file2", []byte("hello\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = ioutil.WriteFile("s2/file3", []byte("hello\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Let them sync
+
+	sender, receiver := coSenderReceiver(t)
+	defer sender.stop()
+	defer receiver.stop()
+
+	log.Println("Syncing...")
+
+	if err = coCompletion(sender, receiver); err != nil {
+		t.Fatal(err)
+	}
+
+	log.Println("Verifying...")
+
+	// s1 should have three files
+
+	files, err := filepath.Glob("s1/file*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 3 {
+		t.Errorf("Expected 3 files in s1 instead of %d", len(files))
+	}
+
+	// s2 should have three
+
+	files, err = filepath.Glob("s2/file*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 3 {
+		t.Errorf("Expected 3 files in s2 instead of %d", len(files))
+	}
+
+	log.Println("Updating...")
+
+	// Change s2/file2 a few times, so that its version counter increases.
+	// This will make the file on the cluster look newer than what we have
+	// locally after we reset the index, unless we have a fix for that.
+
+	err = ioutil.WriteFile("s2/file2", []byte("hello1\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = receiver.rescan("default")
+	if err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(time.Second)
+	err = ioutil.WriteFile("s2/file2", []byte("hello2\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = receiver.rescan("default")
+	if err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(time.Second)
+	err = ioutil.WriteFile("s2/file2", []byte("hello3\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = receiver.rescan("default")
+	if err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(time.Second)
+
+	if err = coCompletion(sender, receiver); err != nil {
+		t.Fatal(err)
+	}
+
+	// Now nuke the index
+
+	log.Println("Resetting...")
+
+	receiver.stop()
+	removeAll("h2/index*")
+
+	// s1/file1 (remote) changes while receiver is down
+
+	err = ioutil.WriteFile("s1/file1", []byte("goodbye\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// s1 must know about it
+	err = sender.rescan("default")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// s2/file2 (local) changes while receiver is down
+
+	err = ioutil.WriteFile("s2/file2", []byte("goodbye\n"), 0644)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	receiver.start()
+
+	log.Println("Syncing...")
+
+	if err = coCompletion(sender, receiver); err != nil {
+		t.Fatal(err)
+	}
+
+	// s2 should have five files (three plus two conflicts)
+
+	files, err = filepath.Glob("s2/file*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 5 {
+		t.Errorf("Expected 5 files in s2 instead of %d", len(files))
+	}
+
+	// file1 is in conflict, so there's two versions of that one
+
+	files, err = filepath.Glob("s2/file1*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 2 {
+		t.Errorf("Expected 2 'file1' files in s2 instead of %d", len(files))
+	}
+
+	// file2 is in conflict, so there's two versions of that one
+
+	files, err = filepath.Glob("s2/file2*")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) != 2 {
+		t.Errorf("Expected 2 'file2' files in s2 instead of %d", len(files))
+	}
+}
+
+func coSenderReceiver(t *testing.T) (syncthingProcess, syncthingProcess) {
+	log.Println("Starting sender...")
+	sender := syncthingProcess{ // id1
+		instance: "1",
+		argv:     []string{"-home", "h1"},
+		port:     8081,
+		apiKey:   apiKey,
+	}
+	err := sender.start()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	log.Println("Starting receiver...")
+	receiver := syncthingProcess{ // id2
+		instance: "2",
+		argv:     []string{"-home", "h2"},
+		port:     8082,
+		apiKey:   apiKey,
+	}
+	err = receiver.start()
+	if err != nil {
+		sender.stop()
+		t.Fatal(err)
+	}
+
+	return sender, receiver
+}
+
+func coCompletion(p ...syncthingProcess) error {
+mainLoop:
+	for {
@@ -234,7 +234,7 @@ func TestPOSTWithoutCSRF(t *testing.T) {

 	// Should succeed with CSRF

-	req, err = http.NewRequest("POST", "http://127.0.0.1:8082/rest/error/clear", nil)
+	req, err = http.NewRequest("POST", "http://127.0.0.1:8082/rest/system/error/clear", nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -148,7 +148,7 @@ func TestOverride(t *testing.T) {

 	log.Println("Hitting Override on master...")

-	resp, err := master.post("/rest/model/override?folder=default", nil)
+	resp, err := master.post("/rest/db/override?folder=default", nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -118,11 +118,18 @@ func (p *syncthingProcess) stop() error {

 	raceConditionStart := []byte("WARNING: DATA RACE")
 	raceConditionSep := []byte("==================")
+	panicConditionStart := []byte("panic:")
+	panicConditionSep := []byte(p.id.String()[:5])
 	sc := bufio.NewScanner(fd)
 	race := false
+	_panic := false
 	for sc.Scan() {
 		line := sc.Bytes()
-		if race {
+		if race || _panic {
+			if bytes.Contains(line, panicConditionSep) {
+				_panic = false
+				continue
+			}
 			fmt.Printf("%s\n", line)
 			if bytes.Contains(line, raceConditionSep) {
 				race = false

@@ -134,6 +141,11 @@ func (p *syncthingProcess) stop() error {
 			if err == nil {
 				err = errors.New("Race condition detected")
 			}
+		} else if bytes.Contains(line, panicConditionStart) {
+			_panic = true
+			if err == nil {
+				err = errors.New("Panic detected")
+			}
 		}
 	}
 	return err