Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-04 03:49:12 -05:00)

Compare commits: 64 commits
| Author | SHA1 | Date |
|---|---|---|
|  | c8ac9721d7 |  |
|  | b1b68b58fe |  |
|  | ca21db9481 |  |
|  | 93ad803073 |  |
|  | 6cc7f70a65 |  |
|  | 2b0c33f74d |  |
|  | dae1d36a23 |  |
|  | 824fa8f17a |  |
|  | 31cd0b943c |  |
|  | 070eced2f6 |  |
|  | 986f8dfb2e |  |
|  | 8c0c03eb38 |  |
|  | fd9bc20bc5 |  |
|  | 089fca2319 |  |
|  | e936890927 |  |
|  | 0450d48f89 |  |
|  | 2463819a3d |  |
|  | 2b2cae2d50 |  |
|  | 0f1b40da71 |  |
|  | f73d5a9ab2 |  |
|  | 4eb0e24c6e |  |
|  | 1d2235abe7 |  |
|  | d347e54acb |  |
|  | b5198d8119 |  |
|  | b8b5c5ff34 |  |
|  | a738490a3b |  |
|  | 54a8de2059 |  |
|  | cb2c0e7ac5 |  |
|  | 510d309b8a |  |
|  | a7ce2a7aa5 |  |
|  | cfe24ecdd9 |  |
|  | c5e9cb025c |  |
|  | c3d07d60ca |  |
|  | a0897a7456 |  |
|  | c50ba9267c |  |
|  | 423e69916c |  |
|  | b56c76f8ad |  |
|  | cb2d2f000f |  |
|  | 69af77a3bd |  |
|  | 7767746d3e |  |
|  | 7219aaeb89 |  |
|  | 7af1863e81 |  |
|  | 4beb42bf45 |  |
|  | 12a3086a9e |  |
|  | 198725216f |  |
|  | 08647f1267 |  |
|  | 87811efc30 |  |
|  | 82c3e6f87f |  |
|  | 1ac40a3043 |  |
|  | 127b0c3332 |  |
|  | a6d9150b14 |  |
|  | 7e5197c566 |  |
|  | 2d217e72bd |  |
|  | 12331cc62b |  |
|  | 2449f1e1b6 |  |
|  | 6a6593c656 |  |
|  | ad220d61f9 |  |
|  | 1e35383b4d |  |
|  | c8457ab005 |  |
|  | 070a308217 |  |
|  | c1f4477376 |  |
|  | d728320ece |  |
|  | fee0d7168a |  |
|  | 7c23b32de3 |  |
AUTHORS (2 changes)

```diff
@@ -22,10 +22,12 @@ Dennis Wilson <dw@risu.io>
 Dominik Heidler <dominik@heidler.eu>
 Elias Jarlebring <jarlebring@gmail.com>
+Emil Hessman <emil@hessman.se>
+Erik Meitner <e.meitner@willystreet.coop>
 Federico Castagnini <federico.castagnini@gmail.com>
 Felix Ableitner <me@nutomic.com>
 Felix Unterpaintner <bigbear2nd@gmail.com>
 Francois-Xavier Gsell <fxgsell@gmail.com>
 Frank Isemann <frank@isemann.name>
 Gilli Sigurdsson <gilli@vx.is>
 Jakob Borg <jakob@nym.se>
 James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
```
Godeps/Godeps.json (generated, 26 changes)

```diff
@@ -7,7 +7,7 @@
     "Deps": [
         {
             "ImportPath": "github.com/bkaradzic/go-lz4",
-            "Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
+            "Rev": "4f7c2045dbd17b802370e2e6022200468abf02ba"
         },
         {
             "ImportPath": "github.com/calmh/logger",
@@ -21,25 +21,25 @@
             "ImportPath": "github.com/calmh/xdr",
             "Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
         },
         {
             "ImportPath": "github.com/google/go-snappy/snappy",
             "Rev": "eaa750b9bf4dcb7cb20454be850613b66cda3273"
         },
         {
             "ImportPath": "github.com/juju/ratelimit",
-            "Rev": "c5abe513796336ee2869745bff0638508450e9c5"
+            "Rev": "faa59ce93750e747b2997635e8b7daf30024b1ac"
         },
         {
             "ImportPath": "github.com/kardianos/osext",
-            "Rev": "efacde03154693404c65e7aa7d461ac9014acd0c"
+            "Rev": "6e7f843663477789fac7c02def0d0909e969b4e5"
         },
         {
             "ImportPath": "github.com/syncthing/protocol",
-            "Rev": "e7db2648034fb71b051902a02bc25d4468ed492e"
+            "Rev": "95e15c95f21b81b09772f07de5c142a3e68f78db"
         },
         {
             "ImportPath": "github.com/syndtr/goleveldb/leveldb",
             "Rev": "87e4e645d80ae9c537e8f2dee52b28036a5dd75e"
         },
         {
             "ImportPath": "github.com/syndtr/gosnappy/snappy",
-            "Rev": "156a073208e131d7d2e212cb749feae7c339e846"
+            "Rev": "a06509502ca32565bdf74afc1e573050023f261c"
         },
         {
             "ImportPath": "github.com/thejerf/suture",
@@ -59,19 +59,19 @@
         },
         {
             "ImportPath": "golang.org/x/crypto/bcrypt",
-            "Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
+            "Rev": "1e856cbfdf9bc25eefca75f83f25d55e35ae72e0"
         },
         {
             "ImportPath": "golang.org/x/crypto/blowfish",
-            "Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
+            "Rev": "1e856cbfdf9bc25eefca75f83f25d55e35ae72e0"
        },
         {
             "ImportPath": "golang.org/x/text/transform",
-            "Rev": "2076e9cab4147459c82bc81169e46c139d358547"
+            "Rev": "df923bbb63f8ea3a26bb743e2a497abd0ab585f7"
         },
         {
             "ImportPath": "golang.org/x/text/unicode/norm",
-            "Rev": "2076e9cab4147459c82bc81169e46c139d358547"
+            "Rev": "df923bbb63f8ea3a26bb743e2a497abd0ab585f7"
         }
     ]
 }
```
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzz.go (generated, vendored, new file, 23 lines)

```diff
@@ -0,0 +1,23 @@
+// +build gofuzz
+
+package lz4
+
+import "encoding/binary"
+
+func Fuzz(data []byte) int {
+
+    if len(data) < 4 {
+        return 0
+    }
+
+    ln := binary.LittleEndian.Uint32(data)
+    if ln > (1 << 21) {
+        return 0
+    }
+
+    if _, err := Decode(nil, data); err != nil {
+        return 0
+    }
+
+    return 1
+}
```
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/reader.go (generated, vendored, 7 changes)

```diff
@@ -141,7 +141,7 @@ func Decode(dst, src []byte) ([]byte, error) {
             length += ln
         }
 
-        if int(d.spos+length) > len(d.src) {
+        if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
             return nil, ErrCorrupt
         }
 
@@ -179,7 +179,12 @@ func Decode(dst, src []byte) ([]byte, error) {
         }
 
         literal := d.dpos - d.ref
 
         if literal < 4 {
+            if int(d.dpos+4) > len(d.dst) {
+                return nil, ErrCorrupt
+            }
+
             d.cp(4, decr[literal])
         } else {
             length += 4
```
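The checks added above guard the destination buffer as well as the source, so malformed input ends in `ErrCorrupt` instead of an out-of-bounds access. A minimal sketch of what that means for a caller; the payload is made up, and exactly where a truncated stream trips a bounds check can vary, so treat the second result as illustrative:

```go
package main

import (
    "fmt"

    lz4 "github.com/bkaradzic/go-lz4"
)

func main() {
    // Round-trip an arbitrary payload.
    data := []byte("hello, hello, hello, hello, hello")
    comp, err := lz4.Encode(nil, data)
    if err != nil {
        panic(err)
    }
    out, err := lz4.Decode(nil, comp)
    fmt.Printf("intact: %q, err=%v\n", out, err)

    // A truncated stream should now be rejected with ErrCorrupt by the
    // bounds checks rather than reading or writing past a buffer.
    _, err = lz4.Decode(nil, comp[:len(comp)-3])
    fmt.Println("truncated:", err)
}
```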
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/writer.go (generated, vendored, 6 changes)

```diff
@@ -25,8 +25,10 @@
 
 package lz4
 
-import "encoding/binary"
-import "errors"
+import (
+    "encoding/binary"
+    "errors"
+)
 
 const (
     minMatch = 4
```
Godeps/_workspace/src/github.com/juju/ratelimit/LICENSE (generated, vendored, 7 changes)

```diff
@@ -1,5 +1,8 @@
-This package contains an efficient token-bucket-based rate limiter.
-Copyright (C) 2015 Canonical Ltd.
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
 
 This software is licensed under the LGPLv3, included below.
 
```
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go (generated, vendored, 10 changes)

```diff
@@ -7,6 +7,7 @@
 package ratelimit
 
 import (
+    "math"
     "strconv"
     "sync"
     "time"
@@ -55,7 +56,7 @@ func NewBucketWithRate(rate float64, capacity int64) *Bucket {
             continue
         }
         tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
-        if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
+        if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
             return tb
         }
     }
@@ -217,10 +218,3 @@ func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
     tb.availTick = currentTick
     return
 }
-
-func abs(f float64) float64 {
-    if f < 0 {
-        return -f
-    }
-    return f
-}
```
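For context, `NewBucketWithRate` searches for a fill interval and quantum whose effective rate lands within a small margin of the requested rate; the `math.Abs` call above is what measures that margin, replacing the hand-rolled `abs` helper. A hedged usage sketch (the rate and token counts are arbitrary):

```go
package main

import (
    "fmt"
    "time"

    "github.com/juju/ratelimit"
)

func main() {
    // ~100 tokens per second, burst capacity of 100 tokens.
    bucket := ratelimit.NewBucketWithRate(100, 100)

    start := time.Now()
    for i := 0; i < 5; i++ {
        bucket.Wait(50) // blocks until 50 tokens are available
    }
    // Five waits of 50 tokens against a 100 token/s bucket should take on
    // the order of 1.5 seconds once the initial 100-token burst is spent.
    fmt.Println("elapsed:", time.Since(start).Round(100*time.Millisecond))
}
```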
Godeps/_workspace/src/github.com/kardianos/osext/README.md (generated, vendored, 4 changes)

```diff
@@ -4,7 +4,9 @@
 
 There is sometimes utility in finding the current executable file
 that is running. This can be used for upgrading the current executable
-or finding resources located relative to the executable file.
+or finding resources located relative to the executable file. Both
+working directory and the os.Args[0] value are arbitrary and cannot
+be relied on; os.Args[0] can be "faked".
 
 Multi-platform and supports:
 * Linux
```
Godeps/_workspace/src/github.com/kardianos/osext/osext.go (generated, vendored, 6 changes)

```diff
@@ -16,12 +16,12 @@ func Executable() (string, error) {
 }
 
 // Returns same path as Executable, returns just the folder
-// path. Excludes the executable name.
+// path. Excludes the executable name and any trailing slash.
 func ExecutableFolder() (string, error) {
     p, err := Executable()
     if err != nil {
         return "", err
     }
-    folder, _ := filepath.Split(p)
-    return folder, nil
+
+    return filepath.Dir(p), nil
 }
```
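The behavioral difference is entirely about the trailing separator: `filepath.Split` keeps it on the directory part, while `filepath.Dir` drops it, which is what the updated doc comment promises. A standalone illustration using only the standard library (the path is made up):

```go
package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    p := "/usr/local/bin/syncthing"

    // filepath.Split keeps the trailing separator on the directory part...
    dir, file := filepath.Split(p)
    fmt.Printf("Split: %q + %q\n", dir, file) // "/usr/local/bin/" + "syncthing"

    // ...while filepath.Dir drops it, matching the new ExecutableFolder.
    fmt.Printf("Dir:   %q\n", filepath.Dir(p)) // "/usr/local/bin"
}
```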
Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go (generated, vendored, 6 changes)

```diff
@@ -17,12 +17,14 @@ import (
 func executable() (string, error) {
     switch runtime.GOOS {
     case "linux":
-        const deletedSuffix = " (deleted)"
+        const deletedTag = " (deleted)"
         execpath, err := os.Readlink("/proc/self/exe")
         if err != nil {
             return execpath, err
         }
-        return strings.TrimSuffix(execpath, deletedSuffix), nil
+        execpath = strings.TrimSuffix(execpath, deletedTag)
+        execpath = strings.TrimPrefix(execpath, deletedTag)
+        return execpath, nil
     case "netbsd":
         return os.Readlink("/proc/curproc/exe")
     case "openbsd", "dragonfly":
```
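On Linux, the `/proc/self/exe` symlink target gains the literal marker ` (deleted)` once the running binary has been replaced or removed on disk; the updated code strips the tag whether it appears as a suffix or a prefix, apparently to cover differing placements. A standalone illustration of the trimming itself (the paths are made up):

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    const deletedTag = " (deleted)"
    for _, p := range []string{
        "/usr/bin/syncthing (deleted)",
        " (deleted)/usr/bin/syncthing",
    } {
        p = strings.TrimSuffix(p, deletedTag)
        p = strings.TrimPrefix(p, deletedTag)
        fmt.Println(p) // "/usr/bin/syncthing" in both cases
    }
}
```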
Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go (generated, vendored, 23 changes)

```diff
@@ -24,6 +24,29 @@ const (
     executableEnvValueDelete = "delete"
 )
 
+func TestPrintExecutable(t *testing.T) {
+    ef, err := Executable()
+    if err != nil {
+        t.Fatalf("Executable failed: %v", err)
+    }
+    t.Log("Executable:", ef)
+}
+func TestPrintExecutableFolder(t *testing.T) {
+    ef, err := ExecutableFolder()
+    if err != nil {
+        t.Fatalf("ExecutableFolder failed: %v", err)
+    }
+    t.Log("Executable Folder:", ef)
+}
+func TestExecutableFolder(t *testing.T) {
+    ef, err := ExecutableFolder()
+    if err != nil {
+        t.Fatalf("ExecutableFolder failed: %v", err)
+    }
+    if ef[len(ef)-1] == filepath.Separator {
+        t.Fatal("ExecutableFolder ends with a trailing slash.")
+    }
+}
 func TestExecutableMatch(t *testing.T) {
     ep, err := Executable()
     if err != nil {
```
Godeps/_workspace/src/github.com/syncthing/protocol/protocol.go (generated, vendored, 59 changes)

```diff
@@ -31,8 +31,7 @@ const (
 
 const (
     stateInitial = iota
-    stateCCRcvd
-    stateIdxRcvd
+    stateReady
 )
 
 // FileInfo flags
@@ -103,7 +102,6 @@ type rawConnection struct {
     id       DeviceID
     name     string
     receiver Model
-    state    int
 
     cr *countingReader
     cw *countingWriter
@@ -142,9 +140,9 @@ type isEofer interface {
     IsEOF() bool
 }
 
-const (
-    pingTimeout  = 30 * time.Second
-    pingIdleTime = 60 * time.Second
+var (
+    PingTimeout  = 30 * time.Second
+    PingIdleTime = 60 * time.Second
 )
 
 func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
@@ -155,7 +153,6 @@ func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
         id:       deviceID,
         name:     name,
         receiver: nativeModel{receiver},
-        state:    stateInitial,
         cr:       cr,
         cw:       cw,
         outbox:   make(chan hdrMsg),
@@ -285,6 +282,7 @@ func (c *rawConnection) readerLoop() (err error) {
         c.close(err)
     }()
 
+    state := stateInitial
     for {
         select {
         case <-c.closed:
@@ -298,47 +296,54 @@ func (c *rawConnection) readerLoop() (err error) {
         }
 
         switch msg := msg.(type) {
+        case ClusterConfigMessage:
+            if state != stateInitial {
+                return fmt.Errorf("protocol error: cluster config message in state %d", state)
+            }
+            go c.receiver.ClusterConfig(c.id, msg)
+            state = stateReady
+
         case IndexMessage:
             switch hdr.msgType {
             case messageTypeIndex:
-                if c.state < stateCCRcvd {
-                    return fmt.Errorf("protocol error: index message in state %d", c.state)
+                if state != stateReady {
+                    return fmt.Errorf("protocol error: index message in state %d", state)
                 }
                 c.handleIndex(msg)
-                c.state = stateIdxRcvd
+                state = stateReady
 
             case messageTypeIndexUpdate:
-                if c.state < stateIdxRcvd {
-                    return fmt.Errorf("protocol error: index update message in state %d", c.state)
+                if state != stateReady {
+                    return fmt.Errorf("protocol error: index update message in state %d", state)
                 }
                 c.handleIndexUpdate(msg)
+                state = stateReady
             }
 
         case RequestMessage:
-            if c.state < stateIdxRcvd {
-                return fmt.Errorf("protocol error: request message in state %d", c.state)
+            if state != stateReady {
+                return fmt.Errorf("protocol error: request message in state %d", state)
             }
             // Requests are handled asynchronously
             go c.handleRequest(hdr.msgID, msg)
 
         case ResponseMessage:
-            if c.state < stateIdxRcvd {
-                return fmt.Errorf("protocol error: response message in state %d", c.state)
+            if state != stateReady {
+                return fmt.Errorf("protocol error: response message in state %d", state)
             }
             c.handleResponse(hdr.msgID, msg)
 
         case pingMessage:
+            if state != stateReady {
+                return fmt.Errorf("protocol error: ping message in state %d", state)
+            }
             c.send(hdr.msgID, messageTypePong, pongMessage{})
 
         case pongMessage:
-            c.handlePong(hdr.msgID)
-
-        case ClusterConfigMessage:
-            if c.state != stateInitial {
-                return fmt.Errorf("protocol error: cluster config message in state %d", c.state)
+            if state != stateReady {
+                return fmt.Errorf("protocol error: pong message in state %d", state)
             }
-            go c.receiver.ClusterConfig(c.id, msg)
-            c.state = stateCCRcvd
+            c.handlePong(hdr.msgID)
 
         case CloseMessage:
             return errors.New(msg.Reason)
@@ -679,17 +684,17 @@ func (c *rawConnection) idGenerator() {
 
 func (c *rawConnection) pingerLoop() {
     var rc = make(chan bool, 1)
-    ticker := time.Tick(pingIdleTime / 2)
+    ticker := time.Tick(PingIdleTime / 2)
     for {
         select {
         case <-ticker:
-            if d := time.Since(c.cr.Last()); d < pingIdleTime {
+            if d := time.Since(c.cr.Last()); d < PingIdleTime {
                 if debug {
                     l.Debugln(c.id, "ping skipped after rd", d)
                 }
                 continue
             }
-            if d := time.Since(c.cw.Last()); d < pingIdleTime {
+            if d := time.Since(c.cw.Last()); d < PingIdleTime {
                 if debug {
                     l.Debugln(c.id, "ping skipped after wr", d)
                 }
@@ -709,7 +714,7 @@ func (c *rawConnection) pingerLoop() {
             if !ok {
                 c.close(fmt.Errorf("ping failure"))
             }
-        case <-time.After(pingTimeout):
+        case <-time.After(PingTimeout):
             c.close(fmt.Errorf("ping timeout"))
         case <-c.closed:
             return
```
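The net effect of this diff is to collapse the old four-state handshake (`stateInitial → stateCCRcvd → stateIdxRcvd → stateReady`) into two states tracked in a local variable: everything except the opening ClusterConfig is legal only once the connection is ready. A minimal sketch of that shape, with made-up types rather than the package's real ones:

```go
package main

import "fmt"

const (
    stateInitial = iota
    stateReady
)

// handle enforces the two-state rule: ClusterConfig first, then anything.
func handle(state *int, msgType string) error {
    switch msgType {
    case "ClusterConfig":
        if *state != stateInitial {
            return fmt.Errorf("protocol error: cluster config in state %d", *state)
        }
        *state = stateReady
    default: // Index, IndexUpdate, Request, Response, ping, pong, ...
        if *state != stateReady {
            return fmt.Errorf("protocol error: %s in state %d", msgType, *state)
        }
    }
    return nil
}

func main() {
    state := stateInitial
    fmt.Println(handle(&state, "Index"))         // error: not ready yet
    fmt.Println(handle(&state, "ClusterConfig")) // nil: handshake done
    fmt.Println(handle(&state, "Index"))         // nil
}
```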
Godeps/_workspace/src/github.com/syncthing/protocol/protocol_test.go (generated, vendored, 30 changes)

```diff
@@ -67,8 +67,10 @@ func TestPing(t *testing.T) {
     ar, aw := io.Pipe()
     br, bw := io.Pipe()
 
-    c0 := NewConnection(c0ID, ar, bw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
-    c1 := NewConnection(c1ID, br, aw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
+    c0 := NewConnection(c0ID, ar, bw, newTestModel(), "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
+    c1 := NewConnection(c1ID, br, aw, newTestModel(), "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
+    c0.ClusterConfig(ClusterConfigMessage{})
+    c1.ClusterConfig(ClusterConfigMessage{})
 
     if ok := c0.ping(); !ok {
         t.Error("c0 ping failed")
@@ -81,8 +83,8 @@ func TestPing(t *testing.T) {
 func TestPingErr(t *testing.T) {
     e := errors.New("something broke")
 
-    for i := 0; i < 16; i++ {
-        for j := 0; j < 16; j++ {
+    for i := 0; i < 32; i++ {
+        for j := 0; j < 32; j++ {
             m0 := newTestModel()
             m1 := newTestModel()
@@ -92,12 +94,16 @@ func TestPingErr(t *testing.T) {
             ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
 
             c0 := NewConnection(c0ID, ar, ebw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
-            NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
+            c1 := NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
+            c0.ClusterConfig(ClusterConfigMessage{})
+            c1.ClusterConfig(ClusterConfigMessage{})
 
             res := c0.ping()
             if (i < 8 || j < 8) && res {
+                // This should have resulted in failure, as there is no way an empty ClusterConfig plus a Ping message fits in eight bytes.
                 t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
-            } else if (i >= 12 && j >= 12) && !res {
+            } else if (i >= 28 && j >= 28) && !res {
+                // This should have worked though, as 28 bytes is plenty for both.
                 t.Errorf("Unexpected ping fail; i=%d, j=%d", i, j)
             }
         }
@@ -168,7 +174,9 @@ func TestVersionErr(t *testing.T) {
     br, bw := io.Pipe()
 
     c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
-    NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
+    c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
+    c0.ClusterConfig(ClusterConfigMessage{})
+    c1.ClusterConfig(ClusterConfigMessage{})
 
     w := xdr.NewWriter(c0.cw)
     w.WriteUint32(encodeHeader(header{
@@ -191,7 +199,9 @@ func TestTypeErr(t *testing.T) {
     br, bw := io.Pipe()
 
     c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
-    NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
+    c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
+    c0.ClusterConfig(ClusterConfigMessage{})
+    c1.ClusterConfig(ClusterConfigMessage{})
 
     w := xdr.NewWriter(c0.cw)
     w.WriteUint32(encodeHeader(header{
@@ -214,7 +224,9 @@ func TestClose(t *testing.T) {
     br, bw := io.Pipe()
 
     c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
-    NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
+    c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
+    c0.ClusterConfig(ClusterConfigMessage{})
+    c1.ClusterConfig(ClusterConfigMessage{})
 
     c0.close(nil)
```
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go (generated, vendored, 403 changes)

```diff
@@ -63,13 +63,14 @@ type DB struct {
     journalAckC chan error
 
     // Compaction.
-    tcompCmdC   chan cCmd
-    tcompPauseC chan chan<- struct{}
-    mcompCmdC   chan cCmd
-    compErrC    chan error
-    compPerErrC chan error
-    compErrSetC chan error
-    compStats   []cStats
+    tcompCmdC        chan cCmd
+    tcompPauseC      chan chan<- struct{}
+    mcompCmdC        chan cCmd
+    compErrC         chan error
+    compPerErrC      chan error
+    compErrSetC      chan error
+    compWriteLocking bool
+    compStats        []cStats
 
     // Close.
     closeW sync.WaitGroup
@@ -108,28 +109,44 @@ func openDB(s *session) (*DB, error) {
         closeC: make(chan struct{}),
     }
 
-    if err := db.recoverJournal(); err != nil {
-        return nil, err
-    }
-
-    // Remove any obsolete files.
-    if err := db.checkAndCleanFiles(); err != nil {
-        // Close journal.
-        if db.journal != nil {
-            db.journal.Close()
-            db.journalWriter.Close()
-        }
-        return nil, err
-    }
+    // Read-only mode.
+    readOnly := s.o.GetReadOnly()
+
+    if readOnly {
+        // Recover journals (read-only mode).
+        if err := db.recoverJournalRO(); err != nil {
+            return nil, err
+        }
+    } else {
+        // Recover journals.
+        if err := db.recoverJournal(); err != nil {
+            return nil, err
+        }
+
+        // Remove any obsolete files.
+        if err := db.checkAndCleanFiles(); err != nil {
+            // Close journal.
+            if db.journal != nil {
+                db.journal.Close()
+                db.journalWriter.Close()
+            }
+            return nil, err
+        }
+    }
 
     // Doesn't need to be included in the wait group.
     go db.compactionError()
     go db.mpoolDrain()
 
-    db.closeW.Add(3)
-    go db.tCompaction()
-    go db.mCompaction()
-    go db.jWriter()
+    if readOnly {
+        db.SetReadOnly()
+    } else {
+        db.closeW.Add(3)
+        go db.tCompaction()
+        go db.mCompaction()
+        go db.jWriter()
+    }
 
     s.logf("db@open done T·%v", time.Since(start))
@@ -275,7 +292,7 @@ func recoverTable(s *session, o *opt.Options) error {
         // We will drop corrupted table.
         strict = o.GetStrict(opt.StrictRecovery)
 
-        rec   = &sessionRecord{numLevel: o.GetNumLevel()}
+        rec   = &sessionRecord{}
         bpool = util.NewBufferPool(o.GetBlockSize() + 5)
     )
     buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
@@ -450,132 +467,136 @@ func recoverTable(s *session, o *opt.Options) error {
 }
 
 func (db *DB) recoverJournal() error {
-    // Get all tables and sort it by file number.
-    journalFiles_, err := db.s.getFiles(storage.TypeJournal)
+    // Get all journals and sort it by file number.
+    allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
     if err != nil {
         return err
     }
-    journalFiles := files(journalFiles_)
-    journalFiles.sort()
+    files(allJournalFiles).sort()
 
-    // Discard older journal.
-    prev := -1
-    for i, file := range journalFiles {
-        if file.Num() >= db.s.stJournalNum {
-            if prev >= 0 {
-                i--
-                journalFiles[i] = journalFiles[prev]
-            }
-            journalFiles = journalFiles[i:]
-            break
-        } else if file.Num() == db.s.stPrevJournalNum {
-            prev = i
+    // Journals that will be recovered.
+    var recJournalFiles []storage.File
+    for _, jf := range allJournalFiles {
+        if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
+            recJournalFiles = append(recJournalFiles, jf)
         }
     }
 
-    var jr *journal.Reader
-    var of storage.File
-    var mem *memdb.DB
-    batch := new(Batch)
-    cm := newCMem(db.s)
-    buf := new(util.Buffer)
-    // Options.
-    strict := db.s.o.GetStrict(opt.StrictJournal)
-    checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
-    writeBuffer := db.s.o.GetWriteBuffer()
-    recoverJournal := func(file storage.File) error {
-        db.logf("journal@recovery recovering @%d", file.Num())
-        reader, err := file.Open()
-        if err != nil {
-            return err
-        }
-        defer reader.Close()
-
-        // Create/reset journal reader instance.
-        if jr == nil {
-            jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
-        } else {
-            jr.Reset(reader, dropper{db.s, file}, strict, checksum)
-        }
-
-        // Flush memdb and remove obsolete journal file.
-        if of != nil {
-            if mem.Len() > 0 {
-                if err := cm.flush(mem, 0); err != nil {
-                    return err
-                }
-            }
-            if err := cm.commit(file.Num(), db.seq); err != nil {
-                return err
-            }
-            cm.reset()
-            of.Remove()
-            of = nil
-        }
-
-        // Replay journal to memdb.
-        mem.Reset()
-        for {
-            r, err := jr.Next()
-            if err != nil {
-                if err == io.EOF {
-                    break
-                }
-                return errors.SetFile(err, file)
-            }
-
-            buf.Reset()
-            if _, err := buf.ReadFrom(r); err != nil {
-                if err == io.ErrUnexpectedEOF {
-                    // This is error returned due to corruption, with strict == false.
-                    continue
-                } else {
-                    return errors.SetFile(err, file)
-                }
-            }
-            if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
-                if strict || !errors.IsCorrupted(err) {
-                    return errors.SetFile(err, file)
-                } else {
-                    db.s.logf("journal error: %v (skipped)", err)
-                    // We won't apply sequence number as it might be corrupted.
-                    continue
-                }
-            }
-
-            // Save sequence number.
-            db.seq = batch.seq + uint64(batch.Len())
-
-            // Flush it if large enough.
-            if mem.Size() >= writeBuffer {
-                if err := cm.flush(mem, 0); err != nil {
-                    return err
-                }
-                mem.Reset()
-            }
-        }
-
-        of = file
-        return nil
-    }
-
-    // Recover all journals.
-    if len(journalFiles) > 0 {
-        db.logf("journal@recovery F·%d", len(journalFiles))
+    var (
+        of  storage.File // Obsolete file.
+        rec = &sessionRecord{}
+    )
+
+    // Recover journals.
+    if len(recJournalFiles) > 0 {
+        db.logf("journal@recovery F·%d", len(recJournalFiles))
 
         // Mark file number as used.
-        db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
+        db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
 
-        mem = memdb.New(db.s.icmp, writeBuffer)
-        for _, file := range journalFiles {
-            if err := recoverJournal(file); err != nil {
-                return err
-            }
-        }
-    }
-
-    // Flush the last journal.
-    if mem.Len() > 0 {
-        if err := cm.flush(mem, 0); err != nil {
-            return err
+        var (
+            // Options.
+            strict      = db.s.o.GetStrict(opt.StrictJournal)
+            checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
+            writeBuffer = db.s.o.GetWriteBuffer()
+
+            jr    *journal.Reader
+            mdb   = memdb.New(db.s.icmp, writeBuffer)
+            buf   = &util.Buffer{}
+            batch = &Batch{}
+        )
+
+        for _, jf := range recJournalFiles {
+            db.logf("journal@recovery recovering @%d", jf.Num())
+
+            fr, err := jf.Open()
+            if err != nil {
+                return err
+            }
+
+            // Create or reset journal reader instance.
+            if jr == nil {
+                jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
+            } else {
+                jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
+            }
+
+            // Flush memdb and remove obsolete journal file.
+            if of != nil {
+                if mdb.Len() > 0 {
+                    if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
+                        fr.Close()
+                        return err
+                    }
+                }
+
+                rec.setJournalNum(jf.Num())
+                rec.setSeqNum(db.seq)
+                if err := db.s.commit(rec); err != nil {
+                    fr.Close()
+                    return err
+                }
+                rec.resetAddedTables()
+
+                of.Remove()
+                of = nil
+            }
+
+            // Replay journal to memdb.
+            mdb.Reset()
+            for {
+                r, err := jr.Next()
+                if err != nil {
+                    if err == io.EOF {
+                        break
+                    }
+
+                    fr.Close()
+                    return errors.SetFile(err, jf)
+                }
+
+                buf.Reset()
+                if _, err := buf.ReadFrom(r); err != nil {
+                    if err == io.ErrUnexpectedEOF {
+                        // This is error returned due to corruption, with strict == false.
+                        continue
+                    }
+
+                    fr.Close()
+                    return errors.SetFile(err, jf)
+                }
+                if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
+                    if !strict && errors.IsCorrupted(err) {
+                        db.s.logf("journal error: %v (skipped)", err)
+                        // We won't apply sequence number as it might be corrupted.
+                        continue
+                    }
+
+                    fr.Close()
+                    return errors.SetFile(err, jf)
+                }
+
+                // Save sequence number.
+                db.seq = batch.seq + uint64(batch.Len())
+
+                // Flush it if large enough.
+                if mdb.Size() >= writeBuffer {
+                    if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+                        fr.Close()
+                        return err
+                    }
+
+                    mdb.Reset()
+                }
+            }
+
+            fr.Close()
+            of = jf
+        }
+
+        // Flush the last memdb.
+        if mdb.Len() > 0 {
+            if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+                return err
+            }
         }
     }
@@ -587,8 +608,10 @@ func (db *DB) recoverJournal() error {
     }
 
     // Commit.
-    if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
-        // Close journal.
+    rec.setJournalNum(db.journalFile.Num())
+    rec.setSeqNum(db.seq)
+    if err := db.s.commit(rec); err != nil {
+        // Close journal on error.
         if db.journal != nil {
             db.journal.Close()
             db.journalWriter.Close()
@@ -604,6 +627,103 @@ func (db *DB) recoverJournal() error {
     return nil
 }
 
+func (db *DB) recoverJournalRO() error {
+    // Get all journals and sort it by file number.
+    allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
+    if err != nil {
+        return err
+    }
+    files(allJournalFiles).sort()
+
+    // Journals that will be recovered.
+    var recJournalFiles []storage.File
+    for _, jf := range allJournalFiles {
+        if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
+            recJournalFiles = append(recJournalFiles, jf)
+        }
+    }
+
+    var (
+        // Options.
+        strict      = db.s.o.GetStrict(opt.StrictJournal)
+        checksum    = db.s.o.GetStrict(opt.StrictJournalChecksum)
+        writeBuffer = db.s.o.GetWriteBuffer()
+
+        mdb = memdb.New(db.s.icmp, writeBuffer)
+    )
+
+    // Recover journals.
+    if len(recJournalFiles) > 0 {
+        db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
+
+        var (
+            jr    *journal.Reader
+            buf   = &util.Buffer{}
+            batch = &Batch{}
+        )
+
+        for _, jf := range recJournalFiles {
+            db.logf("journal@recovery recovering @%d", jf.Num())
+
+            fr, err := jf.Open()
+            if err != nil {
+                return err
+            }
+
+            // Create or reset journal reader instance.
+            if jr == nil {
+                jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
+            } else {
+                jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
+            }
+
+            // Replay journal to memdb.
+            for {
+                r, err := jr.Next()
+                if err != nil {
+                    if err == io.EOF {
+                        break
+                    }
+
+                    fr.Close()
+                    return errors.SetFile(err, jf)
+                }
+
+                buf.Reset()
+                if _, err := buf.ReadFrom(r); err != nil {
+                    if err == io.ErrUnexpectedEOF {
+                        // This is error returned due to corruption, with strict == false.
+                        continue
+                    }
+
+                    fr.Close()
+                    return errors.SetFile(err, jf)
+                }
+                if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
+                    if !strict && errors.IsCorrupted(err) {
+                        db.s.logf("journal error: %v (skipped)", err)
+                        // We won't apply sequence number as it might be corrupted.
+                        continue
+                    }
+
+                    fr.Close()
+                    return errors.SetFile(err, jf)
+                }
+
+                // Save sequence number.
+                db.seq = batch.seq + uint64(batch.Len())
+            }
+
+            fr.Close()
+        }
+    }
+
+    // Set memDB.
+    db.mem = &memDB{db: db, DB: mdb, ref: 1}
+
+    return nil
+}
+
 func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
     ikey := newIkey(key, seq, ktSeek)
@@ -614,7 +734,7 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
     }
     defer m.decref()
 
-    mk, mv, me := m.mdb.Find(ikey)
+    mk, mv, me := m.Find(ikey)
     if me == nil {
         ukey, _, kt, kerr := parseIkey(mk)
         if kerr != nil {
@@ -652,7 +772,7 @@ func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
     }
     defer m.decref()
 
-    mk, _, me := m.mdb.Find(ikey)
+    mk, _, me := m.Find(ikey)
     if me == nil {
         ukey, _, kt, kerr := parseIkey(mk)
         if kerr != nil {
@@ -784,7 +904,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
 
     const prefix = "leveldb."
     if !strings.HasPrefix(name, prefix) {
-        return "", errors.New("leveldb: GetProperty: unknown property: " + name)
+        return "", ErrNotFound
     }
     p := name[len(prefix):]
 
@@ -798,7 +918,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
         var rest string
         n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
         if n != 1 || int(level) >= db.s.o.GetNumLevel() {
-            err = errors.New("leveldb: GetProperty: invalid property: " + name)
+            err = ErrNotFound
         } else {
             value = fmt.Sprint(v.tLen(int(level)))
         }
@@ -837,7 +957,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
     case p == "aliveiters":
         value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
     default:
-        err = errors.New("leveldb: GetProperty: unknown property: " + name)
+        err = ErrNotFound
     }
 
     return
@@ -900,6 +1020,9 @@ func (db *DB) Close() error {
     var err error
     select {
     case err = <-db.compErrC:
+        if err == ErrReadOnly {
+            err = nil
+        }
     default:
     }
```
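The `readOnly` branch in `openDB` above means recovery replays journals into a memdb without writing tables or removing files, and no compaction or journal goroutines are started. Opening a store in that mode from client code would look roughly like this; the path is illustrative:

```go
package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
    // Open an existing store without ever mutating it on disk.
    db, err := leveldb.OpenFile("/var/lib/example/index-db", &opt.Options{ReadOnly: true})
    if err != nil {
        panic(err)
    }
    defer db.Close()

    val, err := db.Get([]byte("some-key"), nil)
    fmt.Printf("val=%q err=%v\n", val, err)

    // Any write is refused with ErrReadOnly.
    err = db.Put([]byte("some-key"), []byte("x"), nil)
    fmt.Println(err == leveldb.ErrReadOnly) // true
}
```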
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go (generated, vendored, 114 changes)

```diff
@@ -11,7 +11,6 @@ import (
     "time"
 
     "github.com/syndtr/goleveldb/leveldb/errors"
-    "github.com/syndtr/goleveldb/leveldb/memdb"
     "github.com/syndtr/goleveldb/leveldb/opt"
 )
 
@@ -62,58 +61,8 @@ func (p *cStatsStaging) stopTimer() {
     }
 }
 
-type cMem struct {
-    s     *session
-    level int
-    rec   *sessionRecord
-}
-
-func newCMem(s *session) *cMem {
-    return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
-}
-
-func (c *cMem) flush(mem *memdb.DB, level int) error {
-    s := c.s
-
-    // Write memdb to table.
-    iter := mem.NewIterator(nil)
-    defer iter.Release()
-    t, n, err := s.tops.createFrom(iter)
-    if err != nil {
-        return err
-    }
-
-    // Pick level.
-    if level < 0 {
-        v := s.version()
-        level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
-        v.release()
-    }
-    c.rec.addTableFile(level, t)
-
-    s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
-
-    c.level = level
-    return nil
-}
-
-func (c *cMem) reset() {
-    c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
-}
-
-func (c *cMem) commit(journal, seq uint64) error {
-    c.rec.setJournalNum(journal)
-    c.rec.setSeqNum(seq)
-
-    // Commit changes.
-    return c.s.commit(c.rec)
-}
-
 func (db *DB) compactionError() {
-    var (
-        err     error
-        wlocked bool
-    )
+    var err error
 noerr:
     // No error.
     for {
@@ -121,7 +70,7 @@ noerr:
         case err = <-db.compErrSetC:
             switch {
             case err == nil:
-            case errors.IsCorrupted(err):
+            case err == ErrReadOnly, errors.IsCorrupted(err):
                 goto hasperr
             default:
                 goto haserr
@@ -139,7 +88,7 @@ haserr:
             switch {
             case err == nil:
                 goto noerr
-            case errors.IsCorrupted(err):
+            case err == ErrReadOnly, errors.IsCorrupted(err):
                 goto hasperr
             default:
             }
@@ -155,9 +104,9 @@ hasperr:
         case db.compPerErrC <- err:
         case db.writeLockC <- struct{}{}:
             // Hold write lock, so that write won't pass-through.
-            wlocked = true
+            db.compWriteLocking = true
         case _, _ = <-db.closeC:
-            if wlocked {
+            if db.compWriteLocking {
                 // We should release the lock or Close will hang.
                 <-db.writeLockC
             }
@@ -287,21 +236,18 @@ func (db *DB) compactionExitTransact() {
 }
 
 func (db *DB) memCompaction() {
-    mem := db.getFrozenMem()
-    if mem == nil {
+    mdb := db.getFrozenMem()
+    if mdb == nil {
         return
     }
-    defer mem.decref()
+    defer mdb.decref()
 
-    c := newCMem(db.s)
-    stats := new(cStatsStaging)
-
-    db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
+    db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
 
     // Don't compact empty memdb.
-    if mem.mdb.Len() == 0 {
-        db.logf("mem@flush skipping")
-        // drop frozen mem
+    if mdb.Len() == 0 {
+        db.logf("memdb@flush skipping")
+        // drop frozen memdb
         db.dropFrozenMem()
         return
     }
@@ -317,13 +263,20 @@ func (db *DB) memCompaction() {
         return
     }
 
-    db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
+    var (
+        rec        = &sessionRecord{}
+        stats      = &cStatsStaging{}
+        flushLevel int
+    )
+
+    db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
         stats.startTimer()
-        defer stats.stopTimer()
-        return c.flush(mem.mdb, -1)
+        flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
+        stats.stopTimer()
+        return
     }, func() error {
-        for _, r := range c.rec.addedTables {
-            db.logf("mem@flush revert @%d", r.num)
+        for _, r := range rec.addedTables {
+            db.logf("memdb@flush revert @%d", r.num)
             f := db.s.getTableFile(r.num)
             if err := f.Remove(); err != nil {
                 return err
@@ -332,20 +285,23 @@ func (db *DB) memCompaction() {
         return nil
     })
 
-    db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
+    db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
         stats.startTimer()
-        defer stats.stopTimer()
-        return c.commit(db.journalFile.Num(), db.frozenSeq)
+        rec.setJournalNum(db.journalFile.Num())
+        rec.setSeqNum(db.frozenSeq)
+        err = db.s.commit(rec)
+        stats.stopTimer()
+        return
     }, nil)
 
-    db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
+    db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
 
-    for _, r := range c.rec.addedTables {
+    for _, r := range rec.addedTables {
         stats.write += r.size
     }
-    db.compStats[c.level].add(stats)
+    db.compStats[flushLevel].add(stats)
 
-    // Drop frozen mem.
+    // Drop frozen memdb.
     db.dropFrozenMem()
 
     // Resume table compaction.
@@ -557,7 +513,7 @@ func (b *tableCompactionBuilder) revert() error {
 func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
     defer c.release()
 
-    rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
+    rec := &sessionRecord{}
     rec.addCompPtr(c.level, c.imax)
 
     if !noTrivial && c.trivial() {
```
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go (generated, vendored, 32 changes)

```diff
@@ -8,6 +8,7 @@ package leveldb
 
 import (
     "errors"
+    "math/rand"
     "runtime"
     "sync"
     "sync/atomic"
@@ -39,11 +40,11 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
     ti := v.getIterators(slice, ro)
     n := len(ti) + 2
     i := make([]iterator.Iterator, 0, n)
-    emi := em.mdb.NewIterator(slice)
+    emi := em.NewIterator(slice)
     emi.SetReleaser(&memdbReleaser{m: em})
     i = append(i, emi)
     if fm != nil {
-        fmi := fm.mdb.NewIterator(slice)
+        fmi := fm.NewIterator(slice)
         fmi.SetReleaser(&memdbReleaser{m: fm})
         i = append(i, fmi)
     }
@@ -80,6 +81,10 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
     return iter
 }
 
+func (db *DB) iterSamplingRate() int {
+    return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
 type dir int
 
 const (
@@ -98,11 +103,21 @@ type dbIter struct {
     seq    uint64
     strict bool
 
-    dir      dir
-    key      []byte
-    value    []byte
-    err      error
-    releaser util.Releaser
+    smaplingGap int
+    dir         dir
+    key         []byte
+    value       []byte
+    err         error
+    releaser    util.Releaser
 }
 
+func (i *dbIter) sampleSeek() {
+    ikey := i.iter.Key()
+    i.smaplingGap -= len(ikey) + len(i.iter.Value())
+    for i.smaplingGap < 0 {
+        i.smaplingGap += i.db.iterSamplingRate()
+        i.db.sampleSeek(ikey)
+    }
+}
+
@@ -175,6 +190,7 @@ func (i *dbIter) Seek(key []byte) bool {
 func (i *dbIter) next() bool {
     for {
         if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+            i.sampleSeek()
             if seq <= i.seq {
                 switch kt {
                 case ktDel:
@@ -225,6 +241,7 @@ func (i *dbIter) prev() bool {
     if i.iter.Valid() {
         for {
             if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+                i.sampleSeek()
                 if seq <= i.seq {
                     if !del && i.icmp.uCompare(ukey, i.key) < 0 {
                         return true
@@ -266,6 +283,7 @@ func (i *dbIter) Prev() bool {
     case dirForward:
         for i.iter.Prev() {
             if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+                i.sampleSeek()
                 if i.icmp.uCompare(ukey, i.key) < 0 {
                     goto cont
                 }
```
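The mechanics of the read sampling added above (note the `smaplingGap` spelling is in the source itself): every visited entry is charged against a byte budget, and whenever the budget runs out a seek sample is recorded and the budget is re-armed with a random gap averaging `IteratorSamplingRate` bytes. A standalone sketch of just that accounting, with made-up entry sizes:

```go
package main

import (
    "fmt"
    "math/rand"
)

const samplingRate = 1 << 20 // 1 MiB, mirroring DefaultIteratorSamplingRate

func main() {
    gap := 0
    samples := 0
    for i := 0; i < 1000000; i++ {
        entryBytes := 64 // pretend every key+value pair is 64 bytes
        gap -= entryBytes
        for gap < 0 {
            gap += rand.Intn(2 * samplingRate) // mean == samplingRate
            samples++                          // here leveldb calls db.sampleSeek(key)
        }
    }
    // 64 MB of reads at ~1 MiB per sample comes out to roughly 61 samples.
    fmt.Println("samples taken:", samples)
}
```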
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go (generated, vendored, 23 changes)

```diff
@@ -15,8 +15,8 @@ import (
 )
 
 type memDB struct {
-    db  *DB
-    mdb *memdb.DB
+    db *DB
+    *memdb.DB
     ref int32
 }
 
@@ -27,12 +27,12 @@ func (m *memDB) incref() {
 func (m *memDB) decref() {
     if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
         // Only put back memdb with std capacity.
-        if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
-            m.mdb.Reset()
-            m.db.mpoolPut(m.mdb)
+        if m.Capacity() == m.db.s.o.GetWriteBuffer() {
+            m.Reset()
+            m.db.mpoolPut(m.DB)
         }
         m.db = nil
-        m.mdb = nil
+        m.DB = nil
     } else if ref < 0 {
         panic("negative memdb ref")
     }
@@ -48,6 +48,15 @@ func (db *DB) addSeq(delta uint64) {
     atomic.AddUint64(&db.seq, delta)
 }
 
+func (db *DB) sampleSeek(ikey iKey) {
+    v := db.s.version()
+    if v.sampleSeek(ikey) {
+        // Trigger table compaction.
+        db.compSendTrigger(db.tcompCmdC)
+    }
+    v.release()
+}
+
 func (db *DB) mpoolPut(mem *memdb.DB) {
     defer func() {
         recover()
@@ -117,7 +126,7 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
     }
     mem = &memDB{
         db:  db,
-        mdb: mdb,
+        DB:  mdb,
         ref: 2,
     }
     db.mem = mem
```
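The `memDB` change above swaps a named field (`mdb *memdb.DB`) for an embedded `*memdb.DB`, which is why callers throughout the diff now write `m.Find(...)` instead of `m.mdb.Find(...)`: embedding promotes the inner type's methods onto the wrapper. A minimal illustration of that Go pattern, with made-up types:

```go
package main

import "fmt"

type store struct{ data map[string]string }

func (s *store) Find(k string) (string, bool) { v, ok := s.data[k]; return v, ok }

type refCounted struct {
    *store // embedded: Find is promoted onto refCounted
    ref    int32
}

func main() {
    m := refCounted{store: &store{data: map[string]string{"k": "v"}}, ref: 1}
    v, ok := m.Find("k") // no m.store.Find needed
    fmt.Println(v, ok)
}
```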
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go (generated, vendored, 148 changes)

```diff
@@ -405,19 +405,21 @@ func (h *dbHarness) compactRange(min, max string) {
     t.Log("DB range compaction done")
 }
 
-func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
-    t := h.t
-    db := h.db
-
-    s, err := db.SizeOf([]util.Range{
+func (h *dbHarness) sizeOf(start, limit string) uint64 {
+    sz, err := h.db.SizeOf([]util.Range{
         {[]byte(start), []byte(limit)},
     })
     if err != nil {
-        t.Error("SizeOf: got error: ", err)
+        h.t.Error("SizeOf: got error: ", err)
     }
-    if s.Sum() < low || s.Sum() > hi {
-        t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
-            shorten(start), shorten(limit), low, hi, s.Sum())
+    return sz.Sum()
+}
+
+func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
+    sz := h.sizeOf(start, limit)
+    if sz < low || sz > hi {
+        h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
+            shorten(start), shorten(limit), low, hi, sz)
     }
 }
 
@@ -2443,7 +2445,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
     if err != nil {
         t.Fatal(err)
     }
-    rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
+    rec := &sessionRecord{}
     rec.addTableFile(i, tf)
     if err := s.commit(rec); err != nil {
         t.Fatal(err)
@@ -2453,7 +2455,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
     // Build grandparent.
     v := s.version()
     c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
-    rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
+    rec := &sessionRecord{}
     b := &tableCompactionBuilder{
         s: s,
         c: c,
@@ -2477,7 +2479,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
     // Build level-1.
     v = s.version()
     c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
-    rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
+    rec = &sessionRecord{}
     b = &tableCompactionBuilder{
         s: s,
         c: c,
@@ -2521,7 +2523,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
     // Compaction with transient error.
     v = s.version()
     c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
-    rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
+    rec = &sessionRecord{}
     b = &tableCompactionBuilder{
         s: s,
         c: c,
@@ -2577,3 +2579,123 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
     }
     v.release()
 }
+
+func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
+    const (
+        vSize = 200 * opt.KiB
+        tSize = 100 * opt.MiB
+        mIter = 100
+        n     = tSize / vSize
+    )
+
+    h := newDbHarnessWopt(t, &opt.Options{
+        Compression:       opt.NoCompression,
+        DisableBlockCache: true,
+    })
+    defer h.close()
+
+    key := func(x int) string {
+        return fmt.Sprintf("v%06d", x)
+    }
+
+    // Fill.
+    value := strings.Repeat("x", vSize)
+    for i := 0; i < n; i++ {
+        h.put(key(i), value)
+    }
+    h.compactMem()
+
+    // Delete all.
+    for i := 0; i < n; i++ {
+        h.delete(key(i))
+    }
+    h.compactMem()
+
+    var (
+        limit = n / limitDiv
+
+        startKey = key(0)
+        limitKey = key(limit)
+        maxKey   = key(n)
+        slice    = &util.Range{Limit: []byte(limitKey)}
+
+        initialSize0 = h.sizeOf(startKey, limitKey)
+        initialSize1 = h.sizeOf(limitKey, maxKey)
+    )
+
+    t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
+
+    for r := 0; true; r++ {
+        if r >= mIter {
+            t.Fatal("taking too long to compact")
+        }
+
+        // Iterates.
+        iter := h.db.NewIterator(slice, h.ro)
+        for iter.Next() {
+        }
+        if err := iter.Error(); err != nil {
+            t.Fatalf("Iter err: %v", err)
+        }
+        iter.Release()
+
+        // Wait compaction.
+        h.waitCompaction()
+
+        // Check size.
+        size0 := h.sizeOf(startKey, limitKey)
+        size1 := h.sizeOf(limitKey, maxKey)
+        t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
+        if size0 < initialSize0/10 {
+            break
+        }
+    }
+
+    if initialSize1 > 0 {
+        h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
+    }
+}
+
+func TestDB_IterTriggeredCompaction(t *testing.T) {
+    testDB_IterTriggeredCompaction(t, 1)
+}
+
+func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
+    testDB_IterTriggeredCompaction(t, 2)
+}
+
+func TestDB_ReadOnly(t *testing.T) {
+    h := newDbHarness(t)
+    defer h.close()
+
+    h.put("foo", "v1")
+    h.put("bar", "v2")
+    h.compactMem()
+
+    h.put("xfoo", "v1")
+    h.put("xbar", "v2")
+
+    t.Log("Trigger read-only")
+    if err := h.db.SetReadOnly(); err != nil {
+        h.close()
+        t.Fatalf("SetReadOnly error: %v", err)
+    }
+
+    h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync)
+
+    ro := func(key, value, wantValue string) {
+        if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
+            t.Fatalf("unexpected error: %v", err)
+        }
+        h.getVal(key, wantValue)
+    }
+
+    ro("foo", "vx", "v1")
+
+    h.o.ReadOnly = true
+    h.reopenDB()
+
+    ro("foo", "vx", "v1")
+    ro("bar", "vx", "v2")
+    h.assertNumKeys(4)
+}
```
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go (generated, vendored, 75 changes)

```diff
@@ -63,24 +63,24 @@ func (db *DB) rotateMem(n int) (mem *memDB, err error) {
     return
 }
 
-func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
+func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
     delayed := false
     flush := func() (retry bool) {
         v := db.s.version()
         defer v.release()
-        mem = db.getEffectiveMem()
+        mdb = db.getEffectiveMem()
         defer func() {
             if retry {
-                mem.decref()
-                mem = nil
+                mdb.decref()
+                mdb = nil
             }
         }()
-        nn = mem.mdb.Free()
+        mdbFree = mdb.Free()
         switch {
         case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
             delayed = true
             time.Sleep(time.Millisecond)
-        case nn >= n:
+        case mdbFree >= n:
             return false
         case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
             delayed = true
@@ -90,15 +90,15 @@ func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
         }
         default:
             // Allow memdb to grow if it has no entry.
-            if mem.mdb.Len() == 0 {
-                nn = n
+            if mdb.Len() == 0 {
+                mdbFree = n
             } else {
-                mem.decref()
-                mem, err = db.rotateMem(n)
+                mdb.decref()
+                mdb, err = db.rotateMem(n)
                 if err == nil {
-                    nn = mem.mdb.Free()
+                    mdbFree = mdb.Free()
                 } else {
-                    nn = 0
+                    mdbFree = 0
                 }
             }
             return false
@@ -157,18 +157,18 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
         }
     }()
 
-    mem, memFree, err := db.flush(b.size())
+    mdb, mdbFree, err := db.flush(b.size())
     if err != nil {
         return
     }
-    defer mem.decref()
+    defer mdb.decref()
 
     // Calculate maximum size of the batch.
     m := 1 << 20
     if x := b.size(); x <= 128<<10 {
         m = x + (128 << 10)
     }
-    m = minInt(m, memFree)
+    m = minInt(m, mdbFree)
 
     // Merge with other batch.
 drain:
@@ -197,7 +197,7 @@ drain:
     select {
     case db.journalC <- b:
         // Write into memdb
-        if berr := b.memReplay(mem.mdb); berr != nil {
+        if berr := b.memReplay(mdb.DB); berr != nil {
             panic(berr)
         }
     case err = <-db.compPerErrC:
@@ -211,7 +211,7 @@ drain:
     case err = <-db.journalAckC:
         if err != nil {
             // Revert memdb if error detected
-            if berr := b.revertMemReplay(mem.mdb); berr != nil {
+            if berr := b.revertMemReplay(mdb.DB); berr != nil {
                 panic(berr)
             }
             return
@@ -225,7 +225,7 @@ drain:
         if err != nil {
             return
         }
-        if berr := b.memReplay(mem.mdb); berr != nil {
+        if berr := b.memReplay(mdb.DB); berr != nil {
             panic(berr)
         }
     }
@@ -233,7 +233,7 @@ drain:
     // Set last seq number.
     db.addSeq(uint64(b.Len()))
 
-    if b.size() >= memFree {
+    if b.size() >= mdbFree {
         db.rotateMem(0)
     }
     return
@@ -249,8 +249,7 @@ func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
     return db.Write(b, wo)
 }
 
-// Delete deletes the value for the given key. It returns ErrNotFound if
-// the DB does not contain the key.
+// Delete deletes the value for the given key.
 //
 // It is safe to modify the contents of the arguments after Delete returns.
 func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
@@ -290,9 +289,9 @@ func (db *DB) CompactRange(r util.Range) error {
     }
 
     // Check for overlaps in memdb.
-    mem := db.getEffectiveMem()
-    defer mem.decref()
-    if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
+    mdb := db.getEffectiveMem()
+    defer mdb.decref()
+    if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
         // Memdb compaction.
         if _, err := db.rotateMem(0); err != nil {
             <-db.writeLockC
@@ -309,3 +308,31 @@ func (db *DB) CompactRange(r util.Range) error {
     // Table compaction.
     return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
 }
+
+// SetReadOnly makes DB read-only. It will stay read-only until reopened.
+func (db *DB) SetReadOnly() error {
+    if err := db.ok(); err != nil {
+        return err
+    }
+
+    // Lock writer.
+    select {
+    case db.writeLockC <- struct{}{}:
+        db.compWriteLocking = true
+    case err := <-db.compPerErrC:
+        return err
+    case _, _ = <-db.closeC:
+        return ErrClosed
+    }
+
+    // Set compaction read-only.
+    select {
+    case db.compErrSetC <- ErrReadOnly:
+    case perr := <-db.compPerErrC:
+        return perr
+    case _, _ = <-db.closeC:
+        return ErrClosed
+    }
+
+    return nil
+}
```
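`SetReadOnly` flips a live handle: it takes the write lock permanently (via `compWriteLocking`) and injects `ErrReadOnly` into the compaction error channel, which is why `compactionError` in db_compaction.go now routes that error to the persistent-error state. A runtime sketch mirroring what `TestDB_ReadOnly` exercises; the path is illustrative:

```go
package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
)

func main() {
    db, err := leveldb.OpenFile("/tmp/example-db", nil)
    if err != nil {
        panic(err)
    }
    defer db.Close()

    _ = db.Put([]byte("foo"), []byte("v1"), nil) // still writable

    if err := db.SetReadOnly(); err != nil {
        panic(err)
    }

    // From here on, writes fail but reads keep working.
    err = db.Put([]byte("foo"), []byte("v2"), nil)
    fmt.Println(err == leveldb.ErrReadOnly) // true

    v, _ := db.Get([]byte("foo"), nil)
    fmt.Printf("%s\n", v) // v1
}
```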
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go (generated, vendored, 1 change)

```diff
@@ -12,6 +12,7 @@ import (
 
 var (
     ErrNotFound         = errors.ErrNotFound
+    ErrReadOnly         = errors.New("leveldb: read-only mode")
     ErrSnapshotReleased = errors.New("leveldb: snapshot released")
    ErrIterReleased     = errors.New("leveldb: iterator released")
     ErrClosed           = errors.New("leveldb: closed")
```
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go (generated, vendored, 9 changes)

```diff
@@ -206,6 +206,7 @@ func (p *DB) randHeight() (h int) {
     return
 }
 
+// Must hold RW-lock if prev == true, as it use shared prevNode slice.
 func (p *DB) findGE(key []byte, prev bool) (int, bool) {
     node := 0
     h := p.maxHeight - 1
@@ -302,7 +303,7 @@ func (p *DB) Put(key []byte, value []byte) error {
     node := len(p.nodeData)
     p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
     for i, n := range p.prevNode[:h] {
-        m := n + 4 + i
+        m := n + nNext + i
         p.nodeData = append(p.nodeData, p.nodeData[m])
         p.nodeData[m] = node
     }
@@ -434,20 +435,22 @@ func (p *DB) Len() int {
 
 // Reset resets the DB to initial empty state. Allows reuse the buffer.
 func (p *DB) Reset() {
     p.mu.Lock()
     p.rnd = rand.New(rand.NewSource(0xdeadbeef))
     p.maxHeight = 1
     p.n = 0
     p.kvSize = 0
     p.kvData = p.kvData[:0]
-    p.nodeData = p.nodeData[:4+tMaxHeight]
+    p.nodeData = p.nodeData[:nNext+tMaxHeight]
+    p.nodeData[nKV] = 0
+    p.nodeData[nKey] = 0
+    p.nodeData[nVal] = 0
     p.nodeData[nHeight] = tMaxHeight
     for n := 0; n < tMaxHeight; n++ {
-        p.nodeData[4+n] = 0
+        p.nodeData[nNext+n] = 0
         p.prevNode[n] = 0
     }
     p.mu.Unlock()
 }
 
 // New creates a new initalized in-memory key/value DB. The capacity
```
||||
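The two memdb hunks replace the magic number 4 with the named nNext offset. A short sketch of the skiplist node layout that makes the constant meaningful (constant names are taken from the diff; the layout description is my reading of the code, not authoritative):

    // Each skiplist node is a run of ints in nodeData:
    const (
        nKV     = iota // 0: offset of this node's key/value bytes in kvData
        nKey           // 1: key length
        nVal           // 2: value length
        nHeight        // 3: node height h
        nNext          // 4: first of h next-node pointers
    )

    // So node n's i-th next pointer lives at nodeData[n+nNext+i], which is
    // exactly the index the `m := n + nNext + i` line in Put computes.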
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go (50 changes, generated, vendored)
@@ -34,10 +34,11 @@ var (
	DefaultCompactionTotalSize           = 10 * MiB
	DefaultCompactionTotalSizeMultiplier = 10.0
	DefaultCompressionType               = SnappyCompression
-	DefaultOpenFilesCacher               = LRUCacher
-	DefaultOpenFilesCacheCapacity        = 500
+	DefaultIteratorSamplingRate          = 1 * MiB
	DefaultMaxMemCompationLevel          = 2
	DefaultNumLevel                      = 7
+	DefaultOpenFilesCacher               = LRUCacher
+	DefaultOpenFilesCacheCapacity        = 500
	DefaultWriteBuffer                   = 4 * MiB
	DefaultWriteL0PauseTrigger           = 12
	DefaultWriteL0SlowdownTrigger        = 8
@@ -249,6 +250,11 @@ type Options struct {
	// The default value (DefaultCompression) uses snappy compression.
	Compression Compression

+	// DisableBufferPool allows disable use of util.BufferPool functionality.
+	//
+	// The default value is false.
+	DisableBufferPool bool
+
	// DisableBlockCache allows disable use of cache.Cache functionality on
	// 'sorted table' block.
	//
@@ -288,6 +294,13 @@ type Options struct {
	// The default value is nil.
	Filter filter.Filter

+	// IteratorSamplingRate defines approximate gap (in bytes) between read
+	// sampling of an iterator. The samples will be used to determine when
+	// compaction should be triggered.
+	//
+	// The default is 1MiB.
+	IteratorSamplingRate int
+
	// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
	// will be pushed into if doesn't creates overlap. This should less than
	// NumLevel. Use -1 for level-0.
@@ -313,6 +326,11 @@ type Options struct {
	// The default value is 500.
	OpenFilesCacheCapacity int

+	// If true then opens DB in read-only mode.
+	//
+	// The default value is false.
+	ReadOnly bool
+
	// Strict defines the DB strict level.
	Strict Strict

@@ -464,6 +482,20 @@ func (o *Options) GetCompression() Compression {
	return o.Compression
}

+func (o *Options) GetDisableBufferPool() bool {
+	if o == nil {
+		return false
+	}
+	return o.DisableBufferPool
+}
+
+func (o *Options) GetDisableBlockCache() bool {
+	if o == nil {
+		return false
+	}
+	return o.DisableBlockCache
+}
+
func (o *Options) GetDisableCompactionBackoff() bool {
	if o == nil {
		return false
@@ -492,6 +524,13 @@ func (o *Options) GetFilter() filter.Filter {
	return o.Filter
}

+func (o *Options) GetIteratorSamplingRate() int {
+	if o == nil || o.IteratorSamplingRate <= 0 {
+		return DefaultIteratorSamplingRate
+	}
+	return o.IteratorSamplingRate
+}
+
func (o *Options) GetMaxMemCompationLevel() int {
	level := DefaultMaxMemCompationLevel
	if o != nil {
@@ -533,6 +572,13 @@ func (o *Options) GetOpenFilesCacheCapacity() int {
	return o.OpenFilesCacheCapacity
}

+func (o *Options) GetReadOnly() bool {
+	if o == nil {
+		return false
+	}
+	return o.ReadOnly
+}
+
func (o *Options) GetStrict(strict Strict) bool {
	if o == nil || o.Strict == 0 {
		return DefaultStrict&strict != 0
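Read together, the new options compose at open time. A hedged sketch of a caller using them (field names from the hunks above; the comments reflect my reading of the defaults):

    package main

    import (
        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/opt"
    )

    func main() {
        o := &opt.Options{
            ReadOnly:             true,    // open the DB read-only
            DisableBufferPool:    false,   // false is the default; pool stays on
            IteratorSamplingRate: opt.MiB, // sample iterator reads every ~1 MiB
        }
        db, err := leveldb.OpenFile("/tmp/demo.db", o) // illustrative path
        if err != nil {
            panic(err)
        }
        defer db.Close()
    }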
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go (264 changes, generated, vendored)
@@ -11,10 +11,8 @@ import (
	"io"
	"os"
	"sync"
-	"sync/atomic"

	"github.com/syndtr/goleveldb/leveldb/errors"
-	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/journal"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
@@ -127,11 +125,16 @@ func (s *session) recover() (err error) {
		return
	}
	defer reader.Close()
-	strict := s.o.GetStrict(opt.StrictManifest)
-	jr := journal.NewReader(reader, dropper{s, m}, strict, true)
-
-	staging := s.stVersion.newStaging()
-	rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
+	var (
+		// Options.
+		numLevel = s.o.GetNumLevel()
+		strict   = s.o.GetStrict(opt.StrictManifest)
+
+		jr      = journal.NewReader(reader, dropper{s, m}, strict, true)
+		rec     = &sessionRecord{}
+		staging = s.stVersion.newStaging()
+	)
	for {
		var r io.Reader
		r, err = jr.Next()
@@ -143,7 +146,7 @@ func (s *session) recover() (err error) {
			return errors.SetFile(err, m)
		}

-		err = rec.decode(r)
+		err = rec.decode(r, numLevel)
		if err == nil {
			// save compact pointers
			for _, r := range rec.compPtrs {
@@ -206,250 +209,3 @@ func (s *session) commit(r *sessionRecord) (err error) {

	return
}
-
-// Pick a compaction based on current state; need external synchronization.
-func (s *session) pickCompaction() *compaction {
-	v := s.version()
-
-	var level int
-	var t0 tFiles
-	if v.cScore >= 1 {
-		level = v.cLevel
-		cptr := s.stCompPtrs[level]
-		tables := v.tables[level]
-		for _, t := range tables {
-			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
-				t0 = append(t0, t)
-				break
-			}
-		}
-		if len(t0) == 0 {
-			t0 = append(t0, tables[0])
-		}
-	} else {
-		if p := atomic.LoadPointer(&v.cSeek); p != nil {
-			ts := (*tSet)(p)
-			level = ts.level
-			t0 = append(t0, ts.table)
-		} else {
-			v.release()
-			return nil
-		}
-	}
-
-	return newCompaction(s, v, level, t0)
-}
-
-// Create compaction from given level and range; need external synchronization.
-func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
-	v := s.version()
-
-	t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
-	if len(t0) == 0 {
-		v.release()
-		return nil
-	}
-
-	// Avoid compacting too much in one shot in case the range is large.
-	// But we cannot do this for level-0 since level-0 files can overlap
-	// and we must not pick one file and drop another older file if the
-	// two files overlap.
-	if level > 0 {
-		limit := uint64(v.s.o.GetCompactionSourceLimit(level))
-		total := uint64(0)
-		for i, t := range t0 {
-			total += t.size
-			if total >= limit {
-				s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
-				t0 = t0[:i+1]
-				break
-			}
-		}
-	}
-
-	return newCompaction(s, v, level, t0)
-}
-
-func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
-	c := &compaction{
-		s:             s,
-		v:             v,
-		level:         level,
-		tables:        [2]tFiles{t0, nil},
-		maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
-		tPtrs:         make([]int, s.o.GetNumLevel()),
-	}
-	c.expand()
-	c.save()
-	return c
-}
-
-// compaction represent a compaction state.
-type compaction struct {
-	s *session
-	v *version
-
-	level         int
-	tables        [2]tFiles
-	maxGPOverlaps uint64
-
-	gp                tFiles
-	gpi               int
-	seenKey           bool
-	gpOverlappedBytes uint64
-	imin, imax        iKey
-	tPtrs             []int
-	released          bool
-
-	snapGPI               int
-	snapSeenKey           bool
-	snapGPOverlappedBytes uint64
-	snapTPtrs             []int
-}
-
-func (c *compaction) save() {
-	c.snapGPI = c.gpi
-	c.snapSeenKey = c.seenKey
-	c.snapGPOverlappedBytes = c.gpOverlappedBytes
-	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
-}
-
-func (c *compaction) restore() {
-	c.gpi = c.snapGPI
-	c.seenKey = c.snapSeenKey
-	c.gpOverlappedBytes = c.snapGPOverlappedBytes
-	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
-}
-
-func (c *compaction) release() {
-	if !c.released {
-		c.released = true
-		c.v.release()
-	}
-}
-
-// Expand compacted tables; need external synchronization.
-func (c *compaction) expand() {
-	limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
-	vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
-
-	t0, t1 := c.tables[0], c.tables[1]
-	imin, imax := t0.getRange(c.s.icmp)
-	// We expand t0 here just incase ukey hop across tables.
-	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
-	if len(t0) != len(c.tables[0]) {
-		imin, imax = t0.getRange(c.s.icmp)
-	}
-	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
-	// Get entire range covered by compaction.
-	amin, amax := append(t0, t1...).getRange(c.s.icmp)
-
-	// See if we can grow the number of inputs in "level" without
-	// changing the number of "level+1" files we pick up.
-	if len(t1) > 0 {
-		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
-		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
-			xmin, xmax := exp0.getRange(c.s.icmp)
-			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
-			if len(exp1) == len(t1) {
-				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
-					c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
-					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
-				imin, imax = xmin, xmax
-				t0, t1 = exp0, exp1
-				amin, amax = append(t0, t1...).getRange(c.s.icmp)
-			}
-		}
-	}
-
-	// Compute the set of grandparent files that overlap this compaction
-	// (parent == level+1; grandparent == level+2)
-	if c.level+2 < c.s.o.GetNumLevel() {
-		c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
-	}
-
-	c.tables[0], c.tables[1] = t0, t1
-	c.imin, c.imax = imin, imax
-}
-
-// Check whether compaction is trivial.
-func (c *compaction) trivial() bool {
-	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
-}
-
-func (c *compaction) baseLevelForKey(ukey []byte) bool {
-	for level, tables := range c.v.tables[c.level+2:] {
-		for c.tPtrs[level] < len(tables) {
-			t := tables[c.tPtrs[level]]
-			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
-				// We've advanced far enough.
-				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
-					// Key falls in this file's range, so definitely not base level.
-					return false
-				}
-				break
-			}
-			c.tPtrs[level]++
-		}
-	}
-	return true
-}
-
-func (c *compaction) shouldStopBefore(ikey iKey) bool {
-	for ; c.gpi < len(c.gp); c.gpi++ {
-		gp := c.gp[c.gpi]
-		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
-			break
-		}
-		if c.seenKey {
-			c.gpOverlappedBytes += gp.size
-		}
-	}
-	c.seenKey = true
-
-	if c.gpOverlappedBytes > c.maxGPOverlaps {
-		// Too much overlap for current output; start new output.
-		c.gpOverlappedBytes = 0
-		return true
-	}
-	return false
-}
-
-// Creates an iterator.
-func (c *compaction) newIterator() iterator.Iterator {
-	// Creates iterator slice.
-	icap := len(c.tables)
-	if c.level == 0 {
-		// Special case for level-0
-		icap = len(c.tables[0]) + 1
-	}
-	its := make([]iterator.Iterator, 0, icap)
-
-	// Options.
-	ro := &opt.ReadOptions{
-		DontFillCache: true,
-		Strict:        opt.StrictOverride,
-	}
-	strict := c.s.o.GetStrict(opt.StrictCompaction)
-	if strict {
-		ro.Strict |= opt.StrictReader
-	}
-
-	for i, tables := range c.tables {
-		if len(tables) == 0 {
-			continue
-		}
-
-		// Level-0 is not sorted and may overlaps each other.
-		if c.level+i == 0 {
-			for _, t := range tables {
-				its = append(its, c.s.tops.newIterator(t, nil, ro))
-			}
-		} else {
-			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
-			its = append(its, it)
-		}
-	}
-
-	return iterator.NewMergedIterator(its, c.s.icmp, strict)
-}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go (287 changes, generated, vendored, new file)
@@ -0,0 +1,287 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"sync/atomic"
+
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/memdb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func (s *session) pickMemdbLevel(umin, umax []byte) int {
+	v := s.version()
+	defer v.release()
+	return v.pickMemdbLevel(umin, umax)
+}
+
+func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
+	// Create sorted table.
+	iter := mdb.NewIterator(nil)
+	defer iter.Release()
+	t, n, err := s.tops.createFrom(iter)
+	if err != nil {
+		return level, err
+	}
+
+	// Pick level and add to record.
+	if level < 0 {
+		level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
+	}
+	rec.addTableFile(level, t)
+
+	s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
+	return level, nil
+}
+
+// Pick a compaction based on current state; need external synchronization.
+func (s *session) pickCompaction() *compaction {
+	v := s.version()
+
+	var level int
+	var t0 tFiles
+	if v.cScore >= 1 {
+		level = v.cLevel
+		cptr := s.stCompPtrs[level]
+		tables := v.tables[level]
+		for _, t := range tables {
+			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
+				t0 = append(t0, t)
+				break
+			}
+		}
+		if len(t0) == 0 {
+			t0 = append(t0, tables[0])
+		}
+	} else {
+		if p := atomic.LoadPointer(&v.cSeek); p != nil {
+			ts := (*tSet)(p)
+			level = ts.level
+			t0 = append(t0, ts.table)
+		} else {
+			v.release()
+			return nil
+		}
+	}
+
+	return newCompaction(s, v, level, t0)
+}
+
+// Create compaction from given level and range; need external synchronization.
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
+	v := s.version()
+
+	t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
+	if len(t0) == 0 {
+		v.release()
+		return nil
+	}
+
+	// Avoid compacting too much in one shot in case the range is large.
+	// But we cannot do this for level-0 since level-0 files can overlap
+	// and we must not pick one file and drop another older file if the
+	// two files overlap.
+	if level > 0 {
+		limit := uint64(v.s.o.GetCompactionSourceLimit(level))
+		total := uint64(0)
+		for i, t := range t0 {
+			total += t.size
+			if total >= limit {
+				s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
+				t0 = t0[:i+1]
+				break
+			}
+		}
+	}
+
+	return newCompaction(s, v, level, t0)
+}
+
+func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
+	c := &compaction{
+		s:             s,
+		v:             v,
+		level:         level,
+		tables:        [2]tFiles{t0, nil},
+		maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
+		tPtrs:         make([]int, s.o.GetNumLevel()),
+	}
+	c.expand()
+	c.save()
+	return c
+}
+
+// compaction represent a compaction state.
+type compaction struct {
+	s *session
+	v *version
+
+	level         int
+	tables        [2]tFiles
+	maxGPOverlaps uint64
+
+	gp                tFiles
+	gpi               int
+	seenKey           bool
+	gpOverlappedBytes uint64
+	imin, imax        iKey
+	tPtrs             []int
+	released          bool
+
+	snapGPI               int
+	snapSeenKey           bool
+	snapGPOverlappedBytes uint64
+	snapTPtrs             []int
+}
+
+func (c *compaction) save() {
+	c.snapGPI = c.gpi
+	c.snapSeenKey = c.seenKey
+	c.snapGPOverlappedBytes = c.gpOverlappedBytes
+	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
+
+func (c *compaction) restore() {
+	c.gpi = c.snapGPI
+	c.seenKey = c.snapSeenKey
+	c.gpOverlappedBytes = c.snapGPOverlappedBytes
+	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
+
+func (c *compaction) release() {
+	if !c.released {
+		c.released = true
+		c.v.release()
+	}
+}
+
+// Expand compacted tables; need external synchronization.
+func (c *compaction) expand() {
+	limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
+	vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
+
+	t0, t1 := c.tables[0], c.tables[1]
+	imin, imax := t0.getRange(c.s.icmp)
+	// We expand t0 here just incase ukey hop across tables.
+	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
+	if len(t0) != len(c.tables[0]) {
+		imin, imax = t0.getRange(c.s.icmp)
+	}
+	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+	// Get entire range covered by compaction.
+	amin, amax := append(t0, t1...).getRange(c.s.icmp)
+
+	// See if we can grow the number of inputs in "level" without
+	// changing the number of "level+1" files we pick up.
+	if len(t1) > 0 {
+		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
+		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+			xmin, xmax := exp0.getRange(c.s.icmp)
+			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
+			if len(exp1) == len(t1) {
+				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+					c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
+				imin, imax = xmin, xmax
+				t0, t1 = exp0, exp1
+				amin, amax = append(t0, t1...).getRange(c.s.icmp)
+			}
+		}
+	}
+
+	// Compute the set of grandparent files that overlap this compaction
+	// (parent == level+1; grandparent == level+2)
+	if c.level+2 < c.s.o.GetNumLevel() {
+		c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
+	}
+
+	c.tables[0], c.tables[1] = t0, t1
+	c.imin, c.imax = imin, imax
+}
+
+// Check whether compaction is trivial.
+func (c *compaction) trivial() bool {
+	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
+}
+
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+	for level, tables := range c.v.tables[c.level+2:] {
+		for c.tPtrs[level] < len(tables) {
+			t := tables[c.tPtrs[level]]
+			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+				// We've advanced far enough.
+				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+					// Key falls in this file's range, so definitely not base level.
+					return false
+				}
+				break
+			}
+			c.tPtrs[level]++
+		}
+	}
+	return true
+}
+
+func (c *compaction) shouldStopBefore(ikey iKey) bool {
+	for ; c.gpi < len(c.gp); c.gpi++ {
+		gp := c.gp[c.gpi]
+		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
+			break
+		}
+		if c.seenKey {
+			c.gpOverlappedBytes += gp.size
+		}
+	}
+	c.seenKey = true
+
+	if c.gpOverlappedBytes > c.maxGPOverlaps {
+		// Too much overlap for current output; start new output.
+		c.gpOverlappedBytes = 0
+		return true
+	}
+	return false
+}
+
+// Creates an iterator.
+func (c *compaction) newIterator() iterator.Iterator {
+	// Creates iterator slice.
+	icap := len(c.tables)
+	if c.level == 0 {
+		// Special case for level-0.
+		icap = len(c.tables[0]) + 1
+	}
+	its := make([]iterator.Iterator, 0, icap)
+
+	// Options.
+	ro := &opt.ReadOptions{
+		DontFillCache: true,
+		Strict:        opt.StrictOverride,
+	}
+	strict := c.s.o.GetStrict(opt.StrictCompaction)
+	if strict {
+		ro.Strict |= opt.StrictReader
+	}
+
+	for i, tables := range c.tables {
+		if len(tables) == 0 {
+			continue
+		}
+
+		// Level-0 is not sorted and may overlaps each other.
+		if c.level+i == 0 {
+			for _, t := range tables {
+				its = append(its, c.s.tops.newIterator(t, nil, ro))
+			}
+		} else {
+			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+			its = append(its, it)
+		}
+	}
+
+	return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go (14 changes, generated, vendored)
@@ -52,8 +52,6 @@ type dtRecord struct {
}

type sessionRecord struct {
-	numLevel int
-
	hasRec     int
	comparer   string
	journalNum uint64
@@ -230,7 +228,7 @@ func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
	return x
}

-func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
+func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
	if p.err != nil {
		return 0
	}
@@ -238,14 +236,14 @@ func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
	if p.err != nil {
		return 0
	}
-	if x >= uint64(p.numLevel) {
+	if x >= uint64(numLevel) {
		p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
		return 0
	}
	return int(x)
}

-func (p *sessionRecord) decode(r io.Reader) error {
+func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
	br, ok := r.(byteReader)
	if !ok {
		br = bufio.NewReader(r)
@@ -286,13 +284,13 @@ func (p *sessionRecord) decode(r io.Reader) error {
			p.setSeqNum(x)
		}
	case recCompPtr:
-		level := p.readLevel("comp-ptr.level", br)
+		level := p.readLevel("comp-ptr.level", br, numLevel)
		ikey := p.readBytes("comp-ptr.ikey", br)
		if p.err == nil {
			p.addCompPtr(level, iKey(ikey))
		}
	case recAddTable:
-		level := p.readLevel("add-table.level", br)
+		level := p.readLevel("add-table.level", br, numLevel)
		num := p.readUvarint("add-table.num", br)
		size := p.readUvarint("add-table.size", br)
		imin := p.readBytes("add-table.imin", br)
@@ -301,7 +299,7 @@ func (p *sessionRecord) decode(r io.Reader) error {
			p.addTable(level, num, size, imin, imax)
		}
	case recDelTable:
-		level := p.readLevel("del-table.level", br)
+		level := p.readLevel("del-table.level", br, numLevel)
		num := p.readUvarint("del-table.num", br)
		if p.err == nil {
			p.delTable(level, num)
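With numLevel gone from the struct, the level bound is supplied at each decode. A sketch of the new call shape (manifestReader is a stand-in for the journal record reader; everything else follows the hunks above and the test change just below):

    rec := &sessionRecord{}
    if err := rec.decode(manifestReader, opt.DefaultNumLevel); err != nil {
        // A stored level >= numLevel now surfaces here as ErrManifestCorrupted
        // ("invalid level number") instead of relying on struct state.
    }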
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go (6 changes, generated, vendored)
@@ -19,8 +19,8 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
	if err != nil {
		return
	}
-	v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
-	err = v.decode(b)
+	v2 := &sessionRecord{}
+	err = v.decode(b, opt.DefaultNumLevel)
	if err != nil {
		return
	}
@@ -34,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {

func TestSessionRecord_EncodeDecode(t *testing.T) {
	big := uint64(1) << 50
-	v := &sessionRecord{numLevel: opt.DefaultNumLevel}
+	v := &sessionRecord{}
	i := uint64(0)
	test := func() {
		res, err := decodeEncode(v)
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go (2 changes, generated, vendored)
@@ -182,7 +182,7 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
		defer v.release()
	}
	if rec == nil {
-		rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
+		rec = &sessionRecord{}
	}
	s.fillRecord(rec, true)
	v.fillRecord(rec)
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go (10 changes, generated, vendored)
@@ -42,6 +42,8 @@ type tsOp uint
const (
	tsOpOpen tsOp = iota
	tsOpCreate
+	tsOpReplace
+	tsOpRemove
	tsOpRead
	tsOpReadAt
	tsOpWrite
@@ -241,6 +243,10 @@ func (tf tsFile) Replace(newfile storage.File) (err error) {
	if err != nil {
		return
	}
+	if tf.shouldErr(tsOpReplace) {
+		err = errors.New("leveldb.testStorage: emulated create error")
+		return
+	}
	err = tf.File.Replace(newfile.(tsFile).File)
	if err != nil {
		ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
@@ -258,6 +264,10 @@ func (tf tsFile) Remove() (err error) {
	if err != nil {
		return
	}
+	if tf.shouldErr(tsOpRemove) {
+		err = errors.New("leveldb.testStorage: emulated create error")
+		return
+	}
	err = tf.File.Remove()
	if err != nil {
		ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go (8 changes, generated, vendored)
@@ -441,22 +441,26 @@ func newTableOps(s *session) *tOps {
	var (
		cacher cache.Cacher
		bcache *cache.Cache
+		bpool  *util.BufferPool
	)
	if s.o.GetOpenFilesCacheCapacity() > 0 {
		cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
	}
-	if !s.o.DisableBlockCache {
+	if !s.o.GetDisableBlockCache() {
		var bcacher cache.Cacher
		if s.o.GetBlockCacheCapacity() > 0 {
			bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
		}
		bcache = cache.NewCache(bcacher)
	}
+	if !s.o.GetDisableBufferPool() {
+		bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+	}
	return &tOps{
		s:      s,
		cache:  cache.NewCache(cacher),
		bcache: bcache,
-		bpool:  util.NewBufferPool(s.o.GetBlockSize() + 5),
+		bpool:  bpool,
	}
}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go (2 changes, generated, vendored)
@@ -14,7 +14,7 @@ import (
	"strings"
	"sync"

-	"github.com/syndtr/gosnappy/snappy"
+	"github.com/google/go-snappy/snappy"

	"github.com/syndtr/goleveldb/leveldb/cache"
	"github.com/syndtr/goleveldb/leveldb/comparer"
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go (2 changes, generated, vendored)
@@ -12,7 +12,7 @@ import (
	"fmt"
	"io"

-	"github.com/syndtr/gosnappy/snappy"
+	"github.com/google/go-snappy/snappy"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/filter"
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go (27 changes, generated, vendored)
@@ -136,9 +136,8 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
		if !tseek {
			if tset == nil {
				tset = &tSet{level, t}
-			} else if tset.table.consumeSeek() <= 0 {
+			} else {
				tseek = true
-				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
			}
		}

@@ -203,6 +202,28 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
		return true
	})

+	if tseek && tset.table.consumeSeek() <= 0 {
+		tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+	}
+
	return
}

+func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+	var tset *tSet
+
+	v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+		if tset == nil {
+			tset = &tSet{level, t}
+			return true
+		} else {
+			if tset.table.consumeSeek() <= 0 {
+				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+			}
+			return false
+		}
+	}, nil)
+
+	return
+}
+
@@ -279,7 +300,7 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
	return
}

-func (v *version) pickLevel(umin, umax []byte) (level int) {
+func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
	if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
		var overlaps tFiles
		maxLevel := v.s.o.GetMaxMemCompationLevel()
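The get path now defers the consumeSeek bookkeeping until after the table walk, and the new sampleSeek lets plain reads charge a seek against the first overlapping table. Combined with Options.IteratorSamplingRate this means iterator traffic, not just point lookups, can trigger a seek compaction. An illustrative sketch of how sampling could drive it — the real iterator wiring is outside this hunk, so names other than sampleSeek and GetIteratorSamplingRate are hypothetical:

    rest := o.GetIteratorSamplingRate() // bytes until the next sample
    for iter.Next() {
        rest -= len(iter.Key()) + len(iter.Value())
        if rest <= 0 {
            if v.sampleSeek(ikeyOf(iter.Key())) { // ikeyOf: hypothetical key conversion
                // Some table ran out of seek credits; a seek compaction is scheduled.
            }
            rest = o.GetIteratorSamplingRate()
        }
    }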
NICKS (2 changes)
@@ -25,6 +25,7 @@ dzarda <dzardacz@gmail.com>
facastagnini <federico.castagnini@gmail.com>
filoozoom <philippe@schommers.be>
frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
+fti7 <frank@isemann.name>
gillisig <gilli@vx.is>
jarlebring <jarlebring@gmail.com>
jedie <github.com@jensdiemer.de> <git@jensdiemer.de>
@@ -55,4 +56,5 @@ tnn2 <tnn@nygren.pp.se>
tojrobinson <tully@tojr.org>
uok <ueomkail@gmail.com> <uok@users.noreply.github.com>
veeti <veeti.paananen@rojekti.fi>
+wsgcsysadmin <e.meitner@willystreet.coo>
zukoo <fxgsell@gmail.com>
@@ -36,8 +36,7 @@ const (
func Assets() map[string][]byte {
	var assets = make(map[string][]byte, {{.Assets | len}})
{{range $asset := .Assets}}
-	assets["{{$asset.Name}}"], _ = base64.StdEncoding.DecodeString("{{$asset.Data}}")
-{{end}}
+	assets["{{$asset.Name}}"], _ = base64.StdEncoding.DecodeString("{{$asset.Data}}"){{end}}
	return assets
}
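The template tweak is purely about the generated file's layout: with {{end}} on its own line, every loop iteration also emitted the surrounding newlines, leaving a stray blank line per asset; folding {{end}} onto the statement keeps one line per asset. Roughly what the template now generates (asset names and data are made up):

    func Assets() map[string][]byte {
        var assets = make(map[string][]byte, 2)

        assets["index.html"], _ = base64.StdEncoding.DecodeString("PGh0bWw+")
        assets["app.js"], _ = base64.StdEncoding.DecodeString("dmFyIHg7")
        return assets
    }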
@@ -49,10 +49,10 @@ var (
	guiErrors    = []guiError{}
	guiErrorsMut = sync.NewMutex()
	startTime    = time.Now()
-	eventSub     *events.BufferedSubscription
)

type apiSvc struct {
+	id       protocol.DeviceID
	cfg      config.GUIConfiguration
	assetDir string
	model    *model.Model
@@ -60,14 +60,17 @@ type apiSvc struct {
	fss             *folderSummarySvc
	stop            chan struct{}
	systemConfigMut sync.Mutex
+	eventSub        *events.BufferedSubscription
}

-func newAPISvc(cfg config.GUIConfiguration, assetDir string, m *model.Model) (*apiSvc, error) {
+func newAPISvc(id protocol.DeviceID, cfg config.GUIConfiguration, assetDir string, m *model.Model, eventSub *events.BufferedSubscription) (*apiSvc, error) {
	svc := &apiSvc{
+		id:              id,
		cfg:             cfg,
		assetDir:        assetDir,
		model:           m,
		systemConfigMut: sync.NewMutex(),
+		eventSub:        eventSub,
	}

	var err error
@@ -125,9 +128,6 @@ func (s *apiSvc) Serve() {
	s.stop = make(chan struct{})

	l.AddHandler(logger.LevelWarn, s.showGuiError)
-	sub := events.Default.Subscribe(events.AllEvents)
-	eventSub = events.NewBufferedSubscription(sub, 1000)
-	defer events.Default.Unsubscribe(sub)

	// The GET handlers
	getRestMux := http.NewServeMux()
@@ -190,14 +190,14 @@ func (s *apiSvc) Serve() {

	// Wrap everything in CSRF protection. The /rest prefix should be
	// protected, other requests will grant cookies.
-	handler := csrfMiddleware("/rest", s.cfg.APIKey, mux)
+	handler := csrfMiddleware(s.id.String()[:5], "/rest", s.cfg.APIKey, mux)

-	// Add our version as a header to responses
-	handler = withVersionMiddleware(handler)
+	// Add our version and ID as a header to responses
+	handler = withDetailsMiddleware(s.id, handler)

	// Wrap everything in basic auth, if user/password is set.
	if len(s.cfg.User) > 0 && len(s.cfg.Password) > 0 {
-		handler = basicAuthAndSessionMiddleware(s.cfg, handler)
+		handler = basicAuthAndSessionMiddleware("sessionid-"+s.id.String()[:5], s.cfg, handler)
	}

	// Redirect to HTTPS if we are supposed to
@@ -336,9 +336,10 @@ func noCacheMiddleware(h http.Handler) http.Handler {
	})
}

-func withVersionMiddleware(h http.Handler) http.Handler {
+func withDetailsMiddleware(id protocol.DeviceID, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Syncthing-Version", Version)
+		w.Header().Set("X-Syncthing-ID", id.String())
		h.ServeHTTP(w, r)
	})
}
@@ -427,7 +428,10 @@ func folderSummary(m *model.Model, folder string) map[string]interface{} {
		res["error"] = err.Error()
	}

-	res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
+	lv, _ := m.CurrentLocalVersion(folder)
+	rv, _ := m.RemoteLocalVersion(folder)
+
+	res["version"] = lv + rv

	ignorePatterns, _, _ := m.GetIgnores(folder)
	res["ignorePatterns"] = false
@@ -572,21 +576,26 @@ func (s *apiSvc) postSystemRestart(w http.ResponseWriter, r *http.Request) {
func (s *apiSvc) postSystemReset(w http.ResponseWriter, r *http.Request) {
	var qs = r.URL.Query()
	folder := qs.Get("folder")
-	var err error
-	if len(folder) == 0 {
-		err = resetDB()
-	} else {
-		err = s.model.ResetFolder(folder)
-	}
-	if err != nil {
-		http.Error(w, err.Error(), 500)
-		return

+	if len(folder) > 0 {
+		if _, ok := cfg.Folders()[folder]; !ok {
+			http.Error(w, "Invalid folder ID", 500)
+			return
+		}
+	}
+
+	if len(folder) == 0 {
+		// Reset all folders.
+		for folder := range cfg.Folders() {
+			s.model.ResetFolder(folder)
+		}
+		s.flushResponse(`{"ok": "resetting database"}`, w)
+	} else {
-		s.flushResponse(`{"ok": "resetting folder " + folder}`, w)
+		// Reset a specific folder, assuming it's supposed to exist.
+		s.model.ResetFolder(folder)
+		s.flushResponse(`{"ok": "resetting folder `+folder+`"}`, w)
+	}

	go restart()
}

@@ -743,7 +752,7 @@ func (s *apiSvc) getEvents(w http.ResponseWriter, r *http.Request) {
	f := w.(http.Flusher)
	f.Flush()

-	evs := eventSub.Since(since, nil)
+	evs := s.eventSub.Since(since, nil)
	if 0 < limit && limit < len(evs) {
		evs = evs[len(evs)-limit:]
	}
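The API service no longer owns its event subscription or identity: both now arrive through newAPISvc, and the device ID seeds the per-instance cookie and header names. A construction sketch mirroring the main.go hunk further down (myID, guiCfg, guiAssets, m and mainSvc come from the surrounding setup code):

    // Subscribe early so startup events are buffered before the GUI serves them.
    apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

    api, err := newAPISvc(myID, guiCfg, guiAssets, m, apiSub)
    if err != nil {
        l.Fatalln("Cannot start GUI:", err)
    }
    mainSvc.Add(api)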
@@ -24,14 +24,15 @@ var (
	sessionsMut = sync.NewMutex()
)

-func basicAuthAndSessionMiddleware(cfg config.GUIConfiguration, next http.Handler) http.Handler {
+func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguration, next http.Handler) http.Handler {

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if cfg.APIKey != "" && r.Header.Get("X-API-Key") == cfg.APIKey {
			next.ServeHTTP(w, r)
			return
		}

-		cookie, err := r.Cookie("sessionid")
+		cookie, err := r.Cookie(cookieName)
		if err == nil && cookie != nil {
			sessionsMut.Lock()
			_, ok := sessions[cookie.Value]
@@ -86,7 +87,7 @@ func basicAuthAndSessionMiddleware(cfg config.GUIConfiguration, next http.Handle
		sessions[sessionid] = true
		sessionsMut.Unlock()
		http.SetCookie(w, &http.Cookie{
-			Name:   "sessionid",
+			Name:   cookieName,
			Value:  sessionid,
			MaxAge: 0,
		})
@@ -24,7 +24,7 @@ var csrfMut = sync.NewMutex()
// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
// the request with 403. For / and /index.html, set a new CSRF cookie if none
// is currently set.
-func csrfMiddleware(prefix, apiKey string, next http.Handler) http.Handler {
+func csrfMiddleware(unique, prefix, apiKey string, next http.Handler) http.Handler {
	loadCsrfTokens()
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Allow requests carrying a valid API key
@@ -35,10 +35,10 @@ func csrfMiddleware(prefix, apiKey string, next http.Handler) http.Handler {

		// Allow requests for the front page, and set a CSRF cookie if there isn't already a valid one.
		if !strings.HasPrefix(r.URL.Path, prefix) {
-			cookie, err := r.Cookie("CSRF-Token")
+			cookie, err := r.Cookie("CSRF-Token-" + unique)
			if err != nil || !validCsrfToken(cookie.Value) {
				cookie = &http.Cookie{
-					Name:  "CSRF-Token",
+					Name:  "CSRF-Token-" + unique,
					Value: newCsrfToken(),
				}
				http.SetCookie(w, cookie)
@@ -54,7 +54,7 @@ func csrfMiddleware(prefix, apiKey string, next http.Handler) http.Handler {
		}

		// Verify the CSRF token
-		token := r.Header.Get("X-CSRF-Token")
+		token := r.Header.Get("X-CSRF-Token-" + unique)
		if !validCsrfToken(token) {
			http.Error(w, "CSRF Error", 403)
			return
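Suffixing the token names with the first five characters of the device ID keeps two syncthing instances on the same host from clobbering each other's CSRF cookies. What a REST client does under the new scheme, as a sketch (the URL and token source are illustrative):

    // `unique` is s.id.String()[:5]; `token` comes from the CSRF-Token-<unique> cookie.
    req, _ := http.NewRequest("GET", "http://localhost:8080/rest/...", nil)
    req.Header.Set("X-CSRF-Token-"+unique, token)
    resp, err := http.DefaultClient.Do(req)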
@@ -31,6 +31,7 @@ const (
	locCsrfTokens = "csrfTokens"
	locPanicLog   = "panicLog"
	locAuditLog   = "auditLog"
+	locGUIAssets  = "GUIAssets"
	locDefFolder  = "defFolder"
)

@@ -52,6 +53,7 @@ var locations = map[locationEnum]string{
	locCsrfTokens: "${config}/csrftokens.txt",
	locPanicLog:   "${config}/panic-${timestamp}.log",
	locAuditLog:   "${config}/audit-${timestamp}.log",
+	locGUIAssets:  "${config}/gui",
	locDefFolder:  "${home}/Sync",
}
@@ -252,6 +252,10 @@ func main() {
		l.Fatalln(err)
	}

+	if guiAssets == "" {
+		guiAssets = locations[locGUIAssets]
+	}
+
	if runtime.GOOS == "windows" {
		if logFile == "" {
			// Use the default log file location
@@ -443,12 +447,13 @@ func syncthingMain() {
		mainSvc.Add(newVerboseSvc())
	}

+	// Event subscription for the API; must start early to catch the early events.
+	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)
+
	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

-	events.Default.Log(events.Starting, map[string]string{"home": baseDirs["config"]})
-
	// Ensure that that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
@@ -468,6 +473,13 @@ func syncthingMain() {
	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

+	// Emit the Starting event, now that we know who we are.
+
+	events.Default.Log(events.Starting, map[string]string{
+		"home": baseDirs["config"],
+		"myID": myID.String(),
+	})
+
	// Prepare to be able to save configuration

	cfgFile := locations[locConfigFile]
@@ -553,6 +565,9 @@ func syncthingMain() {
		symlinks.Supported = false
	}

+	protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
+	protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second
+
	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
@@ -589,7 +604,6 @@ func syncthingMain() {

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)
-	mainSvc.Add(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
@@ -622,9 +636,11 @@ func syncthingMain() {
		}
	}

+	mainSvc.Add(m)
+
	// GUI

-	setupGUI(mainSvc, cfg, m)
+	setupGUI(mainSvc, cfg, m, apiSub)

	// The default port we announce, possibly modified by setupUPnP next.

@@ -700,7 +716,9 @@ func syncthingMain() {
		}
	}

-	events.Default.Log(events.StartupComplete, nil)
+	events.Default.Log(events.StartupComplete, map[string]string{
+		"myID": myID.String(),
+	})
	go generatePingEvents()

	cleanConfigDirectory()
@@ -764,7 +782,7 @@ func startAuditing(mainSvc *suture.Supervisor) {
	l.Infoln("Audit log in", auditFile)
}

-func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model) {
+func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub *events.BufferedSubscription) {
	opts := cfg.Options()
	guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey)

@@ -793,7 +811,7 @@ func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model) {

	urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
	l.Infoln("Starting web GUI on", urlShow)
-	api, err := newAPISvc(guiCfg, guiAssets, m)
+	api, err := newAPISvc(myID, guiCfg, guiAssets, m, apiSub)
	if err != nil {
		l.Fatalln("Cannot start GUI:", err)
	}
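The ordering in these hunks is the point: the buffered subscription is created before any service starts, and the Starting/StartupComplete events now carry myID, so the GUI can replay everything that happened during startup. The pattern in isolation, as a sketch (the buffer size and the Since call follow the diff):

    // Subscribe first; anything logged after this line is retained.
    sub := events.Default.Subscribe(events.AllEvents)
    apiSub := events.NewBufferedSubscription(sub, 1000) // keeps up to 1000 events

    // ... services start and emit events here ...

    evs := apiSub.Since(0, nil) // the API replays the buffer, early events included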
@@ -1,5 +1,5 @@
{
-    "A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
+    "A negative number of days doesn't make sense.": "Няма логика на зададен отрицателен брой дни.",
    "A new major version may not be compatible with previous versions.": "Нова основна версия, която може да не е съвмеситима с предишни версии.",
    "API Key": "API Ключ",
    "About": "За Програмата",
@@ -20,7 +20,7 @@
    "Bugs": "Бъгове",
    "CPU Utilization": "Натоварване на Процесора",
    "Changelog": "Сипъск с промени",
-    "Clean out after": "Clean out after",
+    "Clean out after": "Изчисти след",
    "Close": "Затвори",
    "Command": "Команда",
    "Comment, when used at the start of a line": "Коментар, използван в началото на реда",
@@ -30,6 +30,7 @@
    "Copied from original": "Копиран от оригинала",
    "Copyright © 2015 the following Contributors:": "Правата запазени © 2015 Сътрудници:",
    "Delete": "Изтрий",
    "Deleted": "Изтрито",
    "Device ID": "Идентификатор на устройство",
    "Device Identification": "Идентификация на устройство",
    "Device Name": "Име на устройство",
@@ -52,7 +53,7 @@
    "File Pull Order": "По ред на дърпане",
    "File Versioning": "Файлови Версии",
    "File permission bits are ignored when looking for changes. Use on FAT file systems.": "Битовете за права за достъп са игнорирани, когато се проверява за промени. Използвай с файлови системи тип FAT.",
-    "Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
+    "Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Файловете биват преместени в .stversions папка, когато са заменен или изтрити от Syncthing.",
    "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Когато syncthing замени или изтрие файл той се премества в .stversions и преименува с дабавени дата и час.",
    "Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Файловете са защитени от промени направени на други устройства, но промени направени на това устройство ще бъдат синхронизирани с другите устройства.",
    "Folder ID": "Идентификатор на папка",
@@ -80,6 +81,7 @@
    "Later": "По-късно",
    "Local Discovery": "Локално Откриване",
    "Local State": "Локално състояние",
    "Local State (Total)": "Локално Състояние (Общо)",
    "Major Upgrade": "Основно Обновяване",
    "Maximum Age": "Максимална Възраст",
    "Metadata Only": "Само мета информация",
@@ -163,18 +165,19 @@
    "The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Използва се следния интервал: за първия час се пази версия всеки 30 секунди, за първия ден се пази версия всеки час, за първите 30 дена се пази версия всеки ден, до максимума се пази една версия всяка седмица.",
    "The maximum age must be a number and cannot be blank.": "Максималната възраст трябва да е число и не може д ае празна.",
    "The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максималното време да се пазят весрсии (в дни, сложи 0, за да пазиш версии завинаги).",
-    "The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
-    "The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
+    "The number of days must be a number and cannot be blank.": "Броят дни трябва да бъде число и неможе да бъде празно.",
+    "The number of days to keep files in the trash can. Zero means forever.": "Броят дни за запазване на файловете в кошчето. Нула значи завинаги.",
    "The number of old versions to keep, per file.": "Броят стари версии, които да бъдат пазени за всеки файл.",
    "The number of versions must be a number and cannot be blank.": "Броят версии трябва да бъде число и не може да бъде празно.",
    "The path cannot be blank.": "Пътят неможе да бъде празен.",
    "The rescan interval must be a non-negative number of seconds.": "Интервала на сканиране трябва да бъде не отрицателно число в секунди.",
    "This is a major version upgrade.": "Това е нова основна версия.",
-    "Trash Can File Versioning": "Trash Can File Versioning",
+    "Trash Can File Versioning": "Версии на файлове в кошчето",
    "Unknown": "Неясен",
    "Unshared": "Споделянето прекратено",
    "Unused": "Неизползван",
    "Up to Date": "Актуален",
    "Updated": "Обновено",
    "Upgrade": "Обнови",
    "Upgrade To {%version%}": "Обновен До {{version}}",
    "Upgrading": "Обновяване",
@@ -30,6 +30,7 @@
    "Copied from original": "Copiat de l'original",
    "Copyright © 2015 the following Contributors:": "Copyright © 2015 els següents col·laboradors:",
    "Delete": "Esborrar",
+    "Deleted": "Deleted",
    "Device ID": "ID del dispositiu",
    "Device Identification": "Identificació del dispositiu",
    "Device Name": "Nom del dispositiu",
@@ -80,6 +81,7 @@
    "Later": "Després",
    "Local Discovery": "Descobriment Local",
    "Local State": "Estat local",
+    "Local State (Total)": "Local State (Total)",
    "Major Upgrade": "Actualització major",
    "Maximum Age": "Antiguitat Màxima",
    "Metadata Only": "Només metadades",
@@ -175,6 +177,7 @@
    "Unshared": "No compartit",
    "Unused": "No usat",
    "Up to Date": "Actualitzat",
+    "Updated": "Updated",
    "Upgrade": "Actualització",
    "Upgrade To {%version%}": "Actualitzar a {{version}}",
    "Upgrading": "Actualitzant",
@@ -1,9 +1,9 @@
{
-    "A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
+    "A negative number of days doesn't make sense.": "Un nombre negatiu de dies no té sentit.",
    "A new major version may not be compatible with previous versions.": "Una nova versión amb canvis importants pot no ser compatible amb versions prèvies.",
    "API Key": "Clau API",
    "About": "Sobre",
-    "Actions": "Actions",
+    "Actions": "Accions",
    "Add": "Afegir",
    "Add Device": "Afegir dispositiu",
    "Add Folder": "Afegir carpeta",
@@ -20,7 +20,7 @@
    "Bugs": "Errors (Bugs)",
    "CPU Utilization": "Utilització de la CPU",
    "Changelog": "Registre de canvis",
-    "Clean out after": "Clean out after",
+    "Clean out after": "Netejar després de",
    "Close": "Tancar",
    "Command": "Comando",
    "Comment, when used at the start of a line": "Comentar, quant s'utilitza al principi d'una línia",
@@ -30,6 +30,7 @@
    "Copied from original": "Copiat de l'original",
    "Copyright © 2015 the following Contributors:": "Copyright © 2015 els següents Col·laboradors:",
    "Delete": "Esborrar",
+    "Deleted": "Esborrat",
    "Device ID": "ID del dispositiu",
    "Device Identification": "Identificació del dispositiu",
    "Device Name": "Nom del dispositiu",
@@ -52,7 +53,7 @@
    "File Pull Order": "Ordre de fitxers del pull",
    "File Versioning": "Versionat de fitxer",
    "File permission bits are ignored when looking for changes. Use on FAT file systems.": "Els bits de permís del fitxer són ignorats quant es busquen els canvis. Utilitzar en sistemes de fitxers FAT.",
-    "Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
+    "Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Els arxius es menejen a la carpeta .stversions quant són substituïts o esborrats per Syncthing.",
    "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Els fitxers són canviats a versions amb indicació de data en una carpeta \".stversions\" quant són reemplaçats o esborrats per Syncthing.",
    "Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Els fitxers són protegits dels canvis fets en altres dispositius, però els canvis fets en aquest dispositiu seràn enviats a la resta del grup (cluster).",
    "Folder ID": "ID de carpeta",
@@ -66,7 +67,7 @@
    "Global Discovery": "Descobriment global",
    "Global Discovery Server": "Servidor de descobriment global",
    "Global State": "Estat global",
-    "Help": "Help",
+    "Help": "Ajuda",
    "Ignore": "Ignorar",
    "Ignore Patterns": "Patrons a ignorar",
    "Ignore Permissions": "Permisos a ignorar",
@@ -80,6 +81,7 @@
    "Later": "Més tard",
    "Local Discovery": "Descobriment local",
    "Local State": "Estat local",
+    "Local State (Total)": "Estat Local (Total)",
    "Major Upgrade": "Actualització important",
    "Maximum Age": "Edat màxima",
    "Metadata Only": "Sols metadades",
@@ -163,18 +165,19 @@
    "The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "S'utilitzen els següents intervals: per a la primera hora es guarda una versió cada 30 segons, per al primer dia es guarda una versió cada hora, per als primers 30 dies es guarda una versió diaria, fins l'edat màxima es guarda una versió cada setmana.",
    "The maximum age must be a number and cannot be blank.": "L'edat màxima deu ser un nombre i no pot estar buida.",
    "The maximum time to keep a version (in days, set to 0 to keep versions forever).": "El temps màxim per a guardar una versió (en dies, ficar 0 per a guardar les versions per a sempre).",
-    "The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
-    "The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
+    "The number of days must be a number and cannot be blank.": "El nombre de dies deu ser un nombre i no pot estar en blanc.",
+    "The number of days to keep files in the trash can. Zero means forever.": "El nombre de dies per a mantindre els arxius a la paperera. Cero vol dir \"per a sempre\".",
    "The number of old versions to keep, per file.": "El nombre de versions antigues per a guardar, per cada fitxer.",
    "The number of versions must be a number and cannot be blank.": "El nombre de versions deu ser un nombre i no pot estar buit.",
    "The path cannot be blank.": "La ruta no pot estar buida.",
    "The rescan interval must be a non-negative number of seconds.": "L'interval de reescaneig deu ser un nombre positiu de segons.",
    "This is a major version upgrade.": "Aquesta és una actualització important de la versió.",
-    "Trash Can File Versioning": "Trash Can File Versioning",
+    "Trash Can File Versioning": "Versionat d'arxius de la paperera",
    "Unknown": "Desconegut",
    "Unshared": "No compartit",
    "Unused": "No utilitzat",
    "Up to Date": "Actualitzat",
    "Updated": "Actualitzat",
    "Upgrade": "Actualitzar",
    "Upgrade To {%version%}": "Actualitzar a {{version}}",
    "Upgrading": "Actualitzant",
@@ -30,6 +30,7 @@
    "Copied from original": "Zkopírováno z originálu",
    "Copyright © 2015 the following Contributors:": "Copyright © 2015 následující přispěvatelé:",
    "Delete": "Smazat",
+    "Deleted": "Smazáno",
    "Device ID": "ID přístroje",
    "Device Identification": "Identifikace přístroje",
    "Device Name": "Jméno přístroje",
@@ -80,6 +81,7 @@
    "Later": "Později",
    "Local Discovery": "Místní oznamování",
    "Local State": "Místní status",
+    "Local State (Total)": "Místní status (Celkem)",
    "Major Upgrade": "Důležitá aktualizace",
    "Maximum Age": "Maximální časový limit",
    "Metadata Only": "Pouze metadata",
@@ -175,6 +177,7 @@
    "Unshared": "Nesdílený",
    "Unused": "Nepoužitý",
    "Up to Date": "Aktuální",
+    "Updated": "Aktualizováno",
    "Upgrade": "Aktualizace",
    "Upgrade To {%version%}": "Aktualizovat na {{version}}",
    "Upgrading": "Aktualizuji",
@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Eine negative Anzahl von Tagen ergibt keinen Sinn.",
"A new major version may not be compatible with previous versions.": "Die neue Hauptversion ist evtl. nicht mit vorherigen Versionen kompatibel.",
"API Key": "API-Schlüssel",
"About": "Über Syncthing",
@@ -12,7 +12,7 @@
"Addresses": "Adressen",
"All Data": "Alle Daten",
"Allow Anonymous Usage Reporting?": "Übertragung von anonymen Nutzungsberichten erlauben?",
"Alphabetic": "alphabetisch",
"Alphabetic": "Alphabetisch",
"An external command handles the versioning. It has to remove the file from the synced folder.": "Ein externer Programmaufruf handhabt die Versionierung. Es muss die Datei aus dem zu synchronisierendem Ordner entfernen.",
"Anonymous Usage Reporting": "Anonymer Nutzungsbericht",
"Any devices configured on an introducer device will be added to this device as well.": "Alle Geräte, die beim Verteiler eingetragen sind, werden auch bei diesem Gerät eingetragen",
@@ -20,7 +20,7 @@
"Bugs": "Fehler",
"CPU Utilization": "Prozessorauslastung",
"Changelog": "Änderungsprotokoll",
"Clean out after": "Clean out after",
"Clean out after": "Aufräumen nach",
"Close": "Schließen",
"Command": "Kommando",
"Comment, when used at the start of a line": "Kommentar, wenn am Anfang der Zeile benutzt.",
@@ -30,6 +30,7 @@
"Copied from original": "Vom Original kopiert",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 die folgenden Unterstützer:",
"Delete": "Löschen",
"Deleted": "gelöscht",
"Device ID": "Geräte ID",
"Device Identification": "Gerät Identifikation",
"Device Name": "Gerätename",
@@ -52,7 +53,7 @@
"File Pull Order": "Dateiübertragungsreihenfolge",
"File Versioning": "Dateiversionierung",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Dateizugriffsrechte beim Suchen nach Veränderungen ignorieren. Bei FAT-Dateisystemen zu verwenden.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, als datierte Versionen in einen Ordner namens .stversions verschoben.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, als datierte Versionen in einen Ordner namens .stversions verschoben.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Dateien sind vor Veränderung durch andere Geräte geschützt. Auf diesem Gerät durchgeführte Veränderungen werden aber auf den Rest des Verbunds übertragen.",
"Folder ID": "Verzeichnis ID",
@@ -63,23 +64,24 @@
"GUI Authentication User": "Nutzername für Zugang zur Benutzeroberfläche",
"GUI Listen Addresses": "Adresse(n) für die Benutzeroberfläche",
"Generate": "Generieren",
"Global Discovery": "verfügbare Indexserver",
"Global Discovery": "Globale Gerätesuche",
"Global Discovery Server": "Globale(r) Indexserver",
"Global State": "Globaler Status",
"Help": "Hilfe",
"Ignore": "Ignorieren",
"Ignore Patterns": "Ignoriermuster",
"Ignore Permissions": "Berechtigungen ignorieren",
"Incoming Rate Limit (KiB/s)": "Limit Datenrate (eingehend) (KiB/s)",
"Incoming Rate Limit (KiB/s)": "Limit Datenrate (eingehend) (KB/s)",
"Introducer": "Verteilergerät",
"Inversion of the given condition (i.e. do not exclude)": "Umkehrung der angegebenen Bedingung (z.B. schließe nicht aus)",
"Keep Versions": "Versionen erhalten",
"Largest First": "Größtes zuerst",
"Last File Received": "Letzte Datei empfangen",
"Largest First": "Größte zuerst",
"Last File Received": "Letzte empfangene Datei ",
"Last seen": "Zuletzt online",
"Later": "Später",
"Local Discovery": "Client lokal freigeben",
"Local Discovery": "Lokale Gerätesuche",
"Local State": "Lokaler Status",
"Local State (Total)": "Lokaler Status (total)",
"Major Upgrade": "Hauptversionsupgrade",
"Maximum Age": "Höchstalter",
"Metadata Only": "Nur Metadaten",
@@ -88,16 +90,16 @@
"Never": "Nie",
"New Device": "Neues Gerät",
"New Folder": "Neues Verzeichnis",
"Newest First": "Neuestes zuerst",
"Newest First": "Neueste zuerst",
"No": "Nein",
"No File Versioning": "Keine Dateiversionierung",
"Notice": "Hinweis",
"OK": "OK",
"Off": "Aus",
"Oldest First": "Ältestes zuerst",
"Oldest First": "Älteste zuerst",
"Out Of Sync": "Nicht synchronisiert",
"Out of Sync Items": "Nicht synchronisierte Objekte",
"Outgoing Rate Limit (KiB/s)": "Ausgehendes Datenratelimit (KiB/s)",
"Outgoing Rate Limit (KiB/s)": "Limit Datenrate (ausgehend) (KB/s)",
"Override Changes": "Änderungen überschreiben",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Verzeichnis auf dem lokalen Rechner. Wird erzeugt, wenn es nicht existiert. Das Tilden-Zeichen (~) kann als Abkürzung benutzt werden für",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Pfad in dem die Versionen gespeichert werden sollen (ohne Angabe wird das Verzeichnis .stversions im Verzeichnis verwendet).",
@@ -163,8 +165,8 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Es wird in folgenden Abständen versioniert: in der ersten Stunde wird alle 30 Sekunden eine Version behalten, am ersten Tag eine jede Stunde, in den ersten 30 Tagen eine jeden Tag, danach wird bis zum Höchstalter eine Version pro Woche beibehalten.",
"The maximum age must be a number and cannot be blank.": "Das Höchstalter muss angegeben werden und eine Zahl sein.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Die längste Zeit, die alte Versionen vorgehalten werden (in Tagen, 0 bedeutet, alte Versionen für immer zu behalten).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Zahl und darf nicht leer sein.",
"The number of days to keep files in the trash can. Zero means forever.": "Dauer in Tagen für welche die Dateien aufgehoben werden sollen. 0 bedeutet für immer.",
"The number of old versions to keep, per file.": "Anzahl der alten Versionen, die von jeder Datei gespeichert werden sollen.",
"The number of versions must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Zahl und darf nicht leer sein.",
"The path cannot be blank.": "Der Pfad darf nicht leer sein.",
@@ -175,6 +177,7 @@
"Unshared": "Ungeteilt",
"Unused": "Ungenutzt",
"Up to Date": "Aktuell",
"Updated": "aktualisiert",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Update auf {{version}}",
"Upgrading": "Wird aktualisiert",

@@ -1,9 +1,9 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Αριθμός ημερών με αρνητικό πρόσημο, δε βγάζει νόημα.",
"A new major version may not be compatible with previous versions.": "Μια νέα σημαντική έκδοση μπορεί να μην είναι συμβατή με τις προηγούμενες εκδόσεις.",
"API Key": "Κλειδί API",
"About": "Σχετικά με το Syncthing",
"Actions": "Actions",
"Actions": "Ενέργειες",
"Add": "Προσθήκη",
"Add Device": "Προσθήκη συσκευής",
"Add Folder": "Προσθήκη φακέλου",
@@ -20,7 +20,7 @@
"Bugs": "Bugs",
"CPU Utilization": "Επιβάρυνση του επεξεργαστή",
"Changelog": "Πληροφορίες εκδόσεων",
"Clean out after": "Clean out after",
"Clean out after": "Μετά από αυτό, εκκαθάρισε",
"Close": "Τέλος",
"Command": "Εντολή",
"Comment, when used at the start of a line": "Σχόλιο, όταν χρησιμοποιείται στην αρχή μιας γραμμής",
@@ -30,6 +30,7 @@
"Copied from original": "Έχει αντιγραφεί από το πρωτότυπο",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 από τους παρακάτω συνεισφορείς:",
"Delete": "Διαγραφή",
"Deleted": "Διαγραμμένα",
"Device ID": "Ταυτότητα συσκευής",
"Device Identification": "Ταυτότητα συσκευής",
"Device Name": "Όνομα συσκευής",
@@ -52,7 +53,7 @@
"File Pull Order": "Σειρά με την οποία θα κατεβαίνουν τα αρχεία",
"File Versioning": "Τήρηση εκδόσεων",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Τα δικαιώματα των αρχείων θα αγνοούνται όταν κοιτάζω για αλλαγές. Αφορά συστήματα αρχείων FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Τα αρχεία μετακινούνται στον φάκελο .stversions, όταν αντικαθίστανται ή διαγράφονται από το Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Τα αρχεία που σβήνονται ή αντικαθιστούνται από το Syncthing μετακινούνται σε έναν φάκελο .stversions με χρονοσφραγίδα.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Τα αρχεία προστατεύονται από αλλαγές που γίνονται σε άλλες συσκευές, αλλά όποιες αλλαγές γίνουν σε αυτή τη συσκευή θα αποσταλούν σε όλη τη συστάδα συσκευών.",
"Folder ID": "Ταυτότητα φακέλου",
@@ -66,7 +67,7 @@
"Global Discovery": "Καθολική ανεύρεση",
"Global Discovery Server": "Διακομιστής καθολικής ανεύρεσης κόμβου",
"Global State": "Καθολική κατάσταση",
"Help": "Help",
"Help": "Βοήθεια",
"Ignore": "Αγνόησε",
"Ignore Patterns": "Πρότυπο για αγνόηση",
"Ignore Permissions": "Αγνόησε τα δικαιώματα",
@@ -80,6 +81,7 @@
"Later": "Αργότερα",
"Local Discovery": "Τοπική ανεύρεση",
"Local State": "Τοπική κατάσταση",
"Local State (Total)": "Τοπική κατάσταση (συνολικά)",
"Major Upgrade": "Σημαντική αναβάθμιση",
"Maximum Age": "Μέγιστη ηλικία",
"Metadata Only": "Μόνο μεταδεδομένα",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Θα χρησιμοποιούνται τα εξής διαστήματα: Την πρώτη ώρα θα τηρείται μια έκδοση κάθε 30 δευτερόλεπτα. Την πρώτη ημέρα, μια έκδοση κάθε μια ώρα. Τις πρώτες 30 ημέρες, μία έκδοση κάθε ημέρα. Από εκεί και έπειτα μέχρι τη μέγιστη ηλικία, θα τηρείται μια έκδοση κάθε εβδομάδα.",
"The maximum age must be a number and cannot be blank.": "Η μέγιστη ηλικία πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Η μέγιστη ηλικία παλιότερων εκδόσεων (σε ημέρες, αν δώσεις 0 οι παλιότερες εκδόσεις θα διατηρούνται για πάντα).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Ο αριθμός ημερών πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
"The number of days to keep files in the trash can. Zero means forever.": "Ο αριθμός ημερών για τήρηση αρχείων στον Κάδο. Αριθμός μηδέν σημαίνει τήρηση για πάντα.",
"The number of old versions to keep, per file.": "Πόσες παλιότερες εκδόσεις θα διατηρούνται, ανά αρχείο.",
"The number of versions must be a number and cannot be blank.": "Ο αριθμός εκδόσεων πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
"The path cannot be blank.": "Το μονοπάτι δεν μπορεί να είναι κενό.",
"The rescan interval must be a non-negative number of seconds.": "Ο χρόνος επανελέγχου για αλλαγές είναι σε δευτερόλεπτα (δηλ. θετικός αριθμός).",
"This is a major version upgrade.": "Αυτή είναι μιας σημαντική αναβάθμιση.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Ο Κάδος μπορεί να τηρεί εκδόσεις",
"Unknown": "Άγνωστο",
"Unshared": "Δε μοιράζεται",
"Unused": "Δε χρησιμοποιείται",
"Up to Date": "Ενημερωμένος",
"Updated": "Ενημερωμένο",
"Upgrade": "Αναβάθμιση",
"Upgrade To {%version%}": "Αναβάθμιση στην έκδοση {{version}}",
"Upgrading": "Αναβάθμιση",

@@ -30,6 +30,7 @@
"Copied from original": "Copied from original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "Delete",
"Deleted": "Deleted",
"Device ID": "Device ID",
"Device Identification": "Device Identification",
"Device Name": "Device Name",
@@ -80,6 +81,7 @@
"Later": "Later",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maximum Age",
"Metadata Only": "Metadata Only",
@@ -175,6 +177,7 @@
"Unshared": "Unshared",
"Unused": "Unused",
"Up to Date": "Up to Date",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Upgrade to {{version}}",
"Upgrading": "Upgrading",

@@ -30,6 +30,7 @@
"Copied from original": "Copied from original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "Delete",
"Deleted": "Deleted",
"Device ID": "Device ID",
"Device Identification": "Device Identification",
"Device Name": "Device Name",
@@ -80,6 +81,7 @@
"Later": "Later",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maximum Age",
"Metadata Only": "Metadata Only",
@@ -175,6 +177,7 @@
"Unshared": "Unshared",
"Unused": "Unused",
"Up to Date": "Up to Date",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Upgrade To {{version}}",
"Upgrading": "Upgrading",

@@ -1,9 +1,9 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Un número negativo de días no tiene sentido.",
"A new major version may not be compatible with previous versions.": "Una nueva versión con cambios importantes puede no ser compatible con versiones anteriores.",
"API Key": "Clave del API",
"About": "Acerca de",
"Actions": "Actions",
"Actions": "Acciones",
"Add": "Añadir",
"Add Device": "Añadir dispositivo",
"Add Folder": "Añadir repositorio",
@@ -20,7 +20,7 @@
"Bugs": "Errores (bugs)",
"CPU Utilization": "Uso de CPU",
"Changelog": "Informe de cambios",
"Clean out after": "Clean out after",
"Clean out after": "Limpiar tras",
"Close": "Cerrar",
"Command": "Comando",
"Comment, when used at the start of a line": "Comentar, cuando se usa al comienzo de una línea",
@@ -30,6 +30,7 @@
"Copied from original": "Copiado del original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 los siguientes Colaboradores:",
"Delete": "Borrar",
"Deleted": "Borrado",
"Device ID": "ID del dispositivo",
"Device Identification": "Identificación del dispositivo",
"Device Name": "Nombre del dispositivo",
@@ -52,7 +53,7 @@
"File Pull Order": "Orden de ficheros del pull",
"File Versioning": "Versionado de ficheros",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Los bits de permiso de ficheros son ignorados cuando se buscan cambios. Utilizar en sistemas de ficheros FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Los archivos serán movidos a la carpeta .stversions cuando sean reemplazados o borrados por Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Los ficheros son cambiados a versiones con indicación de fecha en una carpeta \".stversions\" cuando son reemplazados o borrados por Syncthing.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Los ficheros son protegidos por los cambios hechos en otros dispositivos, pero los cambios hechos en este dispositivo serán enviados al resto del grupo (cluster).",
"Folder ID": "ID de carpeta",
@@ -66,7 +67,7 @@
"Global Discovery": "Descubrimiento global",
"Global Discovery Server": "Servidor de descubrimiento global",
"Global State": "Estado global",
"Help": "Help",
"Help": "Ayuda",
"Ignore": "Ignorar",
"Ignore Patterns": "Patrones a ignorar",
"Ignore Permissions": "Permisos a ignorar",
@@ -80,6 +81,7 @@
"Later": "Más tarde",
"Local Discovery": "Descubrimiento local",
"Local State": "Estado local",
"Local State (Total)": "Estado Local (Total)",
"Major Upgrade": "Actualización importante",
"Maximum Age": "Edad máxima",
"Metadata Only": "Sólo metadatos",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Se utilizan los siguientes intervalos: para la primera hora se mantiene una versión cada 30 segundos, para el primer día se mantiene una versión cada hora, para los primeros 30 días se mantiene una versión diaria hasta la edad máxima de una semana.",
"The maximum age must be a number and cannot be blank.": "La edad máxima debe ser un número y no puede estar vacía.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "El tiempo máximo para mantener una versión en días (introducir 0 para mantener las versiones indefinidamente).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "El número de días debe ser un número y no puede estar en blanco.",
"The number of days to keep files in the trash can. Zero means forever.": "El número de días para mantener los archivos en la papelera. Cero significa \"para siempre\".",
"The number of old versions to keep, per file.": "El número de versiones a antiguas a mantener para cada fichero.",
"The number of versions must be a number and cannot be blank.": "El número de versiones debe ser un número y no puede estar vacío.",
"The path cannot be blank.": "La ruta no puede estar vacía.",
"The rescan interval must be a non-negative number of seconds.": "El intervalo de actualización debe ser un número positivo de segundos.",
"This is a major version upgrade.": "Hay una actualización importante.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Versionado de archivos de la papelera",
"Unknown": "Desconocido",
"Unshared": "No compartido",
"Unused": "No usado",
"Up to Date": "Actualizado",
"Updated": "Actualizado",
"Upgrade": "Actualizar",
"Upgrade To {%version%}": "Actualizar a {{version}}",
"Upgrading": "Actualizando",

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Un número negativo no tiene sentido",
"A new major version may not be compatible with previous versions.": "Una versión mayor nueva puede ser incompatible con versiones anteriores.",
"API Key": "Clave API",
"About": "Acerca de",
@@ -20,7 +20,7 @@
"Bugs": "Errores",
"CPU Utilization": "Uso de CPU",
"Changelog": "Registro de cambios",
"Clean out after": "Clean out after",
"Clean out after": "Limpiar después",
"Close": "Cerrar",
"Command": "Comando",
"Comment, when used at the start of a line": "Comentario, cuando es utilizado al inicio de una línea.",
@@ -30,6 +30,7 @@
"Copied from original": "Copiado del original",
"Copyright © 2015 the following Contributors:": "Derechos de autor © 2015 los siguientes colaboradores:",
"Delete": "Suprimir",
"Deleted": "Suprimido",
"Device ID": "ID del dispositivo",
"Device Identification": "Identificación del dispositivo",
"Device Name": "Nombre del dispositivo",
@@ -80,6 +81,7 @@
"Later": "Más tarde",
"Local Discovery": "Búsqueda en red local",
"Local State": "Estado local",
"Local State (Total)": "Estado local (total)",
"Major Upgrade": "Actualización mayor",
"Maximum Age": "Edad máxima",
"Metadata Only": "Sólo metadatos",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Los siguientes intervalos se utilizan: para la primera hora una versión se mantiene cada 30 segundos, para el primer día de una versión se mantiene cada hora, durante los primeros 30 días de la versión se mantiene todos los días, hasta que la edad máxima de una versión se mantiene cada semana.",
"The maximum age must be a number and cannot be blank.": "La edad máxima debe ser un número y no puede estar en blanco.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "El tiempo máximo para mantener una versión (en días, establece en 0 para mantener versiones para siempre).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "El número de días debe ser un número y no puede estar vacío.",
"The number of days to keep files in the trash can. Zero means forever.": "El tiempo máximo para mantener un archivo en el cubo de basura (en días, establece en 0 para mantener versiones para siempre).",
"The number of old versions to keep, per file.": "El numero de versiones anteriores a conservar, por archivo.",
"The number of versions must be a number and cannot be blank.": "El número de versiones debe ser un número y no puede estar vacío.",
"The path cannot be blank.": "La ruta no puede estar vacía.",
"The rescan interval must be a non-negative number of seconds.": "El intervalo de reescaneo debe ser un número no negativo de segundos.",
"This is a major version upgrade.": "Esta es una actualización de version mayor.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Versiones como cubo de basura",
"Unknown": "Desconocido",
"Unshared": "No compartido",
"Unused": "No utilizado",
"Up to Date": "Actualizado",
"Updated": "Actualizado",
"Upgrade": "Actualizar",
"Upgrade To {%version%}": "Actualizar a {{version}}",
"Upgrading": "Actualizando",

@@ -30,6 +30,7 @@
"Copied from original": "Kopioitu alkuperäisestä lähteestä",
"Copyright © 2015 the following Contributors:": "Tekijänoikeus © 2015 seuraavat avustajat:",
"Delete": "Poista",
"Deleted": "Deleted",
"Device ID": "Laitteen ID",
"Device Identification": "Laitteen tunniste",
"Device Name": "Laitteen nimi",
@@ -80,6 +81,7 @@
"Later": "Myöhemmin",
"Local Discovery": "Paikallinen etsintä",
"Local State": "Paikallinen tila",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maksimi-ikä",
"Metadata Only": "Vain metadata",
@@ -175,6 +177,7 @@
"Unshared": "Jakamaton",
"Unused": "Käyttämätön",
"Up to Date": "Ajan tasalla",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Päivitä versioon {{version}}",
"Upgrading": "Päivitetään",

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Un nombre négatif de jours n'a pas de sens.",
"A new major version may not be compatible with previous versions.": "Une nouvelle version majeure peut présenter des incompatibilités avec les versions antérieures.",
"API Key": "Clé API",
"About": "À propos",
@@ -19,8 +19,8 @@
"Automatic upgrades": "Mises à jour automatiques",
"Bugs": "Bugs",
"CPU Utilization": "Utilisation du CPU",
"Changelog": "Nouveautés",
"Clean out after": "Clean out after",
"Changelog": "Historique des changements",
"Clean out after": "Nettoyer après",
"Close": "Fermer",
"Command": "Commande",
"Comment, when used at the start of a line": "Commentaire, lorsque utilisé en début de ligne",
@@ -30,6 +30,7 @@
"Copied from original": "Copié de l'original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 Les contributeurs suivants:",
"Delete": "Supprimer",
"Deleted": "Supprimé",
"Device ID": "ID du périphérique",
"Device Identification": "Identification de l'appareil",
"Device Name": "Nom du périphérique",
@@ -52,7 +53,7 @@
"File Pull Order": "Ordre d'envoi de fichier",
"File Versioning": "Versions de fichier",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Les bits de permission de fichier sont ignorés lors de la recherche de changements. Utilisé sur les systèmes de fichiers FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Les fichiers sont déplacés vers le dossier .stversions quand ils sont remplacés ou effacés par Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Les fichiers sont déplacés, avec horodatage, dans un dossier .stversions quand ils sont remplacés ou supprimés par Syncthing.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Les fichiers sont protégés des changements réalisés sur les autres appareils, mais les changements réalisés sur cet appareil seront transférés au reste du groupe.",
"Folder ID": "ID du répertoire",
@@ -80,6 +81,7 @@
"Later": "Plus tard",
"Local Discovery": "Recherche locale",
"Local State": "État local",
"Local State (Total)": "Etat local (Total)",
"Major Upgrade": "Mise à jour majeure",
"Maximum Age": "Ancienneté maximum",
"Metadata Only": "Métadonnées uniquement",
@@ -163,23 +165,24 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Les intervalles suivant sont utilisés: la première heure une version est conservée chaque 30 secondes, le premier jour une version est conservée chaque heure, les premiers 30 jours une version est conservée chaque jour, jusqu'à la limite d'âge maximum une version est conservée chaque semaine.",
"The maximum age must be a number and cannot be blank.": "L'ancienneté maximum doit être un nombre et ne peut être vide.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Le temps maximum de conservation d'une version (en jours, mettre à 0 pour conserver les versions pour toujours)",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Le nombre de jours doit être numérique et ne peut pas être vide.",
"The number of days to keep files in the trash can. Zero means forever.": "Le nombre de jours de conservation des fichiers dans la poubelle. Zéro signifie toujours.",
"The number of old versions to keep, per file.": "Le nombre d'anciennes versions à garder, par fichier.",
"The number of versions must be a number and cannot be blank.": "Le nombre de versions doit être numérique, et ne peut pas être vide.",
"The path cannot be blank.": "Le chemin ne peut pas être vide.",
"The rescan interval must be a non-negative number of seconds.": "L'intervalle d'analyse ne doit pas être un nombre négatif de secondes.",
"This is a major version upgrade.": "Ceci est une mise à jour majeure",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Gestion des versions de fichier de la poubelle.",
"Unknown": "Inconnu",
"Unshared": "Non partagé",
"Unused": "Non utilisé",
"Up to Date": "Synchronisation à jour",
"Up to Date": "Synchronisé",
"Updated": "Mis à jour",
"Upgrade": "Mise à jour",
"Upgrade To {%version%}": "Mettre à jour vers {{version}}",
"Upgrading": "Mise à jour de Syncthing",
"Upload Rate": "Débit d'envoi",
"Uptime": "Durée de fonctionnement depuis dernier démarrage",
"Uptime": "Durée de fonctionnement",
"Use HTTPS for GUI": "Utiliser l'HTTPS pour le GUI",
"Version": "Version",
"Versions Path": "Emplacement des versions",

@@ -30,6 +30,7 @@
"Copied from original": "Másolva az eredetiről",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 az alábbi Közreműködők",
"Delete": "Törlés",
"Deleted": "Deleted",
"Device ID": "Eszköz azonosító",
"Device Identification": "Eszköz azonosító",
"Device Name": "Eszköz neve",
@@ -80,6 +81,7 @@
"Later": "Később",
"Local Discovery": "Helyi felfedezés",
"Local State": "Helyi állapot",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Főverzió frissítés",
"Maximum Age": "Maximális kor",
"Metadata Only": "Csak metaadatok",
@@ -175,6 +177,7 @@
"Unshared": "Nincs megosztva",
"Unused": "Nincs használatban",
"Up to Date": "Friss",
"Updated": "Updated",
"Upgrade": "Frissítés",
"Upgrade To {%version%}": "Frissítés a {{version}} verzióra",
"Upgrading": "Frissítés",

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Un numero di giorni negativo non ha alcun senso.",
"A new major version may not be compatible with previous versions.": "Una nuova versione principale potrebbe non essere compatibile con le versioni precedenti.",
"API Key": "Chiave API",
"About": "Informazioni",
@@ -20,7 +20,7 @@
"Bugs": "Bug",
"CPU Utilization": "Utilizzo CPU",
"Changelog": "Changelog",
"Clean out after": "Clean out after",
"Clean out after": "Svuota dopo",
"Close": "Chiudi",
"Command": "Comando",
"Comment, when used at the start of a line": "Per commentare, va inserito all'inizio di una riga",
@@ -30,6 +30,7 @@
"Copied from original": "Copiato dall'originale",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 i seguenti Collaboratori:",
"Delete": "Elimina",
"Deleted": "Cancellato",
"Device ID": "ID Dispositivo",
"Device Identification": "Identificazione Dispositivo",
"Device Name": "Nome Dispositivo",
@@ -52,7 +53,7 @@
"File Pull Order": "Ordine di prelievo dei file",
"File Versioning": "Controllo Versione dei File",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Il software evita i bit dei permessi dei file durante il controllo delle modifiche. Utilizzato nei filesystem FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "I file sono spostati nella certella .stversions quando vengono sostituiti o cancellati da Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "I file sostituiti o eliminati da Syncthing vengono datati e spostati in una cartella .stversions.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "I file sono protetti dalle modifiche effettuate negli altri dispositivi, ma le modifiche effettuate in questo dispositivo verranno inviate anche al resto del cluster.",
"Folder ID": "ID Cartella",
@@ -80,6 +81,7 @@
"Later": "Più Tardi",
"Local Discovery": "Individuazione Locale",
"Local State": "Stato Locale",
"Local State (Total)": "Stato Locale (Totale)",
"Major Upgrade": "Aggiornamento principale",
"Maximum Age": "Durata Massima",
"Metadata Only": "Solo i Metadati",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Vengono utilizzati i seguenti intervalli temporali: per la prima ora viene mantenuta una versione ogni 30 secondi, per il primo giorno viene mantenuta una versione ogni ora, per i primi 30 giorni viene mantenuta una versione al giorno, successivamente viene mantenuta una versione ogni settimana fino al periodo massimo impostato.",
"The maximum age must be a number and cannot be blank.": "La durata massima dev'essere un numero e non può essere vuoto.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "La durata massima di una versione (in giorni, imposta a 0 per mantenere le versioni per sempre).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Il numero di giorni deve essere un numero e non può essere vuoto.",
"The number of days to keep files in the trash can. Zero means forever.": "Il numero di giorni per conservare i file nel cestino. Zero significa per sempre.",
"The number of old versions to keep, per file.": "Il numero di vecchie versioni da mantenere, per file.",
"The number of versions must be a number and cannot be blank.": "Il numero di versioni dev'essere un numero e non può essere vuoto.",
"The path cannot be blank.": "Il percorso non può essere vuoto.",
"The rescan interval must be a non-negative number of seconds.": "L'intervallo di scansione deve essere un numero superiore a zero secondi.",
"This is a major version upgrade.": "Questo è un aggiornamento di versione principale",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Controllo Versione con Cestino",
"Unknown": "Sconosciuto",
"Unshared": "Non Condiviso",
"Unused": "Non Utilizzato",
"Up to Date": "Sincronizzato",
"Updated": "Aggiornato",
"Upgrade": "Aggiornamento",
"Upgrade To {%version%}": "Aggiorna alla {{version}}",
"Upgrading": "Aggiornamento",

197
gui/assets/lang/lang-ja.json
Normal file
@@ -0,0 +1,197 @@
{
"A negative number of days doesn't make sense.": "負の日数は無理です。",
"A new major version may not be compatible with previous versions.": "新しいメジャーバージョンは以前のバージョンと互換性がないかもしれません",
"API Key": "APIキー",
"About": "Syncthingについて",
"Actions": "メニュー",
"Add": "追加",
"Add Device": "デバイスの追加",
"Add Folder": "フォルダの追加",
"Add new folder?": "フォルダを新規作成しますか?",
"Address": "アドレス",
"Addresses": "アドレス",
"All Data": "全てのデータ",
"Allow Anonymous Usage Reporting?": "匿名での利用者状況のレポートを許可しますか?",
"Alphabetic": "ABC順",
"An external command handles the versioning. It has to remove the file from the synced folder.": "バージョニングを行う外部コマンド。同期フォルダからファイルを削除する必要があります。",
"Anonymous Usage Reporting": "匿名での利用者状況レポート",
"Any devices configured on an introducer device will be added to this device as well.": "紹介デバイスで設定されたデバイスはここにも追加されます。",
"Automatic upgrades": "自動アップデート",
"Bugs": "バグ",
"CPU Utilization": "CPU使用率",
"Changelog": "更新履歴",
"Clean out after": "後で掃除",
"Close": "閉じる",
"Command": "コマンド",
"Comment, when used at the start of a line": "行頭で使用されるコメント",
"Compression": "圧縮",
"Connection Error": "接続エラー",
"Copied from elsewhere": "他の所からコピーしました",
"Copied from original": "オリジナルからコピーしました",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 以下の協力者たちの皆さん:",
"Delete": "削除",
"Deleted": "削除した",
"Device ID": "デバイスID",
"Device Identification": "デバイスの身分証明書",
"Device Name": "デバイスの名前",
"Device {%device%} ({%address%}) wants to connect. Add new device?": "デバイス{{device}} ({{address}})が接続しますか? ",
"Devices": "デバイス",
"Disconnected": "切断されました",
"Documentation": "マニュアル",
"Download Rate": "ダウンロード率",
"Downloaded": "ダウンロード済",
"Downloading": "ダウンロード中",
"Edit": "編集",
"Edit Device": "デバイスの変更",
"Edit Folder": "フォルダーの変更",
"Editing": "編集中",
"Enable UPnP": "UPnPを許可する",
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "自動接続の場合は「dynamic」またはカンマ区切り「IPアドレス:ポート」を入力をしてください",
"Enter ignore patterns, one per line.": "無視パターンを入力してください。一列一条件。",
"Error": "エラー",
"External File Versioning": "外部ファイルバージョニング",
"File Pull Order": "ファイルの引き順番",
"File Versioning": "ファイルバージョニング",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "更新時、ファイルパーミッションの設定が無視されます。FATファイルシステムでご利用ください。",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Syncthingによって移動や削除が行われるとファイルは.stversionsフォルダに移されます。",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Syncthingによって移動や削除が行われるとファイルは.stversionsフォルダ内のタイムスタンプバージョンに移されます。",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "ファイルは他デバイスによる変更から保護されます。しかしこのデバイス上での変更は他のクラスタに送信されます。",
"Folder ID": "フォルダID",
"Folder Master": "フォルダのマスター",
"Folder Path": "フォルダパス",
"Folders": "フォルダ",
"GUI Authentication Password": "GUI 認証パスワード",
"GUI Authentication User": "GUI 認証ユーザー",
"GUI Listen Addresses": "GUIリスンアドレス",
"Generate": "生成",
"Global Discovery": "グローバルディスカバリー",
"Global Discovery Server": "グローバルディスカバリーサーバー",
"Global State": "グローバル状態",
"Help": "ヘルプ",
"Ignore": "無視",
"Ignore Patterns": "パターンを無視する",
"Ignore Permissions": "アクセス許可を無視する",
"Incoming Rate Limit (KiB/s)": "着信率制限(KiB/s)",
"Introducer": "紹介デバイス",
"Inversion of the given condition (i.e. do not exclude)": "条件の裏(と言うのは省かないで)",
"Keep Versions": "バージョン保持",
"Largest First": "大きい順",
"Last File Received": "最後に受けとったファイル",
"Last seen": "最後に見た",
"Later": "後",
"Local Discovery": "ローカルディスカバリー",
"Local State": "ローカル状態",
"Local State (Total)": "ローカル状態(総和)",
"Major Upgrade": "メジャーアップグレード",
"Maximum Age": "再",
"Metadata Only": "メータデータだけ",
"Move to top of queue": "最優先にする",
"Multi level wildcard (matches multiple directory levels)": "広範なワイルドカード(複数のディレクトリに適用されます)",
"Never": "決して",
"New Device": "新規デバイス",
"New Folder": "新規フォルダ",
"Newest First": "新しい順",
"No": "いいえ",
"No File Versioning": "ファイルバージョニング不利用",
"Notice": "通知",
"OK": "OK",
"Off": "オフ",
"Oldest First": "古い順",
"Out Of Sync": "シンク外",
"Out of Sync Items": "シンクアイテム外",
"Outgoing Rate Limit (KiB/s)": "発信率制限(KiB/s)",
"Override Changes": "変更をオーバーライドする",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "ローカルコンピュータ上のフォルダパス。存在しない場合は作成されます。チルダ(~)をショートカットで利用することができます",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "バージョンが保持されるパス(空欄の場合、デフォルトで.stversionsになります)",
"Please consult the release notes before performing a major upgrade.": "メジャーアップグレードをする前にリリースノートを参考してください。",
"Please wait": "お待ちください",
"Preview": "プレビュー",
"Preview Usage Report": "利用状況レポートのプレビュー",
"Quick guide to supported patterns": "サポートされているパターンの簡易ガイド",
"RAM Utilization": "メモリ利用率",
"Random": "ランダム",
"Release Notes": "リリースノート",
"Rescan": "再スキャン",
"Rescan All": "すべて再スキャン",
"Rescan Interval": "再スキャンの間隔",
"Restart": "再起動",
"Restart Needed": "再起動が必要です",
"Restarting": "再起動中",
"Reused": "再使用されている",
"Save": "保存",
"Scanning": "スキャン中",
"Select the devices to share this folder with.": "このフォルダをシェアするデバイスを選んでください。",
"Select the folders to share with this device.": "このデバイスでシェアしたいフォルダを選んでください",
"Settings": "設定",
"Share": "共有",
"Share Folder": "フォルダを共有する",
"Share Folders With Device": "デバイスでフォルダをシェアする",
"Share With Devices": "デバイスでシェアする",
"Share this folder?": "このフォルダを共有しますか?",
"Shared With": "シェアされている",
"Short identifier for the folder. Must be the same on all cluster devices.": "このフォルダの短いID。全てのデバイス上で同じである必要があります。",
"Show ID": "IDを表示",
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "クラスタステータスでデバイスIDの代わりに表示されます。他のデバイス上でもこれがデフォルトとして表示されます。",
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "クラスタステータスでデバイスIDの代わりに表示されます。空欄の場合デバイスが要請する名前に更新されます。",
"Shutdown": "シャットダウン",
"Shutdown Complete": "シャットダウン完了",
"Simple File Versioning": "簡易ファイルバージョニング",
"Single level wildcard (matches within a directory only)": "ワイルドカード(一つのディレクトリだけに適用されます)",
"Smallest First": "小さい順",
"Source Code": "ソースコード",
"Staggered File Versioning": "簡易ファイルバージョニング",
"Start Browser": "ブラウザーを起動する",
"Stopped": "止り",
"Support": "サポート",
"Sync Protocol Listen Addresses": "同期プロトコル待ち受けるアドレス",
"Syncing": "同期中",
"Syncthing has been shut down.": "Syncthingがシャットダウンしました。",
"Syncthing includes the following software or portions thereof:": "Syncthingは以下のソフトウェアかその一部を内包しています:",
"Syncthing is restarting.": "Syncthingが再起動しています",
"Syncthing is upgrading.": "Syncthingがアップグレード中です",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthingが落ちているか、インターネット接続に問題があります。リトライ中です…",
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "リクエストの処理に問題があるようです。問題が継続する場合、ページを更新するかSyncthingを再起動してください。",
"The aggregated statistics are publicly available at {%url%}.": "全体の統計は{{url}}でご覧いただけます。",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "設定がセーブされましたが、有効にはなっていません。設定を有効にするにはSyncthingを再起動する必要があります。",
"The device ID cannot be blank.": "デバイスIDは空欄にできません",
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "ここで入力したデバイスIDは他デバイス上の\"編集 > IDを表示\"で見ることができます。スペースとハイフンは無視されます。",
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "暗号化された使用状況レポートが\b日ごとに送られます。これはプラットフォーム、フォルダの大きさ、アプリのバージョンを追跡するために利用されます。レポートのデータが変更された場合、このダイアログがまた表示されます。",
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "入力されたデバイスIDが正しくありません。52から56文字のアルファベットと数字かスペース、ハイフンの列である必要があります。",
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "第一コマンドパラメータはフォルダパス、第二パラメータはフォルダ内の相対パスです。",
"The folder ID cannot be blank.": "フォルダIDは空欄にできません",
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "フォルダID(64文字以内)は数字、ドット(.)、ハイフン(-)、アンダースコア(_)で構成されている必要があります。",
"The folder ID must be unique.": "フォルダIDは固有である必要があります。",
"The folder path cannot be blank.": "フォルダーパスは空欄にできません",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "以下の間隔が使われます: 最初の一時間はバージョンは30秒ごとに保持、最初の一日は一時間ごとに、最初の30日は一日ごとに、最大寿命までは一週間ごとに。",
"The maximum age must be a number and cannot be blank.": "最大日数は番号である必要があり、空欄ではいけません。",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "バージョンを保持する最大日数(0にすると永続的に保持します)",
"The number of days must be a number and cannot be blank.": "日数は番号である必要があり、空欄ではいけません。",
"The number of days to keep files in the trash can. Zero means forever.": "ゴミ箱にファイルを保持する日数。0だと永続的に保持します。",
"The number of old versions to keep, per file.": "ファイルごとの保持する古いバージョンの数",
"The number of versions must be a number and cannot be blank.": "バージョンの数は番号である必要があり、空欄ではいけません。",
"The path cannot be blank.": "パスは空欄にできません",
"The rescan interval must be a non-negative number of seconds.": "リスキャン間隔はマイナス秒ではいけません。",
"This is a major version upgrade.": "メージャーアップグレードです。",
"Trash Can File Versioning": "ゴミ箱のファイルバージョニング",
"Unknown": "不明",
"Unshared": "シェアされていない",
"Unused": "使われていない",
"Up to Date": "最新",
"Updated": "更新済み",
"Upgrade": "アップグレード",
"Upgrade To {%version%}": "{{version}}にアップグレードする",
"Upgrading": "アップグレード中",
"Upload Rate": "アップロード率",
"Uptime": "稼働時間",
"Use HTTPS for GUI": "GUIにHTTPSを使う",
"Version": "バージョン",
"Versions Path": "バージョンパス",
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "バージョンは、最大寿命もしくは最大同時数を超えた場合、自動的に削除されます。",
"When adding a new device, keep in mind that this device must be added on the other side too.": "新しいデバイスを加える際、そのデバイスにもこのデバイスを加える必要があることを留意してください。",
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "新しいフォルダを追加する際、フォルダIDはケースセンシティブで全てのデバイス上で完全に同じである必要があります。",
"Yes": "はい",
"You must keep at least one version.": "バージョン一つ少なくとも保持してください",
"full documentation": "完全マニュアル",
"items": "アイテム",
"{%device%} wants to share folder \"{%folder%}\".": "{{device}}がフォルダ\"{{folder}}\"をシェアしがたっています。"
}

@@ -30,6 +30,7 @@
"Copied from original": "원본에서 복사됨",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "삭제",
"Deleted": "Deleted",
"Device ID": "기기 ID",
"Device Identification": "기기 식별자",
"Device Name": "기기 이름",
@@ -80,6 +81,7 @@
"Later": "나중에",
"Local Discovery": "로컬 노드 검색",
"Local State": "로컬 상태",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "메이저 업데이트",
"Maximum Age": "최대 보존 기간",
"Metadata Only": "메타데이터만",
@@ -175,6 +177,7 @@
"Unshared": "공유되지 않음",
"Unused": "사용되지 않음",
"Up to Date": "최신 데이터",
"Updated": "Updated",
"Upgrade": "업데이트",
"Upgrade To {%version%}": "{{version}} 으로 업데이트",
"Upgrading": "업데이트 중",

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Privalo būti teigiamas skaičius.",
"A new major version may not be compatible with previous versions.": "Nauja versija gali būti nesuderinama su senomis versijomis.",
"API Key": "API raktas",
"About": "Apie programą",
@@ -20,7 +20,7 @@
"Bugs": "Klaidos",
"CPU Utilization": "Procesoriaus panaudojimas",
"Changelog": "Pasikeitimai",
"Clean out after": "Clean out after",
"Clean out after": "Išvalyto po",
"Close": "Uždaryti",
"Command": "Komanda",
"Comment, when used at the start of a line": "Komentaras naudojamas naujoje eilutėje",
@@ -30,6 +30,7 @@
"Copied from original": "Nukopijuota iš originalo",
"Copyright © 2015 the following Contributors:": "Visos teisės saugomos © 2015 šių bendraautorių:",
"Delete": "Trinti",
"Deleted": "Ištrinta",
"Device ID": "Įrenginio ID",
"Device Identification": "Įrenginio identifikacija",
"Device Name": "Įrenginio pavadinimas",
@@ -52,7 +53,7 @@
"File Pull Order": "Failų siuntimo tvarka",
"File Versioning": "Versijų valdymas",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Ieškant pakeitimų, į failų leidimų bitus yra nekreipiama dėmesio. Naudoti FAT failų sistemose.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Failai perkeliami į .stversions aplanką kai tampa pakeisti arba ištrinti.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Programai Syncthing pakeičiant ar ištrinant failus, jie yra perkeliami į datomis pažymėtas versijas, aplanke .stversions.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Failai apsaugoti nuo pakeitimų atliktų kituose įrenginiuose, bet pakeitimai šiame įrenginyje bus nusiųsti kitiems.",
"Folder ID": "Aplanko ID",
@@ -80,6 +81,7 @@
"Later": "Vėliau",
"Local Discovery": "Vietinis matomumas",
"Local State": "Vietinė būsena",
"Local State (Total)": "Vietinė būsena (Bendrai)",
"Major Upgrade": "Stambus atnaujinimas",
"Maximum Age": "Maksimalus amžius",
"Metadata Only": "Metaduomenims",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Šie pertraukų nustatymai naudojami: pirmą valandą versijos laikomos 30 sekundžių, pirmą dieną versijos laikomos valandą, pirmas 30 dienų versijos laikomos parą, kol nebus viršytas nustatytas maksimalus amžius.",
"The maximum age must be a number and cannot be blank.": "Maksimalus amžius turi būti skaitmuo ir negali būti tuščias laukelis.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maksimalus laikas kurį bus saugojama versija (dienomis, nustatykite 0 norėdami saugoti amžinai).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Dienų skaičius turi būti teigiamas skaičius.",
"The number of days to keep files in the trash can. Zero means forever.": "Kiek dienų laikyti failus šiukšliadėžėje. Nulis reiškia amžinai.",
"The number of old versions to keep, per file.": "Kiek failo versijų saugoti.",
"The number of versions must be a number and cannot be blank.": "Versijų skaičius turi būti skaitmuo ir negali būti tuščias laukelis.",
"The path cannot be blank.": "Kelias negali būti tuščias.",
"The rescan interval must be a non-negative number of seconds.": "Nuskaitymo dažnis negali būti neigiamas skaičius.",
"This is a major version upgrade.": "Tai yra stambus atnaujinimas.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Šiukšliadėžės versijų valdymas",
"Unknown": "Nežinoma",
"Unshared": "Nesidalinama",
"Unused": "Nenaudojamas",
"Up to Date": "Atnaujinta",
"Updated": "Atnaujinta",
"Upgrade": "Atnaujinimas",
"Upgrade To {%version%}": "Atnaujinti į {{version}}",
"Upgrading": "Atnaujinama",

@@ -1,5 +1,5 @@
|
||||
{
|
||||
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
|
||||
"A negative number of days doesn't make sense.": "Et negativt antall dager gir ikke mening.",
|
||||
"A new major version may not be compatible with previous versions.": "En ny hovedversjon kan bli ikke-kompatibel med en eldre versjon.",
|
||||
"API Key": "API-nøkkel",
|
||||
"About": "Om",
|
||||
@@ -20,7 +20,7 @@
|
||||
"Bugs": "Programfeil",
|
||||
"CPU Utilization": "CPU-utnyttelse",
|
||||
"Changelog": "Endringslog",
|
||||
"Clean out after": "Clean out after",
|
||||
"Clean out after": "Tøm etter",
|
||||
"Close": "Lukk",
|
||||
"Command": "Kommando",
|
||||
"Comment, when used at the start of a line": "Kommentar, når det blir brukt i starten av en linje.",
|
||||
@@ -30,7 +30,8 @@
|
||||
"Copied from original": "Kopiert fra original",
|
||||
"Copyright © 2015 the following Contributors:": "Kopirett © 2015 de følgende bidragsytere:",
|
||||
"Delete": "Slett",
|
||||
"Device ID": "Enhet ID",
|
||||
"Deleted": "Slettet",
|
||||
"Device ID": "Enhets ID",
|
||||
"Device Identification": "Enhetskjennemerke",
|
||||
"Device Name": "Navn På Enhet",
|
||||
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Enhet {{device}} ({{address}}) ønsker å koble seg til. Legg til ny enhet?",
|
||||
@@ -49,10 +50,10 @@
|
||||
"Enter ignore patterns, one per line.": "Skriv inn mønster som skal utelates, ett per linje.",
|
||||
"Error": "Feilmelding",
|
||||
"External File Versioning": "Ekstern versjonskontroll",
|
||||
"File Pull Order": "Fil henterekkefølge",
|
||||
"File Pull Order": "Filenes Henterekkefølge",
|
||||
"File Versioning": "Versjonskontroll",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Fil bit-rettigheter ignoreres når forandringer oppdages. Bruk FAT filsystem. ",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Filer som slettes eller erstattes av Syncthing flyttes til katalogen .stversions",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Filer flyttes til en datostemplet versjon i .stversions-katalogen når den oppdateres eller slettes av Syncthing.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Filer er beskyttet mot endringer som er gjort på andre enheter, men endringer som er gjort på denne enheten blir sendt til resten av gruppen.",
|
||||
"Folder ID": "Mappe ID",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Senere",
|
||||
"Local Discovery": "Lokal Søking",
|
||||
"Local State": "Lokal Tilstand",
|
||||
"Local State (Total)": "Lokal Tilstand (Total)",
|
||||
"Major Upgrade": "Hovedoppgradering",
|
||||
"Maximum Age": "Maksimal Levetid",
|
||||
"Metadata Only": "Kun metadata",
|
||||
@@ -99,7 +101,7 @@
|
||||
"Out of Sync Items": "Ikke Synkroniserte Element",
|
||||
"Outgoing Rate Limit (KiB/s)": "Utgående Hastighetsbegrensning (KiB/s)",
|
||||
"Override Changes": "Overstyr Endringer",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Plasseringen av mappen på datamaskinen. Blir opprettet om den ikke finnes. Krøllstrektegnet (~) kan brukes som forkortelse for",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Plasseringen av mappen på datamaskinen. Denne vil bli opprettet dersom den ikke finnes. Krøllstrektegnet (~) kan brukes som forkortelse for",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Plasseringen for lagrede versjoner (la denne være tom for å bruke standard .stversions-mappen i mappen).",
|
||||
"Please consult the release notes before performing a major upgrade.": "Se \"release notes\" før en hovedoppgradering utføres.",
|
||||
"Please wait": "Vennligst vent",
|
||||
@@ -163,18 +165,19 @@
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Følgende intervall blir brukt: den første timen blir en versjon lagret hvert 30. sekund, den første dagen blir en versjon lagret hver time, de første 30 dagene blir en versjon lagret hver dag, og inntil maksimal levetid blir en versjon lagret hver uke.",
|
||||
"The maximum age must be a number and cannot be blank.": "Maksimal levetid må være et tall og kan ikke være tomt.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maksimal tid å beholde en versjon (i dager, sett til 0 for å beholde versjoner på ubegrenset tid).",
|
||||
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
|
||||
"The number of days must be a number and cannot be blank.": "Antall dager må være et tall og kan ikke være tomt.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "Antall dager man skal bevare filene i papirkurven. Null betyr for alltid.",
|
||||
"The number of old versions to keep, per file.": "Antall gamle versjoner å beholde, per fil.",
|
||||
"The number of versions must be a number and cannot be blank.": "Antall versjoner må være et tall og kan ikke være tomt.",
|
||||
"The path cannot be blank.": "Plasseringen kan ikke være tom.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "Antall sekund i skanneintervallet kan ikke være negativt.",
|
||||
"This is a major version upgrade.": "Dette er en hovedoppgradering",
|
||||
"Trash Can File Versioning": "Trash Can File Versioning",
|
||||
"Trash Can File Versioning": "Fil versjonskontroll i papirkurven",
|
||||
"Unknown": "Ukjent",
|
||||
"Unshared": "Ikke delt",
|
||||
"Unused": "Ikke i bruk",
|
||||
"Up to Date": "Oppdatert",
|
||||
"Updated": "Oppdatert",
|
||||
"Upgrade": "Oppgradere",
|
||||
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
|
||||
"Upgrading": "Oppgraderer",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Gekopieerd van het origineel",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 de volgende Bijdragers:",
|
||||
"Delete": "Verwijderen",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "Apparaat-ID",
|
||||
"Device Identification": "Apparaat identificatie",
|
||||
"Device Name": "Naam apparaat",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Later",
|
||||
"Local Discovery": "Lokaal zoeken",
|
||||
"Local State": "Lokale status",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "Grote update",
|
||||
"Maximum Age": "Maximum leeftijd",
|
||||
"Metadata Only": "Alleen Metadata",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Niet gedeeld",
|
||||
"Unused": "Ongebruikt",
|
||||
"Up to Date": "Gesynchroniseerd",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "Update",
|
||||
"Upgrade To {%version%}": "Upgrade naar {{version}}",
|
||||
"Upgrading": "Bezig met upgrade",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Kopiert frå originalen",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
|
||||
"Delete": "Slett",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "Eining ID",
|
||||
"Device Identification": "Einingskjennemerke",
|
||||
"Device Name": "Namn På Eining",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Seinare",
|
||||
"Local Discovery": "Lokal oppdaging",
|
||||
"Local State": "Lokal Tilstand",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "Major Upgrade",
|
||||
"Maximum Age": "Maksimal Levetid",
|
||||
"Metadata Only": "Berre metadata",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Ikkje delt",
|
||||
"Unused": "Ubrukt",
|
||||
"Up to Date": "Oppdatert",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "Upgrade",
|
||||
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
|
||||
"Upgrading": "Oppgraderer",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Skopiowane z oryginału",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
|
||||
"Delete": "Usuń",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "ID urządzenia",
|
||||
"Device Identification": "Identyfikator urządzenia",
|
||||
"Device Name": "Nazwa urządzenia",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Później",
|
||||
"Local Discovery": "Lokalne odnajdywanie",
|
||||
"Local State": "Status lokalny",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "Major Upgrade",
|
||||
"Maximum Age": "Maksymalny wiek",
|
||||
"Metadata Only": "Tylko metadane",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Nieudostępnione",
|
||||
"Unused": "Nieużywane",
|
||||
"Up to Date": "Aktualny",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "Aktualizacja",
|
||||
"Upgrade To {%version%}": "Aktualizuj do {{version}}",
|
||||
"Upgrading": "Aktualizowanie",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
|
||||
"A negative number of days doesn't make sense.": "Um número negativo de dias não faz sentido.",
|
||||
"A new major version may not be compatible with previous versions.": "Uma nova versão principal pode não ser compatível com versões anteriores.",
|
||||
"API Key": "Chave da API",
|
||||
"About": "Sobre",
|
||||
@@ -20,7 +20,7 @@
|
||||
"Bugs": "Erros",
|
||||
"CPU Utilization": "Uso de CPU",
|
||||
"Changelog": "Registro de alterações",
|
||||
"Clean out after": "Clean out after",
|
||||
"Clean out after": "Limpar depois de",
|
||||
"Close": "Fechar",
|
||||
"Command": "Comando",
|
||||
"Comment, when used at the start of a line": "Comentário, se usado no início de uma linha",
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Copiado do original",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015. Direitos reservados aos seguintes colaboradores:",
|
||||
"Delete": "Apagar",
|
||||
"Deleted": "Apagado",
|
||||
"Device ID": "ID do dispositivo",
|
||||
"Device Identification": "Identificação do dispositivo",
|
||||
"Device Name": "Nome do dispositivo",
|
||||
@@ -52,7 +53,7 @@
|
||||
"File Pull Order": "Ordem de retirada do arquivo",
|
||||
"File Versioning": "Versionamento de arquivos",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Os bits de permissão de um arquivo são ignorados durante as verificações. Use em sistemas de arquivo FAT.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Os arquivos são motivos para a pasta .stversions quando substituídos ou apagados pelo Syncthing.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Os arquivos são renomeados com suas datas na pasta .stversions quando são substituídos ou removidos pelo Syncthing.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Os arquivos estão protegidos contra alterações feitas em outros dispositivos, mas alterações feitas neste dispositivo serão enviadas ao resto do grupo.",
|
||||
"Folder ID": "ID da pasta",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Depois",
|
||||
"Local Discovery": "Descoberta local",
|
||||
"Local State": "Estado local",
|
||||
"Local State (Total)": "Estado local (total)",
|
||||
"Major Upgrade": "Atualização \"major\"",
|
||||
"Maximum Age": "Idade máxima",
|
||||
"Metadata Only": "Somente metadados",
|
||||
@@ -161,20 +163,21 @@
|
||||
"The folder ID must be unique.": "O ID da pasta deve ser único.",
|
||||
"The folder path cannot be blank.": "O caminho da pasta não pode ficar vazio.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "São utilizados os seguintes intervalos: na primeira hora é guardada uma versão a cada 30 segundos, no primeiro dia é guardada uma versão a cada hora, nos primeiros 30 dias é guardada uma versão por dia e, até que atinja a idade máxima, é guardada uma versão por semana.",
|
||||
"The maximum age must be a number and cannot be blank.": "A idade máxima deve ser um valor numérico e não pode ficar vazio.",
|
||||
"The maximum age must be a number and cannot be blank.": "A idade máxima deve ser um valor numérico. O campo não pode ficar vazio.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "O número máximo de dias em que uma versão é guardada. (Use 0 para manter para sempre).",
|
||||
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
|
||||
"The number of days must be a number and cannot be blank.": "O número de dias deve ser um número valido e não pode ficar em branco.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "O número de dias em que são mantidos os arquivos da lixeira. Zero significa para sempre.",
|
||||
"The number of old versions to keep, per file.": "O número de versões antigas a serem mantidas, por arquivo.",
|
||||
"The number of versions must be a number and cannot be blank.": "O número de versões deve ser um valor numério e não pode ficar vazio.",
|
||||
"The number of versions must be a number and cannot be blank.": "O número de versões deve ser um valor numérico. O campo não pode ficar vazio.",
|
||||
"The path cannot be blank.": "O caminho não pode ficar vazio.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "O intervalo entre verificações deve ser um número positivo de segundos.",
|
||||
"This is a major version upgrade.": "Esta é uma atualização para uma versão \"major\".",
|
||||
"Trash Can File Versioning": "Trash Can File Versioning",
|
||||
"Trash Can File Versioning": "Versionamento de arquivos da lixeira",
|
||||
"Unknown": "Desconhecida",
|
||||
"Unshared": "Não compartilhada",
|
||||
"Unused": "Não utilizado",
|
||||
"Up to Date": "Sincronizada",
|
||||
"Updated": "Atualizado",
|
||||
"Upgrade": "Atualização",
|
||||
"Upgrade To {%version%}": "Atualizar para {{version}}",
|
||||
"Upgrading": "Atualizando",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Copiado do original",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 os seguintes contribuidores:",
|
||||
"Delete": "Eliminar",
|
||||
"Deleted": "Eliminado",
|
||||
"Device ID": "ID do dispositivo",
|
||||
"Device Identification": "Identificação do dispositivo",
|
||||
"Device Name": "Nome do dispositivo",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Mais tarde",
|
||||
"Local Discovery": "Busca local",
|
||||
"Local State": "Estado local",
|
||||
"Local State (Total)": "Estado local (total)",
|
||||
"Major Upgrade": "Actualização importante",
|
||||
"Maximum Age": "Idade máxima",
|
||||
"Metadata Only": "Metadados apenas",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Não partilhada",
|
||||
"Unused": "Não utilizado",
|
||||
"Up to Date": "Sincronizado",
|
||||
"Updated": "Actualizado",
|
||||
"Upgrade": "Actualizar",
|
||||
"Upgrade To {%version%}": "Actualizar para {{version}}",
|
||||
"Upgrading": "Actualizando",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Copiat din original",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright ©2015 Următorii Contribuitori:",
|
||||
"Delete": "Şterge",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "ID Dispozitiv",
|
||||
"Device Identification": "Identificare Dispozitiv",
|
||||
"Device Name": "Nume Dispozitiv",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Mai tîrziu",
|
||||
"Local Discovery": "Găsire Locală",
|
||||
"Local State": "Status Local",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "Major Upgrade",
|
||||
"Maximum Age": "Vârsta Maximă",
|
||||
"Metadata Only": "Doar Metadate",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Neîmpărțit",
|
||||
"Unused": "Nefolosit",
|
||||
"Up to Date": "La Zi",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "Upgrade",
|
||||
"Upgrade To {%version%}": "Actualizează La Versiunea {{version}}",
|
||||
"Upgrading": "Se Actualizează",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
|
||||
"A negative number of days doesn't make sense.": "Отрицательное число дней не имеет значения.",
|
||||
"A new major version may not be compatible with previous versions.": "Новое обновление основной версии может быть несовместимо с предыдущими версиями.",
|
||||
"API Key": "Ключ API",
|
||||
"About": "О программе",
|
||||
@@ -20,7 +20,7 @@
|
||||
"Bugs": "Ошибки",
|
||||
"CPU Utilization": "Загрузка ЦПУ",
|
||||
"Changelog": "Журнал изменений",
|
||||
"Clean out after": "Clean out after",
|
||||
"Clean out after": "Очистить после",
|
||||
"Close": "Закрыть",
|
||||
"Command": "Команда",
|
||||
"Comment, when used at the start of a line": "Комментарий, если используется в начале строки",
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Скопировано с оригинала",
|
||||
"Copyright © 2015 the following Contributors:": "Все права защищены ©, 2015 участники:",
|
||||
"Delete": "Удалить",
|
||||
"Deleted": "Удалено",
|
||||
"Device ID": "ID устройства",
|
||||
"Device Identification": "Идентификация устройства",
|
||||
"Device Name": "Имя устройства",
|
||||
@@ -52,7 +53,7 @@
|
||||
"File Pull Order": "Порядок получения файлов",
|
||||
"File Versioning": "Управление версиями",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Права на файлы игнорируются при поиске изменений. Используется на файловой системе FAT.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Файлы перемещаются в .stversions после замены или удаления Syncthing.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Файлы с временнОй меткой версии помещаются в папку .stversions при их замене или удалении Syncthing.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Файлы защищены от изменений сделанных на других устройствах, но изменения сделанные на этом устройстве будут отправлены всему кластеру.",
|
||||
"Folder ID": "ID папки",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Потом",
|
||||
"Local Discovery": "Локальное обнаружение",
|
||||
"Local State": "Локальное состояние",
|
||||
"Local State (Total)": "Локально (всего)",
|
||||
"Major Upgrade": "Обновление основной версии",
|
||||
"Maximum Age": "Максимальный срок",
|
||||
"Metadata Only": "Только метаданные",
|
||||
@@ -163,18 +165,19 @@
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Используются следующие интервалы: в первый час версия меняется каждые 30 секунд, в первый день - каждый час, первые 30 дней - каждый день, после, до максимального срока - каждую неделю.",
|
||||
"The maximum age must be a number and cannot be blank.": "Максимальный срок должен быть числом и не может быть пустым.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максимальный срок хранения версии (в днях, 0 значит вечное хранение).",
|
||||
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
|
||||
"The number of days must be a number and cannot be blank.": "Количество дней должно быть числом и не может быть пустым.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "Количество дней хранения файлов в корзине. Ноль значит навсегда.",
|
||||
"The number of old versions to keep, per file.": "Количество хранимых версий файла.",
|
||||
"The number of versions must be a number and cannot be blank.": "Количество версий должно быть числом и не может быть пустым.",
|
||||
"The path cannot be blank.": "Путь не может быть пустым.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "Интервал пересканирования должен быть неотрицательным количеством секунд.",
|
||||
"This is a major version upgrade.": "Это обновление основной версии продукта.",
|
||||
"Trash Can File Versioning": "Trash Can File Versioning",
|
||||
"Trash Can File Versioning": "Использовать Корзину для версий файлов",
|
||||
"Unknown": "Неизвестно",
|
||||
"Unshared": "Необщедоступно",
|
||||
"Unused": "Не используется",
|
||||
"Up to Date": "Обновлено",
|
||||
"Updated": "Обновлено",
|
||||
"Upgrade": "Обновить",
|
||||
"Upgrade To {%version%}": "Обновить до {{version}}",
|
||||
"Upgrading": "Обновление",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Oförändrat",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 följande medverkande:",
|
||||
"Delete": "Radera",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "Enhets-ID",
|
||||
"Device Identification": "Enhetsidentifikation",
|
||||
"Device Name": "Enhetsnamn",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Senare",
|
||||
"Local Discovery": "Lokal uppslagning",
|
||||
"Local State": "Lokal status",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "Stor uppgradering",
|
||||
"Maximum Age": "Högsta åldersgräns",
|
||||
"Metadata Only": "Endast metadata",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Inte delad",
|
||||
"Unused": "Oanvänd",
|
||||
"Up to Date": "Helt uppdaterad",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "Uppgradering",
|
||||
"Upgrade To {%version%}": "Uppgradera till {{version}}",
|
||||
"Upgrading": "Uppgraderar",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Aslından kopyalanmış",
|
||||
"Copyright © 2015 the following Contributors:": "Telif Hakkı © 2015 Katkıda bulunanlar:",
|
||||
"Delete": "Sil",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "Cihaz ID",
|
||||
"Device Identification": "Cihaz Kimliği",
|
||||
"Device Name": "Cihaz Adı",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Sonra",
|
||||
"Local Discovery": "Yerel bulma",
|
||||
"Local State": "Yerel Durum",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "Major Upgrade",
|
||||
"Maximum Age": "Azami Süre",
|
||||
"Metadata Only": "Metadata Only",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Paylaşılmayan",
|
||||
"Unused": "Kullanılmayan",
|
||||
"Up to Date": "Güncel",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "Upgrade",
|
||||
"Upgrade To {%version%}": "{{version}} sürümüne yükselt",
|
||||
"Upgrading": "Yükseltiliyor",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Скопійовано з оригіналу",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 наступних контриб’юторів:",
|
||||
"Delete": "Видалити",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "ID пристрою",
|
||||
"Device Identification": "Ідентифікатор пристрою",
|
||||
"Device Name": "Назва пристрою",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "Пізніше",
|
||||
"Local Discovery": "Локальне виявлення",
|
||||
"Local State": "Локальний статус",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "Мажорне оновлення",
|
||||
"Maximum Age": "Максимальний вік",
|
||||
"Metadata Only": "Тільки метадані",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "Не розповсюджується",
|
||||
"Unused": "Не використовується",
|
||||
"Up to Date": "Актуальна версія",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "Оновлення",
|
||||
"Upgrade To {%version%}": "Оновити до {{version}}",
|
||||
"Upgrading": "Оновлення",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
|
||||
"A negative number of days doesn't make sense.": "天数不能为负。",
|
||||
"A new major version may not be compatible with previous versions.": "重大更新可能与之前的版本之间无法兼容",
|
||||
"API Key": "API Key",
|
||||
"About": "关于",
|
||||
@@ -20,7 +20,7 @@
|
||||
"Bugs": "Bug汇报",
|
||||
"CPU Utilization": "CPU使用率",
|
||||
"Changelog": "更新日志",
|
||||
"Clean out after": "Clean out after",
|
||||
"Clean out after": "在该时间后清除:",
|
||||
"Close": "关闭",
|
||||
"Command": "命令",
|
||||
"Comment, when used at the start of a line": "注释,在行首使用",
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "从源复制",
|
||||
"Copyright © 2015 the following Contributors:": "版权 ©2015 由下列贡献者所有:",
|
||||
"Delete": "删除",
|
||||
"Deleted": "已删除",
|
||||
"Device ID": "设备标识",
|
||||
"Device Identification": "设备标识",
|
||||
"Device Name": "设备名",
|
||||
@@ -52,7 +53,7 @@
|
||||
"File Pull Order": "文件拉取顺序",
|
||||
"File Versioning": "版本控制",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "当寻找文件变更时,忽略文件权限。用于FAT文件系统。",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "当文件被 Syncthing 替换或删时,将会被移动到 .stversions 文件夹",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "当某个文件在其他设备被替换或删除时,本设备将会在 .stversions 文件夹中保留该文件的备份,并在文件名中加入时间戳信息。",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "在其它设备中对该文件夹内文件的修改并不会被同步到本机,但是在本机上对其的修改,则会被同步到其它设备中。",
|
||||
"Folder ID": "文件夹标识",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "稍后",
|
||||
"Local Discovery": "在局域网上寻找设备",
|
||||
"Local State": "本地状态",
|
||||
"Local State (Total)": "本地状态汇总",
|
||||
"Major Upgrade": "重大更新",
|
||||
"Maximum Age": "历史版本最长保留时间",
|
||||
"Metadata Only": "仅元数据",
|
||||
@@ -163,18 +165,19 @@
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "保留的历史版本会满足以下条件:最近一小时内的历史版本,更新间隔小于30秒的仅保留一份。最近一天内的历史版本,更新间隔小于1小时的仅保留一份。最近一个月内的历史版本,更新间隔小于1天的仅保留一份。距离现在超过一个月且小于最长保留时间的,更新间隔小于1周的仅保留一份。",
|
||||
"The maximum age must be a number and cannot be blank.": "最长保留时间必须为数字,且不能为空。",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "历史版本保留的最长天数,0为永久保存",
|
||||
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
|
||||
"The number of days must be a number and cannot be blank.": "天数必须为数字,且不能为空。",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "文件保存在回收站的天数。零表示永久。",
|
||||
"The number of old versions to keep, per file.": "每个文件保留的版本数量上限。",
|
||||
"The number of versions must be a number and cannot be blank.": "保留版本数量必须为数字,且不能为空。",
|
||||
"The path cannot be blank.": "路径不能为空",
|
||||
"The rescan interval must be a non-negative number of seconds.": "扫描间隔单位为秒,且不能为负数。",
|
||||
"This is a major version upgrade.": "这是一个重大版本更新。",
|
||||
"Trash Can File Versioning": "Trash Can File Versioning",
|
||||
"Trash Can File Versioning": "回收站式版本控制",
|
||||
"Unknown": "未知",
|
||||
"Unshared": "未共享",
|
||||
"Unused": "未使用",
|
||||
"Up to Date": "同步完成",
|
||||
"Updated": "已更新",
|
||||
"Upgrade": "更新",
|
||||
"Upgrade To {%version%}": "升级至版本{{version}}",
|
||||
"Upgrading": "升级中",
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 下列貢獻者:",
|
||||
"Delete": "刪除",
|
||||
"Deleted": "Deleted",
|
||||
"Device ID": "裝置識別碼",
|
||||
"Device Identification": "裝置識別",
|
||||
"Device Name": "裝置名稱",
|
||||
@@ -80,6 +81,7 @@
|
||||
"Later": "稍後",
|
||||
"Local Discovery": "本地探索",
|
||||
"Local State": "本地狀態",
|
||||
"Local State (Total)": "Local State (Total)",
|
||||
"Major Upgrade": "重大更新",
|
||||
"Maximum Age": "最長保留時間",
|
||||
"Metadata Only": "僅中繼資料",
|
||||
@@ -175,6 +177,7 @@
|
||||
"Unshared": "未共享",
|
||||
"Unused": "未使用",
|
||||
"Up to Date": "最新",
|
||||
"Updated": "Updated",
|
||||
"Upgrade": "升級",
|
||||
"Upgrade To {%version%}": "升級至 {{version}}",
|
||||
"Upgrading": "正在升級",
|
||||
|
||||
@@ -1 +1 @@
|
||||
var langPrettyprint = {"bg":"Bulgarian","ca":"Catalan","ca@valencia":"Catalan (Valencian)","cs":"Czech","de":"German","el":"Greek","en":"English","en-GB":"English (United Kingdom)","es":"Spanish","es-ES":"Spanish (Spain)","fi":"Finnish","fr":"French","hu":"Hungarian","it":"Italian","ko-KR":"Korean (Korea)","lt":"Lithuanian","nb":"Norwegian Bokmål","nl":"Dutch","nn":"Norwegian Nynorsk","pl":"Polish","pt-BR":"Portuguese (Brazil)","pt-PT":"Portuguese (Portugal)","ro-RO":"Romanian (Romania)","ru":"Russian","sv":"Swedish","tr":"Turkish","uk":"Ukrainian","zh-CN":"Chinese (China)","zh-TW":"Chinese (Taiwan)"}
|
||||
var langPrettyprint = {"bg":"Bulgarian","ca":"Catalan","ca@valencia":"Catalan (Valencian)","cs":"Czech","de":"German","el":"Greek","en":"English","en-GB":"English (United Kingdom)","es":"Spanish","es-ES":"Spanish (Spain)","fi":"Finnish","fr":"French","hu":"Hungarian","it":"Italian","ja":"Japanese","ko-KR":"Korean (Korea)","lt":"Lithuanian","nb":"Norwegian Bokmål","nl":"Dutch","nn":"Norwegian Nynorsk","pl":"Polish","pt-BR":"Portuguese (Brazil)","pt-PT":"Portuguese (Portugal)","ro-RO":"Romanian (Romania)","ru":"Russian","sv":"Swedish","tr":"Turkish","uk":"Ukrainian","zh-CN":"Chinese (China)","zh-TW":"Chinese (Taiwan)"}
|
||||
|
||||
@@ -1 +1 @@
|
||||
var validLangs = ["bg","ca","ca@valencia","cs","de","el","en","en-GB","es","es-ES","fi","fr","hu","it","ko-KR","lt","nb","nl","nn","pl","pt-BR","pt-PT","ro-RO","ru","sv","tr","uk","zh-CN","zh-TW"]
|
||||
var validLangs = ["bg","ca","ca@valencia","cs","de","el","en","en-GB","es","es-ES","fi","fr","hu","it","ja","ko-KR","lt","nb","nl","nn","pl","pt-BR","pt-PT","ro-RO","ru","sv","tr","uk","zh-CN","zh-TW"]
|
||||
|
||||
17
gui/index.html
Normal file → Executable file
@@ -16,15 +16,13 @@
<meta name="author" content="">
<link rel="shortcut icon" href="assets/img/favicon.png">

<title>Syncthing | {{thisDeviceName()}}</title>
<title>{{thisDeviceName()}} | Syncthing</title>
<link href="vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<link href="assets/font/raleway.css" rel="stylesheet">
<link href="assets/css/overrides.css" rel="stylesheet">
</head>

<body>
<div ng-controller="EventController"></div>

<!-- Top bar -->

<nav class="navbar navbar-top navbar-default" role="navigation">
@@ -273,10 +271,12 @@
<th><span class="glyphicon glyphicon-share-alt"></span> <span translate>Shared With</span></th>
<td class="text-right">{{sharesFolder(folder)}}</td>
</tr>
<tr ng-if="!folder.readOnly && folderStats[folder.id].lastFile">
<tr ng-if="!folder.readOnly && folderStats[folder.id].lastFile && folderStats[folder.id].lastFile.filename">
<th><span class="glyphicon glyphicon-transfer"></span> <span translate>Last File Received</span></th>
<td class="text-right">
<span title="{{folderStats[folder.id].lastFile.filename}} @ {{folderStats[folder.id].lastFile.at | date:'yyyy-MM-dd HH:mm:ss'}}">
<span translate ng-if="!folderStats[folder.id].lastFile.deleted">Updated</span>
<span translate ng-if="folderStats[folder.id].lastFile.deleted">Deleted</span>
{{folderStats[folder.id].lastFile.filename | basename}}
</span>
</td>
@@ -327,6 +327,10 @@
<th><span class="glyphicon glyphicon-cloud-upload"></span> <span translate>Upload Rate</span></th>
<td class="text-right">{{connectionsTotal.outbps | binary}}B/s ({{connectionsTotal.outBytesTotal | binary}}B)</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-home"></span> <span translate>Local State (Total)</span> </th>
<td class="text-right">{{foldersTotalLocalFiles | alwaysNumber}} <span translate>items</span>, ~{{ foldersTotalLocalBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-th"></span> <span translate>RAM Utilization</span></th>
<td class="text-right">{{system.sys | binary}}B</td>
@@ -655,6 +659,7 @@
<label>
<input type="checkbox" ng-model="currentFolder.readOnly"> <span translate>Folder Master</span>
</label>
<a href="http://docs.syncthing.net/users/foldermaster.html" target="_blank"><span class="glyphicon glyphicon-book"></span> <span translate>Help</span></a>
</div>
<p translate class="help-block">Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.</p>
</div>
@@ -1054,10 +1059,12 @@
<li class="auto-generated">Dominik Heidler</li>
<li class="auto-generated">Elias Jarlebring</li>
<li class="auto-generated">Emil Hessman</li>
<li class="auto-generated">Erik Meitner</li>
<li class="auto-generated">Federico Castagnini</li>
<li class="auto-generated">Felix Ableitner</li>
<li class="auto-generated">Felix Unterpaintner</li>
<li class="auto-generated">Francois-Xavier Gsell</li>
<li class="auto-generated">Frank Isemann</li>
<li class="auto-generated">Gilli Sigurdsson</li>
<li class="auto-generated">Jakob Borg</li>
<li class="auto-generated">James Patterson</li>
@@ -1118,7 +1125,7 @@

<!-- gui application code -->
<script src="scripts/syncthing/core/module.js"></script>
<script src="scripts/syncthing/core/controllers/eventController.js"></script>
<script src="scripts/syncthing/core/services/events.js"></script>
<script src="scripts/syncthing/core/controllers/syncthingController.js"></script>
<script src="scripts/syncthing/core/directives/identiconDirective.js"></script>
<script src="scripts/syncthing/core/directives/languageSelectDirective.js"></script>

@@ -17,10 +17,9 @@ var syncthing = angular.module('syncthing', [

var urlbase = 'rest';
var guiVersion = null;
var deviceId = null;

syncthing.config(function ($httpProvider, $translateProvider, LocaleServiceProvider) {
$httpProvider.defaults.xsrfHeaderName = 'X-CSRF-Token';
$httpProvider.defaults.xsrfCookieName = 'CSRF-Token';
$httpProvider.interceptors.push(function () {
return {
response: function (response) {
@@ -30,6 +29,14 @@ syncthing.config(function ($httpProvider, $translateProvider, LocaleServiceProvi
} else if (guiVersion != responseVersion) {
document.location.reload(true);
}
if (!deviceId) {
deviceId = response.headers()['x-syncthing-id'];
if (deviceId) {
var deviceIdShort = deviceId.substring(0, 5);
$httpProvider.defaults.xsrfHeaderName = 'X-CSRF-Token-' + deviceIdShort;
$httpProvider.defaults.xsrfCookieName = 'CSRF-Token-' + deviceIdShort;
}
}
return response;
}
};

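The interceptor above makes the CSRF header and cookie names device-specific: it reads the device ID from the X-Syncthing-ID response header and suffixes both names with the ID's first five characters, so GUIs for several instances on one host no longer overwrite each other's tokens. A minimal Go sketch of the same derivation; the csrfNames helper and the example device ID are illustrative, not part of the codebase:

package main

import "fmt"

// csrfNames mirrors the JS interceptor: suffix the CSRF header and
// cookie names with the first five characters of the device ID.
func csrfNames(deviceID string) (header, cookie string) {
	short := deviceID
	if len(short) > 5 {
		short = short[:5]
	}
	return "X-CSRF-Token-" + short, "CSRF-Token-" + short
}

func main() {
	h, c := csrfNames("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")
	fmt.Println(h) // X-CSRF-Token-P56IO
	fmt.Println(c) // CSRF-Token-P56IO
}
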
@@ -1,55 +0,0 @@
var debugEvents = false;

angular.module('syncthing.core')
.controller('EventController', function ($scope, $http) {
'use strict';

$scope.lastEvent = null;
var lastID = 0;

var successFn = function (data) {
// When Syncthing restarts while the long polling connection is in
// progress the browser on some platforms returns a 200 (since the
// headers has been flushed with the return code 200), with no data.
// This basically means that the connection has been reset, and the call
// was not actually successful.
if (!data) {
errorFn(data);
return;
}

$scope.$emit('UIOnline');

if (lastID > 0) {
data.forEach(function (event) {
if (debugEvents) {
console.log("event", event.id, event.type, event.data);
}
$scope.$emit(event.type, event);
});
}

$scope.lastEvent = data[data.length - 1];
lastID = $scope.lastEvent.id;

setTimeout(function () {
$http.get(urlbase + '/events?since=' + lastID)
.success(successFn)
.error(errorFn);
}, 500);
};

var errorFn = function (data) {
$scope.$emit('UIOffline');

setTimeout(function () {
$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
}, 1000);
};

$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
});
80
gui/scripts/syncthing/core/controllers/syncthingController.js
Normal file → Executable file
@@ -2,7 +2,7 @@ angular.module('syncthing.core')
.config(function($locationProvider) {
$locationProvider.html5Mode(true).hashPrefix('!');
})
.controller('SyncthingController', function ($scope, $http, $location, LocaleService) {
.controller('SyncthingController', function ($scope, $http, $location, LocaleService, Events) {
'use strict';

// private/helper definitions
@@ -15,6 +15,7 @@ angular.module('syncthing.core')
function initController() {
LocaleService.autoConfigLocale();
setInterval($scope.refresh, 10000);
Events.start();
}


@@ -44,6 +45,8 @@ angular.module('syncthing.core')
$scope.neededTotal = 0;
$scope.neededCurrentPage = 1;
$scope.neededPageSize = 10;
$scope.foldersTotalLocalBytes = 0;
$scope.foldersTotalLocalFiles = 0;

$(window).bind('beforeunload', function () {
navigatingAway = true;
@@ -66,7 +69,7 @@ angular.module('syncthing.core')
'touch': 'asterisk'
};

$scope.$on('UIOnline', function (event, arg) {
$scope.$on(Events.ONLINE, function () {
if (online && !restarting) {
return;
}
@@ -100,7 +103,7 @@ angular.module('syncthing.core')
$('#shutdown').modal('hide');
});

$scope.$on('UIOffline', function (event, arg) {
$scope.$on(Events.OFFLINE, function () {
if (navigatingAway || !online) {
return;
}
@@ -125,7 +128,7 @@ angular.module('syncthing.core')
if (!restarting) {
if (arg.status === 0) {
// A network error, not an HTTP error
$scope.$emit('UIOffline');
$scope.$emit(Events.OFFLINE);
} else if (arg.status >= 400 && arg.status <= 599) {
// A genuine HTTP error
$('#networkError').modal('hide');
@@ -136,7 +139,7 @@ angular.module('syncthing.core')
}
});

$scope.$on('StateChanged', function (event, arg) {
$scope.$on(Events.STATE_CHANGED, function (event, arg) {
var data = arg.data;
if ($scope.model[data.folder]) {
$scope.model[data.folder].state = data.to;
@@ -144,21 +147,24 @@ angular.module('syncthing.core')
}
});

$scope.$on('LocalIndexUpdated', function (event, arg) {
var data = arg.data;
$scope.$on(Events.LOCAL_INDEX_UPDATED, function (event, arg) {
refreshFolderStats();
});

$scope.$on('RemoteIndexUpdated', function (event, arg) {
/* currently not using

$scope.$on('Events.REMOTE_INDEX_UPDATED', function (event, arg) {
// Nothing
});

$scope.$on('DeviceDisconnected', function (event, arg) {
*/

$scope.$on(Events.DEVICE_DISCONNECTED, function (event, arg) {
delete $scope.connections[arg.data.id];
refreshDeviceStats();
});

$scope.$on('DeviceConnected', function (event, arg) {
$scope.$on(Events.DEVICE_CONNECTED, function (event, arg) {
if (!$scope.connections[arg.data.id]) {
$scope.connections[arg.data.id] = {
inbps: 0,
@@ -173,7 +179,7 @@ angular.module('syncthing.core')
}
});

$scope.$on('ConfigLoaded', function (event) {
$scope.$on('ConfigLoaded', function () {
if ($scope.config.options.urAccepted === 0) {
// If usage reporting has been neither accepted nor declined,
// we want to ask the user to make a choice. But we don't want
@@ -193,15 +199,15 @@ angular.module('syncthing.core')
}
});

$scope.$on('DeviceRejected', function (event, arg) {
$scope.$on(Events.DEVICE_REJECTED, function (event, arg) {
$scope.deviceRejections[arg.data.device] = arg;
});

$scope.$on('FolderRejected', function (event, arg) {
$scope.$on(Events.FOLDER_REJECTED, function (event, arg) {
$scope.folderRejections[arg.data.folder + "-" + arg.data.device] = arg;
});

$scope.$on('ConfigSaved', function (event, arg) {
$scope.$on(Events.CONFIG_SAVED, function (event, arg) {
updateLocalConfig(arg.data);

$http.get(urlbase + '/system/config/insync').success(function (data) {
@@ -209,7 +215,7 @@ angular.module('syncthing.core')
}).error($scope.emitHTTPError);
});

$scope.$on('DownloadProgress', function (event, arg) {
$scope.$on(Events.DOWNLOAD_PROGRESS, function (event, arg) {
var stats = arg.data;
var progress = {};
for (var folder in stats) {
@@ -254,12 +260,12 @@ angular.module('syncthing.core')
console.log("DownloadProgress", $scope.progress);
});

$scope.$on('FolderSummary', function (event, arg) {
$scope.$on(Events.FOLDER_SUMMARY, function (event, arg) {
var data = arg.data;
$scope.model[data.folder] = data.summary;
});

$scope.$on('FolderCompletion', function (event, arg) {
$scope.$on(Events.FOLDER_COMPLETION, function (event, arg) {
var data = arg.data;
if (!$scope.completion[data.device]) {
$scope.completion[data.device] = {};
@@ -336,6 +342,15 @@ angular.module('syncthing.core')
}
}
$scope.announceServersFailed = failed;

$scope.foldersTotalLocalBytes = 0;
$scope.foldersTotalLocalFiles = 0;

for (var f in $scope.model) {
$scope.foldersTotalLocalBytes += $scope.model[f].localBytes;
$scope.foldersTotalLocalFiles += $scope.model[f].localFiles;
};

console.log("refreshSystem", data);
}).error($scope.emitHTTPError);
}
@@ -444,7 +459,7 @@ angular.module('syncthing.core')
} else {
return 'sync';
}
};
}

function parseNeeded(data) {
var merged = [];
@@ -475,7 +490,7 @@ angular.module('syncthing.core')
$scope.neededChangePageSize = function (perpage) {
$scope.neededPageSize = perpage;
refreshNeed($scope.neededFolder);
}
};

var refreshDeviceStats = debounce(function () {
$http.get(urlbase + "/stats/device").success(function (data) {
@@ -973,7 +988,7 @@ angular.module('syncthing.core')

$scope.$watch('currentFolder.path', function (newvalue) {
if (newvalue && newvalue.trim().charAt(0) == '~') {
$scope.currentFolder.path = $scope.system.tilde + newvalue.trim().substring(1)
$scope.currentFolder.path = $scope.system.tilde + newvalue.trim().substring(1);
}
$http.get(urlbase + '/system/browse', {
params: { current: newvalue }
@@ -1035,6 +1050,7 @@ angular.module('syncthing.core')
selectedDevices: {}
};
$scope.currentFolder.rescanIntervalS = 60;
$scope.currentFolder.order = "random";
$scope.currentFolder.fileVersioningSelector = "none";
$scope.currentFolder.trashcanClean = 0;
$scope.currentFolder.simpleKeep = 5;
@@ -1052,19 +1068,19 @@ angular.module('syncthing.core')
$scope.dismissFolderRejection(folder, device);
$scope.currentFolder = {
id: folder,
selectedDevices: {}
selectedDevices: {},
rescanIntervalS: 60,
fileVersioningSelector: "none",
trashcanClean: 0,
simpleKeep: 5,
staggeredMaxAge: 365,
staggeredCleanInterval: 3600,
staggeredVersionsPath: "",
externalCommand: "",
autoNormalize: true
};
$scope.currentFolder.selectedDevices[device] = true;

$scope.currentFolder.rescanIntervalS = 60;
$scope.currentFolder.fileVersioningSelector = "none";
$scope.currentFolder.trashcanClean = 0;
$scope.currentFolder.simpleKeep = 5;
$scope.currentFolder.staggeredMaxAge = 365;
$scope.currentFolder.staggeredCleanInterval = 3600;
$scope.currentFolder.staggeredVersionsPath = "";
$scope.currentFolder.externalCommand = "";
$scope.currentFolder.autoNormalize = true;
$scope.editingExisting = false;
$scope.folderEditor.$setPristine();
$('#editFolder').modal();
@@ -1159,12 +1175,12 @@ angular.module('syncthing.core')
});
names.sort();
return names.join(", ");
}
};

$scope.deviceFolders = function (deviceCfg) {
var folders = [];
for (var folderID in $scope.folders) {
var devices = $scope.folders[folderID].devices
var devices = $scope.folders[folderID].devices;
for (var i = 0; i < devices.length; i++) {
if (devices[i].deviceID == deviceCfg.deviceID) {
folders.push(folderID);

85
gui/scripts/syncthing/core/services/events.js
Normal file
@@ -0,0 +1,85 @@
var debugEvents = !true;

angular.module('syncthing.core')
.service('Events', ['$http', '$rootScope', '$timeout', function ($http, $rootScope, $timeout) {
'use strict';

var lastID = 0;
var self = this;

function successFn (data) {
// When Syncthing restarts while the long polling connection is in
// progress the browser on some platforms returns a 200 (since the
// headers has been flushed with the return code 200), with no data.
// This basically means that the connection has been reset, and the call
// was not actually successful.
if (!data) {
errorFn(data);
return;
}
$rootScope.$broadcast(self.ONLINE);

if (lastID > 0) { // not emit events from first response
data.forEach(function (event) {
if (debugEvents) {
console.log("event", event.id, event.type, event.data);
}
$rootScope.$broadcast(event.type, event);
});
}

var lastEvent = data.pop();
if (lastEvent) {
lastID = lastEvent.id;
}

$timeout(function () {
$http.get(urlbase + '/events?since=' + lastID)
.success(successFn)
.error(errorFn);
}, 500, false);
}

function errorFn (dummy) {
$rootScope.$broadcast(self.OFFLINE);

$timeout(function () {
$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
}, 1000, false);
}

angular.extend(self, {
// emitted by this

ONLINE: 'UIOnline',
OFFLINE: 'UIOffline',

// emitted by syncthing process

CONFIG_SAVED: 'ConfigSaved', // Emitted after the config has been saved by the user or by Syncthing itself
DEVICE_CONNECTED: 'DeviceConnected', // Generated each time a connection to a device has been established
DEVICE_DISCONNECTED: 'DeviceDisconnected', // Generated each time a connection to a device has been terminated
DEVICE_DISCOVERED: 'DeviceDiscovered', // Emitted when a new device is discovered using local discovery
DEVICE_REJECTED: 'DeviceRejected', // Emitted when there is a connection from a device we are not configured to talk to
DOWNLOAD_PROGRESS: 'DownloadProgress', // Emitted during file downloads for each folder for each file
FOLDER_COMPLETION: 'FolderCompletion', // Emitted when the local or remote contents for a folder changes
FOLDER_REJECTED: 'FolderRejected', // Emitted when a device sends index information for a folder we do not have, or have but do not share with the device in question
FOLDER_SUMMARY: 'FolderSummary', // Emitted when folder contents have changed locally
ITEM_FINISHED: 'ItemFinished', // Generated when Syncthing ends synchronizing a file to a newer version
ITEM_STARTED: 'ItemStarted', // Generated when Syncthing begins synchronizing a file to a newer version
LOCAL_INDEX_UPDATED: 'LocalIndexUpdated', // Generated when the local index information has changed, due to synchronizing one or more items from the cluster or discovering local changes during a scan
PING: 'Ping', // Generated automatically every 60 seconds
REMOTE_INDEX_UPDATED: 'RemoteIndexUpdated', // Generated each time new index information is received from a device
STARTING: 'Starting', // Emitted exactly once, when Syncthing starts, before parsing configuration etc
STARTUP_COMPLETED: 'StartupCompleted', // Emitted exactly once, when initialization is complete and Syncthing is ready to start exchanging data with other devices
STATE_CHANGED: 'StateChanged', // Emitted when a folder changes state

start: function() {
$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
}
});
}]);
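The Events service above is a thin wrapper around the REST long-polling endpoint: GET /rest/events?since=<lastID> blocks until new events arrive, an empty 200 response is treated as a reset connection, and polling resumes after a short back-off. A sketch of a standalone Go consumer of the same endpoint, mirroring that loop; the base address and API key are placeholders for a local instance, not values from this changeset:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// event models the fields the GUI code above reads from each event.
type event struct {
	ID   int             `json:"id"`
	Type string          `json:"type"`
	Data json.RawMessage `json:"data"`
}

func main() {
	base := "http://127.0.0.1:8384/rest" // assumed local GUI address
	apiKey := "REPLACE-ME"               // assumed API key from the GUI settings
	since := 0

	for {
		req, err := http.NewRequest("GET", fmt.Sprintf("%s/events?since=%d", base, since), nil)
		if err != nil {
			panic(err)
		}
		req.Header.Set("X-API-Key", apiKey)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			time.Sleep(time.Second) // back off on errors, like errorFn above
			continue
		}

		var events []event
		err = json.NewDecoder(resp.Body).Decode(&events)
		resp.Body.Close()
		if err != nil || len(events) == 0 {
			time.Sleep(time.Second) // empty body: treat as a reset connection
			continue
		}

		for _, ev := range events {
			fmt.Println(ev.ID, ev.Type) // dispatch, as $rootScope.$broadcast does above
		}
		since = events[len(events)-1].ID
	}
}
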
File diff suppressed because one or more lines are too long
@@ -9,7 +9,6 @@ package config

import (
"encoding/xml"
"fmt"
"io"
"math/rand"
"os"
@@ -28,6 +27,7 @@ import (
const (
OldestHandledVersion = 5
CurrentVersion = 10
MaxRescanIntervalS = 365 * 24 * 60 * 60
)

type Configuration struct {
@@ -237,6 +237,8 @@ type OptionsConfiguration struct {
SymlinksEnabled bool `xml:"symlinksEnabled" json:"symlinksEnabled" default:"true"`
LimitBandwidthInLan bool `xml:"limitBandwidthInLan" json:"limitBandwidthInLan" default:"false"`
DatabaseBlockCacheMiB int `xml:"databaseBlockCacheMiB" json:"databaseBlockCacheMiB" default:"0"`
PingTimeoutS int `xml:"pingTimeoutS" json:"pingTimeoutS" default:"30"`
PingIdleTimeS int `xml:"pingIdleTimeS" json:"pingIdleTimeS" default:"60"`
}

func (orig OptionsConfiguration) Copy() OptionsConfiguration {
@@ -309,7 +311,6 @@ func (cfg *Configuration) prepare(myID protocol.DeviceID) {

// Check for missing, bad or duplicate folder ID:s
var seenFolders = map[string]*FolderConfiguration{}
var uniqueCounter int
for i := range cfg.Folders {
folder := &cfg.Folders[i]

@@ -330,17 +331,16 @@ func (cfg *Configuration) prepare(myID protocol.DeviceID) {
folder.ID = "default"
}

if folder.RescanIntervalS > MaxRescanIntervalS {
folder.RescanIntervalS = MaxRescanIntervalS
} else if folder.RescanIntervalS < 0 {
folder.RescanIntervalS = 0
}

if seen, ok := seenFolders[folder.ID]; ok {
l.Warnf("Multiple folders with ID %q; disabling", folder.ID)

seen.Invalid = "duplicate folder ID"
if seen.ID == folder.ID {
uniqueCounter++
seen.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
}
folder.Invalid = "duplicate folder ID"
uniqueCounter++
folder.ID = fmt.Sprintf("%s~%d", folder.ID, uniqueCounter)
} else {
seenFolders[folder.ID] = folder
}
@@ -580,7 +580,7 @@ func fillNilSlices(data interface{}) error {
func uniqueStrings(ss []string) []string {
var m = make(map[string]bool, len(ss))
for _, s := range ss {
m[s] = true
m[strings.Trim(s, " ")] = true
}

var us = make([]string, 0, len(m))

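Two behavioral changes are buried in this hunk: folder rescan intervals are now clamped into [0, MaxRescanIntervalS], and uniqueStrings trims stray spaces before de-duplicating, so whitespace-padded addresses collapse to clean entries. A self-contained Go sketch of both behaviors; the helper names are illustrative and mirror the diff rather than the package's exported API:

package main

import (
	"fmt"
	"sort"
	"strings"
)

const MaxRescanIntervalS = 365 * 24 * 60 * 60 // one year, as in the diff

// clampRescanInterval forces the interval into [0, MaxRescanIntervalS].
func clampRescanInterval(s int) int {
	if s > MaxRescanIntervalS {
		return MaxRescanIntervalS
	}
	if s < 0 {
		return 0
	}
	return s
}

// uniqueTrimmedStrings de-duplicates after trimming spaces, like the
// updated uniqueStrings. Sorted here only for stable output.
func uniqueTrimmedStrings(ss []string) []string {
	m := make(map[string]bool, len(ss))
	for _, s := range ss {
		m[strings.Trim(s, " ")] = true
	}
	us := make([]string, 0, len(m))
	for s := range m {
		us = append(us, s)
	}
	sort.Strings(us)
	return us
}

func main() {
	// 2*MaxRescanIntervalS stands in for the fixture's 60000000000.
	fmt.Println(clampRescanInterval(2 * MaxRescanIntervalS)) // 31536000, as TestLargeRescanInterval expects
	fmt.Println(clampRescanInterval(-1))                     // 0
	fmt.Println(uniqueTrimmedStrings([]string{" :23000", ":23000", " :23001 "}))
	// [:23000 :23001], matching the issue-1750 fixture below
}
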
@@ -53,6 +53,8 @@ func TestDefaultValues(t *testing.T) {
SymlinksEnabled: true,
LimitBandwidthInLan: false,
DatabaseBlockCacheMiB: 0,
PingTimeoutS: 30,
PingIdleTimeS: 60,
}

cfg := New(device1)
@@ -160,6 +162,8 @@ func TestOverriddenValues(t *testing.T) {
SymlinksEnabled: false,
LimitBandwidthInLan: true,
DatabaseBlockCacheMiB: 42,
PingTimeoutS: 60,
PingIdleTimeS: 120,
}

cfg, err := Load("testdata/overridenvalues.xml", device1)
@@ -317,6 +321,29 @@ func TestIssue1262(t *testing.T) {
}
}

func TestIssue1750(t *testing.T) {
cfg, err := Load("testdata/issue-1750.xml", device4)
if err != nil {
t.Fatal(err)
}

if cfg.Options().ListenAddress[0] != ":23000" {
t.Errorf("%q != %q", cfg.Options().ListenAddress[0], ":23000")
}

if cfg.Options().ListenAddress[1] != ":23001" {
t.Errorf("%q != %q", cfg.Options().ListenAddress[1], ":23001")
}

if cfg.Options().GlobalAnnServers[0] != "udp4://syncthing.nym.se:22026" {
t.Errorf("%q != %q", cfg.Options().GlobalAnnServers[0], "udp4://syncthing.nym.se:22026")
}

if cfg.Options().GlobalAnnServers[1] != "udp4://syncthing.nym.se:22027" {
t.Errorf("%q != %q", cfg.Options().GlobalAnnServers[1], "udp4://syncthing.nym.se:22027")
}
}

func TestWindowsPaths(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip("Not useful on non-Windows")
@@ -578,3 +605,17 @@ func TestPullOrder(t *testing.T) {
}
}
}

func TestLargeRescanInterval(t *testing.T) {
wrapper, err := Load("testdata/largeinterval.xml", device1)
if err != nil {
t.Fatal(err)
}

if wrapper.Folders()["l1"].RescanIntervalS != MaxRescanIntervalS {
t.Error("too large rescan interval should be maxed out")
}
if wrapper.Folders()["l2"].RescanIntervalS != 0 {
t.Error("negative rescan interval should become zero")
}
}

8 internal/config/testdata/issue-1750.xml vendored Normal file
@@ -0,0 +1,8 @@
<configuration version="9">
    <options>
        <listenAddress> :23000</listenAddress>
        <listenAddress> :23001 </listenAddress>
        <globalAnnounceServer> udp4://syncthing.nym.se:22026</globalAnnounceServer>
        <globalAnnounceServer> udp4://syncthing.nym.se:22027 </globalAnnounceServer>
    </options>
</configuration>

4 internal/config/testdata/largeinterval.xml vendored Normal file
@@ -0,0 +1,4 @@
<configuration version="10">
    <folder id="l1" path="~/Sync" rescanIntervalS="60000000000"></folder>
    <folder id="l2" path="~/Sync" rescanIntervalS="-1"></folder>
</configuration>

2 internal/config/testdata/overridenvalues.xml vendored
@@ -24,5 +24,7 @@
    <symlinksEnabled>false</symlinksEnabled>
    <limitBandwidthInLan>true</limitBandwidthInLan>
    <databaseBlockCacheMiB>42</databaseBlockCacheMiB>
    <pingTimeoutS>60</pingTimeoutS>
    <pingIdleTimeS>120</pingIdleTimeS>
</options>
</configuration>
@@ -129,6 +129,28 @@ func (n NamespacedKV) Bytes(key string) ([]byte, bool) {
    return valBs, true
}

// PutBool stores a new boolean. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutBool(key string, val bool) {
    keyBs := append(n.prefix, []byte(key)...)
    if val {
        n.db.Put(keyBs, []byte{0x0}, nil)
    } else {
        n.db.Put(keyBs, []byte{0x1}, nil)
    }
}

// Bool returns the stored value as a boolean and a boolean that
// is false if no value was stored at the key.
func (n NamespacedKV) Bool(key string) (bool, bool) {
    keyBs := append(n.prefix, []byte(key)...)
    valBs, err := n.db.Get(keyBs, nil)
    if err != nil {
        return false, false
    }
    return valBs[0] == 0x0, true
}

// Delete deletes the specified key. It is allowed to delete a nonexistent
// key.
func (n NamespacedKV) Delete(key string) {
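
A usage sketch of the new boolean accessors. Note the on-disk encoding above: 0x0 stores true and 0x1 stores false; unconventional, but consistent between PutBool and Bool, so round-trips behave as expected. The constructor name NewNamespacedKV is an assumption based on the surrounding internal/db package:

    package main

    import (
        "fmt"

        "github.com/syncthing/syncthing/internal/db"
        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/storage"
    )

    func main() {
        ldb, _ := leveldb.Open(storage.NewMemStorage(), nil) // in-memory DB, as in the tests elsewhere in this change
        kv := db.NewNamespacedKV(ldb, "stats")               // constructor name is an assumption

        kv.PutBool("lastFileDeleted", true)
        if v, ok := kv.Bool("lastFileDeleted"); ok {
            fmt.Println("deleted:", v) // deleted: true
        }
        if _, ok := kv.Bool("never-set"); !ok {
            fmt.Println("second return is false for missing keys")
        }
    }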
@@ -55,6 +55,7 @@ type service interface {
    BringToFront(string)
    DelayScan(d time.Duration)
    IndexUpdated() // Remote index was updated notification
    Scan(subs []string) error

    setState(state folderState)
    setError(err error)
@@ -91,15 +92,14 @@ type Model struct {
    deviceVer map[protocol.DeviceID]string
    pmut      sync.RWMutex // protects protoConn and rawConn

    addedFolder bool
    started     bool
    started bool

    reqValidationCache map[string]time.Time // folder / file name => time when confirmed to exist
    rvmut              sync.RWMutex         // protects reqValidationCache
}

var (
    SymlinkWarning = stdsync.Once{}
    symlinkWarning = stdsync.Once{}
)

// NewModel creates and starts a new model. The model starts in read-only mode,
@@ -179,6 +179,7 @@ func (m *Model) StartFolderRW(folder string) {
    if !ok {
        l.Fatalf("Requested versioning type %q that does not exist", cfg.Versioning.Type)
    }

    versioner := factory(folder, cfg.Path(), cfg.Versioning.Params)
    if service, ok := versioner.(suture.Service); ok {
        // The versioner implements the suture.Service interface, so
@@ -189,7 +190,7 @@ func (m *Model) StartFolderRW(folder string) {
        p.versioner = versioner
    }

    go p.Serve()
    m.Add(p)
}

// StartFolderRO starts read only processing on the current model. When in
@@ -512,7 +513,7 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
        }
        fs[i] = fs[len(fs)-1]
        fs = fs[:len(fs)-1]
    } else if symlinkInvalid(fs[i].IsSymlink()) {
    } else if symlinkInvalid(folder, fs[i]) {
        if debug {
            l.Debugln("dropping update for unsupported symlink", fs[i])
        }
@@ -566,7 +567,7 @@ func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []prot
        }
        fs[i] = fs[len(fs)-1]
        fs = fs[:len(fs)-1]
    } else if symlinkInvalid(fs[i].IsSymlink()) {
    } else if symlinkInvalid(folder, fs[i]) {
        if debug {
            l.Debugln("dropping update for unsupported symlink", fs[i])
        }
@@ -1025,8 +1026,8 @@ func (m *Model) folderStatRef(folder string) *stats.FolderStatisticsReference {
    return sr
}

func (m *Model) receivedFile(folder, filename string) {
    m.folderStatRef(folder).ReceivedFile(filename)
func (m *Model) receivedFile(folder string, file protocol.FileInfo) {
    m.folderStatRef(folder).ReceivedFile(file)
}

func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) {
@@ -1072,7 +1073,7 @@ func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, fold
        maxLocalVer = f.LocalVersion
    }

    if ignores.Match(f.Name) || symlinkInvalid(f.IsSymlink()) {
    if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
        if debug {
            l.Debugln("not sending update for ignored/unsupported symlink", f)
        }
@@ -1123,8 +1124,10 @@ func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, fold

func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
    m.fmut.RLock()
    m.folderFiles[folder].Update(protocol.LocalDeviceID, fs)
    files := m.folderFiles[folder]
    m.fmut.RUnlock()
    files.Update(protocol.LocalDeviceID, fs)

    m.rvmut.Lock()
    for _, f := range fs {
        delete(m.reqValidationCache, folder+"/"+f.Name)
@@ -1132,8 +1135,9 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
    m.rvmut.Unlock()

    events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
        "folder": folder,
        "items":  len(fs),
        "folder":  folder,
        "items":   len(fs),
        "version": files.LocalVersion(protocol.LocalDeviceID),
    })
}

@@ -1175,7 +1179,6 @@ func (m *Model) AddFolder(cfg config.FolderConfiguration) {
    _ = ignores.Load(filepath.Join(cfg.Path(), ".stignore")) // Ignore error, there might not be an .stignore
    m.folderIgnores[cfg.ID] = ignores

    m.addedFolder = true
    m.fmut.Unlock()
}

@@ -1222,6 +1225,21 @@ func (m *Model) ScanFolder(folder string) error {
}

func (m *Model) ScanFolderSubs(folder string, subs []string) error {
    m.fmut.Lock()
    runner, ok := m.folderRunners[folder]
    m.fmut.Unlock()

    // Folders are added to folderRunners only when they are started. We can't
    // scan them before they have started, so that's what we need to check for
    // here.
    if !ok {
        return errors.New("no such folder")
    }

    return runner.Scan(subs)
}

func (m *Model) internalScanFolderSubs(folder string, subs []string) error {
    for i, sub := range subs {
        sub = osutil.NativeFilename(sub)
        if p := filepath.Clean(filepath.Join(folder, sub)); !strings.HasPrefix(p, folder) {
@@ -1360,7 +1378,7 @@ nextSub:
        batch = batch[:0]
    }

    if ignores.Match(f.Name) || symlinkInvalid(f.IsSymlink()) {
    if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
        // File has been ignored or an unsupported symlink. Set invalid bit.
        if debug {
            l.Debugln("setting invalid bit on ignored", f)
@@ -1508,7 +1526,7 @@ func (m *Model) Override(folder string) {
    fs.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
        need := fi.(protocol.FileInfo)
        if len(batch) == indexBatchSize {
            fs.Update(protocol.LocalDeviceID, batch)
            m.updateLocals(folder, batch)
            batch = batch[:0]
        }

@@ -1528,7 +1546,7 @@ func (m *Model) Override(folder string) {
        return true
    })
    if len(batch) > 0 {
        fs.Update(protocol.LocalDeviceID, batch)
        m.updateLocals(folder, batch)
    }
    runner.setState(FolderIdle)
}
@@ -1536,23 +1554,23 @@ func (m *Model) Override(folder string) {
// CurrentLocalVersion returns the change version for the given folder.
// This is guaranteed to increment if the contents of the local folder has
// changed.
func (m *Model) CurrentLocalVersion(folder string) int64 {
func (m *Model) CurrentLocalVersion(folder string) (int64, bool) {
    m.fmut.RLock()
    fs, ok := m.folderFiles[folder]
    m.fmut.RUnlock()
    if !ok {
        // The folder might not exist, since this can be called with a user
        // specified folder name from the REST interface.
        return 0
        return 0, false
    }

    return fs.LocalVersion(protocol.LocalDeviceID)
    return fs.LocalVersion(protocol.LocalDeviceID), true
}

// RemoteLocalVersion returns the change version for the given folder, as
// sent by remote peers. This is guaranteed to increment if the contents of
// the remote or global folder has changed.
func (m *Model) RemoteLocalVersion(folder string) int64 {
func (m *Model) RemoteLocalVersion(folder string) (int64, bool) {
    m.fmut.RLock()
    defer m.fmut.RUnlock()

@@ -1560,7 +1578,7 @@ func (m *Model) RemoteLocalVersion(folder string) int64 {
    if !ok {
        // The folder might not exist, since this can be called with a user
        // specified folder name from the REST interface.
        return 0
        return 0, false
    }

    var ver int64
@@ -1568,7 +1586,7 @@ func (m *Model) RemoteLocalVersion(folder string) int64 {
        ver += fs.LocalVersion(n)
    }

    return ver
    return ver, true
}

func (m *Model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} {
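
Both version getters now return (int64, bool) so callers can tell "folder unknown" apart from a legitimate version of 0, which the old single-return signature conflated. The shape mirrors Go's map-lookup comma-ok idiom; a caller sketch, assuming m is a *Model:

    if v, ok := m.CurrentLocalVersion("default"); !ok {
        // unknown folder name, e.g. a bad REST parameter
    } else if v > 0 {
        // the folder has a cached index; safe to run the deleted-files safety check
    }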
@@ -1677,7 +1695,7 @@ func (m *Model) CheckFolderHealth(id string) error {
    }

    fi, err := os.Stat(folder.Path())
    if m.CurrentLocalVersion(id) > 0 {
    if v, ok := m.CurrentLocalVersion(id); ok && v > 0 {
        // Safety check. If the cached index contains files but the
        // folder doesn't exist, we have a problem. We would assume
        // that all files have been deleted which might not be the case,
@@ -1728,15 +1746,9 @@ func (m *Model) CheckFolderHealth(id string) error {
    return err
}

func (m *Model) ResetFolder(folder string) error {
    for _, f := range db.ListFolders(m.db) {
        if f == folder {
            l.Infof("Cleaning data for folder %q", folder)
            db.DropFolder(m.db, folder)
            return nil
        }
    }
    return fmt.Errorf("Unknown folder %q", folder)
func (m *Model) ResetFolder(folder string) {
    l.Infof("Cleaning data for folder %q", folder)
    db.DropFolder(m.db, folder)
}

func (m *Model) String() string {
@@ -1774,11 +1786,21 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
    return true
}

func symlinkInvalid(isLink bool) bool {
    if !symlinks.Supported && isLink {
        SymlinkWarning.Do(func() {
func symlinkInvalid(folder string, fi db.FileIntf) bool {
    if !symlinks.Supported && fi.IsSymlink() && !fi.IsInvalid() && !fi.IsDeleted() {
        symlinkWarning.Do(func() {
            l.Warnln("Symlinks are disabled, unsupported or require Administrator privileges. This might cause your folder to appear out of sync.")
        })

        // Need to type switch for the concrete type to be able to access fields...
        var name string
        switch fi := fi.(type) {
        case protocol.FileInfo:
            name = fi.Name
        case db.FileInfoTruncated:
            name = fi.Name
        }
        l.Infoln("Unsupported symlink", name, "in folder", folder)
        return true
    }
    return false
@@ -97,6 +97,7 @@ func TestRequest(t *testing.T) {
    m.AddFolder(defaultFolderConfig)
    m.StartFolderRO("default")
    m.ScanFolder("default")
    m.ServeBackground()

    // Existing, shared file
    bs, err := m.Request(device1, "default", "foo", 0, 6, nil, 0, nil)
@@ -189,6 +190,7 @@ func benchmarkIndex(b *testing.B, nfiles int) {
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.StartFolderRO("default")
    m.ServeBackground()

    files := genFiles(nfiles)
    m.Index(device1, "default", files, 0, nil)
@@ -217,6 +219,7 @@ func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.StartFolderRO("default")
    m.ServeBackground()

    files := genFiles(nfiles)
    ufiles := genFiles(nufiles)
@@ -277,6 +280,7 @@ func BenchmarkRequest(b *testing.B) {
    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.ServeBackground()
    m.ScanFolder("default")

    const n = 1000
@@ -327,6 +331,7 @@ func TestDeviceRename(t *testing.T) {

    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
    m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.ServeBackground()
    if cfg.Devices()[device1].Name != "" {
        t.Errorf("Device already has a name")
    }
@@ -396,6 +401,7 @@ func TestClusterConfig(t *testing.T) {
    m := NewModel(config.Wrap("/tmp/test", cfg), protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(cfg.Folders[0])
    m.AddFolder(cfg.Folders[1])
    m.ServeBackground()

    cm := m.clusterConfig(device2)

@@ -466,6 +472,7 @@ func TestIgnores(t *testing.T) {
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.StartFolderRO("default")
    m.ServeBackground()

    expected := []string{
        ".*",
@@ -539,6 +546,7 @@ func TestRefuseUnknownBits(t *testing.T) {
    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.ServeBackground()

    m.ScanFolder("default")
    m.Index(device1, "default", []protocol.FileInfo{
@@ -596,9 +604,9 @@ func TestROScanRecovery(t *testing.T) {
    os.RemoveAll(fcfg.RawPath)

    m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)

    m.AddFolder(fcfg)
    m.StartFolderRO("default")
    m.ServeBackground()

    waitFor := func(status string) error {
        timeout := time.Now().Add(2 * time.Second)
@@ -680,9 +688,9 @@ func TestRWScanRecovery(t *testing.T) {
    os.RemoveAll(fcfg.RawPath)

    m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)

    m.AddFolder(fcfg)
    m.StartFolderRW("default")
    m.ServeBackground()

    waitFor := func(status string) error {
        timeout := time.Now().Add(2 * time.Second)
@@ -744,6 +752,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.ServeBackground()

    b := func(isfile bool, path ...string) protocol.FileInfo {
        flags := uint32(protocol.FlagDirectory)
@@ -993,6 +1002,7 @@ func TestGlobalDirectorySelfFixing(t *testing.T) {
    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.ServeBackground()

    b := func(isfile bool, path ...string) protocol.FileInfo {
        flags := uint32(protocol.FlagDirectory)
@@ -1166,6 +1176,8 @@ func benchmarkTree(b *testing.B, n1, n2 int) {
    db, _ := leveldb.Open(storage.NewMemStorage(), nil)
    m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
    m.AddFolder(defaultFolderConfig)
    m.ServeBackground()

    m.ScanFolder("default")
    files := genDeepFiles(n1, n2)
@@ -22,9 +22,15 @@ type roFolder struct {
    timer     *time.Timer
    model     *Model
    stop      chan struct{}
    scanNow   chan rescanRequest
    delayScan chan time.Duration
}

type rescanRequest struct {
    subs []string
    err  chan error
}

func newROFolder(model *Model, folder string, interval time.Duration) *roFolder {
    return &roFolder{
        stateTracker: stateTracker{
@@ -36,6 +42,7 @@ func newROFolder(model *Model, folder string, interval time.Duration) *roFolder
        timer:     time.NewTimer(time.Millisecond),
        model:     model,
        stop:      make(chan struct{}),
        scanNow:   make(chan rescanRequest),
        delayScan: make(chan time.Duration),
    }
}
@@ -76,7 +83,7 @@ func (s *roFolder) Serve() {
        l.Debugln(s, "rescan")
    }

    if err := s.model.ScanFolder(s.folder); err != nil {
    if err := s.model.internalScanFolderSubs(s.folder, nil); err != nil {
        // Potentially sets the error twice, once in the scanner just
        // by doing a check, and once here, if the error returned is
        // the same one as returned by CheckFolderHealth, though
@@ -92,11 +99,34 @@ func (s *roFolder) Serve() {
    }

    if s.intv == 0 {
        return
        continue
    }

    reschedule()

case req := <-s.scanNow:
    if err := s.model.CheckFolderHealth(s.folder); err != nil {
        l.Infoln("Skipping folder", s.folder, "scan due to folder error:", err)
        req.err <- err
        continue
    }

    if debug {
        l.Debugln(s, "forced rescan")
    }

    if err := s.model.internalScanFolderSubs(s.folder, req.subs); err != nil {
        // Potentially sets the error twice, once in the scanner just
        // by doing a check, and once here, if the error returned is
        // the same one as returned by CheckFolderHealth, though
        // duplicate set is handled by setError.
        s.setError(err)
        req.err <- err
        continue
    }

    req.err <- nil

case next := <-s.delayScan:
    s.timer.Reset(next)
}
@@ -110,6 +140,15 @@ func (s *roFolder) Stop() {
func (s *roFolder) IndexUpdated() {
}

func (s *roFolder) Scan(subs []string) error {
    req := rescanRequest{
        subs: subs,
        err:  make(chan error),
    }
    s.scanNow <- req
    return <-req.err
}

func (s *roFolder) String() string {
    return fmt.Sprintf("roFolder/%s@%p", s.folder, s)
}
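
Scan above uses a classic Go request/reply pattern: each request carries its own private error channel, so the folder's Serve loop handles forced scans one at a time, serialized with scheduled rescans, while the caller blocks for a synchronous result. A stripped-down, runnable sketch of the pattern; the names here are illustrative, not from the patch:

    package main

    import (
        "errors"
        "fmt"
    )

    type rescanReq struct {
        subs []string
        err  chan error
    }

    // serve stands in for the folder's Serve loop: one scan at a time,
    // each caller unblocked by a reply on its private error channel.
    func serve(scanNow <-chan rescanReq, doScan func([]string) error) {
        for req := range scanNow {
            req.err <- doScan(req.subs)
        }
    }

    func scan(scanNow chan<- rescanReq, subs []string) error {
        req := rescanReq{subs: subs, err: make(chan error)}
        scanNow <- req
        return <-req.err
    }

    func main() {
        scanNow := make(chan rescanReq)
        go serve(scanNow, func(subs []string) error {
            if len(subs) == 0 {
                return errors.New("nothing to scan")
            }
            fmt.Println("scanning", subs)
            return nil
        })
        fmt.Println(scan(scanNow, []string{"docs"})) // scanning [docs]; <nil>
        fmt.Println(scan(scanNow, nil))              // nothing to scan
    }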
@@ -32,7 +32,7 @@ import (
const (
    pauseIntv     = 60 * time.Second
    nextPullIntv  = 10 * time.Second
    shortPullIntv = 5 * time.Second
    shortPullIntv = time.Second
)

// A pullBlockState is passed to the puller routine for each block that needs
@@ -54,6 +54,19 @@ var (
    errNoDevice = errors.New("no available source device")
)

const (
    dbUpdateHandleDir = iota
    dbUpdateDeleteDir
    dbUpdateHandleFile
    dbUpdateDeleteFile
    dbUpdateShortcutFile
)

type dbUpdateJob struct {
    file    protocol.FileInfo
    jobType int
}

type rwFolder struct {
    stateTracker

@@ -73,10 +86,11 @@ type rwFolder struct {

    stop        chan struct{}
    queue       *jobQueue
    dbUpdates   chan protocol.FileInfo
    dbUpdates   chan dbUpdateJob
    scanTimer   *time.Timer
    pullTimer   *time.Timer
    delayScan   chan time.Duration
    scanNow     chan rescanRequest
    remoteIndex chan struct{} // An index update was received, we should re-evaluate needs
}

@@ -105,6 +119,7 @@ func newRWFolder(m *Model, shortID uint64, cfg config.FolderConfiguration) *rwFo
        pullTimer:   time.NewTimer(shortPullIntv),
        scanTimer:   time.NewTimer(time.Millisecond), // The first scan should be done immediately.
        delayScan:   make(chan time.Duration),
        scanNow:     make(chan rescanRequest),
        remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.
    }
}
@@ -189,10 +204,10 @@ func (p *rwFolder) Serve() {
    }

    // RemoteLocalVersion() is a fast call, doesn't touch the database.
    curVer := p.model.RemoteLocalVersion(p.folder)
    if curVer == prevVer {
    curVer, ok := p.model.RemoteLocalVersion(p.folder)
    if !ok || curVer == prevVer {
        if debug {
            l.Debugln(p, "skip (curVer == prevVer)", prevVer)
            l.Debugln(p, "skip (curVer == prevVer)", prevVer, ok)
        }
        p.pullTimer.Reset(nextPullIntv)
        continue
@@ -216,7 +231,7 @@ func (p *rwFolder) Serve() {
    // sync. Remember the local version number and
    // schedule a resync a little bit into the future.

    if lv := p.model.RemoteLocalVersion(p.folder); lv < curVer {
    if lv, ok := p.model.RemoteLocalVersion(p.folder); ok && lv < curVer {
        // There's a corner case where the device we needed
        // files from disconnected during the puller
        // iteration. The files will have been removed from
@@ -265,7 +280,7 @@ func (p *rwFolder) Serve() {
        l.Debugln(p, "rescan")
    }

    if err := p.model.ScanFolder(p.folder); err != nil {
    if err := p.model.internalScanFolderSubs(p.folder, nil); err != nil {
        // Potentially sets the error twice, once in the scanner just
        // by doing a check, and once here, if the error returned is
        // the same one as returned by CheckFolderHealth, though
@@ -283,6 +298,29 @@ func (p *rwFolder) Serve() {
        initialScanCompleted = true
    }

case req := <-p.scanNow:
    if err := p.model.CheckFolderHealth(p.folder); err != nil {
        l.Infoln("Skipping folder", p.folder, "scan due to folder error:", err)
        req.err <- err
        continue
    }

    if debug {
        l.Debugln(p, "forced rescan")
    }

    if err := p.model.internalScanFolderSubs(p.folder, req.subs); err != nil {
        // Potentially sets the error twice, once in the scanner just
        // by doing a check, and once here, if the error returned is
        // the same one as returned by CheckFolderHealth, though
        // duplicate set is handled by setError.
        p.setError(err)
        req.err <- err
        continue
    }

    req.err <- nil

case next := <-p.delayScan:
    p.scanTimer.Reset(next)
}
@@ -304,6 +342,15 @@ func (p *rwFolder) IndexUpdated() {
    }
}

func (p *rwFolder) Scan(subs []string) error {
    req := rescanRequest{
        subs: subs,
        err:  make(chan error),
    }
    p.scanNow <- req
    return <-req.err
}

func (p *rwFolder) String() string {
    return fmt.Sprintf("rwFolder/%s@%p", p.folder, p)
}
@@ -326,7 +373,7 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
        l.Debugln(p, "c", p.copiers, "p", p.pullers)
    }

    p.dbUpdates = make(chan protocol.FileInfo)
    p.dbUpdates = make(chan dbUpdateJob)
    updateWg.Add(1)
    go func() {
        // dbUpdaterRoutine finishes when p.dbUpdates is closed
@@ -528,7 +575,7 @@ nextFile:
// handleDir creates or updates the given directory
func (p *rwFolder) handleDir(file protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
    events.Default.Log(events.ItemStarted, map[string]string{
        "folder": p.folder,
        "item":   file.Name,
        "type":   "dir",
@@ -583,7 +630,7 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
    }

    if err = osutil.InWritableDir(mkdir, realName); err == nil {
        p.dbUpdates <- file
        p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
    } else {
        l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
    }
@@ -600,9 +647,9 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
    // It's OK to change mode bits on stuff within non-writable directories.

    if p.ignorePermissions(file) {
        p.dbUpdates <- file
        p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
    } else if err := os.Chmod(realName, mode); err == nil {
        p.dbUpdates <- file
        p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
    } else {
        l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
    }
@@ -611,7 +658,7 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
// deleteDir attempts to delete the given directory
func (p *rwFolder) deleteDir(file protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
    events.Default.Log(events.ItemStarted, map[string]string{
        "folder": p.folder,
        "item":   file.Name,
        "type":   "dir",
@@ -642,13 +689,13 @@ func (p *rwFolder) deleteDir(file protocol.FileInfo) {
    err = osutil.InWritableDir(osutil.Remove, realName)
    if err == nil || os.IsNotExist(err) {
        // It was removed or it doesn't exist to start with
        p.dbUpdates <- file
    } else if _, err := os.Lstat(realName); err != nil && !os.IsPermission(err) {
        p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
    } else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
        // We get an error just looking at the directory, and it's not a
        // permission problem. Lets assume the error is in fact some variant
        // of "file does not exist" (possibly expressed as some parent being a
        // file and not a directory etc) and that the delete is handled.
        p.dbUpdates <- file
        p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
    } else {
        l.Infof("Puller (folder %q, dir %q): delete: %v", p.folder, file.Name, err)
    }
@@ -657,7 +704,7 @@ func (p *rwFolder) deleteDir(file protocol.FileInfo) {
// deleteFile attempts to delete the given file
func (p *rwFolder) deleteFile(file protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
    events.Default.Log(events.ItemStarted, map[string]string{
        "folder": p.folder,
        "item":   file.Name,
        "type":   "file",
@@ -690,13 +737,13 @@ func (p *rwFolder) deleteFile(file protocol.FileInfo) {

    if err == nil || os.IsNotExist(err) {
        // It was removed or it doesn't exist to start with
        p.dbUpdates <- file
    } else if _, err := os.Lstat(realName); err != nil && !os.IsPermission(err) {
        p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
    } else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
        // We get an error just looking at the file, and it's not a permission
        // problem. Lets assume the error is in fact some variant of "file
        // does not exist" (possibly expressed as some parent being a file and
        // not a directory etc) and that the delete is handled.
        p.dbUpdates <- file
        p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
    } else {
        l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
    }
@@ -706,13 +753,13 @@ func (p *rwFolder) deleteFile(file protocol.FileInfo) {
// and set the right attributes on it.
func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
    var err error
    events.Default.Log(events.ItemStarted, map[string]interface{}{
    events.Default.Log(events.ItemStarted, map[string]string{
        "folder": p.folder,
        "item":   source.Name,
        "type":   "file",
        "action": "delete",
    })
    events.Default.Log(events.ItemStarted, map[string]interface{}{
    events.Default.Log(events.ItemStarted, map[string]string{
        "folder": p.folder,
        "item":   target.Name,
        "type":   "file",
@@ -756,13 +803,15 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
    // of the source and the creation of the target. Fix-up the metadata,
    // and update the local index of the target file.

    p.dbUpdates <- source
    p.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}

    err = p.shortcutFile(target)
    if err != nil {
        l.Infof("Puller (folder %q, file %q): rename from %q metadata: %v", p.folder, target.Name, source.Name, err)
        return
    }

    p.dbUpdates <- dbUpdateJob{target, dbUpdateHandleFile}
} else {
    // We failed the rename so we have a source file that we still need to
    // get rid of. Attempt to delete it instead so that we make *some*
@@ -774,7 +823,7 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
        return
    }

    p.dbUpdates <- source
    p.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}
}
}

@@ -815,13 +864,6 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
    events.Default.Log(events.ItemStarted, map[string]interface{}{
        "folder": p.folder,
        "item":   file.Name,
        "type":   "file",
        "action": "update",
    })

    curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

    if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
@@ -831,23 +873,47 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
        if debug {
            l.Debugln(p, "taking shortcut on", file.Name)
        }

        events.Default.Log(events.ItemStarted, map[string]string{
            "folder": p.folder,
            "item":   file.Name,
            "type":   "file",
            "action": "metadata",
        })

        p.queue.Done(file.Name)

        var err error
        if file.IsSymlink() {
            err = p.shortcutSymlink(file)
        } else {
            err = p.shortcutFile(file)
        }

        events.Default.Log(events.ItemFinished, map[string]interface{}{
            "folder": p.folder,
            "item":   file.Name,
            "error":  events.Error(err),
            "type":   "file",
            "action": "update",
            "action": "metadata",
        })

        if err != nil {
            l.Infoln("Puller: shortcut:", err)
        } else {
            p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
        }

        return
    }

    events.Default.Log(events.ItemStarted, map[string]string{
        "folder": p.folder,
        "item":   file.Name,
        "type":   "file",
        "action": "update",
    })

    scanner.PopulateOffsets(file.Blocks)

    // Figure out the absolute filenames we need once and for all
@@ -944,16 +1010,13 @@ func (p *rwFolder) shortcutFile(file protocol.FileInfo) error {
        file.Version = file.Version.Merge(cur.Version)
    }

    p.dbUpdates <- file
    return nil
}

// shortcutSymlink changes the symlinks type if necessary.
func (p *rwFolder) shortcutSymlink(file protocol.FileInfo) (err error) {
    err = symlinks.ChangeType(filepath.Join(p.dir, file.Name), file.Flags)
    if err == nil {
        p.dbUpdates <- file
    } else {
    if err != nil {
        l.Infof("Puller (folder %q, file %q): symlink shortcut: %v", p.folder, file.Name, err)
    }
    return
@@ -1173,7 +1236,7 @@ func (p *rwFolder) performFinish(state *sharedPullerState) error {
    }

    // Record the updated file in the index
    p.dbUpdates <- state.file
    p.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}
    return nil
}

@@ -1229,39 +1292,63 @@ func (p *rwFolder) dbUpdaterRoutine() {
    maxBatchTime = 2 * time.Second
)

batch := make([]protocol.FileInfo, 0, maxBatchSize)
batch := make([]dbUpdateJob, 0, maxBatchSize)
files := make([]protocol.FileInfo, 0, maxBatchSize)
tick := time.NewTicker(maxBatchTime)
defer tick.Stop()

handleBatch := func() {
    found := false
    var lastFile protocol.FileInfo

    for _, job := range batch {
        files = append(files, job.file)
        if job.file.IsInvalid() || (job.file.IsDirectory() && !job.file.IsSymlink()) {
            continue
        }

        if job.jobType&(dbUpdateHandleFile|dbUpdateDeleteFile) == 0 {
            continue
        }

        found = true
        lastFile = job.file
    }

    p.model.updateLocals(p.folder, files)

    if found {
        p.model.receivedFile(p.folder, lastFile)
    }

    batch = batch[:0]
    files = files[:0]
}

loop:
for {
    select {
    case file, ok := <-p.dbUpdates:
    case job, ok := <-p.dbUpdates:
        if !ok {
            break loop
        }

        file.LocalVersion = 0
        batch = append(batch, file)
        job.file.LocalVersion = 0
        batch = append(batch, job)

        if len(batch) == maxBatchSize {
            p.model.updateLocals(p.folder, batch)
            p.model.receivedFile(p.folder, batch[len(batch)-1].Name)
            batch = batch[:0]
            handleBatch()
        }

    case <-tick.C:
        if len(batch) > 0 {
            p.model.updateLocals(p.folder, batch)
            p.model.receivedFile(p.folder, batch[len(batch)-1].Name)
            batch = batch[:0]
            handleBatch()
        }
    }
}

if len(batch) > 0 {
    p.model.updateLocals(p.folder, batch)
    p.model.receivedFile(p.folder, batch[len(batch)-1].Name)
    handleBatch()
}
}
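
dbUpdaterRoutine above flushes its batch either when it reaches maxBatchSize or when the two-second ticker fires, whichever comes first, and handleBatch additionally remembers the last real file operation so receivedFile is only called for genuine file updates or deletes. A self-contained sketch of the size-or-time batching idiom, with generic names rather than the patch's own API:

    package main

    import (
        "fmt"
        "time"
    )

    // batcher collects items from in and flushes them when the batch is full
    // or when maxWait elapses, whichever comes first. A final partial batch
    // is flushed when in is closed.
    func batcher(in <-chan int, flush func([]int), maxSize int, maxWait time.Duration) {
        batch := make([]int, 0, maxSize)
        tick := time.NewTicker(maxWait)
        defer tick.Stop()
        for {
            select {
            case v, ok := <-in:
                if !ok {
                    if len(batch) > 0 {
                        flush(batch)
                    }
                    return
                }
                batch = append(batch, v)
                if len(batch) == maxSize {
                    flush(batch)
                    batch = batch[:0]
                }
            case <-tick.C:
                if len(batch) > 0 {
                    flush(batch)
                    batch = batch[:0]
                }
            }
        }
    }

    func main() {
        in := make(chan int)
        go func() {
            for i := 0; i < 5; i++ {
                in <- i
            }
            close(in)
        }()
        batcher(in, func(b []int) { fmt.Println("flush", b) }, 2, time.Second)
        // => flush [0 1], flush [2 3], flush [4]
    }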
@@ -15,6 +15,7 @@ import (
    "strings"
)

// Glob implements filepath.Glob, but works with Windows long path prefixes.
// Deals with https://github.com/golang/go/issues/10577
func Glob(pattern string) (matches []string, err error) {
    if !hasMeta(pattern) {
19 internal/rc/debug.go Normal file
@@ -0,0 +1,19 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package rc

import (
    "os"
    "strings"

    "github.com/calmh/logger"
)

var (
    debug = strings.Contains(os.Getenv("STTRACE"), "rc") || os.Getenv("STTRACE") == "all"
    l     = logger.DefaultLogger
)
496 internal/rc/rc.go Normal file
@@ -0,0 +1,496 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// Package rc provides remote control of a Syncthing process via the REST API.
package rc

import (
    "bufio"
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "os/exec"
    "path/filepath"
    stdsync "sync"
    "time"

    "github.com/syncthing/protocol"
    "github.com/syncthing/syncthing/internal/config"
    "github.com/syncthing/syncthing/internal/sync"
)

// We set the API key via the STGUIAPIKEY variable when we launch the binary,
// to ensure that we have API access regardless of authentication settings.
const APIKey = "592A47BC-A7DF-4C2F-89E0-A80B3E5094EE"

type Process struct {
    // Set at initialization
    addr string

    // Set by eventLoop()
    eventMut          sync.Mutex
    id                protocol.DeviceID
    folders           []string
    startComplete     bool
    startCompleteCond *stdsync.Cond
    stop              bool
    localVersion      map[string]map[string]int64 // Folder ID => Device ID => LocalVersion
    done              map[string]bool             // Folder ID => 100%

    cmd   *exec.Cmd
    logfd *os.File
}

// NewProcess returns a new Process talking to Syncthing at the specified address.
// Example: NewProcess("127.0.0.1:8082")
func NewProcess(addr string) *Process {
    p := &Process{
        addr:         addr,
        localVersion: make(map[string]map[string]int64),
        done:         make(map[string]bool),
        eventMut:     sync.NewMutex(),
    }
    p.startCompleteCond = stdsync.NewCond(p.eventMut)
    return p
}

// LogTo creates the specified log file and ensures that stdout and stderr
// from the Start()ed process is redirected there. Must be called before
// Start().
func (p *Process) LogTo(filename string) error {
    if p.cmd != nil {
        panic("logfd cannot be set with an existing cmd")
    }

    if p.logfd != nil {
        p.logfd.Close()
    }

    fd, err := os.Create(filename)
    if err != nil {
        return err
    }
    p.logfd = fd
    return nil
}

// Start runs the specified Syncthing binary with the given arguments.
// Syncthing should be configured to provide an API on the address given to
// NewProcess. Event processing is started.
func (p *Process) Start(bin string, args ...string) error {
    cmd := exec.Command(bin, args...)
    if p.logfd != nil {
        cmd.Stdout = p.logfd
        cmd.Stderr = p.logfd
    }
    cmd.Env = append(os.Environ(), "STNORESTART=1", "STGUIAPIKEY="+APIKey)

    err := cmd.Start()
    if err != nil {
        return err
    }

    p.cmd = cmd
    go p.eventLoop()

    return nil
}

// AwaitStartup waits for the Syncthing process to start and perform initial
// scans of all folders.
func (p *Process) AwaitStartup() {
    p.eventMut.Lock()
    for !p.startComplete {
        p.startCompleteCond.Wait()
    }
    p.eventMut.Unlock()
    return
}

// Stop stops the running Syncthing process. If the process was logging to a
// local file (set by LogTo), the log file will be opened and checked for
// panics and data races. The presence of either will be signalled in the form
// of a returned error.
func (p *Process) Stop() (*os.ProcessState, error) {
    p.eventMut.Lock()
    if p.stop {
        p.eventMut.Unlock()
        return p.cmd.ProcessState, nil
    }
    p.stop = true
    p.eventMut.Unlock()

    if err := p.cmd.Process.Signal(os.Kill); err != nil {
        return nil, err
    }
    p.cmd.Wait()

    var err error
    if p.logfd != nil {
        err = p.checkForProblems(p.logfd)
    }

    return p.cmd.ProcessState, err
}

// Get performs an HTTP GET and returns the bytes and/or an error. Any non-200
// return code is returned as an error.
func (p *Process) Get(path string) ([]byte, error) {
    client := &http.Client{
        Timeout: 30 * time.Second,
        Transport: &http.Transport{
            DisableKeepAlives: true,
        },
    }

    url := fmt.Sprintf("http://%s%s", p.addr, path)
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }

    req.Header.Add("X-API-Key", APIKey)

    resp, err := client.Do(req)
    if err != nil {
        return nil, err
    }

    return p.readResponse(resp)
}

// Post performs an HTTP POST and returns the bytes and/or an error. Any
// non-200 return code is returned as an error.
func (p *Process) Post(path string, data io.Reader) ([]byte, error) {
    client := &http.Client{
        Timeout: 600 * time.Second,
        Transport: &http.Transport{
            DisableKeepAlives: true,
        },
    }
    url := fmt.Sprintf("http://%s%s", p.addr, path)
    req, err := http.NewRequest("POST", url, data)
    if err != nil {
        return nil, err
    }

    req.Header.Add("X-API-Key", APIKey)
    req.Header.Add("Content-Type", "application/json")

    resp, err := client.Do(req)
    if err != nil {
        return nil, err
    }

    return p.readResponse(resp)
}

type Event struct {
    ID   int
    Time time.Time
    Type string
    Data interface{}
}

func (p *Process) Events(since int) ([]Event, error) {
    bs, err := p.Get(fmt.Sprintf("/rest/events?since=%d", since))
    if err != nil {
        return nil, err
    }

    var evs []Event
    dec := json.NewDecoder(bytes.NewReader(bs))
    dec.UseNumber()
    err = dec.Decode(&evs)
    if err != nil {
        return nil, fmt.Errorf("Events: %s in %q", err, bs)
    }
    return evs, err
}

func (p *Process) Rescan(folder string) error {
    _, err := p.Post("/rest/db/scan?folder="+folder, nil)
    return err
}

func (p *Process) RescanDelay(folder string, delaySeconds int) error {
    _, err := p.Post(fmt.Sprintf("/rest/db/scan?folder=%s&next=%d", folder, delaySeconds), nil)
    return err
}

func InSync(folder string, ps ...*Process) bool {
    for _, p := range ps {
        p.eventMut.Lock()
    }
    defer func() {
        for _, p := range ps {
            p.eventMut.Unlock()
        }
    }()

    for i := range ps {
        // If our latest FolderSummary didn't report 100%, then we are not done.

        if !ps[i].done[folder] {
            return false
        }

        // Check LocalVersion for each device. The local version seen by remote
        // devices should be the same as what it has locally, or the index
        // hasn't been sent yet.

        sourceID := ps[i].id.String()
        sourceVersion := ps[i].localVersion[folder][sourceID]
        for j := range ps {
            if i != j {
                remoteVersion := ps[j].localVersion[folder][sourceID]
                if remoteVersion != sourceVersion {
                    return false
                }
            }
        }
    }

    return true
}

func AwaitSync(folder string, ps ...*Process) {
    for {
        time.Sleep(250 * time.Millisecond)
        if InSync(folder, ps...) {
            return
        }
    }
}

type Model struct {
    GlobalBytes   int
    GlobalDeleted int
    GlobalFiles   int
    InSyncBytes   int
    InSyncFiles   int
    Invalid       string
    LocalBytes    int
    LocalDeleted  int
    LocalFiles    int
    NeedBytes     int
    NeedFiles     int
    State         string
    StateChanged  time.Time
    Version       int
}

func (p *Process) Model(folder string) (Model, error) {
    bs, err := p.Get("/rest/db/status?folder=" + folder)
    if err != nil {
        return Model{}, err
    }

    var res Model
    if err := json.Unmarshal(bs, &res); err != nil {
        return Model{}, err
    }

    if debug {
        l.Debugf("%+v", res)
    }

    return res, nil
}

func (p *Process) readResponse(resp *http.Response) ([]byte, error) {
    bs, err := ioutil.ReadAll(resp.Body)
    resp.Body.Close()
    if err != nil {
        return bs, err
    }
    if resp.StatusCode != 200 {
        return bs, fmt.Errorf("%s", resp.Status)
    }
    return bs, nil
}

func (p *Process) checkForProblems(logfd *os.File) error {
    fd, err := os.Open(logfd.Name())
    if err != nil {
        return err
    }
    defer fd.Close()

    raceConditionStart := []byte("WARNING: DATA RACE")
    raceConditionSep := []byte("==================")
    panicConditionStart := []byte("panic:")
    panicConditionSep := []byte(p.id.String()[:5])
    sc := bufio.NewScanner(fd)
    race := false
    _panic := false

    for sc.Scan() {
        line := sc.Bytes()
        if race || _panic {
            if bytes.Contains(line, panicConditionSep) {
                _panic = false
                continue
            }
            fmt.Printf("%s\n", line)
            if bytes.Contains(line, raceConditionSep) {
                race = false
            }
        } else if bytes.Contains(line, raceConditionStart) {
            fmt.Printf("%s\n", raceConditionSep)
            fmt.Printf("%s\n", raceConditionStart)
            race = true
            if err == nil {
                err = errors.New("Race condition detected")
            }
        } else if bytes.Contains(line, panicConditionStart) {
            _panic = true
            if err == nil {
                err = errors.New("Panic detected")
            }
        }
    }

    return err
}

func (p *Process) eventLoop() {
    since := 0
    notScanned := make(map[string]struct{})
    start := time.Now()
    for {
        p.eventMut.Lock()
        if p.stop {
            p.eventMut.Unlock()
            return
        }
        p.eventMut.Unlock()

        time.Sleep(250 * time.Millisecond)

        events, err := p.Events(since)
        if err != nil {
            if time.Since(start) < 5*time.Second {
                // The API has probably not started yet, lets give it some time.
                continue
            }

            // If we're stopping, no need to print the error.
            p.eventMut.Lock()
            if p.stop {
                p.eventMut.Unlock()
                return
            }
            p.eventMut.Unlock()

            log.Println("eventLoop: events:", err)
            continue
        }
        since = events[len(events)-1].ID

        for _, ev := range events {
            switch ev.Type {
            case "Starting":
                // The Starting event tells us where the configuration is. Load
                // it and populate our list of folders.

                data := ev.Data.(map[string]interface{})
                id, err := protocol.DeviceIDFromString(data["myID"].(string))
                if err != nil {
                    log.Println("eventLoop: DeviceIdFromString:", err)
                    continue
                }
                p.id = id

                home := data["home"].(string)
                w, err := config.Load(filepath.Join(home, "config.xml"), protocol.LocalDeviceID)
                if err != nil {
                    log.Println("eventLoop: Starting:", err)
                    continue
                }
                for id := range w.Folders() {
                    p.eventMut.Lock()
                    p.folders = append(p.folders, id)
                    p.eventMut.Unlock()
                    notScanned[id] = struct{}{}
                }

            case "StateChanged":
                // When a folder changes to idle, we tick it off by removing
                // it from p.notScanned.

                if !p.startComplete {
                    data := ev.Data.(map[string]interface{})
                    to := data["to"].(string)
                    if to == "idle" {
                        folder := data["folder"].(string)
                        delete(notScanned, folder)
                        if len(notScanned) == 0 {
                            p.eventMut.Lock()
                            p.startComplete = true
                            p.startCompleteCond.Broadcast()
                            p.eventMut.Unlock()
                        }
                    }
                }

            case "LocalIndexUpdated":
                data := ev.Data.(map[string]interface{})
                folder := data["folder"].(string)
                version, _ := data["version"].(json.Number).Int64()
                p.eventMut.Lock()
                m := p.localVersion[folder]
                if m == nil {
                    m = make(map[string]int64)
                }
                m[p.id.String()] = version
                p.localVersion[folder] = m
                p.done[folder] = false
                if debug {
                    l.Debugf("LocalIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m)
                }
                p.eventMut.Unlock()

            case "RemoteIndexUpdated":
                data := ev.Data.(map[string]interface{})
                device := data["device"].(string)
                folder := data["folder"].(string)
                version, _ := data["version"].(json.Number).Int64()
                p.eventMut.Lock()
                m := p.localVersion[folder]
                if m == nil {
                    m = make(map[string]int64)
                }
                m[device] = version
                p.localVersion[folder] = m
                p.done[folder] = false
                if debug {
                    l.Debugf("RemoteIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m)
                }
                p.eventMut.Unlock()

            case "FolderSummary":
                data := ev.Data.(map[string]interface{})
                folder := data["folder"].(string)
                summary := data["summary"].(map[string]interface{})
                need, _ := summary["needBytes"].(json.Number).Int64()
                done := need == 0
                p.eventMut.Lock()
                p.done[folder] = done
                if debug {
                    l.Debugf("Foldersummary %v %v\n\t%+v", p.id, folder, p.done)
                }
                p.eventMut.Unlock()
            }
        }
    }
}
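
A hypothetical test-harness fragment built only from the rc API above; the binary path, home directory, flags and GUI address are illustrative assumptions rather than values from this change:

    p := rc.NewProcess("127.0.0.1:8082")
    if err := p.LogTo("/tmp/st1.log"); err != nil {
        log.Fatal(err)
    }
    if err := p.Start("./bin/syncthing", "-home", "h1", "-no-browser"); err != nil {
        log.Fatal(err)
    }
    p.AwaitStartup()                // blocks until all folders have completed their initial scan
    if err := p.Rescan("default"); err != nil {
        log.Fatal(err)
    }
    rc.AwaitSync("default", p)      // polls until the folder reports 100%
    if _, err := p.Stop(); err != nil {
        log.Println("log scan found problems:", err) // panics or data races in the log file
    }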
@@ -9,6 +9,8 @@ package stats
import (
    "time"

    "github.com/syncthing/protocol"

    "github.com/syncthing/syncthing/internal/db"
    "github.com/syndtr/goleveldb/leveldb"
)
@@ -25,6 +27,7 @@ type FolderStatisticsReference struct {
type LastFile struct {
    At       time.Time `json:"at"`
    Filename string    `json:"filename"`
    Deleted  bool      `json:"deleted"`
}

func NewFolderStatisticsReference(ldb *leveldb.DB, folder string) *FolderStatisticsReference {
@@ -44,18 +47,21 @@ func (s *FolderStatisticsReference) GetLastFile() LastFile {
    if !ok {
        return LastFile{}
    }
    deleted, ok := s.ns.Bool("lastFileDeleted")
    return LastFile{
        At:       at,
        Filename: file,
        Deleted:  deleted,
    }
}

func (s *FolderStatisticsReference) ReceivedFile(filename string) {
func (s *FolderStatisticsReference) ReceivedFile(file protocol.FileInfo) {
    if debug {
        l.Debugln("stats.FolderStatisticsReference.ReceivedFile:", s.folder, filename)
        l.Debugln("stats.FolderStatisticsReference.ReceivedFile:", s.folder, file)
    }
    s.ns.PutTime("lastFileAt", time.Now())
    s.ns.PutString("lastFileName", filename)
    s.ns.PutString("lastFileName", file.Name)
    s.ns.PutBool("lastFileDeleted", file.IsDeleted())
}

func (s *FolderStatisticsReference) GetStatistics() FolderStatistics {
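
ReceivedFile now takes the whole protocol.FileInfo instead of just the name, so it can persist the deleted flag through the new PutBool/Bool pair, and GetLastFile surfaces it in the JSON-tagged Deleted field. An illustrative round trip, assuming a FolderStatisticsReference s and a protocol.FileInfo f describing a deleted file:

    s.ReceivedFile(f) // persists name, timestamp and f.IsDeleted()
    last := s.GetLastFile()
    fmt.Printf("%s deleted=%v at %v\n", last.Filename, last.Deleted, last.At)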
@@ -21,11 +21,11 @@ import (
)

const (
    FSCTL_GET_REPARSE_POINT      = 0x900a8
    FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
    FILE_ATTRIBUTE_REPARSE_POINT = 0x400
    IO_REPARSE_TAG_SYMLINK       = 0xA000000C
    SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
    Win32FsctlGetReparsePoint      = 0x900a8
    Win32FileFlagOpenReparsePoint  = 0x00200000
    Win32FileAttributeReparsePoint = 0x400
    Win32IOReparseTagSymlink       = 0xA000000C
    Win32SymbolicLinkFlagDirectory = 0x1
)

var (
@@ -106,7 +106,7 @@ func Read(path string) (string, uint32, error) {
    if err != nil {
        return "", protocol.FlagSymlinkMissingTarget, err
    }
    handle, err := syscall.CreateFile(ptr, 0, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|FILE_FLAG_OPEN_REPARSE_POINT, 0)
    handle, err := syscall.CreateFile(ptr, 0, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|Win32FileFlagOpenReparsePoint, 0)
    if err != nil || handle == syscall.InvalidHandle {
        return "", protocol.FlagSymlinkMissingTarget, err
    }
@@ -114,12 +114,12 @@ func Read(path string) (string, uint32, error) {
    var ret uint16
    var data reparseData

    r1, _, err := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), FSCTL_GET_REPARSE_POINT, 0, 0, uintptr(unsafe.Pointer(&data)), unsafe.Sizeof(data), uintptr(unsafe.Pointer(&ret)), 0, 0)
    r1, _, err := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), Win32FsctlGetReparsePoint, 0, 0, uintptr(unsafe.Pointer(&data)), unsafe.Sizeof(data), uintptr(unsafe.Pointer(&ret)), 0, 0)
    if r1 == 0 {
        return "", protocol.FlagSymlinkMissingTarget, err
    }

    var flags uint32 = 0
    var flags uint32
    attr, err := syscall.GetFileAttributes(ptr)
    if err != nil {
        flags = protocol.FlagSymlinkMissingTarget
@@ -154,10 +154,10 @@ func Create(source, target string, flags uint32) error {

    stat, err := os.Stat(path)
    if err == nil && stat.IsDir() {
        mode = SYMBOLIC_LINK_FLAG_DIRECTORY
        mode = Win32SymbolicLinkFlagDirectory
    }
} else if flags&protocol.FlagDirectory != 0 {
    mode = SYMBOLIC_LINK_FLAG_DIRECTORY
    mode = Win32SymbolicLinkFlagDirectory
}

r0, _, err := syscall.Syscall(procCreateSymbolicLink.Addr(), 3, uintptr(unsafe.Pointer(srcp)), uintptr(unsafe.Pointer(trgp)), uintptr(mode))
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-CONFIG" "5" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-CONFIG" "5" "June 28, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-config \- Syncthing Configuration
.
@@ -30,21 +30,13 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
.sp
\fBWARNING:\fP
.INDENT 0.0
.INDENT 3.5
This page may be outdated and requires review.
Attributes have been added that are not documented.
.UNINDENT
.UNINDENT
.SH SYNOPSIS
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
$HOME/.config/syncthing/config.xml
$HOME/.config/syncthing
$HOME/Library/Application Support/Syncthing
%AppData%/Syncthing
%localappdata%/Syncthing
@@ -57,31 +49,27 @@ $HOME/Library/Application Support/Syncthing
Syncthing uses a single directory to store configuration, crypto keys
and index caches. The location defaults to \fB$HOME/.config/syncthing\fP
(Unix\-like), \fB$HOME/Library/Application Support/Syncthing\fP (Mac),
\fB%AppData%/Syncthing\fP (Windows XP) or \fB%localappdata%/Syncthing\fP
(Windows 7/8). It can be changed at runtime using the \fB\-home\fP flag. In this
\fB%AppData%/Syncthing\fP (Windows XP) or \fB%LocalAppData%/Syncthing\fP
(Windows 7+). It can be changed at runtime using the \fB\-home\fP flag. In this
directory the following files are located:
.INDENT 0.0
.TP
.B cert.pem
The device\(aqs RSA public key, named "cert" for legacy reasons.
.TP
.B key.pem
The device\(aqs RSA private key. This needs to be protected.
.TP
.B config.xml
.B \fBconfig.xml\fP
The configuration file, in XML format.
.TP
.B https\-cert.pem
The certificate for HTTPS GUI connections.
.B \fBcert.pem\fP, \fBkey.pem\fP
The device\(aqs RSA public and private key. These form the basis for the
device ID. The key must be kept private.
.TP
.B https\-key.pem
The key for HTTPS GUI connections.
.B \fBhttps\-cert.pem\fP, \fBhttps\-key.pem\fP
The certificate and key for HTTPS GUI connections. These may be replaced
with a custom certificate for HTTPS as desired.
.TP
.B index/
.B \fBindex\-\fI*\fP\&.db\fP
A directory holding the database with metadata and hashes of the files
currently on disk and available from peers.
.TP
.B csrftokens.txt
.B \fBcsrftokens.txt\fP
A list of recently issued CSRF tokens (for protection against browser cross
site request forgery).
.UNINDENT
@@ -93,80 +81,170 @@ The following shows the default configuration file:
.sp
.nf
.ft C
<configuration version="2">
<folder id="default" directory="/Users/jb/Sync" ro="false" ignorePerms="false">
<device id="GXN5ECCWTA2B7EB5FXYL5OWGOADX5EF5VNJAQSIBAY6XHJ24BNOA"></device>
<configuration version="10">
<folder id="default" path="/Users/jb/Sync" ro="false" rescanIntervalS="60" ignorePerms="false" autoNormalize="false">
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU"></device>
<versioning></versioning>
<copiers>0</copiers>
<pullers>0</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
<device id="GXN5ECCWTA2B7EB5FXYL5OWGOADX5EF5VNJAQSIBAY6XHJ24BNOA" name="jborg\-mbp">
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU" name="syno" compression="metadata" introducer="false">
<address>dynamic</address>
</device>
<gui enabled="true" tls="true">
<address>127.0.0.1:54096</address>
<user>jb</user>
<password>$2a$10$EKaTIcpz2...</password>
<apikey>O80CDOJ9LVUVCMHFK2OJDO4T882735</apikey>
<gui enabled="true" tls="false">
<address>127.0.0.1:8384</address>
<apikey>l7jSbCqPD95JYZ0g8vi4ZLAMg3ulnN1b</apikey>
</gui>
<options>
<listenAddress>:54097</listenAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<listenAddress>0.0.0.0:56847</listenAddress>
<globalAnnounceServer>udp4://announce.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceServer>udp6://announce\-v6.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceEnabled>true</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<parallelRequests>16</parallelRequests>
<localAnnouncePort>21025</localAnnouncePort>
<localAnnounceMCAddr>[ff32::5222]:21026</localAnnounceMCAddr>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>60</rescanIntervalS>
<maxRecvKbps>0</maxRecvKbps>
<reconnectionIntervalS>60</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>true</startBrowser>
<upnpEnabled>true</upnpEnabled>
<upnpLeaseMinutes>60</upnpLeaseMinutes>
<upnpRenewalMinutes>30</upnpRenewalMinutes>
<upnpTimeoutSeconds>10</upnpTimeoutSeconds>
<urAccepted>0</urAccepted>
<urUniqueID></urUniqueID>
<restartOnWakeup>true</restartOnWakeup>
<autoUpgradeIntervalH>12</autoUpgradeIntervalH>
<keepTemporariesH>24</keepTemporariesH>
<cacheIgnoredFiles>true</cacheIgnoredFiles>
<progressUpdateIntervalS>5</progressUpdateIntervalS>
<symlinksEnabled>true</symlinksEnabled>
<limitBandwidthInLan>false</limitBandwidthInLan>
<databaseBlockCacheMiB>0</databaseBlockCacheMiB>
</options>
</configuration>
.ft P
.fi
.UNINDENT
.UNINDENT
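As an aside, the configuration shown above maps naturally onto Go structs via encoding/xml. A minimal sketch that reads the version attribute and the folder list; the struct and field names here are invented for illustration and are not Syncthing's actual types:

package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// Hypothetical types for illustration; not Syncthing's real ones.
type folder struct {
	ID   string `xml:"id,attr"`
	Path string `xml:"path,attr"`
}

type configuration struct {
	Version int      `xml:"version,attr"`
	Folders []folder `xml:"folder"`
}

func main() {
	f, err := os.Open("config.xml")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Decode the root element; attributes map via the ",attr" tags.
	var cfg configuration
	if err := xml.NewDecoder(f).Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Println("config version:", cfg.Version)
	for _, fld := range cfg.Folders {
		fmt.Println("folder", fld.ID, "at", fld.Path)
	}
}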
.SS configuration
.SH CONFIGURATION ELEMENT
.sp
This is the root element.
.INDENT 0.0
.TP
.B version
The config version. The current version is \fB2\fP\&.
The config version. Increments whenever a change is made that requires migration from previous formats.
.UNINDENT
.SS folder
.SH FOLDER ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
One or more \fBfolder\fP elements must be present in the file. Each
element describes one folder.
.nf
.ft C
<folder id="default" path="/Users/jb/Sync" ro="false" rescanIntervalS="60" ignorePerms="false" autoNormalize="false">
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU"></device>
<versioning></versioning>
<copiers>0</copiers>
<pullers>0</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
Within the \fBfolder\fP element one or more \fBdevice\fP elements should be
present. These must contain the \fBid\fP attribute and nothing else.
Mentioned devices are those that will be sharing the folder in question.
Each mentioned device must have a separate \fBdevice\fP element later in
the file. It is customary that the local device ID is included in all
repositories. Syncthing will currently add this automatically if it is
not present in the configuration file.
One or more \fBfolder\fP elements must be present in the file. Each element
describes one folder. The following attributes may be set on the \fBfolder\fP
element:
.INDENT 0.0
.TP
.B id
The folder ID, must be unique. (mandatory)
.TP
.B directory
The directory where the folder is stored on this
.B path
The path to the directory where the folder is stored on this
device; not sent to other devices. (mandatory)
.TP
.B ro
True if the folder is read only (will not be modified by Syncthing) on this
device. (optional, defaults to \fBfalse\fP)
True if the folder is read only (Master mode; will not be modified by
Syncthing) on this device.
.TP
.B rescanIntervalS
The rescan interval, in seconds.
.TP
.B ignorePerms
True if the folder should \fI\%ignore permissions\fP <\fBhttp://forum.syncthing.net/t/263\fP>\&.
True if the folder should ignore permissions.
.TP
.B autoNormalize
Automatically correct UTF\-8 normalization errors found in file names.
.UNINDENT
.SS device
.sp
One or more \fBdevice\fP elements must be present in the file. Each
element describes a device participating in the cluster. It is customary
to include a \fBdevice\fP element for the local device; Syncthing will
currently add one if it is not present.
The following child elements may exist:
.INDENT 0.0
.TP
.B device
These must have the \fBid\fP attribute and nothing else. Mentioned devices
are those that will be sharing the folder in question. Each mentioned
device must have a separate \fBdevice\fP element later in the file. It is
customary that the local device ID is included in all repositories.
Syncthing will currently add this automatically if it is not present in
the configuration file.
.TP
.B versioning
Specifies a versioning configuration.
.sp
\fBNOTE:\fP
.INDENT 7.0
.INDENT 3.5
Needs explanation.
.UNINDENT
.UNINDENT
.TP
.B copiers, pullers, hashers
The number of copier, puller and hasher routines to use, or zero for the
system determined optimum. These are low level performance options for
advanced users only; do not change unless requested to or you\(aqve actually
read and understood the code yourself. :)
.TP
.B order
The order in which needed files should be pulled from the cluster. The possible values are:
.INDENT 7.0
.TP
.B random
Pull files in random order. This optimizes for balancing resources among the devices in a cluster.
.TP
.B alphabetic
Pull files ordered by file name alphabetically.
.TP
.B smallestFirst, largestFirst
Pull files ordered by file size; smallest and largest first respectively.
.TP
.B oldestFirst, newestFirst
Pull files ordered by modification time; oldest and newest first respectively.
.UNINDENT
.UNINDENT
.SH DEVICE ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU" name="syno" compression="metadata" introducer="false">
<address>dynamic</address>
</device>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
One or more \fBdevice\fP elements must be present in the file. Each element
describes a device participating in the cluster. It is customary to include a
\fBdevice\fP element for the local device; Syncthing will currently add one if
it is not present. The following attributes may be set on the \fBdevice\fP
element:
.INDENT 0.0
.TP
.B id
@@ -176,31 +254,95 @@ spaces or dashes. (mandatory)
.B name
A friendly name for the device. (optional)
.TP
.B address
The address section is only valid inside of \fBdevice\fP elements. It contains
a single address, in one of the following forms:
.B compression
Whether to use protocol compression when sending messages to this device.
The possible values are:
.INDENT 7.0
.IP \(bu 2
IPv4 addresses, IPv6 addresses within brackets, or DNS names, all
optionally followed by a port number.
.IP \(bu 2
\fBdynamic\fP: The address will be resolved using discovery.
.TP
.B metadata
Compress metadata packets, such as index information. Metadata is
usually very compression friendly so this is a good default.
.TP
.B always
Compress all packets, including file data. This is recommended if the
folder\(aqs contents are mainly compressible data such as documents or
text files.
.TP
.B never
Disable all compression.
.UNINDENT
.TP
.B introducer
Set to true if this device should be trusted as an introducer, i.e. we
should copy their list of devices per folder when connecting.
.UNINDENT
.SS gui
.sp
There must be \fIexactly one\fP \fBgui\fP element.
In addition, one or more \fBaddress\fP child elements must be present. Each
contains an address to use when attempting to connect to this device and will
be tried in order. Accepted formats are:
.INDENT 0.0
.TP
.B IPv4 address (\fB192.0.2.42\fP)
The default port (22000) is used.
.TP
.B IPv4 address and port (\fB192.0.2.42:12345\fP)
The address and port are used as given.
.TP
.B IPv6 address (\fB2001:db8::23:42\fP)
The default port (22000) is used.
.TP
.B IPv6 address and port (\fB[2001:db8::23:42]:12345\fP)
The address and port are used as given. The address must be enclosed in square brackets.
.TP
.B \fBdynamic\fP
The word \fBdynamic\fP means to use local and global discovery to find the device.
.UNINDENT
.SH GUI ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
<gui enabled="true" tls="false">
<address>127.0.0.1:8384</address>
<apikey>l7jSbCqPD95JYZ0g8vi4ZLAMg3ulnN1b</apikey>
</gui>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
There must be exactly one \fBgui\fP element. The GUI configuration is also used
by the rest\-api and the event\-api\&. The following attributes may
be set on the \fBgui\fP element:
.INDENT 0.0
.TP
.B enabled
\fBtrue\fP/\fBfalse\fP
If not \fBtrue\fP, the GUI and API will not be started.
.TP
.B tls
\fBtrue\fP/\fBfalse\fP: If true then the GUI will use HTTPS.
If set to \fBtrue\fP, TLS (HTTPS) will be enforced. Non\-HTTPS requests will
be redirected to HTTPS. When this is set to \fBfalse\fP, TLS connections are
still possible but not mandatory.
.UNINDENT
.sp
The following child elements may be present:
.INDENT 0.0
.TP
.B address
One or more address elements must be present, containing an \fBip:port\fP
listen address.
Set the listen addresses. One or more address elements must be present.
Allowed address formats are:
.INDENT 7.0
.TP
.B IPv4 address and port (\fB127.0.0.1:8384\fP)
The address and port are used as given.
.TP
.B IPv6 address and port (\fB[::1]:8384\fP)
The address and port are used as given. The address must be enclosed in square brackets.
.TP
.B Wildcard and port (\fB0.0.0.0:12345\fP, \fB[::]:12345\fP, \fB:12345\fP)
These are equivalent and will result in Syncthing listening on all interfaces, over both IPv4 and IPv6.
.UNINDENT
.TP
.B username
Set to require authentication.
@@ -211,56 +353,141 @@ Contains the bcrypt hash of the real password.
.B apikey
If set, this is the API key that enables usage of the REST interface.
.UNINDENT
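When an API key is set, REST clients authenticate by sending it in the X-API-Key request header. A minimal sketch in Go; the listen address and key are the example values from the gui element above, and the /rest/config path is an assumption for this version of the REST interface:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Address and endpoint assumed from the example configuration above.
	req, err := http.NewRequest("GET", "http://127.0.0.1:8384/rest/config", nil)
	if err != nil {
		panic(err)
	}
	// The key from the <apikey> element, sent as the X-API-Key header.
	req.Header.Set("X-API-Key", "l7jSbCqPD95JYZ0g8vi4ZLAMg3ulnN1b")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}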
.SH OPTIONS ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
Additionally, there must be \fIexactly one\fP \fBoptions\fP element. It contains the
following configuration settings as children:
.nf
.ft C
<options>
<listenAddress>0.0.0.0:56847</listenAddress>
<globalAnnounceServer>udp4://announce.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceServer>udp6://announce\-v6.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceEnabled>true</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<localAnnouncePort>21025</localAnnouncePort>
<localAnnounceMCAddr>[ff32::5222]:21026</localAnnounceMCAddr>
<maxSendKbps>0</maxSendKbps>
<maxRecvKbps>0</maxRecvKbps>
<reconnectionIntervalS>60</reconnectionIntervalS>
<startBrowser>true</startBrowser>
<upnpEnabled>true</upnpEnabled>
<upnpLeaseMinutes>60</upnpLeaseMinutes>
<upnpRenewalMinutes>30</upnpRenewalMinutes>
<upnpTimeoutSeconds>10</upnpTimeoutSeconds>
<urAccepted>0</urAccepted>
<urUniqueID></urUniqueID>
<restartOnWakeup>true</restartOnWakeup>
<autoUpgradeIntervalH>12</autoUpgradeIntervalH>
<keepTemporariesH>24</keepTemporariesH>
<cacheIgnoredFiles>true</cacheIgnoredFiles>
<progressUpdateIntervalS>5</progressUpdateIntervalS>
<symlinksEnabled>true</symlinksEnabled>
<limitBandwidthInLan>false</limitBandwidthInLan>
<databaseBlockCacheMiB>0</databaseBlockCacheMiB>
</options>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
The \fBoptions\fP element contains all other global configuration options.
.INDENT 0.0
.TP
.B listenAddress
\fBhost:port\fP or \fB:port\fP string denoting an address to listen for BEP
connections. More than one \fBlistenAddress\fP may be given.
(default: \fB0.0.0.0:22000\fP)
The listen address for incoming sync connections. See the \fBaddress\fP
element under the \fI\%GUI Element\fP for allowed syntax.
.TP
.B globalAnnounceServer
\fBhost:port\fP string denoting where a global announce server may be
reached. (default: \fBannounce.syncthing.net:22025\fP)
A URI to a global announce (discovery) server. Allowed protocol prefixes
are \fBudp4://\fP (UDP over IPv4), \fBudp6://\fP (UDP over IPv6) and
\fBudp://\fP (UDP over any available protocol).
.TP
.B globalAnnounceEnabled
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to announce this device to the global announce (discovery) server,
and also use it to look up other devices.
.TP
.B localAnnounceEnabled
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to send announcements to the local LAN, and also use such
announcements to find other devices.
.TP
.B parallelRequests
The maximum number of outstanding block requests to have against any given
peer. (default: \fB16\fP)
.B localAnnouncePort
The port on which to listen for and send IPv4 broadcast announcements.
.TP
.B localAnnounceMCAddr
The group address and port to join and send IPv6 multicast announcements on.
.TP
.B maxSendKbps
Rate limit
Outgoing data rate limit, in kibibits per second.
.TP
.B rescanIntervalS
The number of seconds to wait between each scan for modification of the
local repositories. A value of \fB0\fP disables the scanner. (default: \fB60\fP)
.B maxRecvKbps
Incoming data rate limit, in kibibits per second.
.TP
.B reconnectionIntervalS
The number of seconds to wait between each attempt to connect to currently
unconnected devices. (default: \fB60\fP)
.TP
.B maxChangeKbps
The maximum rate of change allowed for a single file. When this rate is
exceeded, further changes to the file are not announced, until the rate is
reduced below the limit. (default: \fB10000\fP)
unconnected devices.
.TP
.B startBrowser
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to attempt to start a browser to show the GUI when Syncthing starts.
.TP
.B upnpEnabled
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to attempt to perform a UPnP port mapping for incoming sync connections.
.TP
.B upnpLeaseMinutes
Request a lease for this many minutes; zero to request a permanent lease.
.TP
.B upnpRenewalMinutes
Attempt to renew the lease after this many minutes.
.TP
.B upnpTimeoutSeconds
When scanning for UPnP devices, wait this long for responses.
.TP
.B urAccepted
Whether the user has accepted to submit anonymous usage data. The default,
\fB0\fP, means the user has not made a choice, and Syncthing will ask at some
point in the future. \fB\-1\fP means no, \fB1\fP means yes.
point in the future. \fB\-1\fP means no, a number above zero means that that
version of usage reporting has been accepted.
.TP
.B urUniqueID
The unique ID sent together with the usage report. Generated when usage
reporting is enabled.
.TP
.B restartOnWakeup
Whether to perform a restart of Syncthing when it is detected that we are
waking from sleep mode (i.e. a folded up laptop).
.TP
.B autoUpgradeIntervalH
Check for a newer version after this many hours. Set to zero to disable
automatic upgrades.
.TP
.B keepTemporariesH
Keep temporary failed transfers for this many hours. While the temporaries
are kept, the data they contain need not be transferred again.
.TP
.B cacheIgnoredFiles
Whether to cache the results of ignore pattern evaluation. Performance at
the price of memory.
.TP
.B progressUpdateIntervalS
.
\fBNOTE:\fP
.INDENT 7.0
.INDENT 3.5
Requires explanation.
.UNINDENT
.UNINDENT
.TP
.B symlinksEnabled
Whether to sync symlinks, if supported by the system.
.TP
.B limitBandwidthInLan
Whether to apply bandwidth limits to devices in the same broadcast domain
as the local device.
.TP
.B databaseBlockCacheMiB
Override the automatically calculated database block cache size. Don\(aqt,
unless you\(aqre very short on memory, in which case you want to set this to
\fB8\fP\&.
.UNINDENT
.SH AUTHOR
The Syncthing Authors

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-DEVICE-IDS" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-DEVICE-IDS" "7" "June 28, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-device-ids \- Understanding Device IDs
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-EVENT-API" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-EVENT-API" "7" "June 28, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-event-api \- Event API
.
@@ -444,7 +444,10 @@ An unsuccessful operation:
.UNINDENT
.UNINDENT
.sp
The \fBaction\fP field is either \fBupdate\fP or \fBdelete\fP\&.
The \fBaction\fP field is either \fBupdate\fP (contents changed), \fBmetadata\fP (file metadata changed but not contents), or \fBdelete\fP\&.
.sp
New in version 0.11.10: The \fBmetadata\fP action.

.SS ItemStarted
.sp
Generated when Syncthing begins synchronizing a file to a newer version.
@@ -469,7 +472,10 @@ Generated when Syncthing begins synchronizing a file to a newer version.
.UNINDENT
.UNINDENT
.sp
The \fBaction\fP field is either \fBupdate\fP or \fBdelete\fP\&.
The \fBaction\fP field is either \fBupdate\fP (contents changed), \fBmetadata\fP (file metadata changed but not contents), or \fBdelete\fP\&.
.sp
New in version 0.11.10: The \fBmetadata\fP action.
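Event consumers typically long-poll the REST events endpoint and dispatch on the type and action fields. A minimal sketch in Go; the /rest/events path and the reduced event layout are assumptions based on the event descriptions above, not an authoritative client:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// A reduced event shape for this sketch; real events carry more fields.
type event struct {
	ID   int                    `json:"id"`
	Type string                 `json:"type"`
	Data map[string]interface{} `json:"data"`
}

func main() {
	since := 0
	for {
		// Assumed endpoint; add the X-API-Key header if a key is configured.
		url := fmt.Sprintf("http://127.0.0.1:8384/rest/events?since=%d", since)
		resp, err := http.Get(url)
		if err != nil {
			panic(err)
		}

		var events []event
		if err := json.NewDecoder(resp.Body).Decode(&events); err != nil {
			resp.Body.Close()
			panic(err)
		}
		resp.Body.Close()

		for _, ev := range events {
			since = ev.ID
			if ev.Type == "ItemStarted" || ev.Type == "ItemFinished" {
				// action is "update", "metadata" (0.11.10+) or "delete".
				fmt.Println(ev.Type, ev.Data["item"], ev.Data["action"])
			}
		}
	}
}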
.SS LocalIndexUpdated
.sp
Generated when the local index information has changed, due to

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-FAQ" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-FAQ" "7" "June 28, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-faq \- Frequently Asked Questions
.