Compare commits

...

46 Commits

Author SHA1 Message Date
Jakob Borg
2463819a3d Update translations and docs 2015-06-21 11:45:54 +02:00
Zillode
0f1b40da71 Merge pull request #1982 from calmh/fix-1978
Sanitize rescan interval values (fixes #1978)
2015-06-20 23:28:18 +02:00
Jakob Borg
4eb0e24c6e Sanitize rescan interval values (fixes #1978) 2015-06-20 23:01:30 +02:00
Jakob Borg
1d2235abe7 Model must be running for tests 2015-06-20 23:00:33 +02:00
Jakob Borg
d347e54acb Don't start model until services have been added (fixes #1969) 2015-06-20 20:04:47 +02:00
Jakob Borg
b5198d8119 Merge pull request #1968 from calmh/newtests
Refactored integration tests
2015-06-20 19:24:04 +02:00
Jakob Borg
b8b5c5ff34 Merge pull request #1913 from Zillode/fix-reset
Fix 'reset' Rest API on windows
2015-06-20 11:43:05 +02:00
Jakob Borg
a738490a3b Update translation strings 2015-06-20 11:40:05 +02:00
Jakob Borg
54a8de2059 Merge remote-tracking branch 'syncthing/pr/1979'
* syncthing/pr/1979:
  Display Local State Summary (All Folders)
2015-06-20 11:39:31 +02:00
Jakob Borg
cb2c0e7ac5 Add fti7 2015-06-20 11:32:55 +02:00
Frank Isemann
510d309b8a Display Local State Summary (All Folders) 2015-06-19 21:52:19 +02:00
Jakob Borg
a7ce2a7aa5 Merge pull request #1963 from wsgcsysadmin/master
Put thisDeviceName first in page tile to make small browser tabs distinguishable
2015-06-19 08:51:24 +02:00
Jakob Borg
cfe24ecdd9 Add wsgcsysadmin 2015-06-19 08:50:36 +02:00
Jakob Borg
c5e9cb025c Merge pull request #1977 from Zillode/fix-1976
Corrected API response when resetting folder (fixes #1976)
2015-06-19 08:48:40 +02:00
Jakob Borg
c3d07d60ca Refactored integration tests
Added internal/rc to remote control a Syncthing process and made the
"awaiting sync" determination reliable.
2015-06-19 08:47:47 +02:00
Lode Hoste
a0897a7456 Corrected API response when resetting folder (fixes #1976) 2015-06-19 08:30:19 +02:00
WSGCSysadmin
c50ba9267c Put thisDeviceName first in page title 2015-06-18 10:53:37 -05:00
Jakob Borg
423e69916c Merge pull request #1962 from Zillode/fix-pull-order-gui
Set default pull order in webGUI
2015-06-18 16:51:10 +02:00
Lode Hoste
b56c76f8ad Fix 'reset' Rest API on windows 2015-06-18 12:45:08 +02:00
Lode Hoste
cb2d2f000f Set default pull order in webGUI 2015-06-18 12:42:00 +02:00
Audrius Butkevicius
69af77a3bd Merge pull request #1967 from calmh/woops
Incorrect error condition on shortcuts
2015-06-18 11:07:07 +01:00
Jakob Borg
7767746d3e Incorrect error condition on shortcuts 2015-06-18 11:55:43 +02:00
Audrius Butkevicius
7219aaeb89 Merge pull request #1966 from calmh/overrideevents
Generate LocalIndexUpdated events in Override
2015-06-18 10:17:38 +01:00
Jakob Borg
7af1863e81 Generate LocalIndexUpdated events in Override
The override should look like we detected the changes locally, or the
GUI and other things won't update correctly.

This is/was caught by the Override integration test, on my newly
refactored integration test suite which is soon ready for prime time, so
a test is coming. :)
2015-06-18 10:49:24 +02:00
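The reasoning in this message — an override must look like a locally detected change, or downstream consumers keep showing stale state — can be sketched with a toy event bus. This is illustrative Go only; the Bus type, the event payload fields, and the Override logic here are assumptions, not Syncthing's actual internals:

package main

import "fmt"

// Bus is a stand-in for an events package: subscribers get a buffered
// channel, Log fans an event out to all of them.
type Bus struct{ subs []chan map[string]interface{} }

func (b *Bus) Subscribe() chan map[string]interface{} {
	c := make(chan map[string]interface{}, 16)
	b.subs = append(b.subs, c)
	return c
}

func (b *Bus) Log(event string, data map[string]interface{}) {
	data["type"] = event
	for _, c := range b.subs {
		c <- data
	}
}

type file struct {
	name    string
	version int64
}

// Override forces local file versions to win over remote ones, then
// reports the result exactly as a local scan would have.
func Override(bus *Bus, folder string, files []file) {
	for i := range files {
		files[i].version++ // bump so the local copy supersedes the remote one
	}
	// The point of the fix: emit LocalIndexUpdated here, so the GUI and
	// other subscribers refresh instead of keeping stale state.
	bus.Log("LocalIndexUpdated", map[string]interface{}{
		"folder": folder,
		"items":  len(files),
	})
}

func main() {
	bus := &Bus{}
	gui := bus.Subscribe()
	Override(bus, "default", []file{{name: "a.txt", version: 3}})
	fmt.Println(<-gui) // map[folder:default items:1 type:LocalIndexUpdated]
}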
Jakob Borg
4beb42bf45 Merge pull request #1959 from AudriusButkevicius/lastfile
Add label next to "Last file received" (fixes #1952)
2015-06-18 10:45:27 +02:00
Audrius Butkevicius
12a3086a9e Add label next to "Last file received" (fixes #1952) 2015-06-16 12:12:34 +01:00
Audrius Butkevicius
198725216f Merge pull request #1957 from calmh/myid
Include myID in the StartupComplete event
2015-06-16 08:46:50 +01:00
Audrius Butkevicius
08647f1267 Merge pull request #1956 from calmh/earlyevents
Fix API event subscription
2015-06-16 08:46:29 +01:00
Audrius Butkevicius
87811efc30 Merge pull request #1955 from calmh/localver
Add version to LocalIndexUpdate event.
2015-06-16 08:45:09 +01:00
Jakob Borg
82c3e6f87f Include myID in the StartupComplete event
Nice to have...
2015-06-16 09:27:06 +02:00
Jakob Borg
1ac40a3043 Fix API event subscription
The API never got the first few events ("Starting" etc) as it subscribed
too late. Instead, set up a subscription for it early on. If the API is
configured not to run this is unnecessary but doesn't hurt very much.
2015-06-16 09:17:58 +02:00
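The failure mode described here is a classic pub/sub race: a subscriber that attaches after the producer starts never sees the earliest events. A generic Go illustration of the fix — subscribe first, emit later — not the actual Syncthing code:

package main

import "fmt"

type bus struct{ subs []chan string }

func (b *bus) subscribe(buf int) chan string {
	c := make(chan string, buf)
	b.subs = append(b.subs, c)
	return c
}

func (b *bus) log(ev string) {
	for _, c := range b.subs {
		select {
		case c <- ev: // deliver
		default: // subscriber too slow: drop rather than block
		}
	}
}

func main() {
	b := &bus{}
	// The fix: attach the API's subscription before anything can emit.
	apiEvents := b.subscribe(1000)

	b.log("Starting")        // early events are now captured...
	b.log("StartupComplete") // ...instead of being silently lost.

	close(apiEvents)
	for ev := range apiEvents {
		fmt.Println(ev)
	}
}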
Jakob Borg
127b0c3332 Add version to LocalIndexUpdate event.
Allows correlating LocalIndexUpdate events on one device with RemoteIndexUpdated on another to make sure the cluster has converged.
2015-06-16 08:30:15 +02:00
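With the version attached, the convergence check this commit enables becomes a simple comparison: the cluster has settled once every peer's RemoteIndexUpdated reports at least the version announced in the origin device's LocalIndexUpdated. A sketch with an assumed, simplified event shape:

package main

import "fmt"

// indexEvent is an assumed, simplified shape for both LocalIndexUpdated
// (emitted where the change happened) and RemoteIndexUpdated (emitted by
// peers as they receive the index).
type indexEvent struct {
	device  string
	folder  string
	version uint64
}

// converged reports whether every peer has seen at least the version the
// origin device last announced for the folder.
func converged(local indexEvent, remotes []indexEvent) bool {
	for _, r := range remotes {
		if r.folder == local.folder && r.version < local.version {
			return false
		}
	}
	return true
}

func main() {
	local := indexEvent{device: "A", folder: "default", version: 42}
	remotes := []indexEvent{
		{device: "B", folder: "default", version: 42},
		{device: "C", folder: "default", version: 41}, // still catching up
	}
	fmt.Println(converged(local, remotes)) // false
}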
Jakob Borg
a6d9150b14 Skip extra newline between assets 2015-06-15 23:13:43 +02:00
Jakob Borg
7e5197c566 Help link for folder master 2015-06-15 22:34:39 +02:00
Jakob Borg
2d217e72bd Dependency update 2015-06-15 21:10:33 +02:00
Jakob Borg
12331cc62b Merge pull request #1943 from ralder/webgui-events-in-service
webgui: moved events controller to events service
2015-06-15 20:33:27 +02:00
Sergey Mishin
2449f1e1b6 webgui: moved events controller to events service 2015-06-15 19:05:46 +03:00
Audrius Butkevicius
6a6593c656 Merge pull request #1948 from calmh/symwarning
Dont warn about irrelevant symlinks, print path to culprit (ref #1945)
2015-06-15 10:41:51 +01:00
Jakob Borg
ad220d61f9 Merge pull request #1951 from AudriusButkevicius/voodoo
Voodoo
2015-06-15 11:31:03 +02:00
Audrius Butkevicius
1e35383b4d Merge pull request #1947 from calmh/metadata
Differentiate between content and metadata updates in ItemStarted/ItemFinished
2015-06-15 10:26:09 +01:00
Audrius Butkevicius
c8457ab005 Voodoo 2015-06-15 10:22:44 +01:00
Jakob Borg
070a308217 Dont warn about irrelevant symlinks, print path to culprit (ref #1945)
This skips the warning about "unsupported symlinks" for invalid or
deleted symlinks (and as a side effect also accepts them into the index,
which should be fine). It also prints the affected file paths to the
log. This should be in the hypothetical list of "errored files" we
should present instead of the cryptic "puller stopped" message in the
future...
2015-06-15 00:47:13 +02:00
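The distinction the commit draws — only resolvable symlinks deserve the "unsupported symlinks" warning, and the warning should name the file — can be approximated with os.Lstat (inspects the link itself) versus os.Stat (follows it, failing on a dangling target). A rough generic sketch, not the scanner's real code:

package main

import (
	"fmt"
	"os"
)

// checkSymlink decides whether a symlink deserves the "unsupported
// symlink" warning. Invalid (dangling) or deleted links are skipped quietly.
func checkSymlink(path string) {
	fi, err := os.Lstat(path)
	if err != nil || fi.Mode()&os.ModeSymlink == 0 {
		return // gone, or not a symlink at all
	}
	if _, err := os.Stat(path); err != nil {
		// Dangling target: irrelevant for syncing, don't warn.
		return
	}
	// A real, resolvable symlink on a platform without symlink support:
	// warn, and include the path so the user can find the culprit.
	fmt.Printf("warning: unsupported symlink, not synced: %s\n", path)
}

func main() {
	checkSymlink("/tmp/example-link") // path is illustrative
}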
Jakob Borg
c1f4477376 ItemStarted can be map[string]string 2015-06-14 22:59:21 +02:00
Jakob Borg
d728320ece New ItemStart/Finished type 'metadata' for shortcut updates 2015-06-14 22:56:41 +02:00
Audrius Butkevicius
fee0d7168a Merge pull request #1946 from calmh/guiassets
Default GUI override dir
2015-06-14 21:40:56 +01:00
Jakob Borg
7c23b32de3 Default GUI override dir
If STGUIASSETS is not set, look for assets in $confdir/gui by default.
Simplifies deploying overrides and stuff.
2015-06-14 22:28:40 +02:00
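The lookup order described here is straightforward to express: honor STGUIASSETS when set, otherwise fall back to gui under the configuration directory. A minimal sketch; the confDir argument and example path are assumptions:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// guiAssetsDir returns the GUI override directory: $STGUIASSETS if set,
// otherwise <confdir>/gui.
func guiAssetsDir(confDir string) string {
	if dir := os.Getenv("STGUIASSETS"); dir != "" {
		return dir
	}
	return filepath.Join(confDir, "gui")
}

func main() {
	fmt.Println(guiAssetsDir("/home/user/.config/syncthing")) // path is illustrative
}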
112 changed files with 2811 additions and 2189 deletions

AUTHORS

@@ -22,10 +22,12 @@ Dennis Wilson <dw@risu.io>
 Dominik Heidler <dominik@heidler.eu>
 Elias Jarlebring <jarlebring@gmail.com>
 Emil Hessman <emil@hessman.se>
+Erik Meitner <e.meitner@willystreet.coop>
 Federico Castagnini <federico.castagnini@gmail.com>
 Felix Ableitner <me@nutomic.com>
 Felix Unterpaintner <bigbear2nd@gmail.com>
 Francois-Xavier Gsell <fxgsell@gmail.com>
+Frank Isemann <frank@isemann.name>
 Gilli Sigurdsson <gilli@vx.is>
 Jakob Borg <jakob@nym.se>
 James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>

Godeps/Godeps.json (generated)

@@ -7,7 +7,7 @@
 	"Deps": [
 		{
 			"ImportPath": "github.com/bkaradzic/go-lz4",
-			"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
+			"Rev": "4f7c2045dbd17b802370e2e6022200468abf02ba"
 		},
 		{
 			"ImportPath": "github.com/calmh/logger",
@@ -21,13 +21,17 @@
 			"ImportPath": "github.com/calmh/xdr",
 			"Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
 		},
+		{
+			"ImportPath": "github.com/google/go-snappy/snappy",
+			"Rev": "eaa750b9bf4dcb7cb20454be850613b66cda3273"
+		},
 		{
 			"ImportPath": "github.com/juju/ratelimit",
-			"Rev": "c5abe513796336ee2869745bff0638508450e9c5"
+			"Rev": "faa59ce93750e747b2997635e8b7daf30024b1ac"
 		},
 		{
 			"ImportPath": "github.com/kardianos/osext",
-			"Rev": "efacde03154693404c65e7aa7d461ac9014acd0c"
+			"Rev": "6e7f843663477789fac7c02def0d0909e969b4e5"
 		},
 		{
 			"ImportPath": "github.com/syncthing/protocol",
@@ -35,11 +39,7 @@
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "87e4e645d80ae9c537e8f2dee52b28036a5dd75e"
-		},
-		{
-			"ImportPath": "github.com/syndtr/gosnappy/snappy",
-			"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
+			"Rev": "a06509502ca32565bdf74afc1e573050023f261c"
 		},
 		{
 			"ImportPath": "github.com/thejerf/suture",
@@ -59,19 +59,19 @@
 		},
 		{
 			"ImportPath": "golang.org/x/crypto/bcrypt",
-			"Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
+			"Rev": "1e856cbfdf9bc25eefca75f83f25d55e35ae72e0"
 		},
 		{
 			"ImportPath": "golang.org/x/crypto/blowfish",
-			"Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
+			"Rev": "1e856cbfdf9bc25eefca75f83f25d55e35ae72e0"
 		},
 		{
 			"ImportPath": "golang.org/x/text/transform",
-			"Rev": "2076e9cab4147459c82bc81169e46c139d358547"
+			"Rev": "df923bbb63f8ea3a26bb743e2a497abd0ab585f7"
 		},
 		{
 			"ImportPath": "golang.org/x/text/unicode/norm",
-			"Rev": "2076e9cab4147459c82bc81169e46c139d358547"
+			"Rev": "df923bbb63f8ea3a26bb743e2a497abd0ab585f7"
 		}
 	]
 }

Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzz.go (new file)

@@ -0,0 +1,23 @@
+// +build gofuzz
+
+package lz4
+
+import "encoding/binary"
+
+func Fuzz(data []byte) int {
+	if len(data) < 4 {
+		return 0
+	}
+
+	ln := binary.LittleEndian.Uint32(data)
+	if ln > (1 << 21) {
+		return 0
+	}
+
+	if _, err := Decode(nil, data); err != nil {
+		return 0
+	}
+
+	return 1
+}

Godeps/_workspace/src/github.com/bkaradzic/go-lz4/reader.go

@@ -141,7 +141,7 @@ func Decode(dst, src []byte) ([]byte, error) {
 			length += ln
 		}
-		if int(d.spos+length) > len(d.src) {
+		if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
 			return nil, ErrCorrupt
 		}
@@ -179,7 +179,12 @@ func Decode(dst, src []byte) ([]byte, error) {
 		}
 		literal := d.dpos - d.ref
 		if literal < 4 {
+			if int(d.dpos+4) > len(d.dst) {
+				return nil, ErrCorrupt
+			}
 			d.cp(4, decr[literal])
 		} else {
 			length += 4

Godeps/_workspace/src/github.com/bkaradzic/go-lz4/writer.go

@@ -25,8 +25,10 @@
 package lz4
 
-import "encoding/binary"
-import "errors"
+import (
+	"encoding/binary"
+	"errors"
+)
 
 const (
 	minMatch = 4

Godeps/_workspace/src/github.com/juju/ratelimit/README.md

@@ -1,5 +1,8 @@
 This package contains an efficient token-bucket-based rate limiter.
 
-Copyright (C) 2015 Canonical Ltd.
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
 
 This software is licensed under the LGPLv3, included below.

Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go

@@ -7,6 +7,7 @@
 package ratelimit
 
 import (
+	"math"
 	"strconv"
 	"sync"
 	"time"
@@ -55,7 +56,7 @@ func NewBucketWithRate(rate float64, capacity int64) *Bucket {
 			continue
 		}
 		tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
-		if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
+		if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
 			return tb
 		}
 	}
@@ -217,10 +218,3 @@ func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
 	tb.availTick = currentTick
 	return
 }
-
-func abs(f float64) float64 {
-	if f < 0 {
-		return -f
-	}
-	return f
-}

Godeps/_workspace/src/github.com/kardianos/osext/README.md

@@ -4,7 +4,9 @@
 There is sometimes utility in finding the current executable file
 that is running. This can be used for upgrading the current executable
-or finding resources located relative to the executable file.
+or finding resources located relative to the executable file. Both
+working directory and the os.Args[0] value are arbitrary and cannot
+be relied on; os.Args[0] can be "faked".
 
 Multi-platform and supports:
 * Linux

Godeps/_workspace/src/github.com/kardianos/osext/osext.go

@@ -16,12 +16,12 @@ func Executable() (string, error) {
 }
 
 // Returns same path as Executable, returns just the folder
-// path. Excludes the executable name.
+// path. Excludes the executable name and any trailing slash.
 func ExecutableFolder() (string, error) {
 	p, err := Executable()
 	if err != nil {
 		return "", err
 	}
-	folder, _ := filepath.Split(p)
-	return folder, nil
+
+	return filepath.Dir(p), nil
 }

Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go

@@ -17,12 +17,14 @@ import (
 func executable() (string, error) {
 	switch runtime.GOOS {
 	case "linux":
-		const deletedSuffix = " (deleted)"
+		const deletedTag = " (deleted)"
 		execpath, err := os.Readlink("/proc/self/exe")
 		if err != nil {
 			return execpath, err
 		}
-		return strings.TrimSuffix(execpath, deletedSuffix), nil
+		execpath = strings.TrimSuffix(execpath, deletedTag)
+		execpath = strings.TrimPrefix(execpath, deletedTag)
+		return execpath, nil
 	case "netbsd":
 		return os.Readlink("/proc/curproc/exe")
 	case "openbsd", "dragonfly":

Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go

@@ -24,6 +24,29 @@ const (
 	executableEnvValueDelete = "delete"
 )
 
+func TestPrintExecutable(t *testing.T) {
+	ef, err := Executable()
+	if err != nil {
+		t.Fatalf("Executable failed: %v", err)
+	}
+	t.Log("Executable:", ef)
+}
+
+func TestPrintExecutableFolder(t *testing.T) {
+	ef, err := ExecutableFolder()
+	if err != nil {
+		t.Fatalf("ExecutableFolder failed: %v", err)
+	}
+	t.Log("Executable Folder:", ef)
+}
+
+func TestExecutableFolder(t *testing.T) {
+	ef, err := ExecutableFolder()
+	if err != nil {
+		t.Fatalf("ExecutableFolder failed: %v", err)
+	}
+	if ef[len(ef)-1] == filepath.Separator {
+		t.Fatal("ExecutableFolder ends with a trailing slash.")
+	}
+}
+
 func TestExecutableMatch(t *testing.T) {
 	ep, err := Executable()
 	if err != nil {
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go

@@ -63,13 +63,14 @@ type DB struct {
journalAckC chan error
// Compaction.
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compStats []cStats
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compWriteLocking bool
compStats []cStats
// Close.
closeW sync.WaitGroup
@@ -108,28 +109,44 @@ func openDB(s *session) (*DB, error) {
closeC: make(chan struct{}),
}
if err := db.recoverJournal(); err != nil {
return nil, err
}
// Read-only mode.
readOnly := s.o.GetReadOnly()
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
if readOnly {
// Recover journals (read-only mode).
if err := db.recoverJournalRO(); err != nil {
return nil, err
}
return nil, err
} else {
// Recover journals.
if err := db.recoverJournal(); err != nil {
return nil, err
}
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
}
return nil, err
}
}
// Doesn't need to be included in the wait group.
go db.compactionError()
go db.mpoolDrain()
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
if readOnly {
db.SetReadOnly()
} else {
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
}
s.logf("db@open done T·%v", time.Since(start))
@@ -275,7 +292,7 @@ func recoverTable(s *session, o *opt.Options) error {
// We will drop corrupted table.
strict = o.GetStrict(opt.StrictRecovery)
rec = &sessionRecord{numLevel: o.GetNumLevel()}
rec = &sessionRecord{}
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
)
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
@@ -450,132 +467,136 @@ func recoverTable(s *session, o *opt.Options) error {
}
func (db *DB) recoverJournal() error {
// Get all tables and sort it by file number.
journalFiles_, err := db.s.getFiles(storage.TypeJournal)
// Get all journals and sort it by file number.
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
journalFiles := files(journalFiles_)
journalFiles.sort()
files(allJournalFiles).sort()
// Discard older journal.
prev := -1
for i, file := range journalFiles {
if file.Num() >= db.s.stJournalNum {
if prev >= 0 {
i--
journalFiles[i] = journalFiles[prev]
}
journalFiles = journalFiles[i:]
break
} else if file.Num() == db.s.stPrevJournalNum {
prev = i
// Journals that will be recovered.
var recJournalFiles []storage.File
for _, jf := range allJournalFiles {
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
recJournalFiles = append(recJournalFiles, jf)
}
}
var jr *journal.Reader
var of storage.File
var mem *memdb.DB
batch := new(Batch)
cm := newCMem(db.s)
buf := new(util.Buffer)
// Options.
strict := db.s.o.GetStrict(opt.StrictJournal)
checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer := db.s.o.GetWriteBuffer()
recoverJournal := func(file storage.File) error {
db.logf("journal@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
defer reader.Close()
var (
of storage.File // Obsolete file.
rec = &sessionRecord{}
)
// Create/reset journal reader instance.
if jr == nil {
jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
} else {
jr.Reset(reader, dropper{db.s, file}, strict, checksum)
}
// Flush memdb and remove obsolete journal file.
if of != nil {
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
if err := cm.commit(file.Num(), db.seq); err != nil {
return err
}
cm.reset()
of.Remove()
of = nil
}
// Replay journal to memdb.
mem.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
return errors.SetFile(err, file)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is error returned due to corruption, with strict == false.
continue
} else {
return errors.SetFile(err, file)
}
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
if strict || !errors.IsCorrupted(err) {
return errors.SetFile(err, file)
} else {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mem.Size() >= writeBuffer {
if err := cm.flush(mem, 0); err != nil {
return err
}
mem.Reset()
}
}
of = file
return nil
}
// Recover all journals.
if len(journalFiles) > 0 {
db.logf("journal@recovery F·%d", len(journalFiles))
// Recover journals.
if len(recJournalFiles) > 0 {
db.logf("journal@recovery F·%d", len(recJournalFiles))
// Mark file number as used.
db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
mem = memdb.New(db.s.icmp, writeBuffer)
for _, file := range journalFiles {
if err := recoverJournal(file); err != nil {
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
jr *journal.Reader
mdb = memdb.New(db.s.icmp, writeBuffer)
buf = &util.Buffer{}
batch = &Batch{}
)
for _, jf := range recJournalFiles {
db.logf("journal@recovery recovering @%d", jf.Num())
fr, err := jf.Open()
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
}
// Flush memdb and remove obsolete journal file.
if of != nil {
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
fr.Close()
return err
}
}
rec.setJournalNum(jf.Num())
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
fr.Close()
return err
}
rec.resetAddedTables()
of.Remove()
of = nil
}
// Replay journal to memdb.
mdb.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFile(err, jf)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is error returned due to corruption, with strict == false.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mdb.Size() >= writeBuffer {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
fr.Close()
return err
}
mdb.Reset()
}
}
fr.Close()
of = jf
}
// Flush the last journal.
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
// Flush the last memdb.
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
return err
}
}
@@ -587,8 +608,10 @@ func (db *DB) recoverJournal() error {
}
// Commit.
if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
// Close journal.
rec.setJournalNum(db.journalFile.Num())
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
// Close journal on error.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
@@ -604,6 +627,103 @@ func (db *DB) recoverJournal() error {
return nil
}
func (db *DB) recoverJournalRO() error {
// Get all journals and sort it by file number.
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
files(allJournalFiles).sort()
// Journals that will be recovered.
var recJournalFiles []storage.File
for _, jf := range allJournalFiles {
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
recJournalFiles = append(recJournalFiles, jf)
}
}
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
mdb = memdb.New(db.s.icmp, writeBuffer)
)
// Recover journals.
if len(recJournalFiles) > 0 {
db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
var (
jr *journal.Reader
buf = &util.Buffer{}
batch = &Batch{}
)
for _, jf := range recJournalFiles {
db.logf("journal@recovery recovering @%d", jf.Num())
fr, err := jf.Open()
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
}
// Replay journal to memdb.
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFile(err, jf)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is error returned due to corruption, with strict == false.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
}
fr.Close()
}
}
// Set memDB.
db.mem = &memDB{db: db, DB: mdb, ref: 1}
return nil
}
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := newIkey(key, seq, ktSeek)
@@ -614,7 +734,7 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
}
defer m.decref()
mk, mv, me := m.mdb.Find(ikey)
mk, mv, me := m.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
@@ -652,7 +772,7 @@ func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err er
}
defer m.decref()
mk, _, me := m.mdb.Find(ikey)
mk, _, me := m.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
@@ -784,7 +904,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
const prefix = "leveldb."
if !strings.HasPrefix(name, prefix) {
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
return "", ErrNotFound
}
p := name[len(prefix):]
@@ -798,7 +918,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
var rest string
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
err = errors.New("leveldb: GetProperty: invalid property: " + name)
err = ErrNotFound
} else {
value = fmt.Sprint(v.tLen(int(level)))
}
@@ -837,7 +957,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
case p == "aliveiters":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
default:
err = errors.New("leveldb: GetProperty: unknown property: " + name)
err = ErrNotFound
}
return
@@ -900,6 +1020,9 @@ func (db *DB) Close() error {
var err error
select {
case err = <-db.compErrC:
if err == ErrReadOnly {
err = nil
}
default:
}

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go

@@ -11,7 +11,6 @@ import (
"time"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
@@ -62,58 +61,8 @@ func (p *cStatsStaging) stopTimer() {
}
}
type cMem struct {
s *session
level int
rec *sessionRecord
}
func newCMem(s *session) *cMem {
return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
}
func (c *cMem) flush(mem *memdb.DB, level int) error {
s := c.s
// Write memdb to table.
iter := mem.NewIterator(nil)
defer iter.Release()
t, n, err := s.tops.createFrom(iter)
if err != nil {
return err
}
// Pick level.
if level < 0 {
v := s.version()
level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
v.release()
}
c.rec.addTableFile(level, t)
s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
c.level = level
return nil
}
func (c *cMem) reset() {
c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
}
func (c *cMem) commit(journal, seq uint64) error {
c.rec.setJournalNum(journal)
c.rec.setSeqNum(seq)
// Commit changes.
return c.s.commit(c.rec)
}
func (db *DB) compactionError() {
var (
err error
wlocked bool
)
var err error
noerr:
// No error.
for {
@@ -121,7 +70,7 @@ noerr:
case err = <-db.compErrSetC:
switch {
case err == nil:
case errors.IsCorrupted(err):
case err == ErrReadOnly, errors.IsCorrupted(err):
goto hasperr
default:
goto haserr
@@ -139,7 +88,7 @@ haserr:
switch {
case err == nil:
goto noerr
case errors.IsCorrupted(err):
case err == ErrReadOnly, errors.IsCorrupted(err):
goto hasperr
default:
}
@@ -155,9 +104,9 @@ hasperr:
case db.compPerErrC <- err:
case db.writeLockC <- struct{}{}:
// Hold write lock, so that write won't pass-through.
wlocked = true
db.compWriteLocking = true
case _, _ = <-db.closeC:
if wlocked {
if db.compWriteLocking {
// We should release the lock or Close will hang.
<-db.writeLockC
}
@@ -287,21 +236,18 @@ func (db *DB) compactionExitTransact() {
}
func (db *DB) memCompaction() {
mem := db.getFrozenMem()
if mem == nil {
mdb := db.getFrozenMem()
if mdb == nil {
return
}
defer mem.decref()
defer mdb.decref()
c := newCMem(db.s)
stats := new(cStatsStaging)
db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
// Don't compact empty memdb.
if mem.mdb.Len() == 0 {
db.logf("mem@flush skipping")
// drop frozen mem
if mdb.Len() == 0 {
db.logf("memdb@flush skipping")
// drop frozen memdb
db.dropFrozenMem()
return
}
@@ -317,13 +263,20 @@ func (db *DB) memCompaction() {
return
}
db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
var (
rec = &sessionRecord{}
stats = &cStatsStaging{}
flushLevel int
)
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
return c.flush(mem.mdb, -1)
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
stats.stopTimer()
return
}, func() error {
for _, r := range c.rec.addedTables {
db.logf("mem@flush revert @%d", r.num)
for _, r := range rec.addedTables {
db.logf("memdb@flush revert @%d", r.num)
f := db.s.getTableFile(r.num)
if err := f.Remove(); err != nil {
return err
@@ -332,20 +285,23 @@ func (db *DB) memCompaction() {
return nil
})
db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
return c.commit(db.journalFile.Num(), db.frozenSeq)
rec.setJournalNum(db.journalFile.Num())
rec.setSeqNum(db.frozenSeq)
err = db.s.commit(rec)
stats.stopTimer()
return
}, nil)
db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
for _, r := range c.rec.addedTables {
for _, r := range rec.addedTables {
stats.write += r.size
}
db.compStats[c.level].add(stats)
db.compStats[flushLevel].add(stats)
// Drop frozen mem.
// Drop frozen memdb.
db.dropFrozenMem()
// Resume table compaction.
@@ -557,7 +513,7 @@ func (b *tableCompactionBuilder) revert() error {
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
defer c.release()
rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
rec := &sessionRecord{}
rec.addCompPtr(c.level, c.imax)
if !noTrivial && c.trivial() {

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go

@@ -8,6 +8,7 @@ package leveldb
import (
"errors"
"math/rand"
"runtime"
"sync"
"sync/atomic"
@@ -39,11 +40,11 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
ti := v.getIterators(slice, ro)
n := len(ti) + 2
i := make([]iterator.Iterator, 0, n)
emi := em.mdb.NewIterator(slice)
emi := em.NewIterator(slice)
emi.SetReleaser(&memdbReleaser{m: em})
i = append(i, emi)
if fm != nil {
fmi := fm.mdb.NewIterator(slice)
fmi := fm.NewIterator(slice)
fmi.SetReleaser(&memdbReleaser{m: fm})
i = append(i, fmi)
}
@@ -80,6 +81,10 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
return iter
}
func (db *DB) iterSamplingRate() int {
return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
}
type dir int
const (
@@ -98,11 +103,21 @@ type dbIter struct {
seq uint64
strict bool
dir dir
key []byte
value []byte
err error
releaser util.Releaser
smaplingGap int
dir dir
key []byte
value []byte
err error
releaser util.Releaser
}
func (i *dbIter) sampleSeek() {
ikey := i.iter.Key()
i.smaplingGap -= len(ikey) + len(i.iter.Value())
for i.smaplingGap < 0 {
i.smaplingGap += i.db.iterSamplingRate()
i.db.sampleSeek(ikey)
}
}
func (i *dbIter) setErr(err error) {
@@ -175,6 +190,7 @@ func (i *dbIter) Seek(key []byte) bool {
func (i *dbIter) next() bool {
for {
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
switch kt {
case ktDel:
@@ -225,6 +241,7 @@ func (i *dbIter) prev() bool {
if i.iter.Valid() {
for {
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
return true
@@ -266,6 +283,7 @@ func (i *dbIter) Prev() bool {
case dirForward:
for i.iter.Prev() {
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if i.icmp.uCompare(ukey, i.key) < 0 {
goto cont
}

View File

@@ -15,8 +15,8 @@ import (
)
type memDB struct {
db *DB
mdb *memdb.DB
db *DB
*memdb.DB
ref int32
}
@@ -27,12 +27,12 @@ func (m *memDB) incref() {
func (m *memDB) decref() {
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
// Only put back memdb with std capacity.
if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
m.mdb.Reset()
m.db.mpoolPut(m.mdb)
if m.Capacity() == m.db.s.o.GetWriteBuffer() {
m.Reset()
m.db.mpoolPut(m.DB)
}
m.db = nil
m.mdb = nil
m.DB = nil
} else if ref < 0 {
panic("negative memdb ref")
}
@@ -48,6 +48,15 @@ func (db *DB) addSeq(delta uint64) {
atomic.AddUint64(&db.seq, delta)
}
func (db *DB) sampleSeek(ikey iKey) {
v := db.s.version()
if v.sampleSeek(ikey) {
// Trigger table compaction.
db.compSendTrigger(db.tcompCmdC)
}
v.release()
}
func (db *DB) mpoolPut(mem *memdb.DB) {
defer func() {
recover()
@@ -117,7 +126,7 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
}
mem = &memDB{
db: db,
mdb: mdb,
DB: mdb,
ref: 2,
}
db.mem = mem

View File

@@ -405,19 +405,21 @@ func (h *dbHarness) compactRange(min, max string) {
t.Log("DB range compaction done")
}
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
t := h.t
db := h.db
s, err := db.SizeOf([]util.Range{
func (h *dbHarness) sizeOf(start, limit string) uint64 {
sz, err := h.db.SizeOf([]util.Range{
{[]byte(start), []byte(limit)},
})
if err != nil {
t.Error("SizeOf: got error: ", err)
h.t.Error("SizeOf: got error: ", err)
}
if s.Sum() < low || s.Sum() > hi {
t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
shorten(start), shorten(limit), low, hi, s.Sum())
return sz.Sum()
}
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
sz := h.sizeOf(start, limit)
if sz < low || sz > hi {
h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
shorten(start), shorten(limit), low, hi, sz)
}
}
@@ -2443,7 +2445,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
if err != nil {
t.Fatal(err)
}
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
rec := &sessionRecord{}
rec.addTableFile(i, tf)
if err := s.commit(rec); err != nil {
t.Fatal(err)
@@ -2453,7 +2455,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Build grandparent.
v := s.version()
c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
rec := &sessionRecord{}
b := &tableCompactionBuilder{
s: s,
c: c,
@@ -2477,7 +2479,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Build level-1.
v = s.version()
c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
rec = &sessionRecord{}
b = &tableCompactionBuilder{
s: s,
c: c,
@@ -2521,7 +2523,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Compaction with transient error.
v = s.version()
c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
rec = &sessionRecord{}
b = &tableCompactionBuilder{
s: s,
c: c,
@@ -2577,3 +2579,123 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
}
v.release()
}
func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
const (
vSize = 200 * opt.KiB
tSize = 100 * opt.MiB
mIter = 100
n = tSize / vSize
)
h := newDbHarnessWopt(t, &opt.Options{
Compression: opt.NoCompression,
DisableBlockCache: true,
})
defer h.close()
key := func(x int) string {
return fmt.Sprintf("v%06d", x)
}
// Fill.
value := strings.Repeat("x", vSize)
for i := 0; i < n; i++ {
h.put(key(i), value)
}
h.compactMem()
// Delete all.
for i := 0; i < n; i++ {
h.delete(key(i))
}
h.compactMem()
var (
limit = n / limitDiv
startKey = key(0)
limitKey = key(limit)
maxKey = key(n)
slice = &util.Range{Limit: []byte(limitKey)}
initialSize0 = h.sizeOf(startKey, limitKey)
initialSize1 = h.sizeOf(limitKey, maxKey)
)
t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
for r := 0; true; r++ {
if r >= mIter {
t.Fatal("taking too long to compact")
}
// Iterates.
iter := h.db.NewIterator(slice, h.ro)
for iter.Next() {
}
if err := iter.Error(); err != nil {
t.Fatalf("Iter err: %v", err)
}
iter.Release()
// Wait compaction.
h.waitCompaction()
// Check size.
size0 := h.sizeOf(startKey, limitKey)
size1 := h.sizeOf(limitKey, maxKey)
t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
if size0 < initialSize0/10 {
break
}
}
if initialSize1 > 0 {
h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
}
}
func TestDB_IterTriggeredCompaction(t *testing.T) {
testDB_IterTriggeredCompaction(t, 1)
}
func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
testDB_IterTriggeredCompaction(t, 2)
}
func TestDB_ReadOnly(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.put("foo", "v1")
h.put("bar", "v2")
h.compactMem()
h.put("xfoo", "v1")
h.put("xbar", "v2")
t.Log("Trigger read-only")
if err := h.db.SetReadOnly(); err != nil {
h.close()
t.Fatalf("SetReadOnly error: %v", err)
}
h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync)
ro := func(key, value, wantValue string) {
if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
t.Fatalf("unexpected error: %v", err)
}
h.getVal(key, wantValue)
}
ro("foo", "vx", "v1")
h.o.ReadOnly = true
h.reopenDB()
ro("foo", "vx", "v1")
ro("bar", "vx", "v2")
h.assertNumKeys(4)
}

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go

@@ -63,24 +63,24 @@ func (db *DB) rotateMem(n int) (mem *memDB, err error) {
return
}
func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
delayed := false
flush := func() (retry bool) {
v := db.s.version()
defer v.release()
mem = db.getEffectiveMem()
mdb = db.getEffectiveMem()
defer func() {
if retry {
mem.decref()
mem = nil
mdb.decref()
mdb = nil
}
}()
nn = mem.mdb.Free()
mdbFree = mdb.Free()
switch {
case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
delayed = true
time.Sleep(time.Millisecond)
case nn >= n:
case mdbFree >= n:
return false
case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
delayed = true
@@ -90,15 +90,15 @@ func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
}
default:
// Allow memdb to grow if it has no entry.
if mem.mdb.Len() == 0 {
nn = n
if mdb.Len() == 0 {
mdbFree = n
} else {
mem.decref()
mem, err = db.rotateMem(n)
mdb.decref()
mdb, err = db.rotateMem(n)
if err == nil {
nn = mem.mdb.Free()
mdbFree = mdb.Free()
} else {
nn = 0
mdbFree = 0
}
}
return false
@@ -157,18 +157,18 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
}
}()
mem, memFree, err := db.flush(b.size())
mdb, mdbFree, err := db.flush(b.size())
if err != nil {
return
}
defer mem.decref()
defer mdb.decref()
// Calculate maximum size of the batch.
m := 1 << 20
if x := b.size(); x <= 128<<10 {
m = x + (128 << 10)
}
m = minInt(m, memFree)
m = minInt(m, mdbFree)
// Merge with other batch.
drain:
@@ -197,7 +197,7 @@ drain:
select {
case db.journalC <- b:
// Write into memdb
if berr := b.memReplay(mem.mdb); berr != nil {
if berr := b.memReplay(mdb.DB); berr != nil {
panic(berr)
}
case err = <-db.compPerErrC:
@@ -211,7 +211,7 @@ drain:
case err = <-db.journalAckC:
if err != nil {
// Revert memdb if error detected
if berr := b.revertMemReplay(mem.mdb); berr != nil {
if berr := b.revertMemReplay(mdb.DB); berr != nil {
panic(berr)
}
return
@@ -225,7 +225,7 @@ drain:
if err != nil {
return
}
if berr := b.memReplay(mem.mdb); berr != nil {
if berr := b.memReplay(mdb.DB); berr != nil {
panic(berr)
}
}
@@ -233,7 +233,7 @@ drain:
// Set last seq number.
db.addSeq(uint64(b.Len()))
if b.size() >= memFree {
if b.size() >= mdbFree {
db.rotateMem(0)
}
return
@@ -249,8 +249,7 @@ func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
return db.Write(b, wo)
}
// Delete deletes the value for the given key. It returns ErrNotFound if
// the DB does not contain the key.
// Delete deletes the value for the given key.
//
// It is safe to modify the contents of the arguments after Delete returns.
func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
@@ -290,9 +289,9 @@ func (db *DB) CompactRange(r util.Range) error {
}
// Check for overlaps in memdb.
mem := db.getEffectiveMem()
defer mem.decref()
if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
mdb := db.getEffectiveMem()
defer mdb.decref()
if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
// Memdb compaction.
if _, err := db.rotateMem(0); err != nil {
<-db.writeLockC
@@ -309,3 +308,31 @@ func (db *DB) CompactRange(r util.Range) error {
// Table compaction.
return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
}
// SetReadOnly makes DB read-only. It will stay read-only until reopened.
func (db *DB) SetReadOnly() error {
if err := db.ok(); err != nil {
return err
}
// Lock writer.
select {
case db.writeLockC <- struct{}{}:
db.compWriteLocking = true
case err := <-db.compPerErrC:
return err
case _, _ = <-db.closeC:
return ErrClosed
}
// Set compaction read-only.
select {
case db.compErrSetC <- ErrReadOnly:
case perr := <-db.compPerErrC:
return perr
case _, _ = <-db.closeC:
return ErrClosed
}
return nil
}
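Taken together with the ReadOnly option added in opt/options.go below, the new read-only surface composes roughly like this. A usage sketch against the API visible in this diff; the database path is illustrative:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Open read-only from the start: journals are replayed into memory
	// only (recoverJournalRO) and no compaction goroutines are started.
	db, err := leveldb.OpenFile("/tmp/demo-db", &opt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Writes now fail fast instead of mutating state.
	if err := db.Put([]byte("k"), []byte("v"), nil); err == leveldb.ErrReadOnly {
		log.Println("rejected:", err) // leveldb: read-only mode
	}

	// A writable DB can also be flipped at runtime: db.SetReadOnly()
	// takes the write lock and the DB stays read-only until reopened.
}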

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go

@@ -12,6 +12,7 @@ import (
 var (
 	ErrNotFound         = errors.ErrNotFound
+	ErrReadOnly         = errors.New("leveldb: read-only mode")
 	ErrSnapshotReleased = errors.New("leveldb: snapshot released")
 	ErrIterReleased     = errors.New("leveldb: iterator released")
 	ErrClosed           = errors.New("leveldb: closed")

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go

@@ -206,6 +206,7 @@ func (p *DB) randHeight() (h int) {
return
}
// Must hold RW-lock if prev == true, as it use shared prevNode slice.
func (p *DB) findGE(key []byte, prev bool) (int, bool) {
node := 0
h := p.maxHeight - 1
@@ -302,7 +303,7 @@ func (p *DB) Put(key []byte, value []byte) error {
node := len(p.nodeData)
p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
for i, n := range p.prevNode[:h] {
m := n + 4 + i
m := n + nNext + i
p.nodeData = append(p.nodeData, p.nodeData[m])
p.nodeData[m] = node
}
@@ -434,20 +435,22 @@ func (p *DB) Len() int {
// Reset resets the DB to initial empty state. Allows reuse the buffer.
func (p *DB) Reset() {
p.mu.Lock()
p.rnd = rand.New(rand.NewSource(0xdeadbeef))
p.maxHeight = 1
p.n = 0
p.kvSize = 0
p.kvData = p.kvData[:0]
p.nodeData = p.nodeData[:4+tMaxHeight]
p.nodeData = p.nodeData[:nNext+tMaxHeight]
p.nodeData[nKV] = 0
p.nodeData[nKey] = 0
p.nodeData[nVal] = 0
p.nodeData[nHeight] = tMaxHeight
for n := 0; n < tMaxHeight; n++ {
p.nodeData[4+n] = 0
p.nodeData[nNext+n] = 0
p.prevNode[n] = 0
}
p.mu.Unlock()
}
// New creates a new initalized in-memory key/value DB. The capacity

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go

@@ -34,10 +34,11 @@ var (
DefaultCompactionTotalSize = 10 * MiB
DefaultCompactionTotalSizeMultiplier = 10.0
DefaultCompressionType = SnappyCompression
DefaultOpenFilesCacher = LRUCacher
DefaultOpenFilesCacheCapacity = 500
DefaultIteratorSamplingRate = 1 * MiB
DefaultMaxMemCompationLevel = 2
DefaultNumLevel = 7
DefaultOpenFilesCacher = LRUCacher
DefaultOpenFilesCacheCapacity = 500
DefaultWriteBuffer = 4 * MiB
DefaultWriteL0PauseTrigger = 12
DefaultWriteL0SlowdownTrigger = 8
@@ -249,6 +250,11 @@ type Options struct {
// The default value (DefaultCompression) uses snappy compression.
Compression Compression
// DisableBufferPool allows disable use of util.BufferPool functionality.
//
// The default value is false.
DisableBufferPool bool
// DisableBlockCache allows disable use of cache.Cache functionality on
// 'sorted table' block.
//
@@ -288,6 +294,13 @@ type Options struct {
// The default value is nil.
Filter filter.Filter
// IteratorSamplingRate defines approximate gap (in bytes) between read
// sampling of an iterator. The samples will be used to determine when
// compaction should be triggered.
//
// The default is 1MiB.
IteratorSamplingRate int
// MaxMemCompationLevel defines maximum level a newly compacted 'memdb'
// will be pushed into if doesn't creates overlap. This should less than
// NumLevel. Use -1 for level-0.
@@ -313,6 +326,11 @@ type Options struct {
// The default value is 500.
OpenFilesCacheCapacity int
// If true then opens DB in read-only mode.
//
// The default value is false.
ReadOnly bool
// Strict defines the DB strict level.
Strict Strict
@@ -464,6 +482,20 @@ func (o *Options) GetCompression() Compression {
return o.Compression
}
func (o *Options) GetDisableBufferPool() bool {
if o == nil {
return false
}
return o.DisableBufferPool
}
func (o *Options) GetDisableBlockCache() bool {
if o == nil {
return false
}
return o.DisableBlockCache
}
func (o *Options) GetDisableCompactionBackoff() bool {
if o == nil {
return false
@@ -492,6 +524,13 @@ func (o *Options) GetFilter() filter.Filter {
return o.Filter
}
func (o *Options) GetIteratorSamplingRate() int {
if o == nil || o.IteratorSamplingRate <= 0 {
return DefaultIteratorSamplingRate
}
return o.IteratorSamplingRate
}
func (o *Options) GetMaxMemCompationLevel() int {
level := DefaultMaxMemCompationLevel
if o != nil {
@@ -533,6 +572,13 @@ func (o *Options) GetOpenFilesCacheCapacity() int {
return o.OpenFilesCacheCapacity
}
func (o *Options) GetReadOnly() bool {
if o == nil {
return false
}
return o.ReadOnly
}
func (o *Options) GetStrict(strict Strict) bool {
if o == nil || o.Strict == 0 {
return DefaultStrict&strict != 0

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go

@@ -11,10 +11,8 @@ import (
"io"
"os"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
@@ -127,11 +125,16 @@ func (s *session) recover() (err error) {
return
}
defer reader.Close()
strict := s.o.GetStrict(opt.StrictManifest)
jr := journal.NewReader(reader, dropper{s, m}, strict, true)
staging := s.stVersion.newStaging()
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
var (
// Options.
numLevel = s.o.GetNumLevel()
strict = s.o.GetStrict(opt.StrictManifest)
jr = journal.NewReader(reader, dropper{s, m}, strict, true)
rec = &sessionRecord{}
staging = s.stVersion.newStaging()
)
for {
var r io.Reader
r, err = jr.Next()
@@ -143,7 +146,7 @@ func (s *session) recover() (err error) {
return errors.SetFile(err, m)
}
err = rec.decode(r)
err = rec.decode(r, numLevel)
if err == nil {
// save compact pointers
for _, r := range rec.compPtrs {
@@ -206,250 +209,3 @@ func (s *session) commit(r *sessionRecord) (err error) {
return
}
// Pick a compaction based on current state; need external synchronization.
func (s *session) pickCompaction() *compaction {
v := s.version()
var level int
var t0 tFiles
if v.cScore >= 1 {
level = v.cLevel
cptr := s.stCompPtrs[level]
tables := v.tables[level]
for _, t := range tables {
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
t0 = append(t0, t)
break
}
}
if len(t0) == 0 {
t0 = append(t0, tables[0])
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
ts := (*tSet)(p)
level = ts.level
t0 = append(t0, ts.table)
} else {
v.release()
return nil
}
}
return newCompaction(s, v, level, t0)
}
// Create compaction from given level and range; need external synchronization.
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
v := s.version()
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
if len(t0) == 0 {
v.release()
return nil
}
// Avoid compacting too much in one shot in case the range is large.
// But we cannot do this for level-0 since level-0 files can overlap
// and we must not pick one file and drop another older file if the
// two files overlap.
if level > 0 {
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
total := uint64(0)
for i, t := range t0 {
total += t.size
if total >= limit {
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
t0 = t0[:i+1]
break
}
}
}
return newCompaction(s, v, level, t0)
}
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
c := &compaction{
s: s,
v: v,
level: level,
tables: [2]tFiles{t0, nil},
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
tPtrs: make([]int, s.o.GetNumLevel()),
}
c.expand()
c.save()
return c
}
// compaction represent a compaction state.
type compaction struct {
s *session
v *version
level int
tables [2]tFiles
maxGPOverlaps uint64
gp tFiles
gpi int
seenKey bool
gpOverlappedBytes uint64
imin, imax iKey
tPtrs []int
released bool
snapGPI int
snapSeenKey bool
snapGPOverlappedBytes uint64
snapTPtrs []int
}
func (c *compaction) save() {
c.snapGPI = c.gpi
c.snapSeenKey = c.seenKey
c.snapGPOverlappedBytes = c.gpOverlappedBytes
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
}
func (c *compaction) restore() {
c.gpi = c.snapGPI
c.seenKey = c.snapSeenKey
c.gpOverlappedBytes = c.snapGPOverlappedBytes
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
}
func (c *compaction) release() {
if !c.released {
c.released = true
c.v.release()
}
}
// Expand compacted tables; need external synchronization.
func (c *compaction) expand() {
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
t0, t1 := c.tables[0], c.tables[1]
imin, imax := t0.getRange(c.s.icmp)
// We expand t0 here just incase ukey hop across tables.
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
if len(t0) != len(c.tables[0]) {
imin, imax = t0.getRange(c.s.icmp)
}
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
// Get entire range covered by compaction.
amin, amax := append(t0, t1...).getRange(c.s.icmp)
// See if we can grow the number of inputs in "level" without
// changing the number of "level+1" files we pick up.
if len(t1) > 0 {
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
xmin, xmax := exp0.getRange(c.s.icmp)
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
if len(exp1) == len(t1) {
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
imin, imax = xmin, xmax
t0, t1 = exp0, exp1
amin, amax = append(t0, t1...).getRange(c.s.icmp)
}
}
}
// Compute the set of grandparent files that overlap this compaction
// (parent == level+1; grandparent == level+2)
if c.level+2 < c.s.o.GetNumLevel() {
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
}
c.tables[0], c.tables[1] = t0, t1
c.imin, c.imax = imin, imax
}
// Check whether compaction is trivial.
func (c *compaction) trivial() bool {
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
}
func (c *compaction) baseLevelForKey(ukey []byte) bool {
for level, tables := range c.v.tables[c.level+2:] {
for c.tPtrs[level] < len(tables) {
t := tables[c.tPtrs[level]]
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
// We've advanced far enough.
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
// Key falls in this file's range, so definitely not base level.
return false
}
break
}
c.tPtrs[level]++
}
}
return true
}
func (c *compaction) shouldStopBefore(ikey iKey) bool {
for ; c.gpi < len(c.gp); c.gpi++ {
gp := c.gp[c.gpi]
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
break
}
if c.seenKey {
c.gpOverlappedBytes += gp.size
}
}
c.seenKey = true
if c.gpOverlappedBytes > c.maxGPOverlaps {
// Too much overlap for current output; start new output.
c.gpOverlappedBytes = 0
return true
}
return false
}
// Creates an iterator.
func (c *compaction) newIterator() iterator.Iterator {
// Creates iterator slice.
icap := len(c.tables)
if c.level == 0 {
// Special case for level-0
icap = len(c.tables[0]) + 1
}
its := make([]iterator.Iterator, 0, icap)
// Options.
ro := &opt.ReadOptions{
DontFillCache: true,
Strict: opt.StrictOverride,
}
strict := c.s.o.GetStrict(opt.StrictCompaction)
if strict {
ro.Strict |= opt.StrictReader
}
for i, tables := range c.tables {
if len(tables) == 0 {
continue
}
// Level-0 is not sorted and may overlaps each other.
if c.level+i == 0 {
for _, t := range tables {
its = append(its, c.s.tops.newIterator(t, nil, ro))
}
} else {
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
its = append(its, it)
}
}
return iterator.NewMergedIterator(its, c.s.icmp, strict)
}

Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go (new file)

@@ -0,0 +1,287 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
func (s *session) pickMemdbLevel(umin, umax []byte) int {
v := s.version()
defer v.release()
return v.pickMemdbLevel(umin, umax)
}
func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
// Create sorted table.
iter := mdb.NewIterator(nil)
defer iter.Release()
t, n, err := s.tops.createFrom(iter)
if err != nil {
return level, err
}
// Pick level and add to record.
if level < 0 {
level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
}
rec.addTableFile(level, t)
s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
return level, nil
}
// Pick a compaction based on current state; need external synchronization.
func (s *session) pickCompaction() *compaction {
v := s.version()
var level int
var t0 tFiles
if v.cScore >= 1 {
level = v.cLevel
cptr := s.stCompPtrs[level]
tables := v.tables[level]
for _, t := range tables {
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
t0 = append(t0, t)
break
}
}
if len(t0) == 0 {
t0 = append(t0, tables[0])
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
ts := (*tSet)(p)
level = ts.level
t0 = append(t0, ts.table)
} else {
v.release()
return nil
}
}
return newCompaction(s, v, level, t0)
}
// Create compaction from given level and range; need external synchronization.
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
v := s.version()
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
if len(t0) == 0 {
v.release()
return nil
}
// Avoid compacting too much in one shot in case the range is large.
// But we cannot do this for level-0 since level-0 files can overlap
// and we must not pick one file and drop another older file if the
// two files overlap.
if level > 0 {
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
total := uint64(0)
for i, t := range t0 {
total += t.size
if total >= limit {
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
t0 = t0[:i+1]
break
}
}
}
return newCompaction(s, v, level, t0)
}
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
c := &compaction{
s: s,
v: v,
level: level,
tables: [2]tFiles{t0, nil},
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
tPtrs: make([]int, s.o.GetNumLevel()),
}
c.expand()
c.save()
return c
}
// compaction represent a compaction state.
type compaction struct {
s *session
v *version
level int
tables [2]tFiles
	maxGPOverlaps uint64

	gp                tFiles
	gpi               int
	seenKey           bool
	gpOverlappedBytes uint64
	imin, imax        iKey
	tPtrs             []int
	released          bool

	snapGPI               int
	snapSeenKey           bool
	snapGPOverlappedBytes uint64
	snapTPtrs             []int
}

func (c *compaction) save() {
	c.snapGPI = c.gpi
	c.snapSeenKey = c.seenKey
	c.snapGPOverlappedBytes = c.gpOverlappedBytes
	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
}

func (c *compaction) restore() {
	c.gpi = c.snapGPI
	c.seenKey = c.snapSeenKey
	c.gpOverlappedBytes = c.snapGPOverlappedBytes
	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
}

func (c *compaction) release() {
	if !c.released {
		c.released = true
		c.v.release()
	}
}

// Expand compacted tables; needs external synchronization.
func (c *compaction) expand() {
	limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
	vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]

	t0, t1 := c.tables[0], c.tables[1]
	imin, imax := t0.getRange(c.s.icmp)
	// We expand t0 here in case a ukey hops across tables.
	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
	if len(t0) != len(c.tables[0]) {
		imin, imax = t0.getRange(c.s.icmp)
	}
	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
	// Get the entire range covered by this compaction.
	amin, amax := append(t0, t1...).getRange(c.s.icmp)

	// See if we can grow the number of inputs in "level" without
	// changing the number of "level+1" files we pick up.
	if len(t1) > 0 {
		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
			xmin, xmax := exp0.getRange(c.s.icmp)
			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
			if len(exp1) == len(t1) {
				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
					c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
				imin, imax = xmin, xmax
				t0, t1 = exp0, exp1
				amin, amax = append(t0, t1...).getRange(c.s.icmp)
			}
		}
	}

	// Compute the set of grandparent files that overlap this compaction
	// (parent == level+1; grandparent == level+2).
	if c.level+2 < c.s.o.GetNumLevel() {
		c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
	}

	c.tables[0], c.tables[1] = t0, t1
	c.imin, c.imax = imin, imax
}

// Check whether the compaction is trivial.
func (c *compaction) trivial() bool {
	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
}

func (c *compaction) baseLevelForKey(ukey []byte) bool {
	for level, tables := range c.v.tables[c.level+2:] {
		for c.tPtrs[level] < len(tables) {
			t := tables[c.tPtrs[level]]
			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
				// We've advanced far enough.
				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
					// Key falls in this file's range, so it is definitely not base level.
					return false
				}
				break
			}
			c.tPtrs[level]++
		}
	}
	return true
}

func (c *compaction) shouldStopBefore(ikey iKey) bool {
	for ; c.gpi < len(c.gp); c.gpi++ {
		gp := c.gp[c.gpi]
		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
			break
		}
		if c.seenKey {
			c.gpOverlappedBytes += gp.size
		}
	}
	c.seenKey = true

	if c.gpOverlappedBytes > c.maxGPOverlaps {
		// Too much overlap for the current output; start a new output.
		c.gpOverlappedBytes = 0
		return true
	}
	return false
}

// Creates an iterator over the compaction's tables.
func (c *compaction) newIterator() iterator.Iterator {
	// Create the iterator slice.
	icap := len(c.tables)
	if c.level == 0 {
		// Special case for level-0.
		icap = len(c.tables[0]) + 1
	}
	its := make([]iterator.Iterator, 0, icap)

	// Options.
	ro := &opt.ReadOptions{
		DontFillCache: true,
		Strict:        opt.StrictOverride,
	}
	strict := c.s.o.GetStrict(opt.StrictCompaction)
	if strict {
		ro.Strict |= opt.StrictReader
	}

	for i, tables := range c.tables {
		if len(tables) == 0 {
			continue
		}

		// Level-0 tables are not sorted and may overlap each other.
		if c.level+i == 0 {
			for _, t := range tables {
				its = append(its, c.s.tops.newIterator(t, nil, ro))
			}
		} else {
			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
			its = append(its, it)
		}
	}

	return iterator.NewMergedIterator(its, c.s.icmp, strict)
}

View File

@@ -52,8 +52,6 @@ type dtRecord struct {
}
type sessionRecord struct {
-numLevel int
hasRec int
comparer string
journalNum uint64
@@ -230,7 +228,7 @@ func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
return x
}
-func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
+func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
if p.err != nil {
return 0
}
@@ -238,14 +236,14 @@ func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
if p.err != nil {
return 0
}
-if x >= uint64(p.numLevel) {
+if x >= uint64(numLevel) {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
return 0
}
return int(x)
}
-func (p *sessionRecord) decode(r io.Reader) error {
+func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
br, ok := r.(byteReader)
if !ok {
br = bufio.NewReader(r)
@@ -286,13 +284,13 @@ func (p *sessionRecord) decode(r io.Reader) error {
p.setSeqNum(x)
}
case recCompPtr:
level := p.readLevel("comp-ptr.level", br)
level := p.readLevel("comp-ptr.level", br, numLevel)
ikey := p.readBytes("comp-ptr.ikey", br)
if p.err == nil {
p.addCompPtr(level, iKey(ikey))
}
case recAddTable:
level := p.readLevel("add-table.level", br)
level := p.readLevel("add-table.level", br, numLevel)
num := p.readUvarint("add-table.num", br)
size := p.readUvarint("add-table.size", br)
imin := p.readBytes("add-table.imin", br)
@@ -301,7 +299,7 @@ func (p *sessionRecord) decode(r io.Reader) error {
p.addTable(level, num, size, imin, imax)
}
case recDelTable:
level := p.readLevel("del-table.level", br)
level := p.readLevel("del-table.level", br, numLevel)
num := p.readUvarint("del-table.num", br)
if p.err == nil {
p.delTable(level, num)
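
The net effect of these hunks is that the level count is no longer stored on sessionRecord but supplied by the caller at decode time. A hedged sketch of the bounds check it parameterizes, using illustrative names rather than the package's own:

package main

import (
	"errors"
	"fmt"
)

// checkLevel mirrors the readLevel validation above, with numLevel
// passed in instead of read from a struct field.
func checkLevel(x uint64, numLevel int) (int, error) {
	if x >= uint64(numLevel) {
		return 0, errors.New("manifest corrupted: invalid level number")
	}
	return int(x), nil
}

func main() {
	if _, err := checkLevel(9, 7); err != nil {
		fmt.Println(err) // level 9 is out of range in a 7-level database
	}
}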

View File

@@ -19,8 +19,8 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
if err != nil {
return
}
v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
err = v.decode(b)
v2 := &sessionRecord{}
err = v.decode(b, opt.DefaultNumLevel)
if err != nil {
return
}
@@ -34,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
func TestSessionRecord_EncodeDecode(t *testing.T) {
big := uint64(1) << 50
v := &sessionRecord{numLevel: opt.DefaultNumLevel}
v := &sessionRecord{}
i := uint64(0)
test := func() {
res, err := decodeEncode(v)

View File

@@ -182,7 +182,7 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
defer v.release()
}
if rec == nil {
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
rec = &sessionRecord{}
}
s.fillRecord(rec, true)
v.fillRecord(rec)

View File

@@ -42,6 +42,8 @@ type tsOp uint
const (
tsOpOpen tsOp = iota
tsOpCreate
+tsOpReplace
+tsOpRemove
tsOpRead
tsOpReadAt
tsOpWrite
@@ -241,6 +243,10 @@ func (tf tsFile) Replace(newfile storage.File) (err error) {
if err != nil {
return
}
+if tf.shouldErr(tsOpReplace) {
+err = errors.New("leveldb.testStorage: emulated create error")
+return
+}
err = tf.File.Replace(newfile.(tsFile).File)
if err != nil {
ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
@@ -258,6 +264,10 @@ func (tf tsFile) Remove() (err error) {
if err != nil {
return
}
+if tf.shouldErr(tsOpRemove) {
+err = errors.New("leveldb.testStorage: emulated create error")
+return
+}
err = tf.File.Remove()
if err != nil {
ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)

View File

@@ -441,22 +441,26 @@ func newTableOps(s *session) *tOps {
var (
cacher cache.Cacher
bcache *cache.Cache
+bpool *util.BufferPool
)
if s.o.GetOpenFilesCacheCapacity() > 0 {
cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
}
-if !s.o.DisableBlockCache {
+if !s.o.GetDisableBlockCache() {
var bcacher cache.Cacher
if s.o.GetBlockCacheCapacity() > 0 {
bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
}
bcache = cache.NewCache(bcacher)
}
+if !s.o.GetDisableBufferPool() {
+bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+}
return &tOps{
s: s,
cache: cache.NewCache(cacher),
bcache: bcache,
-bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
+bpool: bpool,
}
}
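
With this hunk both the block cache and the buffer pool become optional, and a nil bpool is passed through when the pool is disabled. A hedged usage sketch, assuming the DisableBufferPool option field matches the GetDisableBufferPool getter above:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Open a database with the buffer pool turned off; newTableOps
	// above then leaves bpool nil instead of allocating one.
	db, err := leveldb.OpenFile("demo.db", &opt.Options{
		DisableBufferPool: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}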

View File

@@ -14,7 +14,7 @@ import (
"strings"
"sync"
"github.com/syndtr/gosnappy/snappy"
"github.com/google/go-snappy/snappy"
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/comparer"

View File

@@ -12,7 +12,7 @@ import (
"fmt"
"io"
"github.com/syndtr/gosnappy/snappy"
"github.com/google/go-snappy/snappy"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/filter"

View File

@@ -136,9 +136,8 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
if !tseek {
if tset == nil {
tset = &tSet{level, t}
} else if tset.table.consumeSeek() <= 0 {
} else {
tseek = true
-tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
}
@@ -203,6 +202,28 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
return true
})
if tseek && tset.table.consumeSeek() <= 0 {
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
return
}
+func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+var tset *tSet
+v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+if tset == nil {
+tset = &tSet{level, t}
+return true
+} else {
+if tset.table.consumeSeek() <= 0 {
+tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+}
+return false
+}
+}, nil)
+return
+}
@@ -279,7 +300,7 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
return
}
-func (v *version) pickLevel(umin, umax []byte) (level int) {
+func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
var overlaps tFiles
maxLevel := v.s.o.GetMaxMemCompationLevel()
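
Both version.go hunks hinge on the per-table seek budget: each table allows a number of seeks, and once consumeSeek drives the budget to zero the table becomes a candidate for seek compaction. A toy sketch of that counter discipline, with illustrative types (the real code keeps the counter on the table file and uses the same atomic decrement):

package main

import (
	"fmt"
	"sync/atomic"
)

type table struct{ seeksLeft int32 }

// consumeSeek spends one seek from the budget and returns the remainder.
func (t *table) consumeSeek() int32 {
	return atomic.AddInt32(&t.seeksLeft, -1)
}

func main() {
	t := &table{seeksLeft: 2}
	for i := 1; i <= 3; i++ {
		if t.consumeSeek() <= 0 {
			fmt.Println("schedule seek compaction after seek", i)
			break
		}
	}
}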

NICKS
View File

@@ -25,6 +25,7 @@ dzarda <dzardacz@gmail.com>
facastagnini <federico.castagnini@gmail.com>
filoozoom <philippe@schommers.be>
frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
fti7 <frank@isemann.name>
gillisig <gilli@vx.is>
jarlebring <jarlebring@gmail.com>
jedie <github.com@jensdiemer.de> <git@jensdiemer.de>
@@ -55,4 +56,5 @@ tnn2 <tnn@nygren.pp.se>
tojrobinson <tully@tojr.org>
uok <ueomkail@gmail.com> <uok@users.noreply.github.com>
veeti <veeti.paananen@rojekti.fi>
+wsgcsysadmin <e.meitner@willystreet.coo>
zukoo <fxgsell@gmail.com>

View File

@@ -36,8 +36,7 @@ const (
func Assets() map[string][]byte {
var assets = make(map[string][]byte, {{.Assets | len}})
{{range $asset := .Assets}}
assets["{{$asset.Name}}"], _ = base64.StdEncoding.DecodeString("{{$asset.Data}}")
{{end}}
assets["{{$asset.Name}}"], _ = base64.StdEncoding.DecodeString("{{$asset.Data}}"){{end}}
return assets
}
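
The template body between {{range}} and {{end}} is emitted once per asset, so the old layout, with {{end}} on its own line, produced an extra newline for every asset; inlining {{end}} drops it. A small runnable illustration of the fixed form (the template text here is simplified):

package main

import (
	"os"
	"text/template"
)

func main() {
	const src = "{{range .}}\nassets[{{printf \"%q\" .}}] = ...{{end}}\n"
	t := template.Must(template.New("assets").Parse(src))
	// Prints one line per asset with no blank lines in between.
	if err := t.Execute(os.Stdout, []string{"index.html", "app.js"}); err != nil {
		panic(err)
	}
}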

View File

@@ -49,7 +49,6 @@ var (
guiErrors = []guiError{}
guiErrorsMut = sync.NewMutex()
startTime = time.Now()
-eventSub *events.BufferedSubscription
)
type apiSvc struct {
@@ -60,14 +59,16 @@ type apiSvc struct {
fss *folderSummarySvc
stop chan struct{}
systemConfigMut sync.Mutex
+eventSub *events.BufferedSubscription
}
-func newAPISvc(cfg config.GUIConfiguration, assetDir string, m *model.Model) (*apiSvc, error) {
+func newAPISvc(cfg config.GUIConfiguration, assetDir string, m *model.Model, eventSub *events.BufferedSubscription) (*apiSvc, error) {
svc := &apiSvc{
cfg: cfg,
assetDir: assetDir,
model: m,
systemConfigMut: sync.NewMutex(),
+eventSub: eventSub,
}
var err error
@@ -125,9 +126,6 @@ func (s *apiSvc) Serve() {
s.stop = make(chan struct{})
l.AddHandler(logger.LevelWarn, s.showGuiError)
-sub := events.Default.Subscribe(events.AllEvents)
-eventSub = events.NewBufferedSubscription(sub, 1000)
-defer events.Default.Unsubscribe(sub)
// The GET handlers
getRestMux := http.NewServeMux()
@@ -574,7 +572,12 @@ func (s *apiSvc) postSystemReset(w http.ResponseWriter, r *http.Request) {
folder := qs.Get("folder")
var err error
if len(folder) == 0 {
-err = resetDB()
+for folder := range cfg.Folders() {
+err = s.model.ResetFolder(folder)
+if err != nil {
+break
+}
+}
} else {
err = s.model.ResetFolder(folder)
}
@@ -585,7 +588,7 @@ func (s *apiSvc) postSystemReset(w http.ResponseWriter, r *http.Request) {
if len(folder) == 0 {
s.flushResponse(`{"ok": "resetting database"}`, w)
} else {
s.flushResponse(`{"ok": "resetting folder " + folder}`, w)
s.flushResponse(`{"ok": "resetting folder `+folder+`"}`, w)
}
go restart()
}
@@ -743,7 +746,7 @@ func (s *apiSvc) getEvents(w http.ResponseWriter, r *http.Request) {
f := w.(http.Flusher)
f.Flush()
-evs := eventSub.Since(since, nil)
+evs := s.eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
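
For reference, getEvents backs the REST events endpoint, and the since/limit handling above trims the buffered events down to the newest ones requested. A hedged sketch of polling it from outside (address, port and API key are illustrative):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// since=0 asks for everything still buffered; limit=10 keeps only
	// the ten newest, matching the slicing in the handler above.
	req, err := http.NewRequest("GET", "http://localhost:8384/rest/events?since=0&limit=10", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-API-Key", "your-api-key") // hypothetical key
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}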

View File

@@ -31,6 +31,7 @@ const (
locCsrfTokens = "csrfTokens"
locPanicLog = "panicLog"
locAuditLog = "auditLog"
+locGUIAssets = "GUIAssets"
locDefFolder = "defFolder"
)
@@ -52,6 +53,7 @@ var locations = map[locationEnum]string{
locCsrfTokens: "${config}/csrftokens.txt",
locPanicLog: "${config}/panic-${timestamp}.log",
locAuditLog: "${config}/audit-${timestamp}.log",
+locGUIAssets: "${config}/gui",
locDefFolder: "${home}/Sync",
}

View File

@@ -252,6 +252,10 @@ func main() {
l.Fatalln(err)
}
+if guiAssets == "" {
+guiAssets = locations[locGUIAssets]
+}
if runtime.GOOS == "windows" {
if logFile == "" {
// Use the default log file location
@@ -443,12 +447,13 @@ func syncthingMain() {
mainSvc.Add(newVerboseSvc())
}
+// Event subscription for the API; must start early to catch the early events.
+apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
events.Default.Log(events.Starting, map[string]string{"home": baseDirs["config"]})
// Ensure that we have a certificate and key.
cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
if err != nil {
@@ -468,6 +473,13 @@ func syncthingMain() {
l.Infoln(LongVersion)
l.Infoln("My ID:", myID)
+// Emit the Starting event, now that we know who we are.
+events.Default.Log(events.Starting, map[string]string{
+"home": baseDirs["config"],
+"myID": myID.String(),
+})
// Prepare to be able to save configuration
cfgFile := locations[locConfigFile]
@@ -589,7 +601,6 @@ func syncthingMain() {
m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
cfg.Subscribe(m)
-mainSvc.Add(m)
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
it, err := strconv.Atoi(t)
@@ -622,9 +633,11 @@ func syncthingMain() {
}
}
+mainSvc.Add(m)
// GUI
-setupGUI(mainSvc, cfg, m)
+setupGUI(mainSvc, cfg, m, apiSub)
// The default port we announce, possibly modified by setupUPnP next.
@@ -700,7 +713,9 @@ func syncthingMain() {
}
}
-events.Default.Log(events.StartupComplete, nil)
+events.Default.Log(events.StartupComplete, map[string]string{
+"myID": myID.String(),
+})
go generatePingEvents()
cleanConfigDirectory()
@@ -764,7 +779,7 @@ func startAuditing(mainSvc *suture.Supervisor) {
l.Infoln("Audit log in", auditFile)
}
-func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model) {
+func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub *events.BufferedSubscription) {
opts := cfg.Options()
guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey)
@@ -793,7 +808,7 @@ func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model) {
urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
l.Infoln("Starting web GUI on", urlShow)
-api, err := newAPISvc(guiCfg, guiAssets, m)
+api, err := newAPISvc(guiCfg, guiAssets, m, apiSub)
if err != nil {
l.Fatalln("Cannot start GUI:", err)
}
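
Since StartupComplete now carries the device ID, an in-process consumer can pick it up from the event data. A hedged fragment, assuming the events package's Subscription.Poll helper and run inside the same codebase:

// Subscribe before startup finishes so the event is not missed.
sub := events.Default.Subscribe(events.StartupComplete)
defer events.Default.Unsubscribe(sub)

if ev, err := sub.Poll(time.Minute); err == nil {
	data := ev.Data.(map[string]string)
	l.Infoln("Started as device", data["myID"])
}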

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Няма логика на зададен отрицателен брой дни.",
"A new major version may not be compatible with previous versions.": "Нова основна версия, която може да не е съвмеситима с предишни версии.",
"API Key": "API Ключ",
"About": "За Програмата",
@@ -20,7 +20,7 @@
"Bugs": "Бъгове",
"CPU Utilization": "Натоварване на Процесора",
"Changelog": "Сипъск с промени",
"Clean out after": "Clean out after",
"Clean out after": "Изчисти след",
"Close": "Затвори",
"Command": "Команда",
"Comment, when used at the start of a line": "Коментар, използван в началото на реда",
@@ -30,6 +30,7 @@
"Copied from original": "Копиран от оригинала",
"Copyright © 2015 the following Contributors:": "Правата запазени © 2015 Сътрудници:",
"Delete": "Изтрий",
"Deleted": "Deleted",
"Device ID": "Идентификатор на устройство",
"Device Identification": "Идентификация на устройство",
"Device Name": "Име на устройство",
@@ -52,7 +53,7 @@
"File Pull Order": "По ред на дърпане",
"File Versioning": "Файлови Версии",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Битовете за права за достъп са игнорирани, когато се проверява за промени. Използвай с файлови системи тип FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Файловете биват преместени в .stversions папка, когато са заменен или изтрити от Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Когато syncthing замени или изтрие файл той се премества в .stversions и преименува с дабавени дата и час.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Файловете са защитени от промени направени на други устройства, но промени направени на това устройство ще бъдат синхронизирани с другите устройства.",
"Folder ID": "Идентификатор на папка",
@@ -80,6 +81,7 @@
"Later": "По-късно",
"Local Discovery": "Локално Откриване",
"Local State": "Локално състояние",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Основно Обновяване",
"Maximum Age": "Максимална Възраст",
"Metadata Only": "Само мета информация",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Използва се следния интервал: за първия час се пази версия всеки 30 секунди, за първия ден се пази версия всеки час, за първите 30 дена се пази версия всеки ден, до максимума се пази една версия всяка седмица.",
"The maximum age must be a number and cannot be blank.": "Максималната възраст трябва да е число и не може д ае празна.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максималното време да се пазят весрсии (в дни, сложи 0, за да пазиш версии завинаги).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Броят дни трябва да бъде число и неможе да бъде празно.",
"The number of days to keep files in the trash can. Zero means forever.": "Броят дни за запазване на файловете в кошчето. Нула значи завинаги.",
"The number of old versions to keep, per file.": "Броят стари версии, които да бъдат пазени за всеки файл.",
"The number of versions must be a number and cannot be blank.": "Броят версии трябва да бъде число и не може да бъде празно.",
"The path cannot be blank.": "Пътят неможе да бъде празен.",
"The rescan interval must be a non-negative number of seconds.": "Интервала на сканиране трябва да бъде не отрицателно число в секунди.",
"This is a major version upgrade.": "Това е нова основна версия.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Версии на файлове в кошчето",
"Unknown": "Неясен",
"Unshared": "Споделянето прекратено",
"Unused": "Неизползван",
"Up to Date": "Актуален",
"Updated": "Updated",
"Upgrade": "Обнови",
"Upgrade To {%version%}": "Обновен До {{version}}",
"Upgrading": "Обновяване",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copiat de l'original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 els següents col·laboradors:",
"Delete": "Esborrar",
"Deleted": "Deleted",
"Device ID": "ID del dispositiu",
"Device Identification": "Identificació del dispositiu",
"Device Name": "Nom del dispositiu",
@@ -80,6 +81,7 @@
"Later": "Després",
"Local Discovery": "Descobriment Local",
"Local State": "Estat local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Actualització major",
"Maximum Age": "Antiguitat Màxima",
"Metadata Only": "Només metadades",
@@ -175,6 +177,7 @@
"Unshared": "No compartit",
"Unused": "No usat",
"Up to Date": "Actualitzat",
"Updated": "Updated",
"Upgrade": "Actualització",
"Upgrade To {%version%}": "Actualitzar a {{version}}",
"Upgrading": "Actualitzant",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copiat de l'original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 els següents Col·laboradors:",
"Delete": "Esborrar",
"Deleted": "Deleted",
"Device ID": "ID del dispositiu",
"Device Identification": "Identificació del dispositiu",
"Device Name": "Nom del dispositiu",
@@ -80,6 +81,7 @@
"Later": "Més tard",
"Local Discovery": "Descobriment local",
"Local State": "Estat local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Actualització important",
"Maximum Age": "Edat màxima",
"Metadata Only": "Sols metadades",
@@ -175,6 +177,7 @@
"Unshared": "No compartit",
"Unused": "No utilitzat",
"Up to Date": "Actualitzat",
"Updated": "Updated",
"Upgrade": "Actualitzar",
"Upgrade To {%version%}": "Actualitzar a {{version}}",
"Upgrading": "Actualitzant",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Zkopírováno z originálu",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 následující přispěvatelé:",
"Delete": "Smazat",
"Deleted": "Deleted",
"Device ID": "ID přístroje",
"Device Identification": "Identifikace přístroje",
"Device Name": "Jméno přístroje",
@@ -80,6 +81,7 @@
"Later": "Později",
"Local Discovery": "Místní oznamování",
"Local State": "Místní status",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Důležitá aktualizace",
"Maximum Age": "Maximální časový limit",
"Metadata Only": "Pouze metadata",
@@ -175,6 +177,7 @@
"Unshared": "Nesdílený",
"Unused": "Nepoužitý",
"Up to Date": "Aktuální",
"Updated": "Updated",
"Upgrade": "Aktualizace",
"Upgrade To {%version%}": "Aktualizovat na {{version}}",
"Upgrading": "Aktualizuji",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Eine negative Anzahl von Tagen ergibt keinen Sinn.",
"A new major version may not be compatible with previous versions.": "Die neue Hauptversion ist evtl. nicht mit vorherigen Versionen kompatibel.",
"API Key": "API-Schlüssel",
"About": "Über Syncthing",
@@ -12,7 +12,7 @@
"Addresses": "Adressen",
"All Data": "Alle Daten",
"Allow Anonymous Usage Reporting?": "Übertragung von anonymen Nutzungsberichten erlauben?",
"Alphabetic": "alphabetisch",
"Alphabetic": "Alphabetisch",
"An external command handles the versioning. It has to remove the file from the synced folder.": "Ein externer Programmaufruf handhabt die Versionierung. Es muss die Datei aus dem zu synchronisierendem Ordner entfernen.",
"Anonymous Usage Reporting": "Anonymer Nutzungsbericht",
"Any devices configured on an introducer device will be added to this device as well.": "Alle Geräte, die beim Verteiler eingetragen sind, werden auch bei diesem Gerät eingetragen",
@@ -20,7 +20,7 @@
"Bugs": "Fehler",
"CPU Utilization": "Prozessorauslastung",
"Changelog": "Änderungsprotokoll",
"Clean out after": "Clean out after",
"Clean out after": "Aufräumen nach",
"Close": "Schließen",
"Command": "Kommando",
"Comment, when used at the start of a line": "Kommentar, wenn am Anfang der Zeile benutzt.",
@@ -30,6 +30,7 @@
"Copied from original": "Vom Original kopiert",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 die folgenden Unterstützer:",
"Delete": "Löschen",
"Deleted": "Deleted",
"Device ID": "Geräte ID",
"Device Identification": "Gerät Identifikation",
"Device Name": "Gerätename",
@@ -52,7 +53,7 @@
"File Pull Order": "Dateiübertragungsreihenfolge",
"File Versioning": "Dateiversionierung",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Dateizugriffsrechte beim Suchen nach Veränderungen ignorieren. Bei FAT-Dateisystemen zu verwenden.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, als datierte Versionen in einen Ordner namens .stversions verschoben.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Dateien werden, bevor Syncthing sie löscht oder ersetzt, als datierte Versionen in einen Ordner namens .stversions verschoben.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Dateien sind vor Veränderung durch andere Geräte geschützt. Auf diesem Gerät durchgeführte Veränderungen werden aber auf den Rest des Verbunds übertragen.",
"Folder ID": "Verzeichnis ID",
@@ -63,23 +64,24 @@
"GUI Authentication User": "Nutzername für Zugang zur Benutzeroberfläche",
"GUI Listen Addresses": "Adresse(n) für die Benutzeroberfläche",
"Generate": "Generieren",
"Global Discovery": "verfügbare Indexserver",
"Global Discovery": "Globale Gerätesuche",
"Global Discovery Server": "Globale(r) Indexserver",
"Global State": "Globaler Status",
"Help": "Hilfe",
"Ignore": "Ignorieren",
"Ignore Patterns": "Ignoriermuster",
"Ignore Permissions": "Berechtigungen ignorieren",
"Incoming Rate Limit (KiB/s)": "Limit Datenrate (eingehend) (KiB/s)",
"Incoming Rate Limit (KiB/s)": "Limit Datenrate (eingehend) (KB/s)",
"Introducer": "Verteilergerät",
"Inversion of the given condition (i.e. do not exclude)": "Umkehrung der angegebenen Bedingung (z.B. schließe nicht aus)",
"Keep Versions": "Versionen erhalten",
"Largest First": "Größtes zuerst",
"Last File Received": "Letzte Datei empfangen",
"Largest First": "Größte zuerst",
"Last File Received": "Letzte empfangene Datei ",
"Last seen": "Zuletzt online",
"Later": "Später",
"Local Discovery": "Client lokal freigeben",
"Local Discovery": "Lokale Gerätesuche",
"Local State": "Lokaler Status",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Hauptversionsupgrade",
"Maximum Age": "Höchstalter",
"Metadata Only": "Nur Metadaten",
@@ -88,16 +90,16 @@
"Never": "Nie",
"New Device": "Neues Gerät",
"New Folder": "Neues Verzeichnis",
"Newest First": "Neuestes zuerst",
"Newest First": "Neueste zuerst",
"No": "Nein",
"No File Versioning": "Keine Dateiversionierung",
"Notice": "Hinweis",
"OK": "OK",
"Off": "Aus",
"Oldest First": "Ältestes zuerst",
"Oldest First": "Älteste zuerst",
"Out Of Sync": "Nicht synchronisiert",
"Out of Sync Items": "Nicht synchronisierte Objekte",
"Outgoing Rate Limit (KiB/s)": "Ausgehendes Datenratelimit (KiB/s)",
"Outgoing Rate Limit (KiB/s)": "Ausgehendes Datenratelimit (KB/s)",
"Override Changes": "Änderungen überschreiben",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pfad zum Verzeichnis auf dem lokalen Rechner. Wird erzeugt, wenn es nicht existiert. Das Tilden-Zeichen (~) kann als Abkürzung benutzt werden für",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Pfad in dem die Versionen gespeichert werden sollen (ohne Angabe wird das Verzeichnis .stversions im Verzeichnis verwendet).",
@@ -163,8 +165,8 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Es wird in folgenden Abständen versioniert: in der ersten Stunde wird alle 30 Sekunden eine Version behalten, am ersten Tag eine jede Stunde, in den ersten 30 Tagen eine jeden Tag, danach wird bis zum Höchstalter eine Version pro Woche beibehalten.",
"The maximum age must be a number and cannot be blank.": "Das Höchstalter muss angegeben werden und eine Zahl sein.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Die längste Zeit, die alte Versionen vorgehalten werden (in Tagen, 0 bedeutet, alte Versionen für immer zu behalten).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Zahl und darf nicht leer sein.",
"The number of days to keep files in the trash can. Zero means forever.": "Dauer in Tagen für welche die Dateien aufgehoben werden sollen. 0 bedeutet für immer.",
"The number of old versions to keep, per file.": "Anzahl der alten Versionen, die von jeder Datei gespeichert werden sollen.",
"The number of versions must be a number and cannot be blank.": "Die Anzahl von Versionen muss eine Zahl und darf nicht leer sein.",
"The path cannot be blank.": "Der Pfad darf nicht leer sein.",
@@ -175,6 +177,7 @@
"Unshared": "Ungeteilt",
"Unused": "Ungenutzt",
"Up to Date": "Aktuell",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Update auf {{version}}",
"Upgrading": "Wird aktualisiert",

View File

@@ -1,9 +1,9 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Αριθμός ημερών με αρνητικό πρόσημο, δε βγάζει νόημα.",
"A new major version may not be compatible with previous versions.": "Μια νέα σημαντική έκδοση μπορεί να μην είναι συμβατή με τις προηγούμενες εκδόσεις.",
"API Key": "Κλειδί API",
"About": "Σχετικά με το Syncthing",
"Actions": "Actions",
"Actions": "Ενέργειες",
"Add": "Προσθήκη",
"Add Device": "Προσθήκη συσκευής",
"Add Folder": "Προσθήκη φακέλου",
@@ -30,6 +30,7 @@
"Copied from original": "Έχει αντιγραφεί από το πρωτότυπο",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 από τους παρακάτω συνεισφορείς:",
"Delete": "Διαγραφή",
"Deleted": "Deleted",
"Device ID": "Ταυτότητα συσκευής",
"Device Identification": "Ταυτότητα συσκευής",
"Device Name": "Όνομα συσκευής",
@@ -52,7 +53,7 @@
"File Pull Order": "Σειρά με την οποία θα κατεβαίνουν τα αρχεία",
"File Versioning": "Τήρηση εκδόσεων",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Τα δικαιώματα των αρχείων θα αγνοούνται όταν κοιτάζω για αλλαγές. Αφορά συστήματα αρχείων FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Τα αρχεία μετακινούνται στον φάκελο .stversions, όταν αντικαθίστανται ή διαγράφονται από το Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Τα αρχεία που σβήνονται ή αντικαθιστούνται από το Syncthing μετακινούνται σε έναν φάκελο .stversions με χρονοσφραγίδα.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Τα αρχεία προστατεύονται από αλλαγές που γίνονται σε άλλες συσκευές, αλλά όποιες αλλαγές γίνουν σε αυτή τη συσκευή θα αποσταλούν σε όλη τη συστάδα συσκευών.",
"Folder ID": "Ταυτότητα φακέλου",
@@ -66,7 +67,7 @@
"Global Discovery": "Καθολική ανεύρεση",
"Global Discovery Server": "Διακομιστής καθολικής ανεύρεσης κόμβου",
"Global State": "Καθολική κατάσταση",
"Help": "Help",
"Help": "Βοήθεια",
"Ignore": "Αγνόησε",
"Ignore Patterns": "Πρότυπο για αγνόηση",
"Ignore Permissions": "Αγνόησε τα δικαιώματα",
@@ -80,6 +81,7 @@
"Later": "Αργότερα",
"Local Discovery": "Τοπική ανεύρεση",
"Local State": "Τοπική κατάσταση",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Σημαντική αναβάθμιση",
"Maximum Age": "Μέγιστη ηλικία",
"Metadata Only": "Μόνο μεταδεδομένα",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Θα χρησιμοποιούνται τα εξής διαστήματα: Την πρώτη ώρα θα τηρείται μια έκδοση κάθε 30 δευτερόλεπτα. Την πρώτη ημέρα, μια έκδοση κάθε μια ώρα. Τις πρώτες 30 ημέρες, μία έκδοση κάθε ημέρα. Από εκεί και έπειτα μέχρι τη μέγιστη ηλικία, θα τηρείται μια έκδοση κάθε εβδομάδα.",
"The maximum age must be a number and cannot be blank.": "Η μέγιστη ηλικία πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Η μέγιστη ηλικία παλιότερων εκδόσεων (σε ημέρες, αν δώσεις 0 οι παλιότερες εκδόσεις θα διατηρούνται για πάντα).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Ο αριθμός ημερών πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
"The number of days to keep files in the trash can. Zero means forever.": "Ο αριθμός ημερών για τήρηση αρχείων στον Κάδο. Αριθμός μηδέν σημαίνει τήρηση για πάντα.",
"The number of old versions to keep, per file.": "Πόσες παλιότερες εκδόσεις θα διατηρούνται, ανά αρχείο.",
"The number of versions must be a number and cannot be blank.": "Ο αριθμός εκδόσεων πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
"The path cannot be blank.": "Το μονοπάτι δεν μπορεί να είναι κενό.",
"The rescan interval must be a non-negative number of seconds.": "Ο χρόνος επανελέγχου για αλλαγές είναι σε δευτερόλεπτα (δηλ. θετικός αριθμός).",
"This is a major version upgrade.": "Αυτή είναι μιας σημαντική αναβάθμιση.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Ο Κάδος μπορεί να τηρεί εκδόσεις",
"Unknown": "Άγνωστο",
"Unshared": "Δε μοιράζεται",
"Unused": "Δε χρησιμοποιείται",
"Up to Date": "Ενημερωμένος",
"Updated": "Updated",
"Upgrade": "Αναβάθμιση",
"Upgrade To {%version%}": "Αναβάθμιση στην έκδοση {{version}}",
"Upgrading": "Αναβάθμιση",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copied from original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "Delete",
"Deleted": "Deleted",
"Device ID": "Device ID",
"Device Identification": "Device Identification",
"Device Name": "Device Name",
@@ -80,6 +81,7 @@
"Later": "Later",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maximum Age",
"Metadata Only": "Metadata Only",
@@ -175,6 +177,7 @@
"Unshared": "Unshared",
"Unused": "Unused",
"Up to Date": "Up to Date",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Upgrade to {{version}}",
"Upgrading": "Upgrading",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copied from original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "Delete",
"Deleted": "Deleted",
"Device ID": "Device ID",
"Device Identification": "Device Identification",
"Device Name": "Device Name",
@@ -80,6 +81,7 @@
"Later": "Later",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maximum Age",
"Metadata Only": "Metadata Only",
@@ -175,6 +177,7 @@
"Unshared": "Unshared",
"Unused": "Unused",
"Up to Date": "Up to Date",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Upgrade To {{version}}",
"Upgrading": "Upgrading",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copiado del original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 los siguientes Colaboradores:",
"Delete": "Borrar",
"Deleted": "Deleted",
"Device ID": "ID del dispositivo",
"Device Identification": "Identificación del dispositivo",
"Device Name": "Nombre del dispositivo",
@@ -80,6 +81,7 @@
"Later": "Más tarde",
"Local Discovery": "Descubrimiento local",
"Local State": "Estado local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Actualización importante",
"Maximum Age": "Edad máxima",
"Metadata Only": "Sólo metadatos",
@@ -175,6 +177,7 @@
"Unshared": "No compartido",
"Unused": "No usado",
"Up to Date": "Actualizado",
"Updated": "Updated",
"Upgrade": "Actualizar",
"Upgrade To {%version%}": "Actualizar a {{version}}",
"Upgrading": "Actualizando",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Un número negativo no tiene sentido",
"A new major version may not be compatible with previous versions.": "Una versión mayor nueva puede ser incompatible con versiones anteriores.",
"API Key": "Clave API",
"About": "Acerca de",
@@ -20,7 +20,7 @@
"Bugs": "Errores",
"CPU Utilization": "Uso de CPU",
"Changelog": "Registro de cambios",
"Clean out after": "Clean out after",
"Clean out after": "Limpiar después",
"Close": "Cerrar",
"Command": "Comando",
"Comment, when used at the start of a line": "Comentario, cuando es utilizado al inicio de una línea.",
@@ -30,6 +30,7 @@
"Copied from original": "Copiado del original",
"Copyright © 2015 the following Contributors:": "Derechos de autor © 2015 los siguientes colaboradores:",
"Delete": "Suprimir",
"Deleted": "Deleted",
"Device ID": "ID del dispositivo",
"Device Identification": "Identificación del dispositivo",
"Device Name": "Nombre del dispositivo",
@@ -80,6 +81,7 @@
"Later": "Más tarde",
"Local Discovery": "Búsqueda en red local",
"Local State": "Estado local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Actualización mayor",
"Maximum Age": "Edad máxima",
"Metadata Only": "Sólo metadatos",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Los siguientes intervalos se utilizan: para la primera hora una versión se mantiene cada 30 segundos, para el primer día de una versión se mantiene cada hora, durante los primeros 30 días de la versión se mantiene todos los días, hasta que la edad máxima de una versión se mantiene cada semana.",
"The maximum age must be a number and cannot be blank.": "La edad máxima debe ser un número y no puede estar en blanco.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "El tiempo máximo para mantener una versión (en días, establece en 0 para mantener versiones para siempre).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "El número de días debe ser un número y no puede estar vacío.",
"The number of days to keep files in the trash can. Zero means forever.": "El tiempo máximo para mantener un archivo en el cubo de basura (en días, establece en 0 para mantener versiones para siempre).",
"The number of old versions to keep, per file.": "El numero de versiones anteriores a conservar, por archivo.",
"The number of versions must be a number and cannot be blank.": "El número de versiones debe ser un número y no puede estar vacío.",
"The path cannot be blank.": "La ruta no puede estar vacía.",
"The rescan interval must be a non-negative number of seconds.": "El intervalo de reescaneo debe ser un número no negativo de segundos.",
"This is a major version upgrade.": "Esta es una actualización de version mayor.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Versiones como cubo de basura",
"Unknown": "Desconocido",
"Unshared": "No compartido",
"Unused": "No utilizado",
"Up to Date": "Actualizado",
"Updated": "Updated",
"Upgrade": "Actualizar",
"Upgrade To {%version%}": "Actualizar a {{version}}",
"Upgrading": "Actualizando",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Kopioitu alkuperäisestä lähteestä",
"Copyright © 2015 the following Contributors:": "Tekijänoikeus © 2015 seuraavat avustajat:",
"Delete": "Poista",
"Deleted": "Deleted",
"Device ID": "Laitteen ID",
"Device Identification": "Laitteen tunniste",
"Device Name": "Laitteen nimi",
@@ -80,6 +81,7 @@
"Later": "Myöhemmin",
"Local Discovery": "Paikallinen etsintä",
"Local State": "Paikallinen tila",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maksimi-ikä",
"Metadata Only": "Vain metadata",
@@ -175,6 +177,7 @@
"Unshared": "Jakamaton",
"Unused": "Käyttämätön",
"Up to Date": "Ajan tasalla",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Päivitä versioon {{version}}",
"Upgrading": "Päivitetään",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Un nombre négatif de jours n'a pas de sens.",
"A new major version may not be compatible with previous versions.": "Une nouvelle version majeure peut présenter des incompatibilités avec les versions antérieures.",
"API Key": "Clé API",
"About": "À propos",
@@ -19,8 +19,8 @@
"Automatic upgrades": "Mises à jour automatiques",
"Bugs": "Bugs",
"CPU Utilization": "Utilisation du CPU",
"Changelog": "Nouveautés",
"Clean out after": "Clean out after",
"Changelog": "Historique des changements",
"Clean out after": "Nettoyer après",
"Close": "Fermer",
"Command": "Commande",
"Comment, when used at the start of a line": "Commentaire, lorsque utilisé en début de ligne",
@@ -30,6 +30,7 @@
"Copied from original": "Copié de l'original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 Les contributeurs suivants:",
"Delete": "Supprimer",
"Deleted": "Deleted",
"Device ID": "ID du périphérique",
"Device Identification": "Identification de l'appareil",
"Device Name": "Nom du périphérique",
@@ -52,7 +53,7 @@
"File Pull Order": "Ordre d'envoi de fichier",
"File Versioning": "Versions de fichier",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Les bits de permission de fichier sont ignorés lors de la recherche de changements. Utilisé sur les systèmes de fichiers FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Les fichiers sont déplacés vers le dossier .stversions quand ils sont remplacés ou effacés par Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Les fichiers sont déplacés, avec horodatage, dans un dossier .stversions quand ils sont remplacés ou supprimés par Syncthing.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Les fichiers sont protégés des changements réalisés sur les autres appareils, mais les changements réalisés sur cet appareil seront transférés au reste du groupe.",
"Folder ID": "ID du répertoire",
@@ -80,6 +81,7 @@
"Later": "Plus tard",
"Local Discovery": "Recherche locale",
"Local State": "État local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Mise à jour majeure",
"Maximum Age": "Ancienneté maximum",
"Metadata Only": "Métadonnées uniquement",
@@ -163,23 +165,24 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Les intervalles suivant sont utilisés: la première heure une version est conservée chaque 30 secondes, le premier jour une version est conservée chaque heure, les premiers 30 jours une version est conservée chaque jour, jusqu'à la limite d'âge maximum une version est conservée chaque semaine.",
"The maximum age must be a number and cannot be blank.": "L'ancienneté maximum doit être un nombre et ne peut être vide.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Le temps maximum de conservation d'une version (en jours, mettre à 0 pour conserver les versions pour toujours)",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Le nombre de jours doit être numérique et ne peut pas être vide.",
"The number of days to keep files in the trash can. Zero means forever.": "Le nombre de jours de conservation des fichiers dans la poubelle. Zéro signifie toujours.",
"The number of old versions to keep, per file.": "Le nombre d'anciennes versions à garder, par fichier.",
"The number of versions must be a number and cannot be blank.": "Le nombre de versions doit être numérique, et ne peut pas être vide.",
"The path cannot be blank.": "Le chemin ne peut pas être vide.",
"The rescan interval must be a non-negative number of seconds.": "L'intervalle d'analyse ne doit pas être un nombre négatif de secondes.",
"This is a major version upgrade.": "Ceci est une mise à jour majeure",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Gestion des versions de fichier de la poubelle.",
"Unknown": "Inconnu",
"Unshared": "Non partagé",
"Unused": "Non utilisé",
"Up to Date": "Synchronisation à jour",
"Up to Date": "Synchronisé",
"Updated": "Updated",
"Upgrade": "Mise à jour",
"Upgrade To {%version%}": "Mettre à jour vers {{version}}",
"Upgrading": "Mise à jour de Syncthing",
"Upload Rate": "Débit d'envoi",
"Uptime": "Durée de fonctionnement depuis dernier démarrage",
"Uptime": "Durée de fonctionnement",
"Use HTTPS for GUI": "Utiliser l'HTTPS pour le GUI",
"Version": "Version",
"Versions Path": "Emplacement des versions",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Másolva az eredetiről",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 az alábbi Közreműködők",
"Delete": "Törlés",
"Deleted": "Deleted",
"Device ID": "Eszköz azonosító",
"Device Identification": "Eszköz azonosító",
"Device Name": "Eszköz neve",
@@ -80,6 +81,7 @@
"Later": "Később",
"Local Discovery": "Helyi felfedezés",
"Local State": "Helyi állapot",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Főverzió frissítés",
"Maximum Age": "Maximális kor",
"Metadata Only": "Csak metaadatok",
@@ -175,6 +177,7 @@
"Unshared": "Nincs megosztva",
"Unused": "Nincs használatban",
"Up to Date": "Friss",
"Updated": "Updated",
"Upgrade": "Frissítés",
"Upgrade To {%version%}": "Frissítés a {{version}} verzióra",
"Upgrading": "Frissítés",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copiato dall'originale",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 i seguenti Collaboratori:",
"Delete": "Elimina",
"Deleted": "Deleted",
"Device ID": "ID Dispositivo",
"Device Identification": "Identificazione Dispositivo",
"Device Name": "Nome Dispositivo",
@@ -80,6 +81,7 @@
"Later": "Più Tardi",
"Local Discovery": "Individuazione Locale",
"Local State": "Stato Locale",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Aggiornamento principale",
"Maximum Age": "Durata Massima",
"Metadata Only": "Solo i Metadati",
@@ -175,6 +177,7 @@
"Unshared": "Non Condiviso",
"Unused": "Non Utilizzato",
"Up to Date": "Sincronizzato",
"Updated": "Updated",
"Upgrade": "Aggiornamento",
"Upgrade To {%version%}": "Aggiorna alla {{version}}",
"Upgrading": "Aggiornamento",

View File

@@ -30,6 +30,7 @@
"Copied from original": "원본에서 복사됨",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "삭제",
"Deleted": "Deleted",
"Device ID": "기기 ID",
"Device Identification": "기기 식별자",
"Device Name": "기기 이름",
@@ -80,6 +81,7 @@
"Later": "나중에",
"Local Discovery": "로컬 노드 검색",
"Local State": "로컬 상태",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "메이저 업데이트",
"Maximum Age": "최대 보존 기간",
"Metadata Only": "메타데이터만",
@@ -175,6 +177,7 @@
"Unshared": "공유되지 않음",
"Unused": "사용되지 않음",
"Up to Date": "최신 데이터",
"Updated": "Updated",
"Upgrade": "업데이트",
"Upgrade To {%version%}": "{{version}} 으로 업데이트",
"Upgrading": "업데이트 중",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Privalo būti teigiamas skaičius.",
"A new major version may not be compatible with previous versions.": "Nauja versija gali būti nesuderinama su senomis versijomis.",
"API Key": "API raktas",
"About": "Apie programą",
@@ -20,7 +20,7 @@
"Bugs": "Klaidos",
"CPU Utilization": "Procesoriaus panaudojimas",
"Changelog": "Pasikeitimai",
"Clean out after": "Clean out after",
"Clean out after": "Išvalyto po",
"Close": "Uždaryti",
"Command": "Komanda",
"Comment, when used at the start of a line": "Komentaras naudojamas naujoje eilutėje",
@@ -30,6 +30,7 @@
"Copied from original": "Nukopijuota iš originalo",
"Copyright © 2015 the following Contributors:": "Visos teisės saugomos © 2015 šių bendraautorių:",
"Delete": "Trinti",
"Deleted": "Deleted",
"Device ID": "Įrenginio ID",
"Device Identification": "Įrenginio identifikacija",
"Device Name": "Įrenginio pavadinimas",
@@ -52,7 +53,7 @@
"File Pull Order": "Failų siuntimo tvarka",
"File Versioning": "Versijų valdymas",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Ieškant pakeitimų, į failų leidimų bitus yra nekreipiama dėmesio. Naudoti FAT failų sistemose.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Failai perkeliami į .stversions aplanką kai tampa pakeisti arba ištrinti.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Programai Syncthing pakeičiant ar ištrinant failus, jie yra perkeliami į datomis pažymėtas versijas, aplanke .stversions.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Failai apsaugoti nuo pakeitimų atliktų kituose įrenginiuose, bet pakeitimai šiame įrenginyje bus nusiųsti kitiems.",
"Folder ID": "Aplanko ID",
@@ -80,6 +81,7 @@
"Later": "Vėliau",
"Local Discovery": "Vietinis matomumas",
"Local State": "Vietinė būsena",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Stambus atnaujinimas",
"Maximum Age": "Maksimalus amžius",
"Metadata Only": "Metaduomenims",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Šie pertraukų nustatymai naudojami: pirmą valandą versijos laikomos 30 sekundžių, pirmą dieną versijos laikomos valandą, pirmas 30 dienų versijos laikomos parą, kol nebus viršytas nustatytas maksimalus amžius.",
"The maximum age must be a number and cannot be blank.": "Maksimalus amžius turi būti skaitmuo ir negali būti tuščias laukelis.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maksimalus laikas kurį bus saugojama versija (dienomis, nustatykite 0 norėdami saugoti amžinai).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Dienų skaičius turi būti teigiamas skaičius.",
"The number of days to keep files in the trash can. Zero means forever.": "Kiek dienų laikyti failus šiukšliadėžėje. Nulis reiškia amžinai.",
"The number of old versions to keep, per file.": "Kiek failo versijų saugoti.",
"The number of versions must be a number and cannot be blank.": "Versijų skaičius turi būti skaitmuo ir negali būti tuščias laukelis.",
"The path cannot be blank.": "Kelias negali būti tuščias.",
"The rescan interval must be a non-negative number of seconds.": "Nuskaitymo dažnis negali būti neigiamas skaičius.",
"This is a major version upgrade.": "Tai yra stambus atnaujinimas.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Šiukšliadėžės versiju valdymas",
"Unknown": "Nežinoma",
"Unshared": "Nesidalinama",
"Unused": "Nenaudojamas",
"Up to Date": "Atnaujinta",
"Updated": "Updated",
"Upgrade": "Atnaujinimas",
"Upgrade To {%version%}": "Atnaujinti į {{version}}",
"Upgrading": "Atnaujinama",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Et negativt antall dager gir ikke mening.",
"A new major version may not be compatible with previous versions.": "En ny hovedversjon kan bli ikke-kompatibel med en eldre versjon.",
"API Key": "API-nøkkel",
"About": "Om",
@@ -20,7 +20,7 @@
"Bugs": "Programfeil",
"CPU Utilization": "CPU-utnyttelse",
"Changelog": "Endringslog",
"Clean out after": "Clean out after",
"Clean out after": "Tøm etter",
"Close": "Lukk",
"Command": "Kommando",
"Comment, when used at the start of a line": "Kommentar, når det blir brukt i starten av en linje.",
@@ -30,7 +30,8 @@
"Copied from original": "Kopiert fra original",
"Copyright © 2015 the following Contributors:": "Kopirett © 2015 de følgende bidragsytere:",
"Delete": "Slett",
"Device ID": "Enhet ID",
"Deleted": "Deleted",
"Device ID": "Enhets ID",
"Device Identification": "Enhetskjennemerke",
"Device Name": "Navn På Enhet",
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Enhet {{device}} ({{address}}) ønsker å koble seg til. Legg til ny enhet?",
@@ -49,10 +50,10 @@
"Enter ignore patterns, one per line.": "Skriv inn mønster som skal utelates, ett per linje.",
"Error": "Feilmelding",
"External File Versioning": "Ekstern versjonskontroll",
"File Pull Order": "Fil henterekkefølge",
"File Pull Order": "Filenes Henterekkefølge",
"File Versioning": "Versjonskontroll",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Fil bit-rettigheter ignoreres når forandringer oppdages. Bruk FAT filsystem. ",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Filer som slettes eller erstattes av Syncthing flyttes til katalogen .stversions",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Filer flyttes til en datostemplet versjon i .stversions-katalogen når den oppdateres eller slettes av Syncthing.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Filer er beskyttet mot endringer som er gjort på andre enheter, men endringer som er gjort på denne enheten blir sendt til resten av gruppen.",
"Folder ID": "Mappe ID",
@@ -80,6 +81,7 @@
"Later": "Senere",
"Local Discovery": "Lokal Søking",
"Local State": "Lokal Tilstand",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Hovedoppgradering",
"Maximum Age": "Maksimal Levetid",
"Metadata Only": "Kun metadata",
@@ -99,7 +101,7 @@
"Out of Sync Items": "Ikke Synkroniserte Element",
"Outgoing Rate Limit (KiB/s)": "Utgående Hastighetsbegrensning (KiB/s)",
"Override Changes": "Overstyr Endringer",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Plasseringen av mappen på datamaskinen. Blir opprettet om den ikke finnes. Krøllstrektegnet (~) kan brukes som forkortelse for",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Plasseringen av mappen på datamaskinen. Denne vil bli opprettet dersom den ikke finnes. Krøllstrektegnet (~) kan brukes som forkortelse for",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Plasseringen for lagrede versjoner (la denne være tom for å bruke standard .stversions-mappen i mappen).",
"Please consult the release notes before performing a major upgrade.": "Se \"release notes\" før en hovedoppgradering utføres.",
"Please wait": "Vennligst vent",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Følgende intervall blir brukt: den første timen blir en versjon lagret hvert 30. sekund, den første dagen blir en versjon lagret hver time, de første 30 dagene blir en versjon lagret hver dag, og inntil maksimal levetid blir en versjon lagret hver uke.",
"The maximum age must be a number and cannot be blank.": "Maksimal levetid må være et tall og kan ikke være tomt.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Maksimal tid å beholde en versjon (i dager, sett til 0 for å beholde versjoner på ubegrenset tid).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Antall dager må være et tall og kan ikke være tomt.",
"The number of days to keep files in the trash can. Zero means forever.": "Antall dager man skal bevare filene i papirkurven. Null betyr for alltid.",
"The number of old versions to keep, per file.": "Antall gamle versjoner å beholde, per fil.",
"The number of versions must be a number and cannot be blank.": "Antall versjoner må være et tall og kan ikke være tomt.",
"The path cannot be blank.": "Plasseringen kan ikke være tom.",
"The rescan interval must be a non-negative number of seconds.": "Antall sekund i skanneintervallet kan ikke være negativt.",
"This is a major version upgrade.": "Dette er en hovedoppgradering",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Fil versjonskontroll i papirkurven",
"Unknown": "Ukjent",
"Unshared": "Ikke delt",
"Unused": "Ikke i bruk",
"Up to Date": "Oppdatert",
"Updated": "Updated",
"Upgrade": "Oppgradere",
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
"Upgrading": "Oppgraderer",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Gekopieerd van het origineel",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 de volgende Bijdragers:",
"Delete": "Verwijderen",
"Deleted": "Deleted",
"Device ID": "Apparaat-ID",
"Device Identification": "Apparaat identificatie",
"Device Name": "Naam apparaat",
@@ -80,6 +81,7 @@
"Later": "Later",
"Local Discovery": "Lokaal zoeken",
"Local State": "Lokale status",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Grote update",
"Maximum Age": "Maximum leeftijd",
"Metadata Only": "Alleen Metadata",
@@ -175,6 +177,7 @@
"Unshared": "Niet gedeeld",
"Unused": "Ongebruikt",
"Up to Date": "Gesynchroniseerd",
"Updated": "Updated",
"Upgrade": "Update",
"Upgrade To {%version%}": "Upgrade naar {{version}}",
"Upgrading": "Bezig met upgrade",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Kopiert frå originalen",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "Slett",
"Deleted": "Deleted",
"Device ID": "Eining ID",
"Device Identification": "Einingskjennemerke",
"Device Name": "Namn På Eining",
@@ -80,6 +81,7 @@
"Later": "Seinare",
"Local Discovery": "Lokal oppdaging",
"Local State": "Lokal Tilstand",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maksimal Levetid",
"Metadata Only": "Berre metadata",
@@ -175,6 +177,7 @@
"Unshared": "Ikkje delt",
"Unused": "Ubrukt",
"Up to Date": "Oppdatert",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
"Upgrading": "Oppgraderer",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Skopiowane z oryginału",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 the following Contributors:",
"Delete": "Usuń",
"Deleted": "Deleted",
"Device ID": "ID urządzenia",
"Device Identification": "Identyfikator urządzenia",
"Device Name": "Nazwa urządzenia",
@@ -80,6 +81,7 @@
"Later": "Później",
"Local Discovery": "Lokalne odnajdywanie",
"Local State": "Status lokalny",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Maksymalny wiek",
"Metadata Only": "Tylko metadane",
@@ -175,6 +177,7 @@
"Unshared": "Nieudostępnione",
"Unused": "Nieużywane",
"Up to Date": "Aktualny",
"Updated": "Updated",
"Upgrade": "Aktualizacja",
"Upgrade To {%version%}": "Aktualizuj do {{version}}",
"Upgrading": "Aktualizowanie",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Um número negativo de dias não faz sentido.",
"A new major version may not be compatible with previous versions.": "Uma nova versão principal pode não ser compatível com versões anteriores.",
"API Key": "Chave da API",
"About": "Sobre",
@@ -20,7 +20,7 @@
"Bugs": "Erros",
"CPU Utilization": "Uso de CPU",
"Changelog": "Registro de alterações",
"Clean out after": "Clean out after",
"Clean out after": "Limpar depois de",
"Close": "Fechar",
"Command": "Comando",
"Comment, when used at the start of a line": "Comentário, se usado no início de uma linha",
@@ -30,6 +30,7 @@
"Copied from original": "Copiado do original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015. Direitos reservados aos seguintes colaboradores:",
"Delete": "Apagar",
"Deleted": "Deleted",
"Device ID": "ID do dispositivo",
"Device Identification": "Identificação do dispositivo",
"Device Name": "Nome do dispositivo",
@@ -52,7 +53,7 @@
"File Pull Order": "Ordem de retirada do arquivo",
"File Versioning": "Versionamento de arquivos",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Os bits de permissão de um arquivo são ignorados durante as verificações. Use em sistemas de arquivo FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Os arquivos são motivos para a pasta .stversions quando substituídos ou apagados pelo Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Os arquivos são renomeados com suas datas na pasta .stversions quando são substituídos ou removidos pelo Syncthing.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Os arquivos estão protegidos contra alterações feitas em outros dispositivos, mas alterações feitas neste dispositivo serão enviadas ao resto do grupo.",
"Folder ID": "ID da pasta",
@@ -80,6 +81,7 @@
"Later": "Depois",
"Local Discovery": "Descoberta local",
"Local State": "Estado local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Atualização \"major\"",
"Maximum Age": "Idade máxima",
"Metadata Only": "Somente metadados",
@@ -161,20 +163,21 @@
"The folder ID must be unique.": "O ID da pasta deve ser único.",
"The folder path cannot be blank.": "O caminho da pasta não pode ficar vazio.",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "São utilizados os seguintes intervalos: na primeira hora é guardada uma versão a cada 30 segundos, no primeiro dia é guardada uma versão a cada hora, nos primeiros 30 dias é guardada uma versão por dia e, até que atinja a idade máxima, é guardada uma versão por semana.",
"The maximum age must be a number and cannot be blank.": "A idade máxima deve ser um valor numérico e não pode ficar vazio.",
"The maximum age must be a number and cannot be blank.": "A idade máxima deve ser um valor numérico. O campo não pode ficar vazio.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "O número máximo de dias em que uma versão é guardada. (Use 0 para manter para sempre).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "O número de dias deve ser um número valido e não pode ficar em branco.",
"The number of days to keep files in the trash can. Zero means forever.": "O número de dias em que são mantidos os arquivos da lixeira. Zero significa para sempre.",
"The number of old versions to keep, per file.": "O número de versões antigas a serem mantidas, por arquivo.",
"The number of versions must be a number and cannot be blank.": "O número de versões deve ser um valor numério e não pode ficar vazio.",
"The number of versions must be a number and cannot be blank.": "O número de versões deve ser um valor numérico. O campo não pode ficar vazio.",
"The path cannot be blank.": "O caminho não pode ficar vazio.",
"The rescan interval must be a non-negative number of seconds.": "O intervalo entre verificações deve ser um número positivo de segundos.",
"This is a major version upgrade.": "Esta é uma atualização para uma versão \"major\".",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Versionamento de arquivos da lixeira",
"Unknown": "Desconhecida",
"Unshared": "Não compartilhada",
"Unused": "Não utilizado",
"Up to Date": "Sincronizada",
"Updated": "Updated",
"Upgrade": "Atualização",
"Upgrade To {%version%}": "Atualizar para {{version}}",
"Upgrading": "Atualizando",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copiado do original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 os seguintes contribuidores:",
"Delete": "Eliminar",
"Deleted": "Deleted",
"Device ID": "ID do dispositivo",
"Device Identification": "Identificação do dispositivo",
"Device Name": "Nome do dispositivo",
@@ -80,6 +81,7 @@
"Later": "Mais tarde",
"Local Discovery": "Busca local",
"Local State": "Estado local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Actualização importante",
"Maximum Age": "Idade máxima",
"Metadata Only": "Metadados apenas",
@@ -175,6 +177,7 @@
"Unshared": "Não partilhada",
"Unused": "Não utilizado",
"Up to Date": "Sincronizado",
"Updated": "Updated",
"Upgrade": "Actualizar",
"Upgrade To {%version%}": "Actualizar para {{version}}",
"Upgrading": "Actualizando",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copiat din original",
"Copyright © 2015 the following Contributors:": "Copyright ©2015 Următorii Contribuitori:",
"Delete": "Şterge",
"Deleted": "Deleted",
"Device ID": "ID Dispozitiv",
"Device Identification": "Identificare Dispozitiv",
"Device Name": "Nume Dispozitiv",
@@ -80,6 +81,7 @@
"Later": "Mai tîrziu",
"Local Discovery": "Găsire Locală",
"Local State": "Status Local",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Vârsta Maximă",
"Metadata Only": "Doar Metadate",
@@ -175,6 +177,7 @@
"Unshared": "Neîmpărțit",
"Unused": "Nefolosit",
"Up to Date": "La Zi",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "Actualizează La Versiunea {{version}}",
"Upgrading": "Se Actualizează",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "Отрицательное число дней не имеет значения.",
"A new major version may not be compatible with previous versions.": "Новое обновление основной версии может быть несовместимо с предыдущими версиями.",
"API Key": "Ключ API",
"About": "О программе",
@@ -20,7 +20,7 @@
"Bugs": "Ошибки",
"CPU Utilization": "Загрузка ЦПУ",
"Changelog": "Журнал изменений",
"Clean out after": "Clean out after",
"Clean out after": "Очистить после",
"Close": "Закрыть",
"Command": "Команда",
"Comment, when used at the start of a line": "Комментарий, если используется в начале строки",
@@ -30,6 +30,7 @@
"Copied from original": "Скопировано с оригинала",
"Copyright © 2015 the following Contributors:": "Все права защищены ©, 2015 участники:",
"Delete": "Удалить",
"Deleted": "Deleted",
"Device ID": "ID устройства",
"Device Identification": "Идентификация устройства",
"Device Name": "Имя устройства",
@@ -52,7 +53,7 @@
"File Pull Order": "Порядок получения файлов",
"File Versioning": "Управление версиями",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Права на файлы игнорируются при поиске изменений. Используется на файловой системе FAT.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Файлы перемещаются в .stversions после замены или удаления Syncthing.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Файлы с временнОй меткой версии помещаются в папку .stversions при их замене или удалении Syncthing.",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Файлы защищены от изменений сделанных на других устройствах, но изменения сделанные на этом устройстве будут отправлены всему кластеру.",
"Folder ID": "ID папки",
@@ -80,6 +81,7 @@
"Later": "Потом",
"Local Discovery": "Локальное обнаружение",
"Local State": "Локальное состояние",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Обновление основной версии",
"Maximum Age": "Максимальный срок",
"Metadata Only": "Только метаданные",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Используются следующие интервалы: в первый час версия меняется каждые 30 секунд, в первый день - каждый час, первые 30 дней - каждый день, после, до максимального срока - каждую неделю.",
"The maximum age must be a number and cannot be blank.": "Максимальный срок должен быть числом и не может быть пустым.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максимальный срок хранения версии (в днях, 0 значит вечное хранение).",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "Количество дней должно быть числом и не может быть пустым.",
"The number of days to keep files in the trash can. Zero means forever.": "Количество дней хранения файлов в корзине. Ноль значит навсегда.",
"The number of old versions to keep, per file.": "Количество хранимых версий файла.",
"The number of versions must be a number and cannot be blank.": "Количество версий должно быть числом и не может быть пустым.",
"The path cannot be blank.": "Путь не может быть пустым.",
"The rescan interval must be a non-negative number of seconds.": "Интервал пересканирования должен быть неотрицательным количеством секунд.",
"This is a major version upgrade.": "Это обновление основной версии продукта.",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "Использовать Корзину для версий файлов",
"Unknown": "Неизвестно",
"Unshared": "Необщедоступно",
"Unused": "Не используется",
"Up to Date": "Обновлено",
"Updated": "Updated",
"Upgrade": "Обновить",
"Upgrade To {%version%}": "Обновить до {{version}}",
"Upgrading": "Обновление",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Oförändrat",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 följande medverkande:",
"Delete": "Radera",
"Deleted": "Deleted",
"Device ID": "Enhets-ID",
"Device Identification": "Enhetsidentifikation",
"Device Name": "Enhetsnamn",
@@ -80,6 +81,7 @@
"Later": "Senare",
"Local Discovery": "Lokal uppslagning",
"Local State": "Lokal status",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Stor uppgradering",
"Maximum Age": "Högsta åldersgräns",
"Metadata Only": "Endast metadata",
@@ -175,6 +177,7 @@
"Unshared": "Inte delad",
"Unused": "Oanvänd",
"Up to Date": "Helt uppdaterad",
"Updated": "Updated",
"Upgrade": "Uppgradering",
"Upgrade To {%version%}": "Uppgradera till {{version}}",
"Upgrading": "Uppgraderar",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Aslından kopyalanmış",
"Copyright © 2015 the following Contributors:": "Telif Hakkı © 2015 Katkıda bulunanlar:",
"Delete": "Sil",
"Deleted": "Deleted",
"Device ID": "Cihaz ID",
"Device Identification": "Cihaz Kimliği",
"Device Name": "Cihaz Adı",
@@ -80,6 +81,7 @@
"Later": "Sonra",
"Local Discovery": "Yerel bulma",
"Local State": "Yerel Durum",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Major Upgrade",
"Maximum Age": "Azami Süre",
"Metadata Only": "Metadata Only",
@@ -175,6 +177,7 @@
"Unshared": "Paylaşılmayan",
"Unused": "Kullanılmayan",
"Up to Date": "Güncel",
"Updated": "Updated",
"Upgrade": "Upgrade",
"Upgrade To {%version%}": "{{version}} sürümüne yükselt",
"Upgrading": "Yükseltiliyor",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Скопійовано з оригіналу",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 наступних контриб’юторів:",
"Delete": "Видалити",
"Deleted": "Deleted",
"Device ID": "ID пристрою",
"Device Identification": "Ідентифікатор пристрою",
"Device Name": "Назва пристрою",
@@ -80,6 +81,7 @@
"Later": "Пізніше",
"Local Discovery": "Локальне виявлення",
"Local State": "Локальний статус",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "Мажорне оновлення",
"Maximum Age": "Максимальний вік",
"Metadata Only": "Тільки метадані",
@@ -175,6 +177,7 @@
"Unshared": "Не розповсюджується",
"Unused": "Не використовується",
"Up to Date": "Актуальна версія",
"Updated": "Updated",
"Upgrade": "Оновлення",
"Upgrade To {%version%}": "Оновити до {{version}}",
"Upgrading": "Оновлення",

View File

@@ -1,5 +1,5 @@
{
"A negative number of days doesn't make sense.": "A negative number of days doesn't make sense.",
"A negative number of days doesn't make sense.": "天数不能为负。",
"A new major version may not be compatible with previous versions.": "重大更新可能与之前的版本之间无法兼容",
"API Key": "API Key",
"About": "关于",
@@ -20,7 +20,7 @@
"Bugs": "Bug汇报",
"CPU Utilization": "CPU使用率",
"Changelog": "更新日志",
"Clean out after": "Clean out after",
"Clean out after": "在该时间后清除:",
"Close": "关闭",
"Command": "命令",
"Comment, when used at the start of a line": "注释,在行首使用",
@@ -30,6 +30,7 @@
"Copied from original": "从源复制",
"Copyright © 2015 the following Contributors:": "版权 ©2015 由下列贡献者所有:",
"Delete": "删除",
"Deleted": "已删除",
"Device ID": "设备标识",
"Device Identification": "设备标识",
"Device Name": "设备名",
@@ -52,7 +53,7 @@
"File Pull Order": "文件拉取顺序",
"File Versioning": "版本控制",
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "当寻找文件变更时忽略文件权限。用于FAT文件系统。",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Files are moved to .stversions folder when replaced or deleted by Syncthing.",
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "当文件被 Syncthing 替换或删时,将会被移动到 .stversions 文件夹",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "当某个文件在其他设备被替换或删除时,本设备将会在 .stversions 文件夹中保留该文件的备份,并在文件名中加入时间戳信息。",
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "在其它设备中对该文件夹内文件的修改并不会被同步到本机,但是在本机上对其的修改,则会被同步到其它设备中。",
"Folder ID": "文件夹标识",
@@ -80,6 +81,7 @@
"Later": "稍后",
"Local Discovery": "在局域网上寻找设备",
"Local State": "本地状态",
"Local State (Total)": "本地状态汇总",
"Major Upgrade": "重大更新",
"Maximum Age": "历史版本最长保留时间",
"Metadata Only": "仅元数据",
@@ -163,18 +165,19 @@
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "保留的历史版本会满足以下条件最近一小时内的历史版本更新间隔小于30秒的仅保留一份。最近一天内的历史版本更新间隔小于1小时的仅保留一份。最近一个月内的历史版本更新间隔小于1天的仅保留一份。距离现在超过一个月且小于最长保留时间的更新间隔小于1周的仅保留一份。",
"The maximum age must be a number and cannot be blank.": "最长保留时间必须为数字,且不能为空。",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "历史版本保留的最长天数0为永久保存",
"The number of days must be a number and cannot be blank.": "The number of days must be a number and cannot be blank.",
"The number of days to keep files in the trash can. Zero means forever.": "The number of days to keep files in the trash can. Zero means forever.",
"The number of days must be a number and cannot be blank.": "天数必须为数字,且不能为空。",
"The number of days to keep files in the trash can. Zero means forever.": "文件保存在回收站的天数。零表示永久。",
"The number of old versions to keep, per file.": "每个文件保留的版本数量上限。",
"The number of versions must be a number and cannot be blank.": "保留版本数量必须为数字,且不能为空。",
"The path cannot be blank.": "路径不能为空",
"The rescan interval must be a non-negative number of seconds.": "扫描间隔单位为秒,且不能为负数。",
"This is a major version upgrade.": "这是一个重大版本更新。",
"Trash Can File Versioning": "Trash Can File Versioning",
"Trash Can File Versioning": "回收站式版本控制",
"Unknown": "未知",
"Unshared": "未共享",
"Unused": "未使用",
"Up to Date": "同步完成",
"Updated": "已更新",
"Upgrade": "更新",
"Upgrade To {%version%}": "升级至版本{{version}}",
"Upgrading": "升级中",

View File

@@ -30,6 +30,7 @@
"Copied from original": "Copied from original",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 下列貢獻者:",
"Delete": "刪除",
"Deleted": "Deleted",
"Device ID": "裝置識別碼",
"Device Identification": "裝置識別",
"Device Name": "裝置名稱",
@@ -80,6 +81,7 @@
"Later": "稍後",
"Local Discovery": "本地探索",
"Local State": "本地狀態",
"Local State (Total)": "Local State (Total)",
"Major Upgrade": "重大更新",
"Maximum Age": "最長保留時間",
"Metadata Only": "僅中繼資料",
@@ -175,6 +177,7 @@
"Unshared": "未共享",
"Unused": "未使用",
"Up to Date": "最新",
"Updated": "Updated",
"Upgrade": "升級",
"Upgrade To {%version%}": "升級至 {{version}}",
"Upgrading": "正在升級",

gui/index.html (17 changes, Normal file → Executable file)
View File

@@ -16,15 +16,13 @@
<meta name="author" content="">
<link rel="shortcut icon" href="assets/img/favicon.png">
<title>Syncthing | {{thisDeviceName()}}</title>
<title>{{thisDeviceName()}} | Syncthing</title>
<link href="vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<link href="assets/font/raleway.css" rel="stylesheet">
<link href="assets/css/overrides.css" rel="stylesheet">
</head>
<body>
<div ng-controller="EventController"></div>
<!-- Top bar -->
<nav class="navbar navbar-top navbar-default" role="navigation">
@@ -273,10 +271,12 @@
<th><span class="glyphicon glyphicon-share-alt"></span>&nbsp;<span translate>Shared With</span></th>
<td class="text-right">{{sharesFolder(folder)}}</td>
</tr>
<tr ng-if="!folder.readOnly && folderStats[folder.id].lastFile">
<tr ng-if="!folder.readOnly && folderStats[folder.id].lastFile && folderStats[folder.id].lastFile.filename">
<th><span class="glyphicon glyphicon-transfer"></span>&nbsp;<span translate>Last File Received</span></th>
<td class="text-right">
<span title="{{folderStats[folder.id].lastFile.filename}} @ {{folderStats[folder.id].lastFile.at | date:'yyyy-MM-dd HH:mm:ss'}}">
<span translate ng-if="!folderStats[folder.id].lastFile.deleted">Updated</span>
<span translate ng-if="folderStats[folder.id].lastFile.deleted">Deleted</span>
{{folderStats[folder.id].lastFile.filename | basename}}
</span>
</td>
@@ -327,6 +327,10 @@
<th><span class="glyphicon glyphicon-cloud-upload"></span>&nbsp;<span translate>Upload Rate</span></th>
<td class="text-right">{{connectionsTotal.outbps | binary}}B/s ({{connectionsTotal.outBytesTotal | binary}}B)</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-home"></span>&nbsp;<span translate>Local State (Total)</span> </th>
<td class="text-right">{{foldersTotalLocalFiles | alwaysNumber}} <span translate>items</span>, ~{{ foldersTotalLocalBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-th"></span>&nbsp;<span translate>RAM Utilization</span></th>
<td class="text-right">{{system.sys | binary}}B</td>
@@ -655,6 +659,7 @@
<label>
<input type="checkbox" ng-model="currentFolder.readOnly"> <span translate>Folder Master</span>
</label>
&nbsp;<a href="http://docs.syncthing.net/users/foldermaster.html" target="_blank"><span class="glyphicon glyphicon-book"></span>&nbsp;<span translate>Help</span></a>
</div>
<p translate class="help-block">Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.</p>
</div>
@@ -1054,10 +1059,12 @@
<li class="auto-generated">Dominik Heidler</li>
<li class="auto-generated">Elias Jarlebring</li>
<li class="auto-generated">Emil Hessman</li>
<li class="auto-generated">Erik Meitner</li>
<li class="auto-generated">Federico Castagnini</li>
<li class="auto-generated">Felix Ableitner</li>
<li class="auto-generated">Felix Unterpaintner</li>
<li class="auto-generated">Francois-Xavier Gsell</li>
<li class="auto-generated">Frank Isemann</li>
<li class="auto-generated">Gilli Sigurdsson</li>
<li class="auto-generated">Jakob Borg</li>
<li class="auto-generated">James Patterson</li>
@@ -1118,7 +1125,7 @@
<!-- gui application code -->
<script src="scripts/syncthing/core/module.js"></script>
<script src="scripts/syncthing/core/controllers/eventController.js"></script>
<script src="scripts/syncthing/core/services/events.js"></script>
<script src="scripts/syncthing/core/controllers/syncthingController.js"></script>
<script src="scripts/syncthing/core/directives/identiconDirective.js"></script>
<script src="scripts/syncthing/core/directives/languageSelectDirective.js"></script>

View File

@@ -1,55 +0,0 @@
var debugEvents = false;
angular.module('syncthing.core')
.controller('EventController', function ($scope, $http) {
'use strict';
$scope.lastEvent = null;
var lastID = 0;
var successFn = function (data) {
// When Syncthing restarts while the long polling connection is in
// progress the browser on some platforms returns a 200 (since the
// headers have been flushed with the return code 200), with no data.
// This basically means that the connection has been reset, and the call
// was not actually successful.
if (!data) {
errorFn(data);
return;
}
$scope.$emit('UIOnline');
if (lastID > 0) {
data.forEach(function (event) {
if (debugEvents) {
console.log("event", event.id, event.type, event.data);
}
$scope.$emit(event.type, event);
});
}
$scope.lastEvent = data[data.length - 1];
lastID = $scope.lastEvent.id;
setTimeout(function () {
$http.get(urlbase + '/events?since=' + lastID)
.success(successFn)
.error(errorFn);
}, 500);
};
var errorFn = function (data) {
$scope.$emit('UIOffline');
setTimeout(function () {
$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
}, 1000);
};
$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
});

View File

@@ -2,7 +2,7 @@ angular.module('syncthing.core')
.config(function($locationProvider) {
$locationProvider.html5Mode(true).hashPrefix('!');
})
.controller('SyncthingController', function ($scope, $http, $location, LocaleService) {
.controller('SyncthingController', function ($scope, $http, $location, LocaleService, Events) {
'use strict';
// private/helper definitions
@@ -15,6 +15,7 @@ angular.module('syncthing.core')
function initController() {
LocaleService.autoConfigLocale();
setInterval($scope.refresh, 10000);
Events.start();
}
@@ -44,6 +45,8 @@ angular.module('syncthing.core')
$scope.neededTotal = 0;
$scope.neededCurrentPage = 1;
$scope.neededPageSize = 10;
$scope.foldersTotalLocalBytes = 0;
$scope.foldersTotalLocalFiles = 0;
$(window).bind('beforeunload', function () {
navigatingAway = true;
@@ -66,7 +69,7 @@ angular.module('syncthing.core')
'touch': 'asterisk'
};
$scope.$on('UIOnline', function (event, arg) {
$scope.$on(Events.ONLINE, function () {
if (online && !restarting) {
return;
}
@@ -100,7 +103,7 @@ angular.module('syncthing.core')
$('#shutdown').modal('hide');
});
$scope.$on('UIOffline', function (event, arg) {
$scope.$on(Events.OFFLINE, function () {
if (navigatingAway || !online) {
return;
}
@@ -125,7 +128,7 @@ angular.module('syncthing.core')
if (!restarting) {
if (arg.status === 0) {
// A network error, not an HTTP error
$scope.$emit('UIOffline');
$scope.$emit(Events.OFFLINE);
} else if (arg.status >= 400 && arg.status <= 599) {
// A genuine HTTP error
$('#networkError').modal('hide');
@@ -136,7 +139,7 @@ angular.module('syncthing.core')
}
});
$scope.$on('StateChanged', function (event, arg) {
$scope.$on(Events.STATE_CHANGED, function (event, arg) {
var data = arg.data;
if ($scope.model[data.folder]) {
$scope.model[data.folder].state = data.to;
@@ -144,21 +147,24 @@ angular.module('syncthing.core')
}
});
$scope.$on('LocalIndexUpdated', function (event, arg) {
var data = arg.data;
$scope.$on(Events.LOCAL_INDEX_UPDATED, function (event, arg) {
refreshFolderStats();
});
$scope.$on('RemoteIndexUpdated', function (event, arg) {
/* currently not using
$scope.$on('Events.REMOTE_INDEX_UPDATED', function (event, arg) {
// Nothing
});
$scope.$on('DeviceDisconnected', function (event, arg) {
*/
$scope.$on(Events.DEVICE_DISCONNECTED, function (event, arg) {
delete $scope.connections[arg.data.id];
refreshDeviceStats();
});
$scope.$on('DeviceConnected', function (event, arg) {
$scope.$on(Events.DEVICE_CONNECTED, function (event, arg) {
if (!$scope.connections[arg.data.id]) {
$scope.connections[arg.data.id] = {
inbps: 0,
@@ -173,7 +179,7 @@ angular.module('syncthing.core')
}
});
$scope.$on('ConfigLoaded', function (event) {
$scope.$on('ConfigLoaded', function () {
if ($scope.config.options.urAccepted === 0) {
// If usage reporting has been neither accepted nor declined,
// we want to ask the user to make a choice. But we don't want
@@ -193,15 +199,15 @@ angular.module('syncthing.core')
}
});
$scope.$on('DeviceRejected', function (event, arg) {
$scope.$on(Events.DEVICE_REJECTED, function (event, arg) {
$scope.deviceRejections[arg.data.device] = arg;
});
$scope.$on('FolderRejected', function (event, arg) {
$scope.$on(Events.FOLDER_REJECTED, function (event, arg) {
$scope.folderRejections[arg.data.folder + "-" + arg.data.device] = arg;
});
$scope.$on('ConfigSaved', function (event, arg) {
$scope.$on(Events.CONFIG_SAVED, function (event, arg) {
updateLocalConfig(arg.data);
$http.get(urlbase + '/system/config/insync').success(function (data) {
@@ -209,7 +215,7 @@ angular.module('syncthing.core')
}).error($scope.emitHTTPError);
});
$scope.$on('DownloadProgress', function (event, arg) {
$scope.$on(Events.DOWNLOAD_PROGRESS, function (event, arg) {
var stats = arg.data;
var progress = {};
for (var folder in stats) {
@@ -254,12 +260,12 @@ angular.module('syncthing.core')
console.log("DownloadProgress", $scope.progress);
});
$scope.$on('FolderSummary', function (event, arg) {
$scope.$on(Events.FOLDER_SUMMARY, function (event, arg) {
var data = arg.data;
$scope.model[data.folder] = data.summary;
});
$scope.$on('FolderCompletion', function (event, arg) {
$scope.$on(Events.FOLDER_COMPLETION, function (event, arg) {
var data = arg.data;
if (!$scope.completion[data.device]) {
$scope.completion[data.device] = {};
@@ -336,6 +342,15 @@ angular.module('syncthing.core')
}
}
$scope.announceServersFailed = failed;
$scope.foldersTotalLocalBytes = 0;
$scope.foldersTotalLocalFiles = 0;
for (var f in $scope.model) {
$scope.foldersTotalLocalBytes += $scope.model[f].localBytes;
$scope.foldersTotalLocalFiles += $scope.model[f].localFiles;
};
console.log("refreshSystem", data);
}).error($scope.emitHTTPError);
}
@@ -444,7 +459,7 @@ angular.module('syncthing.core')
} else {
return 'sync';
}
};
}
function parseNeeded(data) {
var merged = [];
@@ -475,7 +490,7 @@ angular.module('syncthing.core')
$scope.neededChangePageSize = function (perpage) {
$scope.neededPageSize = perpage;
refreshNeed($scope.neededFolder);
}
};
var refreshDeviceStats = debounce(function () {
$http.get(urlbase + "/stats/device").success(function (data) {
@@ -973,7 +988,7 @@ angular.module('syncthing.core')
$scope.$watch('currentFolder.path', function (newvalue) {
if (newvalue && newvalue.trim().charAt(0) == '~') {
$scope.currentFolder.path = $scope.system.tilde + newvalue.trim().substring(1)
$scope.currentFolder.path = $scope.system.tilde + newvalue.trim().substring(1);
}
$http.get(urlbase + '/system/browse', {
params: { current: newvalue }
@@ -1035,6 +1050,7 @@ angular.module('syncthing.core')
selectedDevices: {}
};
$scope.currentFolder.rescanIntervalS = 60;
$scope.currentFolder.order = "random";
$scope.currentFolder.fileVersioningSelector = "none";
$scope.currentFolder.trashcanClean = 0;
$scope.currentFolder.simpleKeep = 5;
@@ -1052,19 +1068,19 @@ angular.module('syncthing.core')
$scope.dismissFolderRejection(folder, device);
$scope.currentFolder = {
id: folder,
selectedDevices: {}
selectedDevices: {},
rescanIntervalS: 60,
fileVersioningSelector: "none",
trashcanClean: 0,
simpleKeep: 5,
staggeredMaxAge: 365,
staggeredCleanInterval: 3600,
staggeredVersionsPath: "",
externalCommand: "",
autoNormalize: true
};
$scope.currentFolder.selectedDevices[device] = true;
$scope.currentFolder.rescanIntervalS = 60;
$scope.currentFolder.fileVersioningSelector = "none";
$scope.currentFolder.trashcanClean = 0;
$scope.currentFolder.simpleKeep = 5;
$scope.currentFolder.staggeredMaxAge = 365;
$scope.currentFolder.staggeredCleanInterval = 3600;
$scope.currentFolder.staggeredVersionsPath = "";
$scope.currentFolder.externalCommand = "";
$scope.currentFolder.autoNormalize = true;
$scope.editingExisting = false;
$scope.folderEditor.$setPristine();
$('#editFolder').modal();
@@ -1159,12 +1175,12 @@ angular.module('syncthing.core')
});
names.sort();
return names.join(", ");
}
};
$scope.deviceFolders = function (deviceCfg) {
var folders = [];
for (var folderID in $scope.folders) {
var devices = $scope.folders[folderID].devices
var devices = $scope.folders[folderID].devices;
for (var i = 0; i < devices.length; i++) {
if (devices[i].deviceID == deviceCfg.deviceID) {
folders.push(folderID);

View File

@@ -0,0 +1,85 @@
var debugEvents = !true;
angular.module('syncthing.core')
.service('Events', ['$http', '$rootScope', '$timeout', function ($http, $rootScope, $timeout) {
'use strict';
var lastID = 0;
var self = this;
function successFn (data) {
// When Syncthing restarts while the long polling connection is in
// progress the browser on some platforms returns a 200 (since the
// headers has been flushed with the return code 200), with no data.
// This basically means that the connection has been reset, and the call
// was not actually successful.
if (!data) {
errorFn(data);
return;
}
$rootScope.$broadcast(self.ONLINE);
if (lastID > 0) { // don't emit events from the first response
data.forEach(function (event) {
if (debugEvents) {
console.log("event", event.id, event.type, event.data);
}
$rootScope.$broadcast(event.type, event);
});
}
var lastEvent = data.pop();
if (lastEvent) {
lastID = lastEvent.id;
}
$timeout(function () {
$http.get(urlbase + '/events?since=' + lastID)
.success(successFn)
.error(errorFn);
}, 500, false);
}
function errorFn (dummy) {
$rootScope.$broadcast(self.OFFLINE);
$timeout(function () {
$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
}, 1000, false);
}
angular.extend(self, {
// emitted by this
ONLINE: 'UIOnline',
OFFLINE: 'UIOffline',
// emitted by syncthing process
CONFIG_SAVED: 'ConfigSaved', // Emitted after the config has been saved by the user or by Syncthing itself
DEVICE_CONNECTED: 'DeviceConnected', // Generated each time a connection to a device has been established
DEVICE_DISCONNECTED: 'DeviceDisconnected', // Generated each time a connection to a device has been terminated
DEVICE_DISCOVERED: 'DeviceDiscovered', // Emitted when a new device is discovered using local discovery
DEVICE_REJECTED: 'DeviceRejected', // Emitted when there is a connection from a device we are not configured to talk to
DOWNLOAD_PROGRESS: 'DownloadProgress', // Emitted during file downloads for each folder for each file
FOLDER_COMPLETION: 'FolderCompletion', // Emitted when the local or remote contents for a folder changes
FOLDER_REJECTED: 'FolderRejected', // Emitted when a device sends index information for a folder we do not have, or have but do not share with the device in question
FOLDER_SUMMARY: 'FolderSummary', // Emitted when folder contents have changed locally
ITEM_FINISHED: 'ItemFinished', // Generated when Syncthing ends synchronizing a file to a newer version
ITEM_STARTED: 'ItemStarted', // Generated when Syncthing begins synchronizing a file to a newer version
LOCAL_INDEX_UPDATED: 'LocalIndexUpdated', // Generated when the local index information has changed, due to synchronizing one or more items from the cluster or discovering local changes during a scan
PING: 'Ping', // Generated automatically every 60 seconds
REMOTE_INDEX_UPDATED: 'RemoteIndexUpdated', // Generated each time new index information is received from a device
STARTING: 'Starting', // Emitted exactly once, when Syncthing starts, before parsing configuration etc
STARTUP_COMPLETED: 'StartupCompleted', // Emitted exactly once, when initialization is complete and Syncthing is ready to start exchanging data with other devices
STATE_CHANGED: 'StateChanged', // Emitted when a folder changes state
start: function() {
$http.get(urlbase + '/events?limit=1')
.success(successFn)
.error(errorFn);
}
});
}]);
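
The service above implements the GUI side of Syncthing's long-polling event protocol: seed the last seen ID with ?limit=1, then repeatedly fetch /rest/events?since=<lastID>, suppressing the initial batch and backing off when the connection drops. Below is a minimal standalone Go sketch of the same loop; the address, port, and API key are assumptions for illustration, while the endpoint and query parameters are those used in the service above.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type event struct {
	ID   int             `json:"id"`
	Type string          `json:"type"`
	Data json.RawMessage `json:"data"`
}

func get(client *http.Client, url, apiKey string) ([]event, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-API-Key", apiKey)
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var evs []event
	return evs, json.NewDecoder(resp.Body).Decode(&evs)
}

func main() {
	const base = "http://127.0.0.1:8384/rest" // assumed local GUI address
	const apiKey = "..."                      // fill in your API key
	client := &http.Client{Timeout: 70 * time.Second} // generous timeout for the long poll

	// Seed lastID from the latest event without emitting the initial batch,
	// mirroring the "if (lastID > 0)" guard in the service above.
	lastID := 0
	if evs, err := get(client, base+"/events?limit=1", apiKey); err == nil && len(evs) > 0 {
		lastID = evs[len(evs)-1].ID
	}
	for {
		evs, err := get(client, fmt.Sprintf("%s/events?since=%d", base, lastID), apiKey)
		if err != nil {
			time.Sleep(time.Second) // offline: back off and retry, as errorFn does
			continue
		}
		for _, ev := range evs {
			fmt.Println(ev.ID, ev.Type)
			lastID = ev.ID
		}
	}
}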

View File

File diff suppressed because one or more lines are too long

View File

@@ -28,6 +28,7 @@ import (
const (
OldestHandledVersion = 5
CurrentVersion = 10
MaxRescanIntervalS = 365 * 24 * 60 * 60
)
type Configuration struct {
@@ -330,6 +331,12 @@ func (cfg *Configuration) prepare(myID protocol.DeviceID) {
folder.ID = "default"
}
if folder.RescanIntervalS > MaxRescanIntervalS {
folder.RescanIntervalS = MaxRescanIntervalS
} else if folder.RescanIntervalS < 0 {
folder.RescanIntervalS = 0
}
if seen, ok := seenFolders[folder.ID]; ok {
l.Warnf("Multiple folders with ID %q; disabling", folder.ID)
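
The clamping added in this hunk caps each folder's rescan interval at MaxRescanIntervalS, which works out to 365 × 24 × 60 × 60 = 31,536,000 seconds (one year), and turns negative values into zero. A standalone sketch of that rule:

package main

import "fmt"

const maxRescanIntervalS = 365 * 24 * 60 * 60 // one year: 31,536,000 s

// sanitizeRescanInterval mirrors the clamping in prepare() above: values
// above one year are capped, negative values become zero.
func sanitizeRescanInterval(s int) int {
	if s > maxRescanIntervalS {
		return maxRescanIntervalS
	}
	if s < 0 {
		return 0
	}
	return s
}

func main() {
	fmt.Println(sanitizeRescanInterval(60000000000)) // 31536000
	fmt.Println(sanitizeRescanInterval(-1))          // 0
	fmt.Println(sanitizeRescanInterval(60))          // 60
}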

View File

@@ -578,3 +578,17 @@ func TestPullOrder(t *testing.T) {
}
}
}
func TestLargeRescanInterval(t *testing.T) {
wrapper, err := Load("testdata/largeinterval.xml", device1)
if err != nil {
t.Fatal(err)
}
if wrapper.Folders()["l1"].RescanIntervalS != MaxRescanIntervalS {
t.Error("too large rescan interval should be maxed out")
}
if wrapper.Folders()["l2"].RescanIntervalS != 0 {
t.Error("negative rescan interval should become zero")
}
}

View File

@@ -0,0 +1,4 @@
<configuration version="10">
<folder id="l1" path="~/Sync" rescanIntervalS="60000000000"></folder>
<folder id="l2" path="~/Sync" rescanIntervalS="-1"></folder>
</configuration>

View File

@@ -129,6 +129,28 @@ func (n NamespacedKV) Bytes(key string) ([]byte, bool) {
return valBs, true
}
// PutBool stores a new boolean. Any existing value (even if of another type)
// is overwritten.
func (n *NamespacedKV) PutBool(key string, val bool) {
keyBs := append(n.prefix, []byte(key)...)
if val {
n.db.Put(keyBs, []byte{0x0}, nil)
} else {
n.db.Put(keyBs, []byte{0x1}, nil)
}
}
// Bool returns the stored value as a boolean and a boolean that
// is false if no value was stored at the key.
func (n NamespacedKV) Bool(key string) (bool, bool) {
keyBs := append(n.prefix, []byte(key)...)
valBs, err := n.db.Get(keyBs, nil)
if err != nil {
return false, false
}
return valBs[0] == 0x0, true
}
// Delete deletes the specified key. It is allowed to delete a nonexistent
// key.
func (n NamespacedKV) Delete(key string) {
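
Note the encoding convention in the PutBool/Bool pair added above: true is stored as the byte 0x0 and false as 0x1, and Bool tests valBs[0] == 0x0, so the pair round-trips consistently even though the mapping is inverted from what one might expect. A standalone sketch of just the encoding:

package main

import "fmt"

// encodeBool/decodeBool mirror the inverted-looking convention above:
// true <-> 0x0, false <-> 0x1. Consistency between writer and reader is
// all that matters here.
func encodeBool(v bool) []byte {
	if v {
		return []byte{0x0}
	}
	return []byte{0x1}
}

func decodeBool(b []byte) bool { return b[0] == 0x0 }

func main() {
	for _, v := range []bool{true, false} {
		b := encodeBool(v)
		fmt.Printf("%v -> % x -> %v\n", v, b, decodeBool(b))
	}
}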

View File

@@ -99,7 +99,7 @@ type Model struct {
}
var (
SymlinkWarning = stdsync.Once{}
symlinkWarning = stdsync.Once{}
)
// NewModel creates and starts a new model. The model starts in read-only mode,
@@ -179,6 +179,7 @@ func (m *Model) StartFolderRW(folder string) {
if !ok {
l.Fatalf("Requested versioning type %q that does not exist", cfg.Versioning.Type)
}
versioner := factory(folder, cfg.Path(), cfg.Versioning.Params)
if service, ok := versioner.(suture.Service); ok {
// The versioner implements the suture.Service interface, so
@@ -189,7 +190,7 @@ func (m *Model) StartFolderRW(folder string) {
p.versioner = versioner
}
go p.Serve()
m.Add(p)
}
// StartFolderRO starts read only processing on the current model. When in
@@ -512,7 +513,7 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
}
fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1]
} else if symlinkInvalid(fs[i].IsSymlink()) {
} else if symlinkInvalid(folder, fs[i]) {
if debug {
l.Debugln("dropping update for unsupported symlink", fs[i])
}
@@ -566,7 +567,7 @@ func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []prot
}
fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1]
} else if symlinkInvalid(fs[i].IsSymlink()) {
} else if symlinkInvalid(folder, fs[i]) {
if debug {
l.Debugln("dropping update for unsupported symlink", fs[i])
}
@@ -1025,8 +1026,8 @@ func (m *Model) folderStatRef(folder string) *stats.FolderStatisticsReference {
return sr
}
func (m *Model) receivedFile(folder, filename string) {
m.folderStatRef(folder).ReceivedFile(filename)
func (m *Model) receivedFile(folder string, file protocol.FileInfo) {
m.folderStatRef(folder).ReceivedFile(file)
}
func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) {
@@ -1072,7 +1073,7 @@ func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, fold
maxLocalVer = f.LocalVersion
}
if ignores.Match(f.Name) || symlinkInvalid(f.IsSymlink()) {
if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
if debug {
l.Debugln("not sending update for ignored/unsupported symlink", f)
}
@@ -1123,8 +1124,10 @@ func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, fold
func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
m.fmut.RLock()
m.folderFiles[folder].Update(protocol.LocalDeviceID, fs)
files := m.folderFiles[folder]
m.fmut.RUnlock()
files.Update(protocol.LocalDeviceID, fs)
m.rvmut.Lock()
for _, f := range fs {
delete(m.reqValidationCache, folder+"/"+f.Name)
@@ -1132,8 +1135,9 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
m.rvmut.Unlock()
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
"items": len(fs),
"folder": folder,
"items": len(fs),
"version": files.LocalVersion(protocol.LocalDeviceID),
})
}
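
The updateLocals change above narrows the fmut critical section: the FileSet reference is looked up under the read lock, the lock is released, and only then does the potentially slow Update run. A standalone sketch of that pattern, with hypothetical stand-in types:

package main

import (
	"fmt"
	"sync"
)

type fileSet struct{ names []string }

func (fs *fileSet) Update(names []string) { fs.names = append(fs.names, names...) }

type model struct {
	fmut sync.RWMutex
	sets map[string]*fileSet
}

// updateLocals captures the shared reference under the read lock and does
// the slow work after releasing it, so Update can no longer hold up other
// fmut users for its whole duration.
func (m *model) updateLocals(folder string, names []string) {
	m.fmut.RLock()
	fs := m.sets[folder] // cheap map lookup under the lock
	m.fmut.RUnlock()
	fs.Update(names) // slow part runs lock-free
}

func main() {
	m := &model{sets: map[string]*fileSet{"default": {}}}
	m.updateLocals("default", []string{"a", "b"})
	fmt.Println(m.sets["default"].names)
}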
@@ -1360,7 +1364,7 @@ nextSub:
batch = batch[:0]
}
if ignores.Match(f.Name) || symlinkInvalid(f.IsSymlink()) {
if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
// File has been ignored or an unsupported symlink. Set invalid bit.
if debug {
l.Debugln("setting invalid bit on ignored", f)
@@ -1508,7 +1512,7 @@ func (m *Model) Override(folder string) {
fs.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
need := fi.(protocol.FileInfo)
if len(batch) == indexBatchSize {
fs.Update(protocol.LocalDeviceID, batch)
m.updateLocals(folder, batch)
batch = batch[:0]
}
@@ -1528,7 +1532,7 @@ func (m *Model) Override(folder string) {
return true
})
if len(batch) > 0 {
fs.Update(protocol.LocalDeviceID, batch)
m.updateLocals(folder, batch)
}
runner.setState(FolderIdle)
}
@@ -1774,11 +1778,21 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
return true
}
func symlinkInvalid(isLink bool) bool {
if !symlinks.Supported && isLink {
SymlinkWarning.Do(func() {
func symlinkInvalid(folder string, fi db.FileIntf) bool {
if !symlinks.Supported && fi.IsSymlink() && !fi.IsInvalid() && !fi.IsDeleted() {
symlinkWarning.Do(func() {
l.Warnln("Symlinks are disabled, unsupported or require Administrator privileges. This might cause your folder to appear out of sync.")
})
// Need to type switch for the concrete type to be able to access fields...
var name string
switch fi := fi.(type) {
case protocol.FileInfo:
name = fi.Name
case db.FileInfoTruncated:
name = fi.Name
}
l.Infoln("Unsupported symlink", name, "in folder", folder)
return true
}
return false
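
The refactored symlinkInvalid receives the db.FileIntf interface, which exposes no accessor for the file name, hence the type switch noted in the comment above. A standalone sketch of that interface-narrowing technique, using hypothetical stand-in types:

package main

import "fmt"

// fileIntf stands in for db.FileIntf: it exposes behavior but no Name field.
type fileIntf interface{ IsSymlink() bool }

type fileInfo struct{ Name string }          // stands in for protocol.FileInfo
type fileInfoTruncated struct{ Name string } // stands in for db.FileInfoTruncated

func (fileInfo) IsSymlink() bool          { return true }
func (fileInfoTruncated) IsSymlink() bool { return true }

// nameOf recovers the concrete type to read the field, as symlinkInvalid does.
func nameOf(fi fileIntf) string {
	switch fi := fi.(type) {
	case fileInfo:
		return fi.Name
	case fileInfoTruncated:
		return fi.Name
	}
	return ""
}

func main() {
	fmt.Println(nameOf(fileInfo{"a"}), nameOf(fileInfoTruncated{"b"}))
}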

View File

@@ -97,6 +97,7 @@ func TestRequest(t *testing.T) {
m.AddFolder(defaultFolderConfig)
m.StartFolderRO("default")
m.ScanFolder("default")
m.ServeBackground()
// Existing, shared file
bs, err := m.Request(device1, "default", "foo", 0, 6, nil, 0, nil)
@@ -189,6 +190,7 @@ func benchmarkIndex(b *testing.B, nfiles int) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.StartFolderRO("default")
m.ServeBackground()
files := genFiles(nfiles)
m.Index(device1, "default", files, 0, nil)
@@ -217,6 +219,7 @@ func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.StartFolderRO("default")
m.ServeBackground()
files := genFiles(nfiles)
ufiles := genFiles(nufiles)
@@ -277,6 +280,7 @@ func BenchmarkRequest(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ServeBackground()
m.ScanFolder("default")
const n = 1000
@@ -327,6 +331,7 @@ func TestDeviceRename(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.ServeBackground()
if cfg.Devices()[device1].Name != "" {
t.Errorf("Device already has a name")
}
@@ -396,6 +401,7 @@ func TestClusterConfig(t *testing.T) {
m := NewModel(config.Wrap("/tmp/test", cfg), protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(cfg.Folders[0])
m.AddFolder(cfg.Folders[1])
m.ServeBackground()
cm := m.clusterConfig(device2)
@@ -466,6 +472,7 @@ func TestIgnores(t *testing.T) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.StartFolderRO("default")
m.ServeBackground()
expected := []string{
".*",
@@ -539,6 +546,7 @@ func TestRefuseUnknownBits(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ServeBackground()
m.ScanFolder("default")
m.Index(device1, "default", []protocol.FileInfo{
@@ -596,9 +604,9 @@ func TestROScanRecovery(t *testing.T) {
os.RemoveAll(fcfg.RawPath)
m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
m.AddFolder(fcfg)
m.StartFolderRO("default")
m.ServeBackground()
waitFor := func(status string) error {
timeout := time.Now().Add(2 * time.Second)
@@ -680,9 +688,9 @@ func TestRWScanRecovery(t *testing.T) {
os.RemoveAll(fcfg.RawPath)
m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
m.AddFolder(fcfg)
m.StartFolderRW("default")
m.ServeBackground()
waitFor := func(status string) error {
timeout := time.Now().Add(2 * time.Second)
@@ -744,6 +752,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ServeBackground()
b := func(isfile bool, path ...string) protocol.FileInfo {
flags := uint32(protocol.FlagDirectory)
@@ -993,6 +1002,7 @@ func TestGlobalDirectorySelfFixing(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ServeBackground()
b := func(isfile bool, path ...string) protocol.FileInfo {
flags := uint32(protocol.FlagDirectory)
@@ -1166,6 +1176,8 @@ func benchmarkTree(b *testing.B, n1, n2 int) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ServeBackground()
m.ScanFolder("default")
files := genDeepFiles(n1, n2)

View File

@@ -54,6 +54,19 @@ var (
errNoDevice = errors.New("no available source device")
)
const (
dbUpdateHandleDir = iota
dbUpdateDeleteDir
dbUpdateHandleFile
dbUpdateDeleteFile
dbUpdateShortcutFile
)
type dbUpdateJob struct {
file protocol.FileInfo
jobType int
}
type rwFolder struct {
stateTracker
@@ -73,7 +86,7 @@ type rwFolder struct {
stop chan struct{}
queue *jobQueue
dbUpdates chan protocol.FileInfo
dbUpdates chan dbUpdateJob
scanTimer *time.Timer
pullTimer *time.Timer
delayScan chan time.Duration
@@ -326,7 +339,7 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
l.Debugln(p, "c", p.copiers, "p", p.pullers)
}
p.dbUpdates = make(chan protocol.FileInfo)
p.dbUpdates = make(chan dbUpdateJob)
updateWg.Add(1)
go func() {
// dbUpdaterRoutine finishes when p.dbUpdates is closed
@@ -528,7 +541,7 @@ nextFile:
// handleDir creates or updates the given directory
func (p *rwFolder) handleDir(file protocol.FileInfo) {
var err error
events.Default.Log(events.ItemStarted, map[string]interface{}{
events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder,
"item": file.Name,
"type": "dir",
@@ -583,7 +596,7 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
}
if err = osutil.InWritableDir(mkdir, realName); err == nil {
p.dbUpdates <- file
p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
} else {
l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
}
@@ -600,9 +613,9 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
// It's OK to change mode bits on stuff within non-writable directories.
if p.ignorePermissions(file) {
p.dbUpdates <- file
p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
} else if err := os.Chmod(realName, mode); err == nil {
p.dbUpdates <- file
p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
} else {
l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
}
@@ -611,7 +624,7 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
// deleteDir attempts to delete the given directory
func (p *rwFolder) deleteDir(file protocol.FileInfo) {
var err error
events.Default.Log(events.ItemStarted, map[string]interface{}{
events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder,
"item": file.Name,
"type": "dir",
@@ -642,13 +655,13 @@ func (p *rwFolder) deleteDir(file protocol.FileInfo) {
err = osutil.InWritableDir(osutil.Remove, realName)
if err == nil || os.IsNotExist(err) {
// It was removed or it doesn't exist to start with
p.dbUpdates <- file
} else if _, err := os.Lstat(realName); err != nil && !os.IsPermission(err) {
p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
} else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
// We get an error just looking at the directory, and it's not a
// permission problem. Let's assume the error is in fact some variant
// of "file does not exist" (possibly expressed as some parent being a
// file and not a directory etc) and that the delete is handled.
p.dbUpdates <- file
p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
} else {
l.Infof("Puller (folder %q, dir %q): delete: %v", p.folder, file.Name, err)
}
@@ -657,7 +670,7 @@ func (p *rwFolder) deleteDir(file protocol.FileInfo) {
// deleteFile attempts to delete the given file
func (p *rwFolder) deleteFile(file protocol.FileInfo) {
var err error
events.Default.Log(events.ItemStarted, map[string]interface{}{
events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder,
"item": file.Name,
"type": "file",
@@ -690,13 +703,13 @@ func (p *rwFolder) deleteFile(file protocol.FileInfo) {
if err == nil || os.IsNotExist(err) {
// It was removed or it doesn't exist to start with
p.dbUpdates <- file
} else if _, err := os.Lstat(realName); err != nil && !os.IsPermission(err) {
p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
} else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
// We get an error just looking at the file, and it's not a permission
// problem. Let's assume the error is in fact some variant of "file
// does not exist" (possibly expressed as some parent being a file and
// not a directory etc) and that the delete is handled.
p.dbUpdates <- file
p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
} else {
l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
}
@@ -706,13 +719,13 @@ func (p *rwFolder) deleteFile(file protocol.FileInfo) {
// and set the right attributes on it.
func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
var err error
events.Default.Log(events.ItemStarted, map[string]interface{}{
events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder,
"item": source.Name,
"type": "file",
"action": "delete",
})
events.Default.Log(events.ItemStarted, map[string]interface{}{
events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder,
"item": target.Name,
"type": "file",
@@ -756,13 +769,15 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
// of the source and the creation of the target. Fix-up the metadata,
// and update the local index of the target file.
p.dbUpdates <- source
p.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}
err = p.shortcutFile(target)
if err != nil {
l.Infof("Puller (folder %q, file %q): rename from %q metadata: %v", p.folder, target.Name, source.Name, err)
return
}
p.dbUpdates <- dbUpdateJob{target, dbUpdateHandleFile}
} else {
// We failed the rename so we have a source file that we still need to
// get rid of. Attempt to delete it instead so that we make *some*
@@ -774,7 +789,7 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
return
}
p.dbUpdates <- source
p.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}
}
}
@@ -815,13 +830,6 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
events.Default.Log(events.ItemStarted, map[string]interface{}{
"folder": p.folder,
"item": file.Name,
"type": "file",
"action": "update",
})
curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)
if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
@@ -831,23 +839,47 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
if debug {
l.Debugln(p, "taking shortcut on", file.Name)
}
events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder,
"item": file.Name,
"type": "file",
"action": "metadata",
})
p.queue.Done(file.Name)
var err error
if file.IsSymlink() {
err = p.shortcutSymlink(file)
} else {
err = p.shortcutFile(file)
}
events.Default.Log(events.ItemFinished, map[string]interface{}{
"folder": p.folder,
"item": file.Name,
"error": events.Error(err),
"type": "file",
"action": "update",
"action": "metadata",
})
if err != nil {
l.Infoln("Puller: shortcut:", err)
} else {
p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
}
return
}
events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder,
"item": file.Name,
"type": "file",
"action": "update",
})
scanner.PopulateOffsets(file.Blocks)
// Figure out the absolute filenames we need once and for all
@@ -944,16 +976,13 @@ func (p *rwFolder) shortcutFile(file protocol.FileInfo) error {
file.Version = file.Version.Merge(cur.Version)
}
p.dbUpdates <- file
return nil
}
// shortcutSymlink changes the symlinks type if necessary.
func (p *rwFolder) shortcutSymlink(file protocol.FileInfo) (err error) {
err = symlinks.ChangeType(filepath.Join(p.dir, file.Name), file.Flags)
if err == nil {
p.dbUpdates <- file
} else {
if err != nil {
l.Infof("Puller (folder %q, file %q): symlink shortcut: %v", p.folder, file.Name, err)
}
return
@@ -1173,7 +1202,7 @@ func (p *rwFolder) performFinish(state *sharedPullerState) error {
}
// Record the updated file in the index
p.dbUpdates <- state.file
p.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}
return nil
}
@@ -1229,39 +1258,63 @@ func (p *rwFolder) dbUpdaterRoutine() {
maxBatchTime = 2 * time.Second
)
batch := make([]protocol.FileInfo, 0, maxBatchSize)
batch := make([]dbUpdateJob, 0, maxBatchSize)
files := make([]protocol.FileInfo, 0, maxBatchSize)
tick := time.NewTicker(maxBatchTime)
defer tick.Stop()
handleBatch := func() {
found := false
var lastFile protocol.FileInfo
for _, job := range batch {
files = append(files, job.file)
if job.file.IsInvalid() || (job.file.IsDirectory() && !job.file.IsSymlink()) {
continue
}
if job.jobType&(dbUpdateHandleFile|dbUpdateDeleteFile) == 0 {
continue
}
found = true
lastFile = job.file
}
p.model.updateLocals(p.folder, files)
if found {
p.model.receivedFile(p.folder, lastFile)
}
batch = batch[:0]
files = files[:0]
}
loop:
for {
select {
case file, ok := <-p.dbUpdates:
case job, ok := <-p.dbUpdates:
if !ok {
break loop
}
file.LocalVersion = 0
batch = append(batch, file)
job.file.LocalVersion = 0
batch = append(batch, job)
if len(batch) == maxBatchSize {
p.model.updateLocals(p.folder, batch)
p.model.receivedFile(p.folder, batch[len(batch)-1].Name)
batch = batch[:0]
handleBatch()
}
case <-tick.C:
if len(batch) > 0 {
p.model.updateLocals(p.folder, batch)
p.model.receivedFile(p.folder, batch[len(batch)-1].Name)
batch = batch[:0]
handleBatch()
}
}
}
if len(batch) > 0 {
p.model.updateLocals(p.folder, batch)
p.model.receivedFile(p.folder, batch[len(batch)-1].Name)
handleBatch()
}
}
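
The rewritten dbUpdaterRoutine flushes its accumulated batch when it reaches maxBatchSize, when the two-second ticker fires, or when the channel closes with items still pending. A standalone sketch of that batch-or-timeout pattern:

package main

import (
	"fmt"
	"time"
)

// batcher mirrors the flush policy of dbUpdaterRoutine above: accumulate
// jobs from a channel and flush either when the batch is full, when the
// ticker fires, or when the channel closes with items left over.
func batcher(jobs <-chan int, maxBatch int, maxWait time.Duration, flush func([]int)) {
	batch := make([]int, 0, maxBatch)
	tick := time.NewTicker(maxWait)
	defer tick.Stop()
	for {
		select {
		case j, ok := <-jobs:
			if !ok {
				if len(batch) > 0 {
					flush(batch)
				}
				return
			}
			batch = append(batch, j)
			if len(batch) == maxBatch {
				flush(batch)
				batch = batch[:0]
			}
		case <-tick.C:
			if len(batch) > 0 {
				flush(batch)
				batch = batch[:0]
			}
		}
	}
}

func main() {
	jobs := make(chan int)
	go func() {
		for i := 0; i < 5; i++ {
			jobs <- i
		}
		close(jobs)
	}()
	batcher(jobs, 2, 2*time.Second, func(b []int) { fmt.Println("flush", b) })
}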

internal/rc/debug.go (Normal file, 19 lines)
View File

@@ -0,0 +1,19 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package rc
import (
"os"
"strings"
"github.com/calmh/logger"
)
var (
debug = strings.Contains(os.Getenv("STTRACE"), "rc") || os.Getenv("STTRACE") == "all"
l = logger.DefaultLogger
)

internal/rc/rc.go (Normal file, 496 lines)
View File

@@ -0,0 +1,496 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// Package rc provides remote control of a Syncthing process via the REST API.
package rc
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
stdsync "sync"
"time"
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/sync"
)
// We set the API key via the STGUIAPIKEY variable when we launch the binary,
// to ensure that we have API access regardless of authentication settings.
const APIKey = "592A47BC-A7DF-4C2F-89E0-A80B3E5094EE"
type Process struct {
// Set at initialization
addr string
// Set by eventLoop()
eventMut sync.Mutex
id protocol.DeviceID
folders []string
startComplete bool
startCompleteCond *stdsync.Cond
stop bool
localVersion map[string]map[string]int64 // Folder ID => Device ID => LocalVersion
done map[string]bool // Folder ID => 100%
cmd *exec.Cmd
logfd *os.File
}
// NewProcess returns a new Process talking to Syncthing at the specified address.
// Example: NewProcess("127.0.0.1:8082")
func NewProcess(addr string) *Process {
p := &Process{
addr: addr,
localVersion: make(map[string]map[string]int64),
done: make(map[string]bool),
eventMut: sync.NewMutex(),
}
p.startCompleteCond = stdsync.NewCond(p.eventMut)
return p
}
// LogTo creates the specified log file and ensures that stdout and stderr
// from the Start()ed process are redirected there. Must be called before
// Start().
func (p *Process) LogTo(filename string) error {
if p.cmd != nil {
panic("logfd cannot be set with an existing cmd")
}
if p.logfd != nil {
p.logfd.Close()
}
fd, err := os.Create(filename)
if err != nil {
return err
}
p.logfd = fd
return nil
}
// Start runs the specified Syncthing binary with the given arguments.
// Syncthing should be configured to provide an API on the address given to
// NewProcess. Event processing is started.
func (p *Process) Start(bin string, args ...string) error {
cmd := exec.Command(bin, args...)
if p.logfd != nil {
cmd.Stdout = p.logfd
cmd.Stderr = p.logfd
}
cmd.Env = append(os.Environ(), "STNORESTART=1", "STGUIAPIKEY="+APIKey)
err := cmd.Start()
if err != nil {
return err
}
p.cmd = cmd
go p.eventLoop()
return nil
}
// AwaitStartup waits for the Syncthing process to start and perform initial
// scans of all folders.
func (p *Process) AwaitStartup() {
p.eventMut.Lock()
for !p.startComplete {
p.startCompleteCond.Wait()
}
p.eventMut.Unlock()
return
}
// Stop stops the running Syncthing process. If the process was logging to a
// local file (set by LogTo), the log file will be opened and checked for
// panics and data races. The presence of either will be signalled in the form
// of a returned error.
func (p *Process) Stop() (*os.ProcessState, error) {
p.eventMut.Lock()
if p.stop {
p.eventMut.Unlock()
return p.cmd.ProcessState, nil
}
p.stop = true
p.eventMut.Unlock()
if err := p.cmd.Process.Signal(os.Kill); err != nil {
return nil, err
}
p.cmd.Wait()
var err error
if p.logfd != nil {
err = p.checkForProblems(p.logfd)
}
return p.cmd.ProcessState, err
}
// Get performs an HTTP GET and returns the bytes and/or an error. Any non-200
// return code is returned as an error.
func (p *Process) Get(path string) ([]byte, error) {
client := &http.Client{
Timeout: 30 * time.Second,
Transport: &http.Transport{
DisableKeepAlives: true,
},
}
url := fmt.Sprintf("http://%s%s", p.addr, path)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Add("X-API-Key", APIKey)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
return p.readResponse(resp)
}
// Post performs an HTTP POST and returns the bytes and/or an error. Any
// non-200 return code is returned as an error.
func (p *Process) Post(path string, data io.Reader) ([]byte, error) {
client := &http.Client{
Timeout: 600 * time.Second,
Transport: &http.Transport{
DisableKeepAlives: true,
},
}
url := fmt.Sprintf("http://%s%s", p.addr, path)
req, err := http.NewRequest("POST", url, data)
if err != nil {
return nil, err
}
req.Header.Add("X-API-Key", APIKey)
req.Header.Add("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return nil, err
}
return p.readResponse(resp)
}
type Event struct {
ID int
Time time.Time
Type string
Data interface{}
}
func (p *Process) Events(since int) ([]Event, error) {
bs, err := p.Get(fmt.Sprintf("/rest/events?since=%d", since))
if err != nil {
return nil, err
}
var evs []Event
dec := json.NewDecoder(bytes.NewReader(bs))
dec.UseNumber()
err = dec.Decode(&evs)
if err != nil {
return nil, fmt.Errorf("Events: %s in %q", err, bs)
}
return evs, err
}
func (p *Process) Rescan(folder string) error {
_, err := p.Post("/rest/db/scan?folder="+folder, nil)
return err
}
func (p *Process) RescanDelay(folder string, delaySeconds int) error {
_, err := p.Post(fmt.Sprintf("/rest/db/scan?folder=%s&next=%d", folder, delaySeconds), nil)
return err
}
func InSync(folder string, ps ...*Process) bool {
for _, p := range ps {
p.eventMut.Lock()
}
defer func() {
for _, p := range ps {
p.eventMut.Unlock()
}
}()
for i := range ps {
// If our latest FolderSummary didn't report 100%, then we are not done.
if !ps[i].done[folder] {
return false
}
// Check LocalVersion for each device. The local version seen by remote
// devices should be the same as what the source device has locally, or
// the index hasn't been fully sent yet.
sourceID := ps[i].id.String()
sourceVersion := ps[i].localVersion[folder][sourceID]
for j := range ps {
if i != j {
remoteVersion := ps[j].localVersion[folder][sourceID]
if remoteVersion != sourceVersion {
return false
}
}
}
}
return true
}
func AwaitSync(folder string, ps ...*Process) {
for {
time.Sleep(250 * time.Millisecond)
if InSync(folder, ps...) {
return
}
}
}
type Model struct {
GlobalBytes int
GlobalDeleted int
GlobalFiles int
InSyncBytes int
InSyncFiles int
Invalid string
LocalBytes int
LocalDeleted int
LocalFiles int
NeedBytes int
NeedFiles int
State string
StateChanged time.Time
Version int
}
func (p *Process) Model(folder string) (Model, error) {
bs, err := p.Get("/rest/db/status?folder=" + folder)
if err != nil {
return Model{}, err
}
var res Model
if err := json.Unmarshal(bs, &res); err != nil {
return Model{}, err
}
if debug {
l.Debugf("%+v", res)
}
return res, nil
}
func (p *Process) readResponse(resp *http.Response) ([]byte, error) {
bs, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return bs, err
}
if resp.StatusCode != 200 {
return bs, fmt.Errorf("%s", resp.Status)
}
return bs, nil
}
func (p *Process) checkForProblems(logfd *os.File) error {
fd, err := os.Open(logfd.Name())
if err != nil {
return err
}
defer fd.Close()
raceConditionStart := []byte("WARNING: DATA RACE")
raceConditionSep := []byte("==================")
panicConditionStart := []byte("panic:")
panicConditionSep := []byte(p.id.String()[:5])
sc := bufio.NewScanner(fd)
race := false
_panic := false
for sc.Scan() {
line := sc.Bytes()
if race || _panic {
if bytes.Contains(line, panicConditionSep) {
_panic = false
continue
}
fmt.Printf("%s\n", line)
if bytes.Contains(line, raceConditionSep) {
race = false
}
} else if bytes.Contains(line, raceConditionStart) {
fmt.Printf("%s\n", raceConditionSep)
fmt.Printf("%s\n", raceConditionStart)
race = true
if err == nil {
err = errors.New("Race condition detected")
}
} else if bytes.Contains(line, panicConditionStart) {
_panic = true
if err == nil {
err = errors.New("Panic detected")
}
}
}
return err
}
func (p *Process) eventLoop() {
since := 0
notScanned := make(map[string]struct{})
start := time.Now()
for {
p.eventMut.Lock()
if p.stop {
p.eventMut.Unlock()
return
}
p.eventMut.Unlock()
time.Sleep(250 * time.Millisecond)
events, err := p.Events(since)
if err != nil {
if time.Since(start) < 5*time.Second {
// The API has probably not started yet; let's give it some time.
continue
}
// If we're stopping, no need to print the error.
p.eventMut.Lock()
if p.stop {
p.eventMut.Unlock()
return
}
p.eventMut.Unlock()
log.Println("eventLoop: events:", err)
continue
}
since = events[len(events)-1].ID
for _, ev := range events {
switch ev.Type {
case "Starting":
// The Starting event tells us where the configuration is. Load
// it and populate our list of folders.
data := ev.Data.(map[string]interface{})
id, err := protocol.DeviceIDFromString(data["myID"].(string))
if err != nil {
log.Println("eventLoop: DeviceIdFromString:", err)
continue
}
p.id = id
home := data["home"].(string)
w, err := config.Load(filepath.Join(home, "config.xml"), protocol.LocalDeviceID)
if err != nil {
log.Println("eventLoop: Starting:", err)
continue
}
for id := range w.Folders() {
p.eventMut.Lock()
p.folders = append(p.folders, id)
p.eventMut.Unlock()
notScanned[id] = struct{}{}
}
case "StateChanged":
// When a folder changes to idle, we tick it off by removing
// it from notScanned.
if !p.startComplete {
data := ev.Data.(map[string]interface{})
to := data["to"].(string)
if to == "idle" {
folder := data["folder"].(string)
delete(notScanned, folder)
if len(notScanned) == 0 {
p.eventMut.Lock()
p.startComplete = true
p.startCompleteCond.Broadcast()
p.eventMut.Unlock()
}
}
}
case "LocalIndexUpdated":
data := ev.Data.(map[string]interface{})
folder := data["folder"].(string)
version, _ := data["version"].(json.Number).Int64()
p.eventMut.Lock()
m := p.localVersion[folder]
if m == nil {
m = make(map[string]int64)
}
m[p.id.String()] = version
p.localVersion[folder] = m
p.done[folder] = false
if debug {
l.Debugf("LocalIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m)
}
p.eventMut.Unlock()
case "RemoteIndexUpdated":
data := ev.Data.(map[string]interface{})
device := data["device"].(string)
folder := data["folder"].(string)
version, _ := data["version"].(json.Number).Int64()
p.eventMut.Lock()
m := p.localVersion[folder]
if m == nil {
m = make(map[string]int64)
}
m[device] = version
p.localVersion[folder] = m
p.done[folder] = false
if debug {
l.Debugf("RemoteIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m)
}
p.eventMut.Unlock()
case "FolderSummary":
data := ev.Data.(map[string]interface{})
folder := data["folder"].(string)
summary := data["summary"].(map[string]interface{})
need, _ := summary["needBytes"].(json.Number).Int64()
done := need == 0
p.eventMut.Lock()
p.done[folder] = done
if debug {
l.Debugf("Foldersummary %v %v\n\t%+v", p.id, folder, p.done)
}
p.eventMut.Unlock()
}
}
}
}
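
Taken together, the package above gives integration tests a small remote-control surface. A minimal sketch of driving one instance with it, assuming a syncthing binary on the path and a prepared home directory h1 (both hypothetical here):

package main

import (
	"log"

	"github.com/syncthing/syncthing/internal/rc"
)

func main() {
	p := rc.NewProcess("127.0.0.1:8081")
	if err := p.LogTo("h1/stdout.txt"); err != nil {
		log.Fatal(err)
	}
	if err := p.Start("syncthing", "-home", "h1"); err != nil {
		log.Fatal(err)
	}
	p.AwaitStartup() // blocks until all folders have completed an initial scan

	if err := p.Rescan("default"); err != nil {
		log.Fatal(err)
	}
	// With a single process this waits for a FolderSummary reporting
	// needBytes == 0.
	rc.AwaitSync("default", p)

	// Stop also scans the log file for panics and data races, returning
	// an error if either is found.
	if _, err := p.Stop(); err != nil {
		log.Fatal(err)
	}
}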

View File

@@ -9,6 +9,8 @@ package stats
import (
"time"
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/db"
"github.com/syndtr/goleveldb/leveldb"
)
@@ -25,6 +27,7 @@ type FolderStatisticsReference struct {
type LastFile struct {
At time.Time `json:"at"`
Filename string `json:"filename"`
Deleted bool `json:"deleted"`
}
func NewFolderStatisticsReference(ldb *leveldb.DB, folder string) *FolderStatisticsReference {
@@ -44,18 +47,21 @@ func (s *FolderStatisticsReference) GetLastFile() LastFile {
if !ok {
return LastFile{}
}
deleted, ok := s.ns.Bool("lastFileDeleted")
return LastFile{
At: at,
Filename: file,
Deleted: deleted,
}
}
func (s *FolderStatisticsReference) ReceivedFile(filename string) {
func (s *FolderStatisticsReference) ReceivedFile(file protocol.FileInfo) {
if debug {
l.Debugln("stats.FolderStatisticsReference.ReceivedFile:", s.folder, filename)
l.Debugln("stats.FolderStatisticsReference.ReceivedFile:", s.folder, file)
}
s.ns.PutTime("lastFileAt", time.Now())
s.ns.PutString("lastFileName", filename)
s.ns.PutString("lastFileName", file.Name)
s.ns.PutBool("lastFileDeleted", file.IsDeleted())
}
func (s *FolderStatisticsReference) GetStatistics() FolderStatistics {

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-CONFIG" "5" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-CONFIG" "5" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-config \- Syncthing Configuration
.
@@ -30,21 +30,13 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
.sp
\fBWARNING:\fP
.INDENT 0.0
.INDENT 3.5
This page may be outdated and requires review.
Attributes have been added that are not documented.
.UNINDENT
.UNINDENT
.SH SYNOPSIS
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
$HOME/.config/syncthing/config.xml
$HOME/.config/syncthing
$HOME/Library/Application Support/Syncthing
%AppData%/Syncthing
%localappdata%/Syncthing
@@ -57,31 +49,27 @@ $HOME/Library/Application Support/Syncthing
Syncthing uses a single directory to store configuration, crypto keys
and index caches. The location defaults to \fB$HOME/.config/syncthing\fP
(Unix\-like), \fB$HOME/Library/Application Support/Syncthing\fP (Mac),
\fB%AppData%/Syncthing\fP (Windows XP) or \fB%localappdata%/Syncthing\fP
(Windows 7/8). It can be changed at runtime using the \fB\-home\fP flag. In this
\fB%AppData%/Syncthing\fP (Windows XP) or \fB%LocalAppData%/Syncthing\fP
(Windows 7+). It can be changed at runtime using the \fB\-home\fP flag. In this
directory the following files are located:
.INDENT 0.0
.TP
.B cert.pem
The device\(aqs RSA public key, named "cert" for legacy reasons.
.TP
.B key.pem
The device\(aqs RSA private key. This needs to be protected.
.TP
.B config.xml
.B \fBconfig.xml\fP
The configuration file, in XML format.
.TP
.B https\-cert.pem
The certificate for HTTPS GUI connections.
.B \fBcert.pem\fP, \fBkey.pem\fP
The device\(aqs RSA public and private key. These form the basis for the
device ID. The key must be kept private.
.TP
.B https\-key.pem
The key for HTTPS GUI connections.
.B \fBhttps\-cert.pem\fP, \fBhttps\-key.pem\fP
The certificate and key for HTTPS GUI connections. These may be replaced
with a custom certificate for HTTPS as desired.
.TP
.B index/
.B \fBindex\-\fI*\fP\&.db\fP
A directory holding the database with metadata and hashes of the files
currently on disk and available from peers.
.TP
.B csrftokens.txt
.B \fBcsrftokens.txt\fP
A list of recently issued CSRF tokens (for protection against browser cross
site request forgery).
.UNINDENT
The following shows the default configuration file:
.sp
.nf
.ft C
<configuration version="2">
<folder id="default" directory="/Users/jb/Sync" ro="false" ignorePerms="false">
<device id="GXN5ECCWTA2B7EB5FXYL5OWGOADX5EF5VNJAQSIBAY6XHJ24BNOA"></device>
<configuration version="10">
<folder id="default" path="/Users/jb/Sync" ro="false" rescanIntervalS="60" ignorePerms="false" autoNormalize="false">
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU"></device>
<versioning></versioning>
<copiers>0</copiers>
<pullers>0</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
<device id="GXN5ECCWTA2B7EB5FXYL5OWGOADX5EF5VNJAQSIBAY6XHJ24BNOA" name="jborg\-mbp">
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU" name="syno" compression="metadata" introducer="false">
<address>dynamic</address>
</device>
<gui enabled="true" tls="true">
<address>127.0.0.1:54096</address>
<user>jb</user>
<password>$2a$10$EKaTIcpz2...</password>
<apikey>O80CDOJ9LVUVCMHFK2OJDO4T882735</apikey>
<gui enabled="true" tls="false">
<address>127.0.0.1:8384</address>
<apikey>l7jSbCqPD95JYZ0g8vi4ZLAMg3ulnN1b</apikey>
</gui>
<options>
<listenAddress>:54097</listenAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<listenAddress>0.0.0.0:56847</listenAddress>
<globalAnnounceServer>udp4://announce.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceServer>udp6://announce\-v6.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceEnabled>true</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<parallelRequests>16</parallelRequests>
<localAnnouncePort>21025</localAnnouncePort>
<localAnnounceMCAddr>[ff32::5222]:21026</localAnnounceMCAddr>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>60</rescanIntervalS>
<maxRecvKbps>0</maxRecvKbps>
<reconnectionIntervalS>60</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>true</startBrowser>
<upnpEnabled>true</upnpEnabled>
<upnpLeaseMinutes>60</upnpLeaseMinutes>
<upnpRenewalMinutes>30</upnpRenewalMinutes>
<upnpTimeoutSeconds>10</upnpTimeoutSeconds>
<urAccepted>0</urAccepted>
<urUniqueID></urUniqueID>
<restartOnWakeup>true</restartOnWakeup>
<autoUpgradeIntervalH>12</autoUpgradeIntervalH>
<keepTemporariesH>24</keepTemporariesH>
<cacheIgnoredFiles>true</cacheIgnoredFiles>
<progressUpdateIntervalS>5</progressUpdateIntervalS>
<symlinksEnabled>true</symlinksEnabled>
<limitBandwidthInLan>false</limitBandwidthInLan>
<databaseBlockCacheMiB>0</databaseBlockCacheMiB>
</options>
</configuration>
.ft P
.fi
.UNINDENT
.UNINDENT
.SS configuration
.SH CONFIGURATION ELEMENT
.sp
This is the root element.
.INDENT 0.0
.TP
.B version
The config version. The current version is \fB2\fP\&.
The config version. Increments whenever a change is made that requires migration from previous formats.
.UNINDENT
.SS folder
.SH FOLDER ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
One or more \fBfolder\fP elements must be present in the file. Each
element describes one folder.
.nf
.ft C
<folder id="default" path="/Users/jb/Sync" ro="false" rescanIntervalS="60" ignorePerms="false" autoNormalize="false">
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU"></device>
<versioning></versioning>
<copiers>0</copiers>
<pullers>0</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
Within the \fBfolder\fP element one or more \fBdevice\fP element should be
present. These must contain the \fBid\fP attribute and nothing else.
Mentioned devices are those that will be sharing the folder in question.
Each mentioned device must have a separate \fBdevice\fP element later in
the file. It is customary that the local device ID is included in all
repositories. Syncthing will currently add this automatically if it is
not present in the configuration file.
One or more \fBfolder\fP elements must be present in the file. Each element
describes one folder. The following attributes may be set on the \fBfolder\fP
element:
.INDENT 0.0
.TP
.B id
The folder ID, must be unique. (mandatory)
.TP
.B directory
The directory where the folder is stored on this
.B path
The path to the directory where the folder is stored on this
device; not sent to other devices. (mandatory)
.TP
.B ro
True if the folder is read only (will not be modified by Syncthing) on this
device. (optional, defaults to \fBfalse\fP)
True if the folder is read only (Master mode; will not be modified by
Syncthing) on this device.
.TP
.B rescanIntervalS
The rescan interval, in seconds.
.TP
.B ignorePerms
True if the folder should \fI\%ignore permissions\fP <\fBhttp://forum.syncthing.net/t/263\fP>\&.
True if the folder should ignore permissions.
.TP
.B autoNormalize
Automatically correct UTF\-8 normalization errors found in file names.
.UNINDENT
.SS device
.sp
One or more \fBdevice\fP elements must be present in the file. Each
element describes a device participating in the cluster. It is customary
to include a \fBdevice\fP element for the local device; Syncthing will
currently add one if it is not present.
The following child elements may exist:
.INDENT 0.0
.TP
.B device
These must have the \fBid\fP attribute and nothing else. Mentioned devices
are those that will be sharing the folder in question. Each mentioned
device must have a separate \fBdevice\fP element later in the file. It is
customary that the local device ID is included in all repositories.
Syncthing will currently add this automatically if it is not present in
the configuration file.
.TP
.B versioning
Specifies a versioning configuration.
.sp
\fBNOTE:\fP
.INDENT 7.0
.INDENT 3.5
Needs explanation.
.UNINDENT
.UNINDENT
.TP
.B copiers, pullers, hashers
The number of copier, puller and hasher routines to use, or zero for the
system determined optimum. These are low level performance options for
advanced users only; do not change unless requested to or you\(aqve actually
read and understood the code yourself. :)
.TP
.B order
The order in which needed files should be pulled from the cluster. The possible values are:
.INDENT 7.0
.TP
.B random
Pull files in random order. This optimizes for balancing resources among the devices in a cluster.
.TP
.B alphabetic
Pull files ordered by file name alphabetically.
.TP
.B smallestFirst, largestFirst
Pull files ordered by file size; smallest and largest first respectively.
.TP
.B oldestFirst, newestFirst
Pull files ordered by modification time; oldest and newest first respectively.
.UNINDENT
.UNINDENT
.SH DEVICE ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU" name="syno" compression="metadata" introducer="false">
<address>dynamic</address>
</device>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
One or more \fBdevice\fP elements must be present in the file. Each element
describes a device participating in the cluster. It is customary to include a
\fBdevice\fP element for the local device; Syncthing will currently add one if
it is not present. The following attributes may be set on the \fBdevice\fP
element:
.INDENT 0.0
.TP
.B id
@@ -176,31 +254,95 @@ spaces or dashes. (mandatory)
.B name
A friendly name for the device. (optional)
.TP
.B address
The address section is only valid inside of \fBdevice\fP elements. It contains
a single address, on one of the following forms:
.B compression
Whether to use protocol compression when sending messages to this device.
The possible values are:
.INDENT 7.0
.IP \(bu 2
IPv4 addresses, IPv6 addresses within brackets, or DNS names, all
optionally followed by a port number.
.IP \(bu 2
\fBdynamic\fP: The address will be resolved using discovery.
.TP
.B metadata
Compress metadata packets, such as index information. Metadata is
usually very compression friendly so this is a good default.
.TP
.B always
Compress all packets, including file data. This is recommended if the
folder\(aqs contents are mainly compressible data such as documents or
text files.
.TP
.B never
Disable all compression.
.UNINDENT
.TP
.B introducer
Set to true if this device should be trusted as an introducer, i.e. we
should copy its list of devices per folder when connecting.
.UNINDENT
.SS gui
.sp
There must be \fIexactly one\fP \fBgui\fP element.
In addition, one or more \fBaddress\fP child elements must be present. Each
contains an address to use when attempting to connect to this device and will
be tried in order. Accepted formats are:
.INDENT 0.0
.TP
.B IPv4 address (\fB192.0.2.42\fP)
The default port (22000) is used.
.TP
.B IPv4 address and port (\fB192.0.2.42:12345\fP)
The address and port are used as given.
.TP
.B IPv6 address (\fB2001:db8::23:42\fP)
The default port (22000) is used.
.TP
.B IPv6 address and port (\fB[2001:db8::23:42]:12345\fP)
The address and port are used as given. The address must be enclosed in square brackets.
.TP
.B \fBdynamic\fP
The word \fBdynamic\fP means to use local and global discovery to find the device.
.UNINDENT
.SH GUI ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
<gui enabled="true" tls="false">
<address>127.0.0.1:8384</address>
<apikey>l7jSbCqPD95JYZ0g8vi4ZLAMg3ulnN1b</apikey>
</gui>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
There must be exactly one \fBgui\fP element. The GUI configuration is also used
by the REST API and the Event API\&. The following attributes may
be set on the \fBgui\fP element:
.INDENT 0.0
.TP
.B enabled
\fBtrue\fP/\fBfalse\fP
If not \fBtrue\fP, the GUI and API will not be started.
.TP
.B tls
\fBtrue\fP/\fBfalse\fP: If true then the GUI will use HTTPS.
If set to \fBtrue\fP, TLS (HTTPS) will be enforced. Non\-HTTPS requests will
be redirected to HTTPS. When this is set to \fBfalse\fP, TLS connections are
still possible but it is not mandatory.
.UNINDENT
.sp
The following child elements may be present:
.INDENT 0.0
.TP
.B address
One or more address elements must be present, containing an \fBip:port\fP
listen address.
Set the listen addresses. One or more address elements must be present.
Allowed address formats are:
.INDENT 7.0
.TP
.B IPv4 address and port (\fB127.0.0.1:8384\fP)
The address and port are used as given.
.TP
.B IPv6 address and port (\fB[::1]:8384\fP)
The address and port are used as given. The address must be enclosed in square brackets.
.TP
.B Wildcard and port (\fB0.0.0.0:12345\fP, \fB[::]:12345\fP, \fB:12345\fP)
These are equivalent and will result in Syncthing listening on all interfaces, over both IPv4 and IPv6.
.UNINDENT
.TP
.B username
Set to require authentication.
@@ -211,56 +353,141 @@ Contains the bcrypt hash of the real password.
.B apikey
If set, this is the API key that enables usage of the REST interface.
.UNINDENT
.SH OPTIONS ELEMENT
.INDENT 0.0
.INDENT 3.5
.sp
Additionally, there must be \fIexactly one\fP \fBoptions\fP element. It contains the
following configuration settings as children:
.nf
.ft C
<options>
<listenAddress>0.0.0.0:56847</listenAddress>
<globalAnnounceServer>udp4://announce.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceServer>udp6://announce\-v6.syncthing.net:22026</globalAnnounceServer>
<globalAnnounceEnabled>true</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<localAnnouncePort>21025</localAnnouncePort>
<localAnnounceMCAddr>[ff32::5222]:21026</localAnnounceMCAddr>
<maxSendKbps>0</maxSendKbps>
<maxRecvKbps>0</maxRecvKbps>
<reconnectionIntervalS>60</reconnectionIntervalS>
<startBrowser>true</startBrowser>
<upnpEnabled>true</upnpEnabled>
<upnpLeaseMinutes>60</upnpLeaseMinutes>
<upnpRenewalMinutes>30</upnpRenewalMinutes>
<upnpTimeoutSeconds>10</upnpTimeoutSeconds>
<urAccepted>0</urAccepted>
<urUniqueID></urUniqueID>
<restartOnWakeup>true</restartOnWakeup>
<autoUpgradeIntervalH>12</autoUpgradeIntervalH>
<keepTemporariesH>24</keepTemporariesH>
<cacheIgnoredFiles>true</cacheIgnoredFiles>
<progressUpdateIntervalS>5</progressUpdateIntervalS>
<symlinksEnabled>true</symlinksEnabled>
<limitBandwidthInLan>false</limitBandwidthInLan>
<databaseBlockCacheMiB>0</databaseBlockCacheMiB>
</options>
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
The \fBoptions\fP element contains all other global configuration options.
.INDENT 0.0
.TP
.B listenAddress
\fBhost:port\fP or \fB:port\fP string denoting an address to listen for BEP
connections. More than one \fBlistenAddress\fP may be given.
(default: \fB0.0.0.0:22000\fP)
The listen address for incoming sync connections. See the \fBaddress\fP
element under the \fI\%GUI Element\fP for allowed syntax.
.TP
.B globalAnnounceServer
\fBhost:port\fP string denoting where a global announce server may be
reached. (default: \fBannounce.syncthing.net:22025\fP)
A URI to a global announce (discovery) server. Allowed protocol prefixes
are \fBudp4://\fP (UDP over IPv4), \fBudp6://\fP (UDP over IPv6) and
\fBudp://\fP (UDP over any available protocol).
.TP
.B globalAnnounceEnabled
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to announce this device to the global announce (discovery) server,
and also use it to look up other devices.
.TP
.B localAnnounceEnabled
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to send announcements on the local LAN, and to use such
announcements to find other devices.
.TP
.B parallelRequests
The maximum number of outstanding block requests to have against any given
peer. (default: \fB16\fP)
.B localAnnouncePort
The port to listen on and send IPv4 broadcast announcements to.
.TP
.B localAnnounceMCAddr
The group address and port to join and send IPv6 multicast announcements on.
.TP
.B maxSendKbps
Rate limit
Outgoing data rate limit, in kibibits per second.
.TP
.B rescanIntervalS
The number of seconds to wait between each scan for modification of the
local repositories. A value of \fB0\fP disables the scanner. (default: \fB60\fP)
.B maxRecvKbps
Incoming data rate limit, in kibibits per second.
.TP
.B reconnectionIntervalS
The number of seconds to wait between each attempt to connect to currently
unconnected devices. (default: \fB60\fP)
.TP
.B maxChangeKbps
The maximum rate of change allowed for a single file. When this rate is
exceeded, further changes to the file are not announced, until the rate is
reduced below the limit. (default: \fB10000\fP)
unconnected devices.
.TP
.B startBrowser
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to attempt to start a browser to show the GUI when Syncthing starts.
.TP
.B upnpEnabled
\fBtrue\fP/\fBfalse\fP (default: \fBtrue\fP)
Whether to attempt to perform a UPnP port mapping for incoming sync connections.
.TP
.B upnpLeaseMinutes
Request a lease for this many minutes; zero to request a permanent lease.
.TP
.B upnpRenewalMinutes
Attempt to renew the lease after this many minutes.
.TP
.B upnpTimeoutSeconds
When scanning for UPnP devices, wait this long for responses.
.TP
.B urAccepted
Whether the user has accepted to submit anonymous usage data. The default,
\fB0\fP, means the user has not made a choice, and Syncthing will ask at some
point in the future. \fB\-1\fP means no, \fB1\fP means yes.
point in the future. \fB\-1\fP means no, a number above zero means that that
version of usage reporting has been accepted.
.TP
.B urUniqueID
The unique ID sent together with the usage report. Generated when usage
reporting is enabled.
.TP
.B restartOnWakeup
Whether to perform a restart of Syncthing when it is detected that we are
waking from sleep mode (e.g. a folded\-up laptop).
.TP
.B autoUpgradeIntervalH
Check for a newer version after this many hours. Set to zero to disable
automatic upgrades.
.TP
.B keepTemporariesH
Keep temporary failed transfers for this many hours. While the temporaries
are kept, the data they contain need not be transferred again.
.TP
.B cacheIgnoredFiles
Whether to cache the results of ignore pattern evaluation. Performance at
the price of memory.
.TP
.B progressUpdateIntervalS
.
\fBNOTE:\fP
.INDENT 7.0
.INDENT 3.5
Requires explanation.
.UNINDENT
.UNINDENT
.TP
.B symlinksEnabled
Whether to sync symlinks, if supported by the system.
.TP
.B limitBandwidthInLan
Whether to apply bandwidth limits to devices in the same broadcast domain
as the local device.
.TP
.B databaseBlockCacheMiB
Override the automatically calculated database block cache size. Don\(aqt,
unless you\(aqre very short on memory, in which case you want to set this to
\fB8\fP\&.
.UNINDENT
.SH AUTHOR
The Syncthing Authors
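
The configuration file documented above can also be read programmatically. A minimal sketch using the internal/config wrapper the same way the rc package in this changeset does, with a hypothetical path:

package main

import (
	"fmt"
	"log"

	"github.com/syncthing/protocol"
	"github.com/syncthing/syncthing/internal/config"
)

func main() {
	// Load the configuration as the local device.
	w, err := config.Load("h1/config.xml", protocol.LocalDeviceID)
	if err != nil {
		log.Fatal(err)
	}
	// Folders() returns the configured folders keyed by folder ID.
	for id := range w.Folders() {
		fmt.Println("folder:", id)
	}
}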

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-DEVICE-IDS" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-DEVICE-IDS" "7" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-device-ids \- Understanding Device IDs
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-EVENT-API" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-EVENT-API" "7" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-event-api \- Event API
.
@@ -444,7 +444,10 @@ An unsuccessful operation:
.UNINDENT
.UNINDENT
.sp
The \fBaction\fP field is either \fBupdate\fP or \fBdelete\fP\&.
The \fBaction\fP field is either \fBupdate\fP (contents changed), \fBmetadata\fP (file metadata changed but not contents), or \fBdelete\fP\&.
.sp
New in version 0.11.10: The \fBmetadata\fP action.
.SS ItemStarted
.sp
Generated when Syncthing begins synchronizing a file to a newer version.
@@ -469,7 +472,10 @@ Generated when Syncthing begins synchronizing a file to a newer version.
.UNINDENT
.UNINDENT
.sp
The \fBaction\fP field is either \fBupdate\fP or \fBdelete\fP\&.
The \fBaction\fP field is either \fBupdate\fP (contents changed), \fBmetadata\fP (file metadata changed but not contents), or \fBdelete\fP\&.
.sp
New in version 0.11.10: The \fBmetadata\fP action.
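.sp
A minimal sketch of watching for the new \fBmetadata\fP action through the event API, using the rc package introduced in this changeset; the address is hypothetical, and a Syncthing instance started via rc (so the fixed API key matches) is assumed:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/syncthing/syncthing/internal/rc"
)

func main() {
	p := rc.NewProcess("127.0.0.1:8081")
	since := 0
	for { // poll forever; a real test would have an exit condition
		evs, err := p.Events(since)
		if err != nil {
			log.Fatal(err)
		}
		for _, ev := range evs {
			since = ev.ID
			if ev.Type != "ItemStarted" && ev.Type != "ItemFinished" {
				continue
			}
			data := ev.Data.(map[string]interface{})
			if data["action"] == "metadata" {
				fmt.Printf("%s: metadata change for %v\n", ev.Type, data["item"])
			}
		}
		time.Sleep(250 * time.Millisecond)
	}
}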
.SS LocalIndexUpdated
.sp
Generated when the local index information has changed, due to

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-FAQ" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-FAQ" "7" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-faq \- Frequently Asked Questions
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-NETWORKING" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-NETWORKING" "7" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-networking \- Firewall Setup
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-REST-API" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-REST-API" "7" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-rest-api \- REST API
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-SECURITY" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-SECURITY" "7" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-security \- Security Principles
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-STIGNORE" "5" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING-STIGNORE" "5" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing-stignore \- Prevent files from being synchronized to other nodes
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "TODO" "7" "June 14, 2015" "v0.11" "Syncthing"
.TH "TODO" "7" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
Todo \- Keep automatic backups of deleted files by other nodes
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING" "1" "June 14, 2015" "v0.11" "Syncthing"
.TH "SYNCTHING" "1" "June 20, 2015" "v0.11" "Syncthing"
.SH NAME
syncthing \- Syncthing
.

View File

@@ -16,9 +16,10 @@ import (
"time"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/rc"
)
func TestConflict(t *testing.T) {
func TestConflictsDefault(t *testing.T) {
log.Println("Cleaning...")
err := removeAll("s1", "s2", "h1/index*", "h2/index*")
if err != nil {
@@ -49,16 +50,21 @@ func TestConflict(t *testing.T) {
t.Fatal(err)
}
sender, receiver := coSenderReceiver(t)
defer sender.stop()
defer receiver.stop()
sender := startInstance(t, 1)
defer checkedStop(t, sender)
receiver := startInstance(t, 2)
defer checkedStop(t, receiver)
if err = awaitCompletion("default", sender, receiver); err != nil {
// Rescan with a delay on the next one, so we are not surprised by a
// sudden rescan while we're trying to introduce conflicts.
if err := sender.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
sender.stop()
receiver.stop()
if err := receiver.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
rc.AwaitSync("default", sender, receiver)
log.Println("Verifying...")
@@ -101,22 +107,13 @@ func TestConflict(t *testing.T) {
log.Println("Syncing...")
err = receiver.start()
err = sender.start()
if err != nil {
if err := sender.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
if err != nil {
sender.stop()
if err := receiver.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
if err = awaitCompletion("default", sender, receiver); err != nil {
t.Fatal(err)
}
sender.stop()
receiver.stop()
rc.AwaitSync("default", sender, receiver)
// The conflict is expected on the s2 side due to how we calculate which
// file is the winner (based on device ID)
@@ -151,22 +148,13 @@ func TestConflict(t *testing.T) {
log.Println("Syncing...")
err = receiver.start()
err = sender.start()
if err != nil {
if err := sender.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
if err != nil {
sender.stop()
if err := receiver.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
if err = awaitCompletion("default", sender, receiver); err != nil {
t.Fatal(err)
}
sender.stop()
receiver.stop()
rc.AwaitSync("default", sender, receiver)
// The conflict should manifest on the s2 side again, where we should have
// moved the file to a conflict copy instead of just deleting it.
@@ -180,7 +168,7 @@ func TestConflict(t *testing.T) {
}
}
func TestInitialMergeConflicts(t *testing.T) {
func TestConflictsInitialMerge(t *testing.T) {
log.Println("Cleaning...")
err := removeAll("s1", "s2", "h1/index*", "h2/index*")
if err != nil {
@@ -224,18 +212,17 @@ func TestInitialMergeConflicts(t *testing.T) {
// Let them sync
sender, receiver := coSenderReceiver(t)
defer sender.stop()
defer receiver.stop()
sender := startInstance(t, 1)
defer checkedStop(t, sender)
receiver := startInstance(t, 2)
defer checkedStop(t, receiver)
log.Println("Syncing...")
if err = awaitCompletion("default", sender, receiver); err != nil {
t.Fatal(err)
}
rc.AwaitSync("default", sender, receiver)
sender.stop()
receiver.stop()
checkedStop(t, sender)
checkedStop(t, receiver)
log.Println("Verifying...")
@@ -270,7 +257,7 @@ func TestInitialMergeConflicts(t *testing.T) {
}
}
func TestResetConflicts(t *testing.T) {
func TestConflictsIndexReset(t *testing.T) {
log.Println("Cleaning...")
err := removeAll("s1", "s2", "h1/index*", "h2/index*")
if err != nil {
@@ -303,15 +290,14 @@ func TestResetConflicts(t *testing.T) {
// Let them sync
sender, receiver := coSenderReceiver(t)
defer sender.stop()
defer receiver.stop()
sender := startInstance(t, 1)
defer checkedStop(t, sender)
receiver := startInstance(t, 2)
defer checkedStop(t, receiver)
log.Println("Syncing...")
if err = awaitCompletion("default", sender, receiver); err != nil {
t.Fatal(err)
}
rc.AwaitSync("default", sender, receiver)
log.Println("Verifying...")
@@ -341,43 +327,25 @@ func TestResetConflicts(t *testing.T) {
// This will make the file on the cluster look newer than what we have
// locally after we reset the index, unless we have a fix for that.
err = ioutil.WriteFile("s2/file2", []byte("hello1\n"), 0644)
if err != nil {
t.Fatal(err)
for i := 0; i < 5; i++ {
err = ioutil.WriteFile("s2/file2", []byte("hello1\n"), 0644)
if err != nil {
t.Fatal(err)
}
err = receiver.Rescan("default")
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
}
err = receiver.rescan("default")
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = ioutil.WriteFile("s2/file2", []byte("hello2\n"), 0644)
if err != nil {
t.Fatal(err)
}
err = receiver.rescan("default")
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
err = ioutil.WriteFile("s2/file2", []byte("hello3\n"), 0644)
if err != nil {
t.Fatal(err)
}
err = receiver.rescan("default")
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
if err = awaitCompletion("default", sender, receiver); err != nil {
t.Fatal(err)
}
rc.AwaitSync("default", sender, receiver)
// Now nuke the index
log.Println("Resetting...")
receiver.stop()
checkedStop(t, receiver)
removeAll("h2/index*")
// s1/file1 (remote) changes while receiver is down
@@ -388,7 +356,7 @@ func TestResetConflicts(t *testing.T) {
}
// s1 must know about it
err = sender.rescan("default")
err = sender.Rescan("default")
if err != nil {
t.Fatal(err)
}
@@ -400,13 +368,12 @@ func TestResetConflicts(t *testing.T) {
t.Fatal(err)
}
receiver.start()
receiver = startInstance(t, 2)
defer checkedStop(t, receiver)
log.Println("Syncing...")
if err = awaitCompletion("default", sender, receiver); err != nil {
t.Fatal(err)
}
rc.AwaitSync("default", sender, receiver)
// s2 should have five files (three plus two conflicts)
@@ -438,32 +405,3 @@ func TestResetConflicts(t *testing.T) {
t.Errorf("Expected 2 'file2' files in s2 instead of %d", len(files))
}
}
func coSenderReceiver(t *testing.T) (syncthingProcess, syncthingProcess) {
log.Println("Starting sender...")
sender := syncthingProcess{ // id1
instance: "1",
argv: []string{"-home", "h1"},
port: 8081,
apiKey: apiKey,
}
err := sender.start()
if err != nil {
t.Fatal(err)
}
log.Println("Starting receiver...")
receiver := syncthingProcess{ // id2
instance: "2",
argv: []string{"-home", "h2"},
port: 8082,
apiKey: apiKey,
}
err = receiver.start()
if err != nil {
sender.stop()
t.Fatal(err)
}
return sender, receiver
}

View File

@@ -16,7 +16,7 @@ import (
"time"
)
func TestDelayScan(t *testing.T) {
func TestRescanWithDelay(t *testing.T) {
log.Println("Cleaning...")
err := removeAll("s1", "h1/index*")
if err != nil {
@@ -36,30 +36,8 @@ func TestDelayScan(t *testing.T) {
}
log.Println("Starting up...")
st := syncthingProcess{ // id1
instance: "1",
argv: []string{"-home", "h1"},
port: 8081,
apiKey: apiKey,
}
err = st.start()
if err != nil {
t.Fatal(err)
}
// Wait for one scan to succeed, or up to 20 seconds...
// This is to let startup, UPnP etc complete.
for i := 0; i < 20; i++ {
err := st.rescan("default")
if err != nil {
time.Sleep(time.Second)
continue
}
break
}
// Wait for UPnP and stuff
time.Sleep(10 * time.Second)
st := startInstance(t, 1)
var wg sync.WaitGroup
log.Println("Starting scans...")
@@ -68,7 +46,7 @@ func TestDelayScan(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
err := st.rescanNext("default", time.Duration(1)*time.Second)
err := st.RescanDelay("default", 1)
log.Println(j)
if err != nil {
log.Println(err)
@@ -84,8 +62,5 @@ func TestDelayScan(t *testing.T) {
// This is where the real test is currently, since stop() checks for data
// race output in the log.
log.Println("Stopping...")
_, err = st.stop()
if err != nil {
t.Fatal(err)
}
checkedStop(t, st)
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/rc"
)
func TestFileTypeChange(t *testing.T) {
@@ -73,11 +74,11 @@ func testFileTypeChange(t *testing.T) {
// A file that we will replace with a directory later
fd, err := os.Create("s1/fileToReplace")
if err != nil {
if fd, err := os.Create("s1/fileToReplace"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
fd.Close()
// A directory that we will replace with a file later
@@ -92,52 +93,26 @@ func testFileTypeChange(t *testing.T) {
if err != nil {
t.Fatal(err)
}
fd, err = os.Create("s1/dirToReplace/emptyFile")
if err != nil {
if fd, err := os.Create("s1/dirToReplace/emptyFile"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
fd.Close()
// Verify that the files and directories sync to the other side
sender := startInstance(t, 1)
defer checkedStop(t, sender)
receiver := startInstance(t, 2)
defer checkedStop(t, receiver)
log.Println("Syncing...")
sender := syncthingProcess{ // id1
instance: "1",
argv: []string{"-home", "h1"},
port: 8081,
apiKey: apiKey,
}
err = sender.start()
if err != nil {
t.Fatal(err)
}
defer sender.stop()
rc.AwaitSync("default", sender, receiver)
receiver := syncthingProcess{ // id2
instance: "2",
argv: []string{"-home", "h2"},
port: 8082,
apiKey: apiKey,
}
err = receiver.start()
if err != nil {
sender.stop()
t.Fatal(err)
}
defer receiver.stop()
err = awaitCompletion("default", sender, receiver)
if err != nil {
t.Fatal(err)
}
_, err = sender.stop()
if err != nil {
t.Fatal(err)
}
_, err = receiver.stop()
if err != nil {
// Delay scans for the moment
if err := sender.RescanDelay("default", 86400); err != nil {
t.Fatal(err)
}
@@ -166,11 +141,11 @@ func testFileTypeChange(t *testing.T) {
if err != nil {
t.Fatal(err)
}
fd, err = os.Create("s1/emptyDirToReplace")
if err != nil {
if fd, err := os.Create("s1/emptyDirToReplace"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
fd.Close()
// Clear directory and replace with file
@@ -178,30 +153,21 @@ func testFileTypeChange(t *testing.T) {
if err != nil {
t.Fatal(err)
}
fd, err = os.Create("s1/dirToReplace")
if err != nil {
if fd, err := os.Create("s1/dirToReplace"); err != nil {
t.Fatal(err)
} else {
fd.Close()
}
fd.Close()
// Sync these changes and recheck
log.Println("Syncing...")
err = sender.start()
if err != nil {
if err := sender.Rescan("default"); err != nil {
t.Fatal(err)
}
err = receiver.start()
if err != nil {
t.Fatal(err)
}
err = awaitCompletion("default", sender, receiver)
if err != nil {
t.Fatal(err)
}
rc.AwaitSync("default", sender, receiver)
log.Println("Comparing directories...")
err = compareDirectories("s1", "s2")

View File

@@ -8,6 +8,7 @@
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<followSymlinks>true</followSymlinks>
</folder>
<folder id="s12" path="s12-1" ro="false" rescanIntervalS="10" ignorePerms="false" autoNormalize="true">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>

View File

@@ -3,31 +3,31 @@
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning type="simple">
<param key="keep" val="5"></param>
<versioning type="trashcan">
<param key="cleanoutDays" val="1"></param>
</versioning>
<lenientMtimes>false</lenientMtimes>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
<folder id="s12" path="s12-2" ro="false" rescanIntervalS="15" ignorePerms="false" autoNormalize="true">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<versioning></versioning>
<lenientMtimes>false</lenientMtimes>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
<folder id="s23" path="s23-2" ro="false" rescanIntervalS="15" ignorePerms="false" autoNormalize="true">
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning></versioning>
<lenientMtimes>false</lenientMtimes>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="metadata" introducer="false">
<address>127.0.0.1:22001</address>
@@ -57,6 +57,7 @@
<upnpEnabled>true</upnpEnabled>
<upnpLeaseMinutes>0</upnpLeaseMinutes>
<upnpRenewalMinutes>1</upnpRenewalMinutes>
<upnpTimeoutSeconds>10</upnpTimeoutSeconds>
<urAccepted>-1</urAccepted>
<urUniqueID></urUniqueID>
<restartOnWakeup>true</restartOnWakeup>
@@ -66,5 +67,6 @@
<progressUpdateIntervalS>5</progressUpdateIntervalS>
<symlinksEnabled>true</symlinksEnabled>
<limitBandwidthInLan>false</limitBandwidthInLan>
<databaseBlockCacheMiB>0</databaseBlockCacheMiB>
</options>
</configuration>

View File

@@ -1,24 +1,24 @@
<configuration version="9">
<folder id="s23" path="s23-3" ro="false" rescanIntervalS="20" ignorePerms="false" autoNormalize="false">
<configuration version="10">
<folder id="s23" path="s23-3" ro="false" rescanIntervalS="20" ignorePerms="false" autoNormalize="true">
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning></versioning>
<lenientMtimes>false</lenientMtimes>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
<folder id="default" path="s3" ro="false" rescanIntervalS="20" ignorePerms="false" autoNormalize="false">
<folder id="default" path="s3" ro="false" rescanIntervalS="20" ignorePerms="false" autoNormalize="true">
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
<versioning type="simple">
<param key="keep" val="5"></param>
</versioning>
<lenientMtimes>false</lenientMtimes>
<copiers>1</copiers>
<pullers>16</pullers>
<hashers>0</hashers>
<order>random</order>
</folder>
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="metadata" introducer="false">
<address>127.0.0.1:22001</address>
@@ -48,6 +48,7 @@
<upnpEnabled>false</upnpEnabled>
<upnpLeaseMinutes>0</upnpLeaseMinutes>
<upnpRenewalMinutes>30</upnpRenewalMinutes>
<upnpTimeoutSeconds>10</upnpTimeoutSeconds>
<urAccepted>-1</urAccepted>
<urUniqueID></urUniqueID>
<restartOnWakeup>true</restartOnWakeup>
@@ -57,5 +58,6 @@
<progressUpdateIntervalS>5</progressUpdateIntervalS>
<symlinksEnabled>true</symlinksEnabled>
<limitBandwidthInLan>false</limitBandwidthInLan>
<databaseBlockCacheMiB>0</databaseBlockCacheMiB>
</options>
</configuration>

View File

@@ -14,11 +14,10 @@ import (
"io/ioutil"
"net/http"
"strings"
"sync"
"testing"
"time"
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/rc"
)
var jsonEndpoints = []string{
@@ -46,18 +45,12 @@ var jsonEndpoints = []string{
}
func TestGetIndex(t *testing.T) {
st := syncthingProcess{
argv: []string{"-home", "h2"},
port: 8082,
instance: "2",
}
err := st.start()
if err != nil {
t.Fatal(err)
}
defer st.stop()
p := startInstance(t, 2)
defer checkedStop(t, p)
res, err := st.get("/index.html")
// Check for explicit index.html
res, err := http.Get("http://localhost:8082/index.html")
if err != nil {
t.Fatal(err)
}
@@ -79,7 +72,9 @@ func TestGetIndex(t *testing.T) {
}
res.Body.Close()
res, err = st.get("/")
// Check for implicit index.html
res, err = http.Get("http://localhost:8082/")
if err != nil {
t.Fatal(err)
}
@@ -103,17 +98,8 @@ func TestGetIndex(t *testing.T) {
}
func TestGetIndexAuth(t *testing.T) {
st := syncthingProcess{
argv: []string{"-home", "h1"},
port: 8081,
instance: "1",
apiKey: "abc123",
}
err := st.start()
if err != nil {
t.Fatal(err)
}
defer st.stop()
p := startInstance(t, 1)
defer checkedStop(t, p)
// Without auth should give 401
@@ -162,19 +148,11 @@ func TestGetIndexAuth(t *testing.T) {
}
func TestGetJSON(t *testing.T) {
st := syncthingProcess{
argv: []string{"-home", "h2"},
port: 8082,
instance: "2",
}
err := st.start()
if err != nil {
t.Fatal(err)
}
defer st.stop()
p := startInstance(t, 2)
defer checkedStop(t, p)
for _, path := range jsonEndpoints {
res, err := st.get(path)
res, err := http.Get("http://127.0.0.1:8082" + path)
if err != nil {
t.Error(path, err)
continue
@@ -196,16 +174,8 @@ func TestGetJSON(t *testing.T) {
}
func TestPOSTWithoutCSRF(t *testing.T) {
st := syncthingProcess{
argv: []string{"-home", "h2"},
port: 8082,
instance: "2",
}
err := st.start()
if err != nil {
t.Fatal(err)
}
defer st.stop()
p := startInstance(t, 2)
defer checkedStop(t, p)
// Should fail without CSRF
@@ -271,12 +241,7 @@ func TestPOSTWithoutCSRF(t *testing.T) {
}
}
var (
initOnce sync.Once
proc syncthingProcess
)
func setupAPIBench() {
func setupAPIBench() *rc.Process {
err := removeAll("s1", "s2", "h1/index*", "h2/index*")
if err != nil {
panic(err)
@@ -292,47 +257,20 @@ func setupAPIBench() {
panic(err)
}
proc = syncthingProcess{ // id1
instance: "1",
argv: []string{"-home", "h1"},
port: 8081,
apiKey: apiKey,
}
err = proc.start()
if err != nil {
panic(err)
}
// Wait for one scan to succeed, or up to 20 seconds... This is to let
// startup, UPnP etc complete and make sure the sender has the full index
// before they connect.
for i := 0; i < 20; i++ {
resp, err := proc.post("/rest/scan?folder=default", nil)
if err != nil {
time.Sleep(time.Second)
continue
}
if resp.StatusCode != 200 {
resp.Body.Close()
time.Sleep(time.Second)
continue
}
break
}
// This will panic if there is an actual failure to start, when we try to
// call nil.Fatal(...)
return startInstance(nil, 1)
}
func benchmarkURL(b *testing.B, url string) {
initOnce.Do(setupAPIBench)
p := setupAPIBench()
defer p.Stop()
b.ResetTimer()
for i := 0; i < b.N; i++ {
resp, err := proc.get(url)
_, err := p.Get(url)
if err != nil {
b.Fatal(err)
}
if resp.StatusCode != 200 {
b.Fatal(resp.Status)
}
resp.Body.Close()
}
}

Some files were not shown because too many files have changed in this diff.