Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-06 21:09:13 -05:00)

Compare commits: 36 commits
60f760ee49
884aaab751
e968560ea4
07caaa96e4
e8a679c280
bc885f1d08
f2f051d6de
49a0bfccba
0c1e60894f
ace87ad7bb
50f0097843
32a9466277
1ee3407946
f1120d7aa9
2e7d6b2f99
dfef929187
e78d9ad592
9f2948f595
198da910ed
5f1bf9d9d6
798c4aef9a
f80f5b3bda
cbb07b0d67
7cc9921615
7555fe065e
d977f4278e
870e3ca893
213acaee3b
58381496a2
5981e42aed
3c9165d295
60d0ef93ac
f45d5b0066
b71306480f
dc9df0a79a
8976e53998
@@ -1,6 +1,7 @@
 language: go
 
 go:
 - 1.3
+- tip
 
 install:
Godeps/Godeps.json (generated; 4 lines changed)

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/syncthing/syncthing",
-	"GoVersion": "go1.3",
+	"GoVersion": "go1.3.1",
 	"Packages": [
 		"./cmd/..."
 	],
@@ -49,7 +49,7 @@
 	},
 	{
 		"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-		"Rev": "c9d6b7be1428942d4cf4f54055b991a8513392eb"
+		"Rev": "a44c00531ccc005546f20c6e00ab7bb9a8f6b2e0"
 	},
 	{
 		"ImportPath": "github.com/vitrun/qart/coding",
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go (generated, vendored; 13 lines changed)

@@ -170,7 +170,7 @@ func (p *dbBench) writes(perBatch int) {
 	b.SetBytes(116)
 }
 
-func (p *dbBench) drop() {
+func (p *dbBench) gc() {
 	p.keys, p.values = nil, nil
 	runtime.GC()
 }
@@ -249,6 +249,7 @@ func (p *dbBench) newIter() iterator.Iterator {
 }
 
 func (p *dbBench) close() {
+	p.b.Log(p.db.s.tops.bpool)
 	p.db.Close()
 	p.stor.Close()
 	os.RemoveAll(benchDB)
@@ -331,7 +332,7 @@ func BenchmarkDBRead(b *testing.B) {
 	p := openDBBench(b, false)
 	p.populate(b.N)
 	p.fill()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -362,7 +363,7 @@ func BenchmarkDBReadUncompressed(b *testing.B) {
 	p := openDBBench(b, true)
 	p.populate(b.N)
 	p.fill()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -379,7 +380,7 @@ func BenchmarkDBReadTable(b *testing.B) {
 	p.populate(b.N)
 	p.fill()
 	p.reopen()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -395,7 +396,7 @@ func BenchmarkDBReadReverse(b *testing.B) {
 	p := openDBBench(b, false)
 	p.populate(b.N)
 	p.fill()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -413,7 +414,7 @@ func BenchmarkDBReadReverseTable(b *testing.B) {
 	p.populate(b.N)
 	p.fill()
 	p.reopen()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
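A note on the benchmark changes above: drop was renamed gc to say what the helper actually does (release the setup data and force a collection), and close now logs the buffer pool's statistics. The surrounding pattern, expensive setup followed by b.ResetTimer() so only the loop is timed, is standard testing.B usage. A minimal self-contained sketch of that pattern (not the dbBench helpers themselves):

```go
package bench

import (
	"runtime"
	"testing"
)

func BenchmarkRead(b *testing.B) {
	// Setup that should not be billed to the benchmark loop.
	data := make([][]byte, 0, b.N)
	for i := 0; i < b.N; i++ {
		data = append(data, make([]byte, 116))
	}
	data = nil      // like dbBench.gc: drop the setup data...
	runtime.GC()    // ...and collect it before timing starts
	b.ResetTimer()  // the clock (re)starts here
	b.SetBytes(116) // report throughput assuming 116 bytes per iteration

	for i := 0; i < b.N; i++ {
		_ = i // measured work goes here
	}
}
```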
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go (generated, vendored; 21 lines changed)

@@ -35,8 +35,8 @@ type DB struct {
 
 	// MemDB.
 	memMu sync.RWMutex
-	mem       *memdb.DB
-	frozenMem *memdb.DB
+	memPool        *util.Pool
+	mem, frozenMem *memDB
 	journal       *journal.Writer
 	journalWriter storage.Writer
 	journalFile   storage.File
@@ -79,6 +79,8 @@ func openDB(s *session) (*DB, error) {
 		s: s,
 		// Initial sequence
 		seq: s.stSeq,
+		// MemDB
+		memPool: util.NewPool(1),
 		// Write
 		writeC:       make(chan *Batch),
 		writeMergedC: make(chan bool),
@@ -257,6 +259,7 @@ func recoverTable(s *session, o *opt.Options) error {
 	var mSeq uint64
 	var good, corrupted int
 	rec := new(sessionRecord)
+	bpool := util.NewBufferPool(o.GetBlockSize() + 5)
 	buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
 		tmp = s.newTemp()
 		writer, err := tmp.Create()
@@ -314,7 +317,7 @@ func recoverTable(s *session, o *opt.Options) error {
 		var tSeq uint64
 		var tgood, tcorrupted, blockerr int
 		var imin, imax []byte
-		tr := table.NewReader(reader, size, nil, o)
+		tr := table.NewReader(reader, size, nil, bpool, o)
 		iter := tr.NewIterator(nil, nil)
 		iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
 			s.logf("table@recovery found error @%d %q", file.Num(), err)
@@ -559,19 +562,20 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
 	ikey := newIKey(key, seq, tSeek)
 
 	em, fm := db.getMems()
-	for _, m := range [...]*memdb.DB{em, fm} {
+	for _, m := range [...]*memDB{em, fm} {
 		if m == nil {
 			continue
 		}
+		defer m.decref()
 
-		mk, mv, me := m.Find(ikey)
+		mk, mv, me := m.db.Find(ikey)
 		if me == nil {
 			ukey, _, t, ok := parseIkey(mk)
 			if ok && db.s.icmp.uCompare(ukey, key) == 0 {
 				if t == tDel {
 					return nil, ErrNotFound
 				}
-				return mv, nil
+				return append([]byte{}, mv...), nil
 			}
 		} else if me != ErrNotFound {
 			return nil, me
@@ -591,8 +595,9 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
 // Get gets the value for the given key. It returns ErrNotFound if the
 // DB does not contain the key.
 //
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
+// The returned slice is its own copy, it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
 func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
 	err = db.ok()
 	if err != nil {
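Two details above are worth calling out: reads now take a reference on the memdb (with a deferred decref), and a memdb hit returns append([]byte{}, mv...) instead of the memdb's own bytes, which is what lets the Get documentation promise a caller-owned slice. Once buffers can be recycled through a pool, returning the backing slice directly would let the value change under the caller. A small sketch of the difference, with hypothetical helper names:

```go
package main

import "fmt"

// pooled stands in for a memdb-owned buffer that will be reused.
var pooled = []byte("value-1")

// getAliased returns the pooled backing slice directly; the caller's
// view changes when the buffer is recycled for another key.
func getAliased() []byte { return pooled }

// getCopied returns a private copy, as the patched Get does above.
func getCopied() []byte { return append([]byte{}, pooled...) }

func main() {
	aliased, copied := getAliased(), getCopied()
	copy(pooled, []byte("value-2")) // the pool reuses the buffer
	fmt.Println(string(aliased))    // "value-2": aliased data moved
	fmt.Println(string(copied))     // "value-1": the copy is stable
}
```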
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go (generated, vendored; 7 lines changed)

@@ -216,14 +216,15 @@ func (db *DB) memCompaction() {
 	if mem == nil {
 		return
 	}
+	defer mem.decref()
 
 	c := newCMem(db.s)
 	stats := new(cStatsStaging)
 
-	db.logf("mem@flush N·%d S·%s", mem.Len(), shortenb(mem.Size()))
+	db.logf("mem@flush N·%d S·%s", mem.db.Len(), shortenb(mem.db.Size()))
 
 	// Don't compact empty memdb.
-	if mem.Len() == 0 {
+	if mem.db.Len() == 0 {
 		db.logf("mem@flush skipping")
 		// drop frozen mem
 		db.dropFrozenMem()
@@ -241,7 +242,7 @@ func (db *DB) memCompaction() {
 	db.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) {
 		stats.startTimer()
 		defer stats.stopTimer()
-		return c.flush(mem, -1)
+		return c.flush(mem.db, -1)
 	}, func() error {
 		for _, r := range c.rec.addedTables {
 			db.logf("mem@flush rollback @%d", r.num)
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go (generated, vendored; 20 lines changed)

@@ -9,6 +9,7 @@ package leveldb
 import (
 	"errors"
 	"runtime"
+	"sync"
 
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
@@ -19,6 +20,17 @@ var (
 	errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
 )
 
+type memdbReleaser struct {
+	once sync.Once
+	m    *memDB
+}
+
+func (mr *memdbReleaser) Release() {
+	mr.once.Do(func() {
+		mr.m.decref()
+	})
+}
+
 func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
 	em, fm := db.getMems()
 	v := db.s.version()
@@ -26,9 +38,13 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
 	ti := v.getIterators(slice, ro)
 	n := len(ti) + 2
 	i := make([]iterator.Iterator, 0, n)
-	i = append(i, em.NewIterator(slice))
+	emi := em.db.NewIterator(slice)
+	emi.SetReleaser(&memdbReleaser{m: em})
+	i = append(i, emi)
 	if fm != nil {
-		i = append(i, fm.NewIterator(slice))
+		fmi := fm.db.NewIterator(slice)
+		fmi.SetReleaser(&memdbReleaser{m: fm})
+		i = append(i, fmi)
 	}
 	i = append(i, ti...)
 	strict := db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
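memdbReleaser wraps the decref in a sync.Once, so releasing an iterator twice cannot drive the reference count negative (which db_state.go's decref turns into a panic). The idiom in isolation, with hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

type refCounted struct{ refs int }

func (r *refCounted) decref() {
	r.refs--
	fmt.Println("refs:", r.refs)
}

// onceReleaser makes Release idempotent, like memdbReleaser above.
type onceReleaser struct {
	once sync.Once
	r    *refCounted
}

func (o *onceReleaser) Release() {
	o.once.Do(o.r.decref)
}

func main() {
	rel := &onceReleaser{r: &refCounted{refs: 1}}
	rel.Release() // refs: 0
	rel.Release() // no-op: sync.Once ran decref exactly once
}
```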
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go (generated, vendored; 61 lines changed)

@@ -11,8 +11,27 @@ import (
 
 	"github.com/syndtr/goleveldb/leveldb/journal"
 	"github.com/syndtr/goleveldb/leveldb/memdb"
+	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
+type memDB struct {
+	pool *util.Pool
+	db   *memdb.DB
+	ref  int32
+}
+
+func (m *memDB) incref() {
+	atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+		m.pool.Put(m)
+	} else if ref < 0 {
+		panic("negative memdb ref")
+	}
+}
+
 // Get latest sequence number.
 func (db *DB) getSeq() uint64 {
 	return atomic.LoadUint64(&db.seq)
@@ -25,7 +44,7 @@ func (db *DB) addSeq(delta uint64) {
 
 // Create new memdb and froze the old one; need external synchronization.
 // newMem only called synchronously by the writer.
-func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
+func (db *DB) newMem(n int) (mem *memDB, err error) {
 	num := db.s.allocFileNum()
 	file := db.s.getJournalFile(num)
 	w, err := file.Create()
@@ -37,6 +56,10 @@ func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
 	db.memMu.Lock()
 	defer db.memMu.Unlock()
 
+	if db.frozenMem != nil {
+		panic("still has frozen mem")
+	}
+
 	if db.journal == nil {
 		db.journal = journal.NewWriter(w)
 	} else {
@@ -47,8 +70,19 @@ func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
 	db.journalWriter = w
 	db.journalFile = file
 	db.frozenMem = db.mem
-	db.mem = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
-	mem = db.mem
+	mem, ok := db.memPool.Get().(*memDB)
+	if ok && mem.db.Capacity() >= n {
+		mem.db.Reset()
+		mem.incref()
+	} else {
+		mem = &memDB{
+			pool: db.memPool,
+			db:   memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)),
+			ref:  1,
+		}
+	}
+	mem.incref()
+	db.mem = mem
 	// The seq only incremented by the writer. And whoever called newMem
 	// should hold write lock, so no need additional synchronization here.
 	db.frozenSeq = db.seq
@@ -56,16 +90,27 @@ func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
 }
 
 // Get all memdbs.
-func (db *DB) getMems() (e *memdb.DB, f *memdb.DB) {
+func (db *DB) getMems() (e, f *memDB) {
 	db.memMu.RLock()
 	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
+	db.mem.incref()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
 	return db.mem, db.frozenMem
 }
 
 // Get frozen memdb.
-func (db *DB) getEffectiveMem() *memdb.DB {
+func (db *DB) getEffectiveMem() *memDB {
 	db.memMu.RLock()
 	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
+	db.mem.incref()
 	return db.mem
 }
 
@@ -77,9 +122,12 @@ func (db *DB) hasFrozenMem() bool {
 }
 
 // Get frozen memdb.
-func (db *DB) getFrozenMem() *memdb.DB {
+func (db *DB) getFrozenMem() *memDB {
 	db.memMu.RLock()
 	defer db.memMu.RUnlock()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
 	return db.frozenMem
 }
 
@@ -92,6 +140,7 @@ func (db *DB) dropFrozenMem() {
 		db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
 	}
 	db.frozenJournalFile = nil
+	db.frozenMem.decref()
	db.frozenMem = nil
 	db.memMu.Unlock()
 }
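The memDB wrapper is the heart of this change set: a memdb plus an atomic reference count and a back-pointer to a util.Pool, returned to the pool when the last reference drops. Readers take a reference under the read lock (getMems and getEffectiveMem incref before returning) and decref when done, so a memdb can be recycled for the next journal rotation without being pulled out from under an in-flight read. A stripped-down sketch of the same lifecycle, with hypothetical names (on go1.3 util.Pool is a thin wrapper over sync.Pool, used directly here):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// resource is a stand-in for memdb.DB.
type resource struct{ buf []byte }

func (r *resource) reset() { r.buf = r.buf[:0] }

// refRes pairs the resource with a refcount and its pool,
// mirroring the memDB struct above.
type refRes struct {
	pool *sync.Pool
	res  *resource
	ref  int32
}

func (m *refRes) incref() { atomic.AddInt32(&m.ref, 1) }

func (m *refRes) decref() {
	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
		m.pool.Put(m) // last user returns it for reuse
	} else if ref < 0 {
		panic("negative ref")
	}
}

func main() {
	pool := &sync.Pool{}
	m := &refRes{pool: pool, res: &resource{}, ref: 1} // owner holds one ref

	m.incref() // a reader takes a reference...
	m.decref() // ...and releases it; still owned, not pooled

	m.decref() // owner drops its ref: count hits 0, back to the pool
	reused, _ := pool.Get().(*refRes)
	// Usually the same object comes back for reuse (sync.Pool may
	// also drop items, so a nil/miss must be handled in real code).
	fmt.Println(reused == m)
	if reused != nil {
		reused.res.reset()
	}
}
```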
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go (generated, vendored; 37 lines changed)

@@ -45,7 +45,7 @@ func (db *DB) jWriter() {
 	}
 }
 
-func (db *DB) rotateMem(n int) (mem *memdb.DB, err error) {
+func (db *DB) rotateMem(n int) (mem *memDB, err error) {
 	// Wait for pending memdb compaction.
 	err = db.compSendIdle(db.mcompCmdC)
 	if err != nil {
@@ -63,13 +63,19 @@ func (db *DB) rotateMem(n int) (mem *memdb.DB, err error) {
 	return
 }
 
-func (db *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
+func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
 	delayed := false
-	flush := func() bool {
+	flush := func() (retry bool) {
 		v := db.s.version()
 		defer v.release()
 		mem = db.getEffectiveMem()
-		nn = mem.Free()
+		defer func() {
+			if retry {
+				mem.decref()
+				mem = nil
+			}
+		}()
+		nn = mem.db.Free()
 		switch {
 		case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed:
 			delayed = true
@@ -84,12 +90,17 @@ func (db *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
 		}
 		default:
 			// Allow memdb to grow if it has no entry.
-			if mem.Len() == 0 {
+			if mem.db.Len() == 0 {
 				nn = n
 			} else {
-				mem, err = db.rotateMem(n)
-				nn = mem.Free()
+				mem.decref()
+				mem, err = db.rotateMem(n)
+				if err == nil {
+					nn = mem.db.Free()
+				} else {
+					nn = 0
+				}
 			}
 			return false
 		}
 		return true
@@ -140,6 +151,7 @@ retry:
 	if err != nil {
 		return
 	}
+	defer mem.decref()
 
 	// Calculate maximum size of the batch.
 	m := 1 << 20
@@ -178,7 +190,7 @@ drain:
 		return
 	case db.journalC <- b:
 		// Write into memdb
-		b.memReplay(mem)
+		b.memReplay(mem.db)
 	}
 	// Wait for journal writer
 	select {
@@ -188,7 +200,7 @@ drain:
 	case err = <-db.journalAckC:
 		if err != nil {
 			// Revert memdb if error detected
-			b.revertMemReplay(mem)
+			b.revertMemReplay(mem.db)
 			return
 		}
 	}
@@ -197,7 +209,7 @@ drain:
 	if err != nil {
 		return
 	}
-	b.memReplay(mem)
+	b.memReplay(mem.db)
 }
 
 // Set last seq number.
@@ -258,7 +270,8 @@ func (db *DB) CompactRange(r util.Range) error {
 
 	// Check for overlaps in memdb.
 	mem := db.getEffectiveMem()
-	if isMemOverlaps(db.s.icmp, mem, r.Start, r.Limit) {
+	defer mem.decref()
+	if isMemOverlaps(db.s.icmp, mem.db, r.Start, r.Limit) {
 		// Memdb compaction.
 		if _, err := db.rotateMem(0); err != nil {
 			<-db.writeLockC
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go (generated, vendored; new file, 58 lines)

@@ -0,0 +1,58 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build go1.3

package leveldb

import (
	"sync/atomic"
	"testing"
)

func BenchmarkDBReadConcurrent(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		for pb.Next() && iter.Next() {
		}
	})
}

func BenchmarkDBReadConcurrent2(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	var dir uint32
	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		if atomic.AddUint32(&dir, 1)%2 == 0 {
			for pb.Next() && iter.Next() {
			}
		} else {
			if pb.Next() && iter.Last() {
				for pb.Next() && iter.Prev() {
				}
			}
		}
	})
}
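These new benchmarks lean on testing.B.RunParallel, which shipped in Go 1.3, hence the `// +build go1.3` tag that keeps the file away from older compilers. RunParallel starts GOMAXPROCS worker goroutines (by default) that pull iterations from a shared counter, so per-goroutine state such as the iterator lives inside the closure. A minimal standalone example:

```go
package bench

import (
	"sync/atomic"
	"testing"
)

var counter int64

func BenchmarkAddParallel(b *testing.B) {
	// The closure runs once per worker goroutine; pb.Next()
	// hands out iterations until b.N of them are consumed.
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			atomic.AddInt64(&counter, 1) // per-iteration work
		}
	})
}
```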
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go (generated, vendored; 2 lines changed)

@@ -110,7 +110,7 @@ type ErrCorrupted struct {
 }
 
 func (e ErrCorrupted) Error() string {
-	return fmt.Sprintf("leveldb/journal: corrupted %d bytes: %s", e.Size, e.Reason)
+	return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
 }
 
 // Dropper is the interface that wrap simple Drop method. The Drop
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go (generated, vendored; 351 lines changed)

@@ -12,6 +12,7 @@ package journal
 
 import (
 	"bytes"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -380,6 +381,94 @@ func TestCorrupt_MissingLastBlock(t *testing.T) {
 	if err != io.ErrUnexpectedEOF {
 		t.Fatalf("read #1: unexpected error: %v", err)
 	}
 
 	if _, err := r.Next(); err != io.EOF {
 		t.Fatalf("last next: unexpected error: %v", err)
 	}
 }
+
+func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #0.
+	for i := 0; i < 1024; i++ {
+		b[i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (third record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 2; n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
@@ -435,7 +524,7 @@ func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
 
 	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
 
-	// First read.
+	// First read (first record).
 	rr, err := r.Next()
 	if err != nil {
 		t.Fatal(err)
@@ -448,7 +537,7 @@ func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
 		t.Fatalf("read #0: got %d bytes want %d", n, want)
 	}
 
-	// Second read.
+	// Second read (second record).
 	rr, err = r.Next()
 	if err != nil {
 		t.Fatal(err)
@@ -458,7 +547,7 @@ func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
 		t.Fatalf("read #1: unexpected error: %v", err)
 	}
 
-	// Third read.
+	// Third read (fourth record).
 	rr, err = r.Next()
 	if err != nil {
 		t.Fatal(err)
@@ -470,4 +559,260 @@ func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
 	if want := int64(blockSize-headerSize) + 2; n != want {
 		t.Fatalf("read #2: got %d bytes want %d", n, want)
 	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
 }
+
+func TestCorrupt_CorruptedLastBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #3.
+	for i := len(b) - 1; i > len(b)-1024; i-- {
+		b[i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize - headerSize); n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	// Third read (third record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #2: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #2: got %d bytes want %d", n, want)
+	}
+
+	// Fourth read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #3: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting record #1.
+	x := blockSize
+	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #1: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting record #1.
+	x := blockSize/2 + headerSize
+	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (third record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
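The two overflow tests forge a 0xffff value into the bytes at offset +4 of a chunk. That offset follows from the journal's chunk header layout, which (per the format description in journal.go, matching the LevelDB log format) is a 4-byte checksum, a little-endian 2-byte length, and a 1-byte chunk type, so headerSize is 7 and header start + 4 is where the length lives. A sketch of encoding and corrupting such a header; the real journal seeds and masks its CRC, which this sketch skips:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

const headerSize = 7 // checksum (4) + little-endian length (2) + chunk type (1)

// putHeader encodes the chunk header layout the tests poke at.
func putHeader(buf []byte, payloadLen int, chunkType byte) {
	binary.LittleEndian.PutUint16(buf[4:], uint16(payloadLen))
	buf[6] = chunkType
	// Checksum covers the type byte and the payload (unmasked here).
	binary.LittleEndian.PutUint32(buf[:4], crc32.ChecksumIEEE(buf[6:headerSize+payloadLen]))
}

func main() {
	payload := []byte("record")
	buf := make([]byte, headerSize+len(payload))
	copy(buf[headerSize:], payload)
	putHeader(buf, len(payload), 1)

	// The overflow tests overwrite exactly these two bytes (offset +4):
	binary.LittleEndian.PutUint16(buf[4:], 0xffff)
	fmt.Println(binary.LittleEndian.Uint16(buf[4:]), len(buf))
	// 65535 vs a tiny buffer: the reader must flag the chunk as
	// corrupted rather than read past the end of the block.
}
```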
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go (generated, vendored; 11 lines changed)

@@ -275,6 +275,7 @@ type tOps struct {
 	s       *session
 	cache   cache.Cache
 	cacheNS cache.Namespace
+	bpool   *util.BufferPool
 }
 
 // Creates an empty table and returns table writer.
@@ -340,7 +341,7 @@ func (t *tOps) open(f *tFile) (c cache.Object, err error) {
 	}
 
 	ok = true
-	value = table.NewReader(r, int64(f.size), cacheNS, o)
+	value = table.NewReader(r, int64(f.size), cacheNS, t.bpool, o)
 	charge = 1
 	fin = func() {
 		r.Close()
@@ -412,8 +413,12 @@ func (t *tOps) close() {
 // Creates new initialized table ops instance.
 func newTableOps(s *session, cacheCap int) *tOps {
 	c := cache.NewLRUCache(cacheCap)
-	ns := c.GetNamespace(0)
-	return &tOps{s, c, ns}
+	return &tOps{
+		s:       s,
+		cache:   c,
+		cacheNS: c.GetNamespace(0),
+		bpool:   util.NewBufferPool(s.o.GetBlockSize() + 5),
+	}
 }
 
 // tWriter wraps the table writer. It keep track of file descriptor
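A side note on the newTableOps rewrite: moving from the positional literal &tOps{s, c, ns} to named fields is what makes adding bpool painless. Positional literals must list every field, so growing the struct breaks them at compile time, and reordering same-typed fields silently swaps meanings; named literals keep working and zero any omitted field. For example:

```go
package main

import "fmt"

type tOps struct {
	cacheCap int
	bpoolCap int // field added later
}

func main() {
	// Positional: must match the struct exactly, field for field.
	a := tOps{100, 4096}

	// Named: unaffected by new trailing fields; omitted ones are zero.
	b := tOps{cacheCap: 100}

	fmt.Println(a, b) // {100 4096} {100 0}
}
```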
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go (generated, vendored; 81 lines changed)

@@ -13,7 +13,6 @@ import (
 	"io"
 	"sort"
 	"strings"
-	"sync"
 
 	"code.google.com/p/snappy-go/snappy"
 
@@ -438,18 +437,20 @@ func (i *blockIter) Value() []byte {
 }
 
 func (i *blockIter) Release() {
-	i.prevNode = nil
-	i.prevKeys = nil
-	i.key = nil
-	i.value = nil
-	i.dir = dirReleased
-	if i.cache != nil {
-		i.cache.Release()
-		i.cache = nil
-	}
-	if i.releaser != nil {
-		i.releaser.Release()
-		i.releaser = nil
+	if i.dir > dirReleased {
+		i.prevNode = nil
+		i.prevKeys = nil
+		i.key = nil
+		i.value = nil
+		i.dir = dirReleased
+		if i.cache != nil {
+			i.cache.Release()
+			i.cache = nil
+		}
+		if i.releaser != nil {
+			i.releaser.Release()
+			i.releaser = nil
+		}
 	}
 }
@@ -520,6 +521,7 @@ type Reader struct {
 	reader io.ReaderAt
 	cache  cache.Namespace
 	err    error
+	bpool  *util.BufferPool
 	// Options
 	cmp    comparer.Comparer
 	filter filter.Filter
@@ -529,8 +531,6 @@ type Reader struct {
 	dataEnd     int64
 	indexBlock  *block
 	filterBlock *filterBlock
-
-	blockPool sync.Pool
 }
 
 func verifyChecksum(data []byte) bool {
@@ -541,13 +541,7 @@ func verifyChecksum(data []byte) bool {
 }
 
 func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
-	data, _ := r.blockPool.Get().([]byte) // data is either nil or a valid []byte from the pool
-	if l := bh.length + blockTrailerLen; uint64(len(data)) >= l {
-		data = data[:l]
-	} else {
-		r.blockPool.Put(data)
-		data = make([]byte, l)
-	}
+	data := r.bpool.Get(int(bh.length + blockTrailerLen))
 	if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
 		return nil, err
 	}
@@ -560,14 +554,16 @@ func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
 	case blockTypeNoCompression:
 		data = data[:bh.length]
 	case blockTypeSnappyCompression:
-		var err error
-		decData, _ := r.blockPool.Get().([]byte)
-		decData, err = snappy.Decode(decData, data[:bh.length])
+		decLen, err := snappy.DecodedLen(data[:bh.length])
 		if err != nil {
 			return nil, err
 		}
-		r.blockPool.Put(data[:cap(data)])
-		data = decData
+		tmp := data
+		data, err = snappy.Decode(r.bpool.Get(decLen), tmp[:bh.length])
+		r.bpool.Put(tmp)
+		if err != nil {
+			return nil, err
+		}
 	default:
 		return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length])
 	}
@@ -614,6 +610,18 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
 	return b, nil
 }
 
+type releaseBlock struct {
+	r *Reader
+	b *block
+}
+
+func (r releaseBlock) Release() {
+	if r.b.data != nil {
+		r.r.bpool.Put(r.b.data)
+		r.b.data = nil
+	}
+}
+
 func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
 	if r.cache != nil {
 		// Get/set block cache.
@@ -628,6 +636,10 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
 			ok = true
 			value = dataBlock
 			charge = int(dataBH.length)
+			fin = func() {
+				r.bpool.Put(dataBlock.data)
+				dataBlock.data = nil
+			}
 		}
 		return
 	})
@@ -650,7 +662,7 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
 	if err != nil {
 		return iterator.NewEmptyIterator(err)
 	}
-	iter := dataBlock.newIterator(slice, false, nil)
+	iter := dataBlock.newIterator(slice, false, releaseBlock{r, dataBlock})
 	return iter
 }
@@ -720,8 +732,11 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
 	}
 		return
 	}
-	// Don't use block buffer, no need to copy the buffer.
 	rkey = data.Key()
-	value = data.Value()
+	// Use block buffer, and since the buffer will be recycled, the buffer
+	// need to be copied.
+	value = append([]byte{}, data.Value()...)
 	return
 }
@@ -772,13 +787,17 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
 }
 
 // NewReader creates a new initialized table reader for the file.
-// The cache is optional and can be nil.
+// The cache and bpool is optional and can be nil.
 //
 // The returned table reader instance is goroutine-safe.
-func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, o *opt.Options) *Reader {
+func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) *Reader {
+	if bpool == nil {
+		bpool = util.NewBufferPool(o.GetBlockSize() + blockTrailerLen)
+	}
 	r := &Reader{
 		reader: f,
 		cache:  cache,
+		bpool:  bpool,
 		cmp:    o.GetComparer(),
 		checksum:   o.GetStrict(opt.StrictBlockChecksum),
 		strictIter: o.GetStrict(opt.StrictIterator),
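readRawBlock now asks snappy for the exact decoded size up front (snappy.DecodedLen) and decompresses into a buffer drawn from the shared BufferPool, returning the compressed buffer to the pool immediately. snappy.Decode reuses the destination slice when it is large enough and allocates otherwise, so pairing it with DecodedLen and a pool avoids a fresh allocation per block. A sketch of the same pattern using the modern snappy import path (the vendored code above still uses code.google.com/p/snappy-go/snappy), with get/put standing in for BufferPool.Get/Put:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// decodePooled decompresses src into a buffer obtained from get,
// mirroring the readRawBlock change above.
func decodePooled(src []byte, get func(n int) []byte, put func([]byte)) ([]byte, error) {
	n, err := snappy.DecodedLen(src)
	if err != nil {
		return nil, err
	}
	dst, err := snappy.Decode(get(n), src) // reuses dst when it is big enough
	if err != nil {
		put(dst)
		return nil, err
	}
	return dst, nil
}

func main() {
	compressed := snappy.Encode(nil, []byte("hello, leveldb block"))
	buf := make([]byte, 0, 64) // a one-buffer "pool" for the demo
	out, err := decodePooled(compressed,
		func(n int) []byte { return buf[:n] },
		func(b []byte) {},
	)
	fmt.Println(string(out), err)
}
```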
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go (generated, vendored; 4 lines changed)

@@ -59,7 +59,7 @@ var _ = testutil.Defer(func() {
 		It("Should be able to approximate offset of a key correctly", func() {
 			Expect(err).ShouldNot(HaveOccurred())
 
-			tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
+			tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o)
 			CheckOffset := func(key string, expect, threshold int) {
 				offset, err := tr.OffsetOf([]byte(key))
 				Expect(err).ShouldNot(HaveOccurred())
@@ -95,7 +95,7 @@ var _ = testutil.Defer(func() {
 			tw.Close()
 
 			// Opening the table.
-			tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
+			tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o)
 			return tableWrapper{tr}
 		}
 		Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go (generated, vendored; new file, 128 lines)

@@ -0,0 +1,128 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build go1.3

package util

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type buffer struct {
	b    []byte
	miss int
}

// BufferPool is a 'buffer pool'.
type BufferPool struct {
	pool      [4]sync.Pool
	size      [3]uint32
	sizeMiss  [3]uint32
	baseline0 int
	baseline1 int
	baseline2 int

	less    uint32
	equal   uint32
	greater uint32
	miss    uint32
}

func (p *BufferPool) poolNum(n int) int {
	switch {
	case n <= p.baseline0:
		return 0
	case n <= p.baseline1:
		return 1
	case n <= p.baseline2:
		return 2
	default:
		return 3
	}
}

// Get returns buffer with length of n.
func (p *BufferPool) Get(n int) []byte {
	if poolNum := p.poolNum(n); poolNum == 0 {
		// Fast path.
		if b, ok := p.pool[0].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				panic("not reached")
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		return make([]byte, n, p.baseline0)
	} else {
		sizePtr := &p.size[poolNum-1]

		if b, ok := p.pool[poolNum].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				atomic.AddUint32(&p.greater, 1)
				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
					p.pool[poolNum].Put(b)
				}
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
			if size == 0 {
				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
			} else {
				sizeMissPtr := &p.sizeMiss[poolNum-1]
				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
					atomic.StoreUint32(sizePtr, uint32(n))
					atomic.StoreUint32(sizeMissPtr, 0)
				}
			}
			return make([]byte, n)
		} else {
			return make([]byte, n, size)
		}
	}
}

// Put adds given buffer to the pool.
func (p *BufferPool) Put(b []byte) {
	p.pool[p.poolNum(cap(b))].Put(b)
}

func (p *BufferPool) String() string {
	return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v L·%d E·%d G·%d M·%d}",
		p.baseline0, p.size, p.sizeMiss, p.less, p.equal, p.greater, p.miss)
}

// NewBufferPool creates a new initialized 'buffer pool'.
func NewBufferPool(baseline int) *BufferPool {
	if baseline <= 0 {
		panic("baseline can't be <= 0")
	}
	return &BufferPool{
		baseline0: baseline,
		baseline1: baseline * 2,
		baseline2: baseline * 4,
	}
}
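BufferPool buckets requests into four size classes (up to the baseline, 2x, 4x, and everything larger) and keeps per-class hit/miss counters; the String method dumps those counters, which is what the new p.b.Log(p.db.s.tops.bpool) line in bench_test.go prints after a benchmark. Typical use is a symmetric Get/Put around a block read, roughly as below (assuming the upstream import path github.com/syndtr/goleveldb/leveldb/util rather than the vendored Godeps path):

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	pool := util.NewBufferPool(4096 + 5) // block size + trailer, as in table.go

	buf := pool.Get(1024) // len(buf) == 1024, drawn from the smallest class
	// ... fill buf from disk, parse it ...
	pool.Put(buf) // return it so the next Get can reuse the allocation

	again := pool.Get(1024)
	fmt.Println(len(again), pool.String()) // String shows the hit/miss counters
}
```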
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool_legacy.go (generated, vendored; new file, 143 lines)

@@ -0,0 +1,143 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build !go1.3

package util

import (
	"fmt"
	"sync/atomic"
)

type buffer struct {
	b    []byte
	miss int
}

// BufferPool is a 'buffer pool'.
type BufferPool struct {
	pool      [4]chan []byte
	size      [3]uint32
	sizeMiss  [3]uint32
	baseline0 int
	baseline1 int
	baseline2 int

	less    uint32
	equal   uint32
	greater uint32
	miss    uint32
}

func (p *BufferPool) poolNum(n int) int {
	switch {
	case n <= p.baseline0:
		return 0
	case n <= p.baseline1:
		return 1
	case n <= p.baseline2:
		return 2
	default:
		return 3
	}
}

// Get returns buffer with length of n.
func (p *BufferPool) Get(n int) []byte {
	poolNum := p.poolNum(n)
	pool := p.pool[poolNum]
	if poolNum == 0 {
		// Fast path.
		select {
		case b := <-pool:
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				panic("not reached")
			}
		default:
			atomic.AddUint32(&p.miss, 1)
		}

		return make([]byte, n, p.baseline0)
	} else {
		sizePtr := &p.size[poolNum-1]

		select {
		case b := <-pool:
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				atomic.AddUint32(&p.greater, 1)
				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
					select {
					case pool <- b:
					default:
					}
				}
			}
		default:
			atomic.AddUint32(&p.miss, 1)
		}

		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
			if size == 0 {
				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
			} else {
				sizeMissPtr := &p.sizeMiss[poolNum-1]
				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
					atomic.StoreUint32(sizePtr, uint32(n))
					atomic.StoreUint32(sizeMissPtr, 0)
				}
			}
			return make([]byte, n)
		} else {
			return make([]byte, n, size)
		}
	}
}

// Put adds given buffer to the pool.
func (p *BufferPool) Put(b []byte) {
	pool := p.pool[p.poolNum(cap(b))]
	select {
	case pool <- b:
	default:
	}
}

func (p *BufferPool) String() string {
	return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v L·%d E·%d G·%d M·%d}",
		p.baseline0, p.size, p.sizeMiss, p.less, p.equal, p.greater, p.miss)
}

// NewBufferPool creates a new initialized 'buffer pool'.
func NewBufferPool(baseline int) *BufferPool {
	if baseline <= 0 {
		panic("baseline can't be <= 0")
	}
	p := &BufferPool{
		baseline0: baseline,
		baseline1: baseline * 2,
		baseline2: baseline * 4,
	}
	for i, cap := range []int{6, 6, 3, 1} {
		p.pool[i] = make(chan []byte, cap)
	}
	return p
}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go (generated, vendored; new file, 21 lines)

@@ -0,0 +1,21 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build go1.3

package util

import (
	"sync"
)

type Pool struct {
	sync.Pool
}

func NewPool(cap int) *Pool {
	return &Pool{}
}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go (generated, vendored; new file, 33 lines)

@@ -0,0 +1,33 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build !go1.3

package util

type Pool struct {
	pool chan interface{}
}

func (p *Pool) Get() interface{} {
	select {
	case x := <-p.pool:
		return x
	default:
		return nil
	}
}

func (p *Pool) Put(x interface{}) {
	select {
	case p.pool <- x:
	default:
	}
}

func NewPool(cap int) *Pool {
	return &Pool{pool: make(chan interface{}, cap)}
}
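The _legacy variants exist because sync.Pool only appeared in Go 1.3; under the `!go1.3` build tag the same API is emulated with a buffered channel, where a full channel simply drops the Put and an empty one misses on Get. That is the standard pre-1.3 free-list idiom, shown standalone:

```go
package main

import "fmt"

// chanPool is a fixed-capacity free list: the pre-go1.3
// stand-in for sync.Pool used by pool_legacy.go above.
type chanPool struct {
	pool chan interface{}
}

func (p *chanPool) Get() interface{} {
	select {
	case x := <-p.pool:
		return x
	default:
		return nil // miss: caller allocates fresh
	}
}

func (p *chanPool) Put(x interface{}) {
	select {
	case p.pool <- x:
	default: // pool full: let the GC take x
	}
}

func main() {
	p := &chanPool{pool: make(chan interface{}, 2)}
	p.Put("a")
	p.Put("b")
	p.Put("c") // dropped: capacity is 2
	fmt.Println(p.Get(), p.Get(), p.Get()) // a b <nil>
}
```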
README.md (14 lines changed)

@@ -1,10 +1,11 @@
 syncthing
 =========
 
-[](https://travis-ci.org/syncthing/syncthing)
-[](https://coveralls.io/r/syncthing/syncthing?branch=master)
-[](http://godoc.org/github.com/syncthing/syncthing)
-[](http://opensource.org/licenses/MIT)
+[](http://build.syncthing.net/job/syncthing/lastSuccessfulBuild/artifact/)
+[](https://travis-ci.org/syncthing/syncthing)
+[](https://coveralls.io/r/syncthing/syncthing?branch=master)
+[](http://godoc.org/github.com/syncthing/syncthing)
+[](http://opensource.org/licenses/MIT)
 
 This is the `syncthing` project. The following are the project goals:
 
@@ -32,8 +33,9 @@ Signed Releases
 ---------------
 
 As of v0.7.0 and onwards, git tags and release binaries are GPG signed with
-the key BCE524C7 (http://nym.se/gpg.txt). The signature is included in the
-normal release bundle as `syncthing.asc` or `syncthing.exe.asc`.
+the key BCE524C7 (http://nym.se/gpg.txt). For release binaries, MD5 and
+SHA1 checksums are calculated and signed, available in the
+md5sum.txt.asc and sha1sum.txt.asc files.
 
 Documentation
 =============
File diff suppressed because one or more lines are too long
build.sh (38 lines changed)

@@ -1,4 +1,6 @@
 #!/usr/bin/env bash
+set -euo pipefail
+IFS=$'\n\t'
 
 export COPYFILE_DISABLE=true
 export GO386=387 # Don't use SSE on 32 bit builds
@@ -54,22 +56,11 @@ test() {
 	godep go test -cpu=1,2,4 $* ./...
 }
 
-sign() {
-	if git describe --exact-match 2>/dev/null >/dev/null ; then
-		# HEAD is a tag
-		id=BCE524C7
-		if gpg --list-keys "$id" >/dev/null 2>&1 ; then
-			gpg -ab -u "$id" "$1"
-		fi
-	fi
-}
-
 tarDist() {
 	name="$1"
 	rm -rf "$name"
 	mkdir -p "$name"
 	cp syncthing "${distFiles[@]}" "$name"
-	sign "$name/syncthing"
 	tar zcvf "$name.tar.gz" "$name"
 	rm -rf "$name"
 }
@@ -82,7 +73,6 @@ zipDist() {
 		GOARCH="" GOOS="" go run cmd/todos/main.go < "$f" > "$name/$f.txt"
 	done
 	cp syncthing.exe "$name"
-	sign "$name/syncthing.exe"
 	zip -r "$name.zip" "$name"
 	rm -rf "$name"
 }
@@ -121,11 +111,11 @@ transifex() {
 
 build-all() {
 	rm -f *.tar.gz *.zip
-	test -short || exit 1
+	test -short
 	assets
 
 	rm -rf bin Godeps/_workspace/pkg $GOPATH/pkg/*/github.com/syncthing
-	for os in darwin-amd64 freebsd-amd64 freebsd-386 linux-amd64 linux-386 windows-amd64 windows-386 solaris-amd64 ; do
+	for os in darwin-amd64 freebsd-amd64 freebsd-386 linux-amd64 linux-386 windows-amd64 windows-386 ; do
 		export GOOS=${os%-*}
 		export GOARCH=${os#*-}
 
@@ -165,9 +155,11 @@ build-all() {
 	tarDist "syncthing-linux-armv5-$version"
 }
 
-case "$1" in
-	"")
-		shift
+case "${1:-default}" in
+	default)
+		if [[ $# -gt 1 ]] ; then
+			shift
+		fi
 		export GOBIN=$(pwd)/bin
 		godep go install $* -ldflags "$ldflags" ./cmd/...
 		;;
@@ -200,7 +192,7 @@ case "$1" in
 
 	tar)
 		rm -f *.tar.gz *.zip
-		test -short || exit 1
+		test -short
 		assets
 		build
 
@@ -220,14 +212,6 @@ case "$1" in
 		build-all -tags noupgrade
 		;;
 
-	upload)
-		tag=$(git describe)
-		shopt -s nullglob
-		for f in *.tar.gz *.zip *.asc ; do
-			relup syncthing/syncthing "$tag" "$f"
-		done
-		;;
-
	deps)
 		deps
 		;;
@@ -253,6 +237,6 @@ case "$1" in
 		;;
 
 	*)
-		echo "Unknown build parameter $1"
+		echo "Unknown build command $1"
 		;;
 esac
@@ -503,9 +503,8 @@ func restGetLang(w http.ResponseWriter, r *http.Request) {
 	lang := r.Header.Get("Accept-Language")
 	var langs []string
 	for _, l := range strings.Split(lang, ",") {
-		if len(l) >= 2 {
-			langs = append(langs, l[:2])
-		}
+		parts := strings.SplitN(l, ";", 2)
+		langs = append(langs, strings.TrimSpace(parts[0]))
 	}
 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
 	json.NewEncoder(w).Encode(langs)
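The handler previously kept only the first two characters of each Accept-Language entry, which mangled region-qualified tags such as zh-TW and left quality parameters glued to bare tags. Splitting on ";" keeps the full tag and discards the q= weight. A sketch of what the rewritten loop produces:

```go
package main

import (
	"fmt"
	"strings"
)

// parseLangs mirrors the rewritten restGetLang loop: keep the full
// language tag, drop any ;q=... quality parameter.
func parseLangs(header string) []string {
	var langs []string
	for _, l := range strings.Split(header, ",") {
		parts := strings.SplitN(l, ";", 2)
		langs = append(langs, strings.TrimSpace(parts[0]))
	}
	return langs
}

func main() {
	fmt.Println(parseLangs("zh-TW,zh;q=0.9,en-US;q=0.8"))
	// [zh-TW zh en-US]; the old l[:2] version would have produced zh, zh, en
}
```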
@@ -2,8 +2,6 @@
 // All rights reserved. Use of this source code is governed by an MIT-style
 // license that can be found in the LICENSE file.
 
-// +build heapprof
-
 package main
 
 import (
@@ -16,7 +14,9 @@ import (
 )
 
 func init() {
-	go saveHeapProfiles()
+	if os.Getenv("STHEAPPROFILE") != "" {
+		go saveHeapProfiles()
+	}
 }
 
 func saveHeapProfiles() {
@@ -73,15 +73,17 @@ func init() {
|
||||
}
|
||||
|
||||
var (
|
||||
cfg config.Configuration
|
||||
myID protocol.NodeID
|
||||
confDir string
|
||||
logFlags int = log.Ltime
|
||||
rateBucket *ratelimit.Bucket
|
||||
stop = make(chan bool)
|
||||
discoverer *discover.Discoverer
|
||||
lockConn *net.TCPListener
|
||||
lockPort int
|
||||
cfg config.Configuration
|
||||
myID protocol.NodeID
|
||||
confDir string
|
||||
logFlags int = log.Ltime
|
||||
rateBucket *ratelimit.Bucket
|
||||
stop = make(chan bool)
|
||||
discoverer *discover.Discoverer
|
||||
lockConn *net.TCPListener
|
||||
lockPort int
|
||||
externalPort int
|
||||
cert tls.Certificate
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -104,9 +106,6 @@ The following enviroment variables are interpreted by syncthing:
|
||||
Set this variable when running under a service manager such as
|
||||
runit, launchd, etc.
|
||||
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
|
||||
profiler with HTTP access.
|
||||
|
||||
STTRACE A comma separated string of facilities to trace. The valid
|
||||
facility strings:
|
||||
- "beacon" (the beacon package)
|
||||
@@ -120,10 +119,19 @@ The following enviroment variables are interpreted by syncthing:
|
||||
- "xdr" (the xdr package)
|
||||
- "all" (all of the above)
|
||||
|
||||
STCPUPROFILE Write CPU profile to the specified file.
|
||||
|
||||
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.
|
||||
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
|
||||
profiler with HTTP access.
|
||||
|
||||
STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.
|
||||
|
||||
STHEAPPROFILE Write heap profiles to heap-$pid-$timestamp.pprof each time
|
||||
heap usage increases.
|
||||
|
||||
STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
|
||||
supported on Windows.
|
||||
|
||||
STDEADLOCKTIMEOUT Alter deadlock detection timeout (seconds; default 1200).`
|
||||
)
|
||||
|
||||
@@ -248,7 +256,7 @@ func main() {
|
||||
// Ensure that our home directory exists and that we have a certificate and key.
|
||||
|
||||
ensureDir(confDir, 0700)
|
||||
cert, err := loadCert(confDir, "")
|
||||
cert, err = loadCert(confDir, "")
|
||||
if err != nil {
|
||||
newCertificate(confDir, "")
|
||||
cert, err = loadCert(confDir, "")
|
||||
@@ -266,6 +274,8 @@ func main() {
|
||||
cfgFile := filepath.Join(confDir, "config.xml")
|
||||
go saveConfigLoop(cfgFile)
|
||||
|
||||
var myName string
|
||||
|
||||
// Load the configuration file, if it exists.
|
||||
// If it does not, create a template.
|
||||
|
||||
@@ -277,11 +287,16 @@ func main() {
|
||||
l.Fatalln(err)
|
||||
}
|
||||
cf.Close()
|
||||
myCfg := cfg.GetNodeConfiguration(myID)
|
||||
if myCfg == nil || myCfg.Name == "" {
|
||||
myName, _ = os.Hostname()
|
||||
} else {
|
||||
myName = myCfg.Name
|
||||
}
|
||||
} else {
|
||||
l.Infoln("No config file; starting with empty defaults")
|
||||
name, _ := os.Hostname()
|
||||
myName, _ = os.Hostname()
|
||||
defaultRepo := filepath.Join(getHomeDir(), "Sync")
|
||||
ensureDir(defaultRepo, 0755)
|
||||
|
||||
cfg, err = config.Load(nil, myID)
|
||||
cfg.Repositories = []config.RepositoryConfiguration{
|
||||
@@ -295,7 +310,7 @@ func main() {
|
||||
{
|
||||
NodeID: myID,
|
||||
Addresses: []string{"dynamic"},
|
||||
Name: name,
|
||||
Name: myName,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -358,7 +373,7 @@ func main() {
|
||||
if err != nil {
|
||||
l.Fatalln("leveldb.OpenFile():", err)
|
||||
}
|
||||
m := model.NewModel(confDir, &cfg, "syncthing", Version, db)
|
||||
m := model.NewModel(confDir, &cfg, myName, "syncthing", Version, db)
|
||||
|
||||
nextRepo:
|
||||
for i, repo := range cfg.Repositories {
|
||||
@@ -470,11 +485,8 @@ nextRepo:
|
||||
|
||||
// UPnP
|
||||
|
||||
var externalPort = 0
|
||||
if cfg.Options.UPnPEnabled {
|
||||
// We seed the random number generator with the node ID to get a
|
||||
// repeatable sequence of random external ports.
|
||||
externalPort = setupUPnP(rand.NewSource(certSeed(cert.Certificate[0])))
|
||||
setupUPnP()
|
||||
}
|
||||
|
||||
// Routine to connect out to configured nodes
|
||||
@@ -498,7 +510,7 @@ nextRepo:
|
||||
}
|
||||
|
||||
if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 {
|
||||
f, err := os.Create(cpuprof)
|
||||
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -561,8 +573,7 @@ func waitForParentExit() {
|
||||
l.Infoln("Continuing")
|
||||
}
|
||||
|
||||
-func setupUPnP(r rand.Source) int {
-var externalPort = 0
+func setupUPnP() {
if len(cfg.Options.ListenAddress) == 1 {
_, portStr, err := net.SplitHostPort(cfg.Options.ListenAddress[0])
if err != nil {

@@ -572,17 +583,11 @@ func setupUPnP(r rand.Source) int {
port, _ := strconv.Atoi(portStr)
igd, err := upnp.Discover()
if err == nil {
-for i := 0; i < 10; i++ {
-r := 1024 + int(r.Int63()%(65535-1024))
-err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", 0)
-if err == nil {
-externalPort = r
-l.Infoln("Created UPnP port mapping - external port", externalPort)
-break
-}
-}
+externalPort = setupExternalPort(igd, port)
+if externalPort == 0 {
+l.Warnln("Failed to create UPnP port mapping")
+} else {
+l.Infoln("Created UPnP port mapping - external port", externalPort)
+}
} else {
l.Infof("No UPnP gateway detected")

@@ -590,11 +595,57 @@ func setupUPnP(r rand.Source) int {
l.Debugf("UPnP: %v", err)
}
}
+if cfg.Options.UPnPRenewal > 0 {
+go renewUPnP(port)
+}
}
} else {
l.Warnln("Multiple listening addresses; not attempting UPnP port mapping")
}
-return externalPort
}

+func setupExternalPort(igd *upnp.IGD, port int) int {
+// We seed the random number generator with the node ID to get a
+// repeatable sequence of random external ports.
+rnd := rand.NewSource(certSeed(cert.Certificate[0]))
+for i := 0; i < 10; i++ {
+r := 1024 + int(rnd.Int63()%(65535-1024))
+err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", cfg.Options.UPnPLease*60)
+if err == nil {
+return r
+}
+}
+return 0
+}

+func renewUPnP(port int) {
+for {
+time.Sleep(time.Duration(cfg.Options.UPnPRenewal) * time.Minute)
+
+igd, err := upnp.Discover()
+if err != nil {
+continue
+}
+
+// Just renew the same port that we already have
+err = igd.AddPortMapping(upnp.TCP, externalPort, port, "syncthing", cfg.Options.UPnPLease*60)
+if err == nil {
+l.Infoln("Renewed UPnP port mapping - external port", externalPort)
+continue
+}
+
+// Something strange has happened. Perhaps the gateway has changed?
+// Retry the same port sequence from the beginning.
+r := setupExternalPort(igd, port)
+if r != 0 {
+externalPort = r
+l.Infoln("Updated UPnP port mapping - external port", externalPort)
+discoverer.StopGlobal()
+discoverer.StartGlobal(cfg.Options.GlobalAnnServer, uint16(r))
+continue
+}
+l.Warnln("Failed to update UPnP port mapping - external port", externalPort)
+}
+}
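
setupExternalPort seeds math/rand with a value derived from the node certificate, so every run walks the same pseudo-random sequence of candidate external ports; that is what lets renewUPnP recover the same mapping after a gateway restart. A standalone sketch of the idea (this certSeed is a stand-in, not syncthing's actual implementation):

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"
        "math/rand"
    )

    // certSeed is a stand-in: derive a stable int64 from certificate bytes.
    func certSeed(der []byte) int64 {
        sum := sha256.Sum256(der)
        return int64(binary.BigEndian.Uint64(sum[:8]))
    }

    // candidatePorts returns the first n external ports a node would try.
    // Because the source is seeded deterministically, the sequence is the
    // same on every run, which is what makes the renewal retry work.
    func candidatePorts(der []byte, n int) []int {
        rnd := rand.New(rand.NewSource(certSeed(der)))
        ports := make([]int, n)
        for i := range ports {
            ports[i] = 1024 + int(rnd.Int63()%(65535-1024))
        }
        return ports
    }

    func main() {
        cert := []byte("example certificate bytes")
        fmt.Println(candidatePorts(cert, 10))
    }
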
func resetRepositories() {

@@ -2,7 +2,7 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

-// +build perfstats
+// +build !windows

package main

@@ -15,7 +15,9 @@ import (
)

func init() {
-go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
+if os.Getenv("STPERFSTATS") != "" {
+go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
+}
}

func savePerfStats(file string) {
@@ -51,21 +51,21 @@ func main() {

var langs []string
for code, stat := range stats {
-shortCode := code[:2]
-if !curValidLangs[shortCode] {
+code = strings.Replace(code, "_", "-", 1)
+if !curValidLangs[code] {
if pct := 100 * stat.Translated / (stat.Translated + stat.Untranslated); pct < 95 {
-log.Printf("Skipping language %q (too low completion ratio %d%%)", shortCode, pct)
-os.Remove("lang-" + shortCode + ".json")
+log.Printf("Skipping language %q (too low completion ratio %d%%)", code, pct)
+os.Remove("lang-" + code + ".json")
continue
}
}

-langs = append(langs, shortCode)
-if shortCode == "en" {
+langs = append(langs, code)
+if code == "en" {
continue
}

-log.Printf("Updating language %q", shortCode)
+log.Printf("Updating language %q", code)

resp := req("https://www.transifex.com/api/2/project/syncthing/resource/gui/translation/" + code)
var t translation

@@ -75,7 +75,7 @@ func main() {
}
resp.Body.Close()

-fd, err := os.Create("lang-" + shortCode + ".json")
+fd, err := os.Create("lang-" + code + ".json")
if err != nil {
log.Fatal(err)
}

@@ -130,7 +130,7 @@ func loadValidLangs() []string {
}

var langs []string
-exp := regexp.MustCompile(`\[([a-z",]+)\]`)
+exp := regexp.MustCompile(`\[([a-zA-Z",-]+)\]`)
if matches := exp.FindSubmatch(bs); len(matches) == 2 {
langs = strings.Split(string(matches[1]), ",")
for i := range langs {
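
The widened pattern `\[([a-zA-Z",-]+)\]` lets loadValidLangs pick up region-qualified codes such as "zh-TW", which the old `\[([a-z",]+)\]` could not match. A self-contained illustration (the input line and the quote trimming are made up for the example):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    func main() {
        // A made-up line in the style of the GUI's embedded language list.
        bs := []byte(`var validLangs = ["da","de","en","pt-PT","zh-TW"]`)

        exp := regexp.MustCompile(`\[([a-zA-Z",-]+)\]`)
        if matches := exp.FindSubmatch(bs); len(matches) == 2 {
            langs := strings.Split(string(matches[1]), ",")
            for i := range langs {
                langs[i] = strings.Trim(langs[i], `"`)
            }
            fmt.Println(langs) // [da de en pt-PT zh-TW]
        }
    }
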
@@ -112,6 +112,8 @@ type OptionsConfiguration struct {
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60"`
StartBrowser bool `xml:"startBrowser" default:"true"`
UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
+UPnPLease int `xml:"upnpLeaseMinutes" default:"0"`
+UPnPRenewal int `xml:"upnpRenewalMinutes" default:"30"`
URAccepted int `xml:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)

Deprecated_UREnabled bool `xml:"urEnabled,omitempty" json:"-"`

@@ -138,6 +140,15 @@ func (cfg *Configuration) NodeMap() map[protocol.NodeID]NodeConfiguration {
return m
}

+func (cfg *Configuration) GetNodeConfiguration(nodeid protocol.NodeID) *NodeConfiguration {
+for i, node := range cfg.Nodes {
+if node.NodeID == nodeid {
+return &cfg.Nodes[i]
+}
+}
+return nil
+}
+
func (cfg *Configuration) RepoMap() map[string]RepositoryConfiguration {
m := make(map[string]RepositoryConfiguration, len(cfg.Repositories))
for _, r := range cfg.Repositories {
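
Note that GetNodeConfiguration returns a pointer into the cfg.Nodes slice rather than a copy, which is what lets main() both read and later persist the node's name in place. A small usage sketch with the types reduced to the relevant fields:

    package main

    import "fmt"

    type NodeID [32]byte

    type NodeConfiguration struct {
        NodeID NodeID
        Name   string
    }

    type Configuration struct {
        Nodes []NodeConfiguration
    }

    func (cfg *Configuration) GetNodeConfiguration(nodeid NodeID) *NodeConfiguration {
        for i := range cfg.Nodes {
            if cfg.Nodes[i].NodeID == nodeid {
                return &cfg.Nodes[i] // pointer into the slice, not a copy
            }
        }
        return nil
    }

    func main() {
        var id NodeID
        id[0] = 1
        cfg := Configuration{Nodes: []NodeConfiguration{{NodeID: id}}}

        if n := cfg.GetNodeConfiguration(id); n != nil && n.Name == "" {
            n.Name = "fallback-hostname" // mutates cfg.Nodes[0] directly
        }
        fmt.Println(cfg.Nodes[0].Name) // fallback-hostname
    }
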
@@ -36,6 +36,8 @@ func TestDefaultValues(t *testing.T) {
ReconnectIntervalS: 60,
StartBrowser: true,
UPnPEnabled: true,
+UPnPLease: 0,
+UPnPRenewal: 30,
}

cfg, err := Load(bytes.NewReader(nil), node1)

@@ -190,6 +192,8 @@ func TestOverriddenValues(t *testing.T) {
<reconnectionIntervalS>6000</reconnectionIntervalS>
<startBrowser>false</startBrowser>
<upnpEnabled>false</upnpEnabled>
+<upnpLeaseMinutes>60</upnpLeaseMinutes>
+<upnpRenewalMinutes>15</upnpRenewalMinutes>
</options>
</configuration>
`)

@@ -206,6 +210,8 @@ func TestOverriddenValues(t *testing.T) {
ReconnectIntervalS: 6000,
StartBrowser: false,
UPnPEnabled: false,
+UPnPLease: 60,
+UPnPRenewal: 15,
}

cfg, err := Load(bytes.NewReader(data), node1)
@@ -24,15 +24,25 @@ type Discoverer struct {
listenAddrs []string
localBcastIntv time.Duration
globalBcastIntv time.Duration
+errorRetryIntv time.Duration
+cacheLifetime time.Duration
beacon *beacon.Beacon
-registry map[protocol.NodeID][]string
+registry map[protocol.NodeID][]cacheEntry
registryLock sync.RWMutex
extServer string
extPort uint16
localBcastTick <-chan time.Time
+stopGlobal chan struct{}
+globalWG sync.WaitGroup
forcedBcastTick chan time.Time
extAnnounceOK bool
extAnnounceOKmut sync.Mutex
-globalBcastStop chan bool
}

+type cacheEntry struct {
+addr string
+seen time.Time
+}
+
var (

@@ -54,8 +64,10 @@ func NewDiscoverer(id protocol.NodeID, addresses []string, localPort int) (*Disc
listenAddrs: addresses,
localBcastIntv: 30 * time.Second,
globalBcastIntv: 1800 * time.Second,
+errorRetryIntv: 60 * time.Second,
+cacheLifetime: 5 * time.Minute,
beacon: b,
-registry: make(map[protocol.NodeID][]string),
+registry: make(map[protocol.NodeID][]cacheEntry),
}

go disc.recvAnnouncements()

@@ -70,11 +82,20 @@ func (d *Discoverer) StartLocal() {
}

func (d *Discoverer) StartGlobal(server string, extPort uint16) {
+// Wait for any previous announcer to stop before starting a new one.
+d.globalWG.Wait()
d.extServer = server
d.extPort = extPort
+d.stopGlobal = make(chan struct{})
+d.globalWG.Add(1)
go d.sendExternalAnnouncements()
}

+func (d *Discoverer) StopGlobal() {
+close(d.stopGlobal)
+d.globalWG.Wait()
+}
+
func (d *Discoverer) ExtAnnounceOK() bool {
d.extAnnounceOKmut.Lock()
defer d.extAnnounceOKmut.Unlock()

@@ -83,14 +104,28 @@ func (d *Discoverer) ExtAnnounceOK() bool {

func (d *Discoverer) Lookup(node protocol.NodeID) []string {
d.registryLock.Lock()
-addr, ok := d.registry[node]
+cached := d.filterCached(d.registry[node])
d.registryLock.Unlock()

-if ok {
-return addr
+if len(cached) > 0 {
+addrs := make([]string, len(cached))
+for i := range cached {
+addrs[i] = cached[i].addr
+}
+return addrs
} else if len(d.extServer) != 0 {
// We might want to cache this, but not permanently so it needs some intelligence
-return d.externalLookup(node)
+addrs := d.externalLookup(node)
+cached = make([]cacheEntry, len(addrs))
+for i := range addrs {
+cached[i] = cacheEntry{
+addr: addrs[i],
+seen: time.Now(),
+}
+}
+
+d.registryLock.Lock()
+d.registry[node] = cached
+d.registryLock.Unlock()
}
return nil
}
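
StartGlobal and StopGlobal above implement the usual Go shutdown handshake: a channel that is closed to signal stop, plus a WaitGroup so a restart never races a still-running announcer. The same pattern in miniature (worker stands in for sendExternalAnnouncements):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type announcer struct {
        stop chan struct{}
        wg   sync.WaitGroup
    }

    func (a *announcer) Start() {
        a.wg.Wait() // wait for any previous worker to finish first
        a.stop = make(chan struct{})
        a.wg.Add(1)
        go a.worker()
    }

    func (a *announcer) Stop() {
        close(a.stop) // signal...
        a.wg.Wait()   // ...and wait until the worker has exited
    }

    func (a *announcer) worker() {
        defer a.wg.Done()
        for {
            select {
            case <-a.stop:
                return
            case <-time.After(10 * time.Millisecond):
                // announce here
            }
        }
    }

    func main() {
        var a announcer
        a.Start()
        a.Stop()
        a.Start() // safe: Start waits for the old worker before reusing state
        a.Stop()
        fmt.Println("clean shutdown, twice")
    }
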
@@ -105,11 +140,11 @@ func (d *Discoverer) Hint(node string, addrs []string) {
})
}

-func (d *Discoverer) All() map[protocol.NodeID][]string {
+func (d *Discoverer) All() map[protocol.NodeID][]cacheEntry {
d.registryLock.RLock()
-nodes := make(map[protocol.NodeID][]string, len(d.registry))
+nodes := make(map[protocol.NodeID][]cacheEntry, len(d.registry))
for node, addrs := range d.registry {
-addrsCopy := make([]string, len(addrs))
+addrsCopy := make([]cacheEntry, len(addrs))
copy(addrsCopy, addrs)
nodes[node] = addrsCopy
}

@@ -149,21 +184,10 @@ func (d *Discoverer) sendLocalAnnouncements() {
Magic: AnnouncementMagic,
This: Node{d.myID[:], addrs},
}
+msg := pkt.MarshalXDR()

for {
-pkt.Extra = nil
-d.registryLock.RLock()
-for node, addrs := range d.registry {
-if len(pkt.Extra) == 16 {
-break
-}
-
-anode := Node{node[:], resolveAddrs(addrs)}
-pkt.Extra = append(pkt.Extra, anode)
-}
-d.registryLock.RUnlock()
-
-d.beacon.Send(pkt.MarshalXDR())
+d.beacon.Send(msg)

select {
case <-d.localBcastTick:

@@ -173,20 +197,19 @@
}

func (d *Discoverer) sendExternalAnnouncements() {
-// this should go in the Discoverer struct
-errorRetryIntv := 60 * time.Second
+defer d.globalWG.Done()

remote, err := net.ResolveUDPAddr("udp", d.extServer)
for err != nil {
-l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv)
-time.Sleep(errorRetryIntv)
+l.Warnf("Global discovery: %v; trying again in %v", err, d.errorRetryIntv)
+time.Sleep(d.errorRetryIntv)
remote, err = net.ResolveUDPAddr("udp", d.extServer)
}

conn, err := net.ListenUDP("udp", nil)
for err != nil {
-l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv)
-time.Sleep(errorRetryIntv)
+l.Warnf("Global discovery: %v; trying again in %v", err, d.errorRetryIntv)
+time.Sleep(d.errorRetryIntv)
conn, err = net.ListenUDP("udp", nil)
}

@@ -201,7 +224,10 @@ func (d *Discoverer) sendExternalAnnouncements() {
buf = d.announcementPkt()
}

-for {
+var bcastTick = time.Tick(d.globalBcastIntv)
+var errTick <-chan time.Time
+
+sendOneAnnouncement := func() {
var ok bool

if debug {

@@ -230,11 +256,32 @@ func (d *Discoverer) sendExternalAnnouncements() {
d.extAnnounceOKmut.Unlock()

if ok {
-time.Sleep(d.globalBcastIntv)
-} else {
-time.Sleep(errorRetryIntv)
+errTick = nil
+} else if errTick != nil {
+errTick = time.Tick(d.errorRetryIntv)
}
}

+// Announce once, immediately
+sendOneAnnouncement()
+
+loop:
+for {
+select {
+case <-d.stopGlobal:
+break loop
+
+case <-errTick:
+sendOneAnnouncement()
+
+case <-bcastTick:
+sendOneAnnouncement()
+}
+}
+
+if debug {
+l.Debugln("discover: stopping global")
+}
}
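
The announcer loop leans on a useful property of select: receiving from a nil channel blocks forever, so clearing errTick disables the fast-retry case while the regular bcastTick case keeps firing. A compact demonstration of that property (this is my sketch of the technique, not the diff's exact code; it re-arms the retry ticker only while it is nil):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        bcastTick := time.Tick(30 * time.Millisecond)
        var errTick <-chan time.Time // nil: this case never fires

        ok := false // pretend the first announcements fail
        for i := 0; i < 5; i++ {
            if ok {
                errTick = nil // success: fall back to the slow cadence
            } else if errTick == nil {
                errTick = time.Tick(10 * time.Millisecond) // failure: retry quickly
            }
            select {
            case <-errTick:
                fmt.Println("fast retry")
                ok = true
            case <-bcastTick:
                fmt.Println("regular announcement")
            }
        }
    }
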
func (d *Discoverer) recvAnnouncements() {

@@ -242,7 +289,7 @@ func (d *Discoverer) recvAnnouncements() {
buf, addr := d.beacon.Recv()

if debug {
-l.Debugf("discover: read announcement:\n%s", hex.Dump(buf))
+l.Debugf("discover: read announcement from %s:\n%s", addr, hex.Dump(buf))
}

var pkt Announce

@@ -251,20 +298,9 @@
continue
}

-if debug {
-l.Debugf("discover: parsed announcement: %#v", pkt)
-}
-
var newNode bool
if bytes.Compare(pkt.This.ID, d.myID[:]) != 0 {
newNode = d.registerNode(addr, pkt.This)
-for _, node := range pkt.Extra {
-if bytes.Compare(node.ID, d.myID[:]) != 0 {
-if d.registerNode(nil, node) {
-newNode = true
-}
-}
-}
}

if newNode {

@@ -276,41 +312,57 @@ func (d *Discoverer) recvAnnouncements() {
}

func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
-var addrs []string
+var id protocol.NodeID
+copy(id[:], node.ID)
+
+d.registryLock.RLock()
+current := d.filterCached(d.registry[id])
+d.registryLock.RUnlock()
+
+orig := current
+
for _, a := range node.Addresses {
var nodeAddr string
if len(a.IP) > 0 {
nodeAddr = fmt.Sprintf("%s:%d", net.IP(a.IP), a.Port)
-addrs = append(addrs, nodeAddr)
} else if addr != nil {
ua := addr.(*net.UDPAddr)
ua.Port = int(a.Port)
nodeAddr = ua.String()
-addrs = append(addrs, nodeAddr)
}
}
-if len(addrs) == 0 {
-if debug {
-l.Debugln("discover: no valid address for", node.ID)
+for i := range current {
+if current[i].addr == nodeAddr {
+current[i].seen = time.Now()
+goto done
+}
+}
+current = append(current, cacheEntry{
+addr: nodeAddr,
+seen: time.Now(),
+})
+done:
}

if debug {
-l.Debugf("discover: register: %s -> %#v", node.ID, addrs)
+l.Debugf("discover: register: %v -> %v", id, current)
}
-var id protocol.NodeID
-copy(id[:], node.ID)

d.registryLock.Lock()
-_, seen := d.registry[id]
-d.registry[id] = addrs
+d.registry[id] = current
d.registryLock.Unlock()

-if !seen {
+if len(current) > len(orig) {
+addrs := make([]string, len(current))
+for i := range current {
+addrs[i] = current[i].addr
+}
events.Default.Log(events.NodeDiscovered, map[string]interface{}{
"node": id.String(),
"addrs": addrs,
})
}
-return !seen
+
+return len(current) > len(orig)
}

func (d *Discoverer) externalLookup(node protocol.NodeID) []string {

@@ -374,10 +426,6 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
return nil
}

-if debug {
-l.Debugf("discover: parsed external: %#v", pkt)
-}
-
var addrs []string
for _, a := range pkt.This.Addresses {
nodeAddr := fmt.Sprintf("%s:%d", net.IP(a.IP), a.Port)

@@ -386,6 +434,21 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
return addrs
}

+func (d *Discoverer) filterCached(c []cacheEntry) []cacheEntry {
+for i := 0; i < len(c); {
+if ago := time.Since(c[i].seen); ago > d.cacheLifetime {
+if debug {
+l.Debugf("removing cached address %s: seen %v ago", c[i].addr, ago)
+}
+c[i] = c[len(c)-1]
+c = c[:len(c)-1]
+} else {
+i++
+}
+}
+return c
+}
+
func addrToAddr(addr *net.TCPAddr) Address {
if len(addr.IP) == 0 || addr.IP.IsUnspecified() {
return Address{Port: uint16(addr.Port)}
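
filterCached expires addresses in place with the swap-with-last idiom: an expired element is overwritten by the final element and the slice is shortened, which avoids extra allocation at the cost of ordering. The same helper in isolation:

    package main

    import (
        "fmt"
        "time"
    )

    type cacheEntry struct {
        addr string
        seen time.Time
    }

    // filterExpired keeps only entries seen within the lifetime. Order is
    // not preserved: removal swaps in the last element and truncates.
    func filterExpired(c []cacheEntry, lifetime time.Duration) []cacheEntry {
        for i := 0; i < len(c); {
            if time.Since(c[i].seen) > lifetime {
                c[i] = c[len(c)-1]
                c = c[:len(c)-1]
            } else {
                i++
            }
        }
        return c
    }

    func main() {
        now := time.Now()
        c := []cacheEntry{
            {"10.0.0.1:22000", now.Add(-10 * time.Minute)}, // stale
            {"10.0.0.2:22000", now},                        // fresh
        }
        c = filterExpired(c, 5*time.Minute)
        fmt.Println(c) // only the fresh entry remains
    }
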
files/filenames_darwin.go (11 lines, Normal file)
@@ -0,0 +1,11 @@
package files

import "code.google.com/p/go.text/unicode/norm"

func normalizedFilename(s string) string {
return norm.NFC.String(s)
}

func nativeFilename(s string) string {
return norm.NFD.String(s)
}
files/filenames_unix.go (13 lines, Normal file)
@@ -0,0 +1,13 @@
// +build !windows,!darwin

package files

import "code.google.com/p/go.text/unicode/norm"

func normalizedFilename(s string) string {
return norm.NFC.String(s)
}

func nativeFilename(s string) string {
return s
}
files/filenames_windows.go (15 lines, Normal file)
@@ -0,0 +1,15 @@
package files

import (
"path/filepath"

"code.google.com/p/go.text/unicode/norm"
)

func normalizedFilename(s string) string {
return norm.NFC.String(filepath.ToSlash(s))
}

func nativeFilename(s string) string {
return filepath.FromSlash(s)
}
files/set.go (53 lines changed)
@@ -2,7 +2,12 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

-// Package files provides a set type to track local/remote files with newness checks.
+// Package files provides a set type to track local/remote files with newness
+// checks. We must do a certain amount of normalization in here. We will get
+// fed paths with either native or wire-format separators and encodings
+// depending on who calls us. We transform paths to wire-format (NFC and
+// slashes) on the way to the database, and transform to native format
+// (varying separator and encoding) on the way back out.
package files

import (

@@ -56,6 +61,7 @@ func (s *Set) Replace(node protocol.NodeID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Replace(%v, [%d])", s.repo, node, len(fs))
}
+normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
s.localVersion[node] = ldbReplace(s.db, []byte(s.repo), node[:], fs)

@@ -65,6 +71,7 @@ func (s *Set) ReplaceWithDelete(node protocol.NodeID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.repo, node, len(fs))
}
+normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbReplaceWithDelete(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {

@@ -76,6 +83,7 @@ func (s *Set) Update(node protocol.NodeID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Update(%v, [%d])", s.repo, node, len(fs))
}
+normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbUpdate(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {

@@ -87,54 +95,58 @@ func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeed(%v)", s.repo, node)
}
-ldbWithNeed(s.db, []byte(s.repo), node[:], false, fn)
+ldbWithNeed(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
}

func (s *Set) WithNeedTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeedTruncated(%v)", s.repo, node)
}
-ldbWithNeed(s.db, []byte(s.repo), node[:], true, fn)
+ldbWithNeed(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
}

func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithHave(%v)", s.repo, node)
}
-ldbWithHave(s.db, []byte(s.repo), node[:], false, fn)
+ldbWithHave(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
}

func (s *Set) WithHaveTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithHaveTruncated(%v)", s.repo, node)
}
-ldbWithHave(s.db, []byte(s.repo), node[:], true, fn)
+ldbWithHave(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
}

func (s *Set) WithGlobal(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobal()", s.repo)
}
-ldbWithGlobal(s.db, []byte(s.repo), false, fn)
+ldbWithGlobal(s.db, []byte(s.repo), false, nativeFileIterator(fn))
}

func (s *Set) WithGlobalTruncated(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobalTruncated()", s.repo)
}
-ldbWithGlobal(s.db, []byte(s.repo), true, fn)
+ldbWithGlobal(s.db, []byte(s.repo), true, nativeFileIterator(fn))
}

func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo {
-return ldbGet(s.db, []byte(s.repo), node[:], []byte(file))
+f := ldbGet(s.db, []byte(s.repo), node[:], []byte(normalizedFilename(file)))
+f.Name = nativeFilename(f.Name)
+return f
}

func (s *Set) GetGlobal(file string) protocol.FileInfo {
-return ldbGetGlobal(s.db, []byte(s.repo), []byte(file))
+f := ldbGetGlobal(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
+f.Name = nativeFilename(f.Name)
+return f
}

func (s *Set) Availability(file string) []protocol.NodeID {
-return ldbAvailability(s.db, []byte(s.repo), []byte(file))
+return ldbAvailability(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
}

func (s *Set) LocalVersion(node protocol.NodeID) uint64 {

@@ -142,3 +154,24 @@ func (s *Set) LocalVersion(node protocol.NodeID) uint64 {
defer s.mutex.Unlock()
return s.localVersion[node]
}
+
+func normalizeFilenames(fs []protocol.FileInfo) {
+for i := range fs {
+fs[i].Name = normalizedFilename(fs[i].Name)
+}
+}
+
+func nativeFileIterator(fn fileIterator) fileIterator {
+return func(fi protocol.FileIntf) bool {
+switch f := fi.(type) {
+case protocol.FileInfo:
+f.Name = nativeFilename(f.Name)
+return fn(f)
+case protocol.FileInfoTruncated:
+f.Name = nativeFilename(f.Name)
+return fn(f)
+default:
+panic("unknown interface type")
+}
+}
+}
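
Together, the three filenames_*.go files and the normalizeFilenames/nativeFileIterator wrappers give the database a single canonical form: NFC with forward slashes. A sketch of the round trip under the darwin rules, using golang.org/x/text/unicode/norm, the maintained home of the go.text package imported above (HFS+ stores names in a decomposed form, hence NFD on the way out):

    package main

    import (
        "fmt"
        "path/filepath"

        "golang.org/x/text/unicode/norm"
    )

    // toWireFormat is what the database sees: NFC, forward slashes.
    func toWireFormat(s string) string {
        return norm.NFC.String(filepath.ToSlash(s))
    }

    // toDarwinNative mirrors filenames_darwin.go: paths go back out as NFD.
    func toDarwinNative(s string) string {
        return norm.NFD.String(s)
    }

    func main() {
        name := "r\u00e9sum\u00e9.txt" // "résumé.txt" with precomposed é
        wire := toWireFormat(name)
        native := toDarwinNative(wire)
        // NFD is longer: each é becomes e plus a combining accent.
        fmt.Println(len(wire), len(native)) // 12 14
    }
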
gui/app.js (24 lines changed)
@@ -86,11 +86,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.upgradeInfo = {};

$http.get(urlbase+"/lang").success(function (langs) {
-var lang;
+// Find the first language in the list provided by the user's browser
+// that is a prefix of a language we have available. That is, "en"
+// sent by the browser will match "en" or "en-US", while "zh-TW" will
+// match only "zh-TW" and not "zh-CN".
+
+var lang, matching;
for (var i = 0; i < langs.length; i++) {
lang = langs[i];
-if (validLangs.indexOf(lang) >= 0) {
-$translate.use(lang);
+matching = validLangs.filter(function (l) {
+return l.indexOf(lang) == 0;
+});
+if (matching.length >= 1) {
+$translate.use(matching[0]);
break;
}
}
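
The matching rule in the comment, where a browser code matches any available language it prefixes, is worth seeing outside the Angular plumbing. A Go rendering of the same selection logic (pickLanguage is an invented name for the sketch):

    package main

    import (
        "fmt"
        "strings"
    )

    // pickLanguage returns the first browser language that is a prefix of
    // an available language, mirroring the gui/app.js loop above: "en"
    // matches "en" or "en-US", while "zh-TW" matches only "zh-TW".
    func pickLanguage(browserLangs, validLangs []string) (string, bool) {
        for _, lang := range browserLangs {
            for _, v := range validLangs {
                if strings.HasPrefix(v, lang) {
                    return v, true
                }
            }
        }
        return "", false
    }

    func main() {
        valid := []string{"de", "en", "zh-CN", "zh-TW"}
        fmt.Println(pickLanguage([]string{"zh-TW", "en"}, valid)) // zh-TW true
        // Note the asymmetry: browser "en-US" does not match plain "en".
        fmt.Println(pickLanguage([]string{"sv", "en-US"}, valid)) // "" false
    }
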
@@ -747,8 +755,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
cfg.APIKey = randomString(30, 32);
};
-
-

$scope.acceptUR = function () {
$scope.config.Options.URAccepted = 1000; // Larger than the largest existing report version
$scope.saveConfig();

@@ -786,9 +792,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
};

$scope.override = function (repo) {
-$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo)).success(function () {
-$scope.refresh();
-});
+$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo));
};

$scope.about = function () {

@@ -799,6 +803,10 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.reportPreview = true;
};

+$scope.rescanRepo = function (repo) {
+$http.post(urlbase + "/scan?repo=" + encodeURIComponent(repo));
+};
+
$scope.init();
setInterval($scope.refresh, 10000);
});

gui/index.html
@@ -230,6 +230,7 @@
</table>
</div>
<span class="pull-right">
+<a class="btn btn-sm btn-default" href="" ng-show="repoStatus(repo.ID) == 'idle'" ng-click="rescanRepo(repo.ID)"><span class="glyphicon glyphicon-refresh"></span> <span translate>Rescan</span></a>
<a class="btn btn-sm btn-primary" href="" ng-click="editRepo(repo)"><span class="glyphicon glyphicon-pencil"></span> <span translate>Edit</span></a>
<a class="btn btn-sm btn-danger" ng-if="repo.ReadOnly && model[repo.ID].needFiles > 0" ng-click="override(repo.ID)" href=""><span class="glyphicon glyphicon-upload"></span> <span translate>Override Changes</span></a>
</span>

@@ -439,7 +440,8 @@
<div class="form-group">
<label translate for="name">Node Name</label>
<input placeholder="Home Server" id="name" class="form-control" type="text" ng-model="currentNode.Name"></input>
-<p translate class="help-block">Shown instead of Node ID in the cluster status.</p>
+<p translate ng-if="currentNode.NodeID == myID" class="help-block">Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.</p>
+<p translate ng-if="currentNode.NodeID != myID" class="help-block">Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.</p>
</div>
<div class="form-group">
<label translate for="addresses">Addresses</label>

@@ -752,7 +754,7 @@

<p translate>Syncthing includes the following software or portions thereof:</p>
<ul>
-<li><a href="http://golang.org/">The Go Programming Languange</a>, Copyright © 2012 The Go Authors.</li>
+<li><a href="http://golang.org/">The Go Programming Language</a>, Copyright © 2012 The Go Authors.</li>
<li><a href="https://bitbucket.org/kardianos/osext">kardianos/osext</a>, Copyright © 2012 Daniel Theophanes.</li>
<li><a href="https://code.google.com/p/snappy-go/">snappy-go</a>, Copyright © 2011 The Snappy-Go Authors.</li>
<li><a href="https://github.com/golang/groupcache">groupcache/lru</a>, Copyright © 2013 Google Inc.</li>
gui/lang-da.json
@@ -64,6 +64,7 @@
"Repository ID": "Lagrings-ID",
"Repository Master": "Hovedlagring",
"Repository Path": "Sti til lagring",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Genscanningsinterval (s)",
"Restart": "Genstart",
"Restart Needed": "Programmet kræver genstart",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Kort identifikation for denne lagring. Skal være ens på alle noder i clusteret.",
"Show ID": "Vis ID",
"Shown instead of Node ID in the cluster status.": "Vises i stedet for node-ID under clusterstatus.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Luk ned",
"Source Code": "Kildekode",
"Start Browser": "Start browser",

gui/lang-de.json
@@ -64,6 +64,7 @@
"Repository ID": "Verzeichnis-ID",
"Repository Master": "Originalverzeichnis",
"Repository Path": "Pfad zum Verzeichnis",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Suchintervall (s)",
"Restart": "Neustart",
"Restart Needed": "Neustart notwendig",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Kurze ID für das Verzeichnis. Muss auf allen Verbunds-Knoten gleich sein.",
"Show ID": "ID anzeigen",
"Shown instead of Node ID in the cluster status.": "Wird anstatt der Knoten-ID im Verbunds-Status angezeigt.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Herunterfahren",
"Source Code": "Sourcecode",
"Start Browser": "Starte Browser",

gui/lang-el.json
@@ -64,6 +64,7 @@
"Repository ID": "ID Αποθετηρίου",
"Repository Master": "Repository Master",
"Repository Path": "Μονοπάτι Αποθετηρίου",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Χρονικό διάστημα Επανασάρρωσης (s)",
"Restart": "Επανεκκίνηση",
"Restart Needed": "Απαιτείται Επανεκκίνηση",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Σύντομη περιγραφή του αποθετηρίου. Θα πρέπει να είναι το ίδιο σε όλους τους κόμβους του cluster.",
"Show ID": "Εμφάνιση ID",
"Shown instead of Node ID in the cluster status.": "Εμφάνιση στη θέση του ID Αποθετηρίου, στην κατάσταση του cluster.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Απενεργοποίηση",
"Source Code": "Πηγαίος Κώδικας",
"Start Browser": "Έναρξη Φυλλομετρητή",

gui/lang-en.json
@@ -64,6 +64,7 @@
"Repository ID": "Repository ID",
"Repository Master": "Repository Master",
"Repository Path": "Repository Path",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Rescan Interval (s)",
"Restart": "Restart",
"Restart Needed": "Restart Needed",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Short identifier for the repository. Must be the same on all cluster nodes.",
"Show ID": "Show ID",
"Shown instead of Node ID in the cluster status.": "Shown instead of Node ID in the cluster status.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Shutdown",
"Source Code": "Source Code",
"Start Browser": "Start Browser",

gui/lang-es.json
@@ -64,6 +64,7 @@
"Repository ID": "ID de repositorio",
"Repository Master": "Repositorio maestro",
"Repository Path": "Ruta del repositorio",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Intervalo de reescaneo (s)",
"Restart": "Reiniciar",
"Restart Needed": "Es necesario reiniciar",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Identificador corto para el repositorio. Debe ser el mismo en todos los nodos del clúster.",
"Show ID": "Mostrar ID",
"Shown instead of Node ID in the cluster status.": "Mostrar en lugar de ID de nodo en estado de cluster.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Apagar",
"Source Code": "Código fuente",
"Start Browser": "Iniciar navegador",

gui/lang-fr.json
@@ -64,6 +64,7 @@
"Repository ID": "ID du répertoire",
"Repository Master": "Répertoire maître",
"Repository Path": "Chemin du répertoire",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Intervalle de rescan (s)",
"Restart": "Redémarrer",
"Restart Needed": "Redémarrage nécessaire",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Identifiant court pour le répertoire. Il doit être le même sur l'ensemble des nœuds du cluster.",
"Show ID": "Montrer l'ID",
"Shown instead of Node ID in the cluster status.": "Affiché à la place de l'ID du nœud au sein du cluster.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Éteindre",
"Source Code": "Code source",
"Start Browser": "Démarrer le navigateur web",
gui/lang-hu.json (123 lines, Normal file)
@@ -0,0 +1,123 @@
{
"API Key": "API kulcs",
"About": "Névjegy",
"Add Node": "Csomópont hozzáadása",
"Add Repository": "Tároló hozzáadása",
"Address": "Cím",
"Addresses": "Címek",
"Allow Anonymous Usage Reporting?": "Engedélyezed a névtelen felhasználási statisztikai adatok küldését?",
"Announce Server": "Cím hirdető szerver",
"Anonymous Usage Reporting": "Névtelen felhasználási statisztikák küldése",
"Bugs": "Hibák",
"CPU Utilization": "Processzor használat",
"Close": "Bezárás",
"Connection Error": "Kapcsolódási hiba",
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg és a következő Közreműködők",
"Delete": "Törlés",
"Disconnected": "Kapcsolat bontása",
"Documentation": "Dokumentáció",
"Download Rate": "Letöltési sebesség",
"Edit": "Szerkesztés",
"Edit Node": "Csomópont szerkesztése",
"Edit Repository": "Tároló szerkesztése",
"Enable UPnP": "UPnP engedélyezése",
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Add meg kettősponttal elválasztva \"ip:port\" a címet vagy add meg a \"dynamic\" szót az a cím automatikus észleléséhez.",
"Error": "Hiba",
"File Versioning": "File verziózás",
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "A file jogosultásgok figyelmen kívül hagyása. FAT file-rendszernél haszálatos.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "A file-ok időpecsételt verziói a .stversions mappában kerülnek áthelyezésre, amikor felülírásra vagy törlésre kerülnek a Síncthing által.",
"Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "A file-ok védve lesznek, a többi csomóponton történt változástól, de ezen a csomóponton történt változás el lesz küldve a többi csomópontra.",
"Folder": "Mappa",
"GUI Authentication Password": "GUI jelszó",
"GUI Authentication User": "GUI felhastnáló",
"GUI Listen Addresses": "GUI port",
"Generate": "Generálás",
"Global Discovery": "Globális csomópont keresés",
"Global Discovery Server": "Globális csomópont kereséshez használt szerver",
"Global Repository": "Globális tároló",
"Idle": "Tétlen",
"Ignore Permissions": "Jogosultságok figyelmen kívül hagyása",
"Keep Versions": "Verziók megtartása",
"Latest Release": "Utolsó kiadás",
"Local Discovery": "Helyi csomópont keresés",
"Local Discovery Port": "Helyi csomópont keresés port-ja",
"Local Repository": "Helyi tároló",
"Master Repo": "Központi tároló",
"Max File Change Rate (KiB/s)": "Maximális file változás sebessége (KiB/mp)",
"Max Outstanding Requests": "Maximális kimenő kérés",
"No": "Nem",
"Node ID": "Csomópont azonosító",
"Node Identification": "Csomópont azonosítás",
"Node Name": "Csomópont név",
"Notice": "Megjegyzés",
"OK": "Rendben",
"Offline": "Nincs kpcsolat",
"Online": "Kapcsolódva",
"Out Of Sync": "Nincs szinkronban",
"Outgoing Rate Limit (KiB/s)": "Kimenő sávszélesség (KiB/mp)",
"Override Changes": "Változtatások felülbírálása",
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "A tároló útvonala ezen a számítógépen. Amennyiben nem létezik automatikusan létrehozzuk. A hullámvonal (~) karakter használható a rövidítésre",
"Please wait": "Kérem várj",
"Preview Usage Report": "Felhasználási adatok átnézése",
"RAM Utilization": "Memória használat",
"Reconnect Interval (s)": "Újracsatlakozási intervallum (mp)",
"Repository ID": "Tároló azonosító",
"Repository Master": "Központi tároló",
"Repository Path": "Tároló útvonala",
"Rescan": "Újraátvizsgálás",
"Rescan Interval (s)": "Átnézési intervallum (mp)",
"Restart": "Újraindítás",
"Restart Needed": "Újraindítás szükséges",
"Restarting": "Újraindulás",
"Save": "Mentés",
"Scanning": "Átnézés",
"Select the nodes to share this repository with.": "Válaszd ki a csomópontokat amikkel a tárolót megosszuk",
"Settings": "Beállítások",
"Share With Nodes": "Megosztás a csomópontokkal",
"Shared With": "Megosztva velük",
"Short identifier for the repository. Must be the same on all cluster nodes.": "A tároló rövid azonosítója. Ugyanannak kell lennie minden fürtbeli csomóponton.",
"Show ID": "Azonosító mutatáas",
"Shown instead of Node ID in the cluster status.": "A csomópont azonosító helyett jelenik meg a fürt állapotánál.",
"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Leállítás",
"Source Code": "Forráskód",
"Start Browser": "Böngésző indítása",
"Stopped": "Leállítva",
"Support / Forum": "Támogatás / Fórum",
"Sync Protocol Listen Addresses": "Szinkronizációs protokoll hallgatózási címe",
"Synchronization": "Szinkronizálás",
"Syncing": "Syncthing",
"Syncthing has been shut down.": "Syncthing leállítva",
"Syncthing includes the following software or portions thereof:": "Syncthing a következő programokat, vagy programkomponenseket tartalmazza.",
"Syncthing is restarting.": "Syncthing újraindul",
"Syncthing is upgrading.": "Syncthing frissül",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "A Syncthing úgy tűnik, hogy nem működik, vagy valami probléma van az hálózati kapcsolattal. Újra próbálom...",
"The aggregated statistics are publicly available at {%url%}.": "Az összevont statisztikák nyilvánosan elérhetők a {{url}} címen.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "A beállítások ok elmentésre kerültek, de nem lettek aktiválva. Indítsad újra a Syncthing-et, hogy aktíváld őket.",
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "A titkosított felhasználási jelentés naponta kerül elküldésre. Arra használjuk, hogy a futtató platformot, tároló méreteket illetve program verziókat kövessük nyomon. Amennyiben ez változik akkor újra meg fog jelenni ez az ablak.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "A beírt csomópont azonosító nem tűnik megfelelőnek. 52 karakter hosszúnak kell lennie és csak számokat illetve betűket tartalmazhat, amit szóközök illetve kötőjelek tagolhatnak.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "A beírt csomópont azonosító nem tűnik megfelelőnek. 52 vagy 56 karakteresnek kell lennie csak számot és betűt kell tartalmaznia és szóközökkel vagy kötőjelekkel lehet tagolva.",
"The node ID cannot be blank.": "A csomópont azonosító nem lehet üres",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "A csomópont azonosító a túloldalon a \"Beállítások > Azonosító mutatása\" alatt található. A szóközök illetve a kötőjelek opcionálisak (kihagyhatóak). ",
"The number of old versions to keep, per file.": "Mennyi régi változatot tartsunk meg a file-okból",
"The number of versions must be a number and cannot be blank.": "A megtartott változatok száma nem lehet üres",
"The repository ID cannot be blank.": "A tároló azonosító nem lehet üres",
"The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "A tároló azonosító egy rövid (maximálisan 64 karakteres) csak számokat, betűket, pontot (.), kötőjelet (-) illetve aláhúzást (_) tartalmazó karakterlánc",
"The repository ID must be unique.": "A tároló azonosítónak egyedinek kell lennie",
"The repository path cannot be blank.": "A tároló útvonala nem lehet üres",
"Unknown": "Ismeretlen",
"Up to Date": "Friss",
"Upgrade To {%version%}": "Frissítés a {{version}} verzióra",
"Upgrading": "Frissítés",
"Upload Rate": "Feltöltési sebesség",
"Usage": "Használat",
"Use Compression": "Tömörítés használata",
"Use HTTPS for GUI": "HTTPS használata a GUI-hoz",
"Version": "Verzió",
"When adding a new node, keep in mind that this node must be added on the other side too.": "Amikor új csomópontot adsz hozzá, tartsd észben, hogy azt a túloldalhoz is hozzá kell majd adni.",
"When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Amikor hozzáadod a tárolót, tartsad észben, hogy a Tároló azonosító az ami összeköti a csomópontokat. Kis-nagybetű érzékeny, és pontosan ugyan úgy kell azokat megadni mindegyik csomóponton.",
"Yes": "Igen",
"You must keep at least one version.": "Legalább egy verziót meg kell tartanod",
"items": "tételek"
}
gui/lang-it.json
@@ -64,6 +64,7 @@
"Repository ID": "ID Deposito",
"Repository Master": "Deposito Principale",
"Repository Path": "Percorso del Deposito",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Intervallo di Scansione dei File (s)",
"Restart": "Riavvia",
"Restart Needed": "Riavvio Necessario",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Breve identificatore del deposito. Deve essere lo stesso su tutti i nodi del cluster.",
"Show ID": "Mostra ID",
"Shown instead of Node ID in the cluster status.": "Visibile al posto dell'ID Nodo nello stato del cluster.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Arresta",
"Source Code": "Codice Sorgente",
"Start Browser": "Avvia Browser",

gui/lang-nl.json
@@ -64,6 +64,7 @@
"Repository ID": "Repository ID",
"Repository Master": "Hoofd repository",
"Repository Path": "Pad van repository",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Herscan interval (s)",
"Restart": "Herstart",
"Restart Needed": "Herstart nodig",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Korte naam voor de repository. Moet hetzelfde zijn op alle nodes in het cluster.",
"Show ID": "Toon ID",
"Shown instead of Node ID in the cluster status.": "Wordt weergegeven i.p.v. het node ID in de cluster status",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Sluit af",
"Source Code": "Broncode",
"Start Browser": "Start browser",

gui/lang-pt.json
@@ -64,6 +64,7 @@
"Repository ID": "ID do repositório",
"Repository Master": "Repositório mestre",
"Repository Path": "Caminho do repositório",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Intervalo entre varrimentos (s)",
"Restart": "Reiniciar",
"Restart Needed": "É preciso reiniciar",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Identificador curto para o repositório. Tem que ser igual em todos os nós do agrupamento.",
"Show ID": "Mostrar ID",
"Shown instead of Node ID in the cluster status.": "Apresentado ao invés do ID do nó no estado do agrupamento.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Desligar",
"Source Code": "Código fonte",
"Start Browser": "Iniciar navegador",

gui/lang-ru.json
@@ -64,6 +64,7 @@
"Repository ID": "ID Репозитория",
"Repository Master": "Главный Репозиторий",
"Repository Path": "Путь к Репозиторию",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Интервал между сканированием (сек)",
"Restart": "Перезапуск",
"Restart Needed": "Требуется перезапуск",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Короткий идентификатор репозитория. Должен быть одинаковый на всех узлах кластера.",
"Show ID": "Показать ID",
"Shown instead of Node ID in the cluster status.": "Отображается вместо ID узла в статусе кластера.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Выключить",
"Source Code": "Исходный код",
"Start Browser": "Открыть браузер",

gui/lang-sv.json
@@ -64,6 +64,7 @@
"Repository ID": "Lagrings-ID",
"Repository Master": "Huvudlagring",
"Repository Path": "Lagringskatalog",
+"Rescan": "Scanna om",
"Rescan Interval (s)": "Förnyelseintervall (s)",
"Restart": "Starta om",
"Restart Needed": "Omstart behövs",

@@ -77,8 +78,10 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Kort identifieringssträng för förvaringen. Måste vara samma på alla noder i klustern.",
"Show ID": "Visa ID",
"Shown instead of Node ID in the cluster status.": "Visas i stället för nod-ID",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Visas istället för nod-ID. Skickas till andra noder som namn på denna nod.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Visas istället för nod-ID. Sätts till namnet på den andra noden vid första anslutning om det lämnas blankt.",
"Shutdown": "Stäng av",
-"Source Code": "Kälkod",
+"Source Code": "Källkod",
"Start Browser": "Starta browser",
"Stopped": "Stoppad",
"Support / Forum": "Support / Forum",

gui/lang-tr.json
@@ -64,6 +64,7 @@
"Repository ID": "Depo ID",
"Repository Master": "Ana depo",
"Repository Path": "Depo Yolu",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Yeni tarama süresi (sn)",
"Restart": "Yeniden Başlat",
"Restart Needed": "Yeniden başlatma gereklidir",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Depo için kısa tanımlayıcı. Kümedeki tüm düğümlerde aynı olmalı.",
"Show ID": "ID Göster",
"Shown instead of Node ID in the cluster status.": "Ana ekranda Düğüm ID yerine bunu göster.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Kapat",
"Source Code": "Kaynak Kodu",
"Start Browser": "Tarayıcıyı Başlat",

gui/lang-uk.json
@@ -64,6 +64,7 @@
"Repository ID": "ID репозиторія",
"Repository Master": "Центральний репозиторій",
"Repository Path": "Шлях до репозиторія",
+"Rescan": "Rescan",
"Rescan Interval (s)": "Інтервал для повторного сканування (с)",
"Restart": "Перезапуск",
"Restart Needed": "Необхідний перезапуск",

@@ -77,6 +78,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Короткий ідентифікатор репозиторія. Повинен бути однаковим на всіх вузлах кластера.",
"Show ID": "Показати ID",
"Shown instead of Node ID in the cluster status.": "Показано замість ID вузла в статусі кластера.",
+"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
+"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shutdown": "Вимкнути",
"Source Code": "Сирцевий код",
"Start Browser": "Запустити браузер",
123
gui/lang-zh-CN.json
Normal file
123
gui/lang-zh-CN.json
Normal file
@@ -0,0 +1,123 @@
|
||||
{
    "API Key": "API Key",
    "About": "关于",
    "Add Node": "添加节点",
    "Add Repository": "添加仓库",
    "Address": "地址",
    "Addresses": "地址列表",
    "Allow Anonymous Usage Reporting?": "允许匿名使用报告?",
    "Announce Server": "Announce服务器",
    "Anonymous Usage Reporting": "匿名使用报告",
    "Bugs": "程序错误",
    "CPU Utilization": "CPU使用率",
    "Close": "关闭",
    "Connection Error": "连接出错",
    "Copyright © 2014 Jakob Borg and the following Contributors:": "版权© 2014 Jakob Borg 及以下贡献者:",
    "Delete": "删除",
    "Disconnected": "连接已断开",
    "Documentation": "文档",
    "Download Rate": "下载速度",
    "Edit": "选项",
    "Edit Node": "编辑节点",
    "Edit Repository": "编辑仓库",
    "Enable UPnP": "开启UPnP",
    "Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "输入以半角逗号分隔的\"ip:端口\"设置可用地址列表,或者输入\"dynamic\"表示自动寻找地址。",
    "Error": "错误",
    "File Versioning": "版本控制",
    "File permission bits are ignored when looking for changes. Use on FAT filesystems.": "监控文件改变时,忽略文件权限位。用于FAT文件系统。",
    "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "当文件被syncthing修改或者删除时,将会被移动到.stversions文件夹已保留历史版本。",
    "Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "其他节点上的改变不会影响本节点,但对本节点上作出的改变会被发送到节点群中的其他节点。",
    "Folder": "文件夹",
    "GUI Authentication Password": "登录管理页面的密码",
    "GUI Authentication User": "登录管理页面的用户名",
    "GUI Listen Addresses": "管理页面监听地址",
    "Generate": "生成",
    "Global Discovery": "在互联网上寻找节点",
    "Global Discovery Server": "用以在互联网上寻找节点的Announce服务器地址",
    "Global Repository": "全局仓库",
    "Idle": "空闲",
    "Ignore Permissions": "忽略文件权限",
    "Keep Versions": "保留历史版本数量",
    "Latest Release": "最新发布",
    "Local Discovery": "在局域网上寻找节点",
    "Local Discovery Port": "局域网寻找监听端口",
    "Local Repository": "本地仓库",
    "Master Repo": "主仓库",
    "Max File Change Rate (KiB/s)": "最大文件变化速率(千字节/每秒)",
    "Max Outstanding Requests": "同时可响应的请求数上限",
    "No": "否",
    "Node ID": "节点ID",
    "Node Identification": "节点ID",
    "Node Name": "节点名",
    "Notice": "提示",
    "OK": "确定",
    "Offline": "离线",
    "Online": "在线",
    "Out Of Sync": "未同步",
    "Outgoing Rate Limit (KiB/s)": "上传速度限制(千字节/秒)",
    "Override Changes": "撤销改变",
    "Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "该仓库的本地路径。如果该路径不存在,则会自动创建。波浪线字符(~)代表了下面的文件夹:",
    "Please wait": "请稍候",
    "Preview Usage Report": "预览使用报告",
    "RAM Utilization": "内存使用情况",
    "Reconnect Interval (s)": "重连间隔(秒)",
    "Repository ID": "仓库ID",
    "Repository Master": "主仓库",
    "Repository Path": "仓库路径",
    "Rescan": "Rescan",
    "Rescan Interval (s)": "扫描间隔(秒)",
    "Restart": "重启syncthing",
    "Restart Needed": "需要重启Syncthing",
    "Restarting": "重启中",
    "Save": "保存",
    "Scanning": "扫描中",
    "Select the nodes to share this repository with.": "选择将本仓库共享给哪些节点",
    "Settings": "设置",
    "Share With Nodes": "共享给下列节点",
    "Shared With": "共享给",
    "Short identifier for the repository. Must be the same on all cluster nodes.": "该仓库的ID。在所有节点上,必须一致。",
    "Show ID": "显示节点ID",
    "Shown instead of Node ID in the cluster status.": "在目标节点状态中显示该名字,以替代节点ID",
    "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
    "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
    "Shutdown": "关闭Syncthing",
    "Source Code": "源代码",
    "Start Browser": "启动浏览器",
    "Stopped": "已停止",
    "Support / Forum": "支持/论坛",
    "Sync Protocol Listen Addresses": "协议监听地址",
    "Synchronization": "同步",
    "Syncing": "同步中",
    "Syncthing has been shut down.": "Syncthing已关闭",
    "Syncthing includes the following software or portions thereof:": "Syncthing使用了下列软件或其中的一部分",
    "Syncthing is restarting.": "Syncthing正在重启",
    "Syncthing is upgrading.": "Syncthing正在升级",
    "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing似乎关闭了,或者您的网络连接存在故障。重试中...",
    "The aggregated statistics are publicly available at {%url%}.": "全局统计公布于 {{url}}",
    "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "设置已经保存,但是还未生效。Syncthing需要重启以启用新的设置。",
    "The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "经过加密的使用报告会每天发送。它用来跟踪统计使用本软件的平台,仓库大小,以及本软件的版本。如果报告的内容有任何变化,本对话框会再次弹出提示您。",
    "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "输入的节点ID似乎无效。节点ID长度必须为52的字母和数字。空格和横线不算在内。",
    "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "输入的节点ID似乎无效。节点ID长度必须为52或56的字母和数字。空格和横线不算在内。",
    "The node ID cannot be blank.": "节点ID不能为空",
    "The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "在这里需要输入的节点ID,可以在目标节点管理页面右上角的\"选项>显示节点ID\"获得。空格和横线会被自动忽略。",
    "The number of old versions to keep, per file.": "每个文件保留的版本数量。",
    "The number of versions must be a number and cannot be blank.": "保留版本数量必须为数字,且不能为空。",
    "The repository ID cannot be blank.": "仓库ID不能为空。",
    "The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "仓库ID的长度,必须在64字节或更少,只能包含字母,数字,点(.),以及下划线(_)",
    "The repository ID must be unique.": "仓库ID必须唯一。",
    "The repository path cannot be blank.": "仓库路径不能为空。",
    "Unknown": "未知",
    "Up to Date": "同步完成",
    "Upgrade To {%version%}": "升级至版本{{version}}",
    "Upgrading": "升级中",
    "Upload Rate": "上传速度",
    "Usage": "使用情况",
    "Use Compression": "使用压缩",
    "Use HTTPS for GUI": "使用HTTPS连接管理页面",
    "Version": "版本",
    "When adding a new node, keep in mind that this node must be added on the other side too.": "当您在本节点上添加新节点时,也必须在您所添加的节点上添加本节点。",
    "When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "添加新仓库时需要注意,仓库ID是用来在不同节点间绑定关联的仓库的。仓库ID是大小写敏感的,而且在所有的节点中必须完全相同,才能正确建立同步。",
    "Yes": "是",
    "You must keep at least one version.": "您必须保留至少一个版本。",
    "items": "条目"
}
123 gui/lang-zh-TW.json Normal file
@@ -0,0 +1,123 @@
{
    "API Key": "API 金鑰",
    "About": "關於",
    "Add Node": "增加節點",
    "Add Repository": "新增儲存庫",
    "Address": "位址",
    "Addresses": "位址",
    "Allow Anonymous Usage Reporting?": "允許匿名的使用資訊回報?",
    "Announce Server": "發佈伺服器",
    "Anonymous Usage Reporting": "匿名的使用資訊回報",
    "Bugs": "程式錯誤",
    "CPU Utilization": "CPU 使用率",
    "Close": "關閉",
    "Connection Error": "連線錯誤",
    "Copyright © 2014 Jakob Borg and the following Contributors:": "版權所有 © 2014 Jakob Borg 及以下貢獻者:",
    "Delete": "刪除",
    "Disconnected": "斷線",
    "Documentation": "說明文件",
    "Download Rate": "下載速率",
    "Edit": "編輯",
    "Edit Node": "編輯節點",
    "Edit Repository": "編輯儲存庫",
    "Enable UPnP": "啟用 UPnP",
    "Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "輸入以半形逗號區隔的 \"ip:連接埠\" 位址,或著輸入 \"dynamic\" 以進行位址的自動探索",
    "Error": "錯誤",
    "File Versioning": "檔案版本控制",
    "File permission bits are ignored when looking for changes. Use on FAT filesystems.": "當在尋找變化時,檔案權限位元會被忽略。用於 FAT 檔案系統。",
    "Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "當檔案被 syncthing 取代或刪除時,它們將被移至 .stversions 資料夾並添加日期戳記。",
    "Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "其他節點做的改變不會影響到此節點的檔案,但在此節點上的變化將被發送到叢集中的其他部分。",
    "Folder": "資料夾",
    "GUI Authentication Password": "GUI 認證密碼",
    "GUI Authentication User": "GUI 認證使用者名稱",
    "GUI Listen Addresses": "GUI 監聽位址",
    "Generate": "產生",
    "Global Discovery": "全域探索",
    "Global Discovery Server": "全域探索伺服器",
    "Global Repository": "全域儲存庫",
    "Idle": "閒置",
    "Ignore Permissions": "忽略權限",
    "Keep Versions": "保留版本數",
    "Latest Release": "最新發佈",
    "Local Discovery": "本地探索",
    "Local Discovery Port": "本地探索連接埠",
    "Local Repository": "本地儲存庫",
    "Master Repo": "支配此儲存庫",
    "Max File Change Rate (KiB/s)": "最大檔案改變速率 (KiB/s)",
    "Max Outstanding Requests": "最大未完成的請求",
    "No": "否",
    "Node ID": "節點識別碼",
    "Node Identification": "節點識別",
    "Node Name": "節點名稱",
    "Notice": "注意",
    "OK": "確定",
    "Offline": "離線",
    "Online": "上線",
    "Out Of Sync": "不同步",
    "Outgoing Rate Limit (KiB/s)": "連出速率限制 (KiB/s)",
    "Override Changes": "覆蓋改變",
    "Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "儲存庫在本地電腦的路徑。若資料夾不存在則會建立。波浪符號 (~) 可用作下列資料夾的捷徑:",
    "Please wait": "請稍後",
    "Preview Usage Report": "預覽使用資訊報告",
    "RAM Utilization": "記憶體使用率",
    "Reconnect Interval (s)": "重新連接間隔 (秒)",
    "Repository ID": "儲存庫識別碼",
    "Repository Master": "儲存庫支配者",
    "Repository Path": "儲存庫路徑",
    "Rescan": "重新掃描",
    "Rescan Interval (s)": "掃描間隔 (秒)",
    "Restart": "重新啟動",
    "Restart Needed": "需要重新啟動",
    "Restarting": "正在重新啟動",
    "Save": "儲存",
    "Scanning": "正在掃描",
    "Select the nodes to share this repository with.": "選擇要共享這個儲存庫的節點。",
    "Settings": "設定",
    "Share With Nodes": "與節點共享",
    "Shared With": "與誰共享",
    "Short identifier for the repository. Must be the same on all cluster nodes.": "儲存庫的簡短識別碼。必須在所有叢集節點上皆相同。",
    "Show ID": "顯示識別碼",
    "Shown instead of Node ID in the cluster status.": "在叢集狀態中代替節點識別碼的顯示。",
    "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
    "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
    "Shutdown": "關閉",
    "Source Code": "原始碼",
    "Start Browser": "啟動瀏覽器",
    "Stopped": "已停止",
    "Support / Forum": "支援 / 論壇",
    "Sync Protocol Listen Addresses": "同步通訊協定監聽位址",
    "Synchronization": "同步作業",
    "Syncing": "正在同步",
    "Syncthing has been shut down.": "Syncthing 已經關閉。",
    "Syncthing includes the following software or portions thereof:": "Syncthing 包括以下軟體或其中的一部分:",
    "Syncthing is restarting.": "Syncthing 正在重新啟動。",
    "Syncthing is upgrading.": "Syncthing 正在進行升級。",
    "Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing 似乎下線了,或者您的網際網路連線出現問題。正在重試...",
    "The aggregated statistics are publicly available at {%url%}.": "匯總統計公佈於 {{url}}。",
    "The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "組態已經儲存但尚未啟用。Syncthing 必須重新啟動以便啟用新的組態。",
    "The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "經過加密的使用資訊報告會每天傳送。報告是用來追蹤常用的平台、儲存庫的大小以及應用程式的版本。若傳送的資料集有異動,您會再次看到這個對話框。",
    "The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "輸入的節點識別碼似乎無效。它應該為一串包含半形英文字母及數字,並可能會含有空白或連接符號的字串,且長度為 52 個字元。",
    "The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "輸入的節點識別碼似乎無效。它應該為一串包含半形英文字母及數字,並可能會含有空白或連接符號的字串,且長度為 52 或 56 個字元。",
    "The node ID cannot be blank.": "節點識別碼不能為空白。",
    "The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "要輸入在這裡的節點識別碼可以在其他節點的 \"編輯 > 顯示識別碼\" 對話框找到。空白以及連接符號可不輸入 (省略)",
    "The number of old versions to keep, per file.": "每個檔案要保留的舊版本數量。",
    "The number of versions must be a number and cannot be blank.": "每個檔案要保留的舊版本數量必須是數字且不能為空白。",
    "The repository ID cannot be blank.": "儲存庫識別碼不能為空白。",
    "The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "儲存庫識別碼必須為一段只包含半形英文字母、數字、點 (.)、連接符號 (-) 以及底線 (_) 的簡短識別碼 (不多於 64 個字元)",
    "The repository ID must be unique.": "儲存庫識別碼必須為獨一無二的。",
    "The repository path cannot be blank.": "儲存庫路徑不能空白。",
    "Unknown": "未知",
    "Up to Date": "最新",
    "Upgrade To {%version%}": "升級到 {{version}}",
    "Upgrading": "正在升級",
    "Upload Rate": "上載速率",
    "Usage": "使用",
    "Use Compression": "使用壓縮",
    "Use HTTPS for GUI": "為 GUI 使用 HTTPS",
    "Version": "版本",
    "When adding a new node, keep in mind that this node must be added on the other side too.": "當新增一個節點時,請記住,這個節點也必須被添加至另一邊。",
    "When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "當新增一個儲存庫時,請記住,儲存庫識別碼是用來將節點之間的儲存庫綁定在一起的。它們有區分大小寫,且必須在所有節點之間完全相同。",
    "Yes": "是",
    "You must keep at least one version.": "您必須保留至少一個版本。",
    "items": "個物件"
}
@@ -1 +1 @@
var validLangs = ["da","de","el","en","es","fr","it","nl","pt","ru","sv","tr","uk"]
var validLangs = ["da","de","el","en","es","fr","hu","it","nl","pt-PT","ru","sv","tr","uk","zh-CN","zh-TW"]
@@ -68,6 +68,7 @@ type Model struct {
	cfg *config.Configuration
	db *leveldb.DB

	nodeName string
	clientName string
	clientVersion string

@@ -101,11 +102,12 @@ var (
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVersion string, db *leveldb.DB) *Model {
func NewModel(indexDir string, cfg *config.Configuration, nodeName, clientName, clientVersion string, db *leveldb.DB) *Model {
	m := &Model{
		indexDir: indexDir,
		cfg: cfg,
		db: db,
		nodeName: nodeName,
		clientName: clientName,
		clientVersion: clientVersion,
		repoCfgs: make(map[string]config.RepositoryConfiguration),
@@ -409,6 +411,14 @@ func (m *Model) ClusterConfig(nodeID protocol.NodeID, config protocol.ClusterCon
	m.pmut.Unlock()

	l.Infof(`Node %s client is "%s %s"`, nodeID, config.ClientName, config.ClientVersion)

	if name := config.GetOption("name"); name != "" {
		l.Infof("Node %s hostname is %q", nodeID, name)
		node := m.cfg.GetNodeConfiguration(nodeID)
		if node != nil && node.Name == "" {
			node.Name = name
		}
	}
}

// Close removes the peer from the model and closes the underlying connection if possible.
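A minimal standalone sketch of the rename guard added above, using a hypothetical stand-in type (the real NodeConfiguration lives in the config package); the behavior mirrors the hunk, where an advertised name is adopted only while no local name is set:

package main

import "fmt"

// nodeConfig is a hypothetical stand-in for config.NodeConfiguration,
// reduced to the one field the guard touches.
type nodeConfig struct {
	Name string
}

// applyAdvertisedName mirrors the logic in ClusterConfig above: a name
// advertised by a peer fills an empty local name, but never replaces one.
func applyAdvertisedName(node *nodeConfig, advertised string) {
	if node != nil && advertised != "" && node.Name == "" {
		node.Name = advertised
	}
}

func main() {
	n := &nodeConfig{}
	applyAdvertisedName(n, "tester")
	fmt.Println(n.Name) // "tester": the empty name is filled in

	applyAdvertisedName(n, "tester2")
	fmt.Println(n.Name) // still "tester": an existing name is not overwritten
}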
@@ -838,6 +848,12 @@ func (m *Model) clusterConfig(node protocol.NodeID) protocol.ClusterConfigMessag
	cm := protocol.ClusterConfigMessage{
		ClientName: m.clientName,
		ClientVersion: m.clientVersion,
		Options: []protocol.Option{
			{
				Key: "name",
				Value: m.nodeName,
			},
		},
	}

	m.rmut.RLock()
@@ -57,7 +57,7 @@ func init() {

func TestRequest(t *testing.T) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", &config.Configuration{}, "syncthing", "dev", db)
	m := NewModel("/tmp", &config.Configuration{}, "node", "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")

@@ -94,7 +94,7 @@ func genFiles(n int) []protocol.FileInfo {

func BenchmarkIndex10000(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")
	files := genFiles(10000)
@@ -107,7 +107,7 @@ func BenchmarkIndex10000(b *testing.B) {

func BenchmarkIndex00100(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")
	files := genFiles(100)
@@ -120,7 +120,7 @@ func BenchmarkIndex00100(b *testing.B) {

func BenchmarkIndexUpdate10000f10000(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")
	files := genFiles(10000)
@@ -134,7 +134,7 @@ func BenchmarkIndexUpdate10000f10000(b *testing.B) {

func BenchmarkIndexUpdate10000f00100(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")
	files := genFiles(10000)
@@ -149,7 +149,7 @@ func BenchmarkIndexUpdate10000f00100(b *testing.B) {

func BenchmarkIndexUpdate10000f00001(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")
	files := genFiles(10000)
@@ -207,7 +207,7 @@ func (FakeConnection) Statistics() protocol.Statistics {

func BenchmarkRequest(b *testing.B) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", nil, "syncthing", "dev", db)
	m := NewModel("/tmp", nil, "node", "syncthing", "dev", db)
	m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
	m.ScanRepo("default")

@@ -259,3 +259,45 @@ func TestActivityMap(t *testing.T) {
		t.Errorf("Incorrect least busy node %q", node)
	}
}

func TestNodeRename(t *testing.T) {
	ccm := protocol.ClusterConfigMessage{
		ClientName: "syncthing",
		ClientVersion: "v0.9.4",
	}

	cfg, _ := config.Load(nil, node1)
	cfg.Nodes = []config.NodeConfiguration{
		{
			NodeID: node1,
		},
	}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
	if cfg.Nodes[0].Name != "" {
		t.Errorf("Node already has a name")
	}

	m.ClusterConfig(node1, ccm)
	if cfg.Nodes[0].Name != "" {
		t.Errorf("Node already has a name")
	}

	ccm.Options = []protocol.Option{
		{
			Key: "name",
			Value: "tester",
		},
	}
	m.ClusterConfig(node1, ccm)
	if cfg.Nodes[0].Name != "tester" {
		t.Errorf("Node did not get a name")
	}

	ccm.Options[0].Value = "tester2"
	m.ClusterConfig(node1, ccm)
	if cfg.Nodes[0].Name != "tester" {
		t.Errorf("Node name got overwritten")
	}
}

@@ -98,6 +98,15 @@ type ClusterConfigMessage struct {
	Options []Option // max:64
}

func (o *ClusterConfigMessage) GetOption(key string) string {
	for _, option := range o.Options {
		if option.Key == key {
			return option.Value
		}
	}
	return ""
}

type Repository struct {
	ID string // max:64
	Nodes []Node // max:64

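A quick usage sketch of the new GetOption accessor; the types here are pared down to the fields the method touches (the full definitions live in the protocol package above), and the method body is copied from the hunk:

package main

import "fmt"

// Option and ClusterConfigMessage are reduced stand-ins for the
// protocol types above, keeping only what GetOption needs.
type Option struct {
	Key string
	Value string
}

type ClusterConfigMessage struct {
	Options []Option
}

// GetOption returns the value of the first option with the given key,
// or the empty string when the key is absent, as in the hunk above.
func (o *ClusterConfigMessage) GetOption(key string) string {
	for _, option := range o.Options {
		if option.Key == key {
			return option.Value
		}
	}
	return ""
}

func main() {
	cm := ClusterConfigMessage{
		Options: []Option{{Key: "name", Value: "tester"}},
	}
	fmt.Println(cm.GetOption("name"))  // "tester"
	fmt.Println(cm.GetOption("other")) // "" — missing keys read as empty
}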
@@ -13,6 +13,7 @@ import (
	"path/filepath"
	"runtime"
	"strings"

	"code.google.com/p/go.text/unicode/norm"

	"github.com/syncthing/syncthing/lamport"
@@ -229,7 +230,7 @@ func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool {
	for prefix, pats := range patterns {
		if prefix == "." || prefix == first || strings.HasPrefix(first, fmt.Sprintf("%s%c", prefix, os.PathSeparator)) {
			for _, pattern := range pats {
				if match, _ := filepath.Match(pattern, last); match {
				if match, _ := filepath.Match(pattern, last); match || pattern == last {
					return true
				}
			}

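The added "pattern == last" comparison matters on Windows, where filepath.Match disables backslash escaping, so a pattern containing glob metacharacters can never match a file literally named like the pattern. A small demonstration (this pattern contains no backslashes, so it behaves the same on every OS):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	pattern := "q[abc]y" // the unescaped form the test below uses on Windows
	name := "q[abc]y"    // a file literally named q[abc]y

	match, _ := filepath.Match(pattern, name)
	fmt.Println(match) // false: [abc] is a character class matching qay, qby, qcy

	// The extra literal comparison added in the hunk catches this case.
	fmt.Println(match || pattern == name) // true
}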
@@ -6,7 +6,9 @@ package scanner

import (
	"fmt"
	"path/filepath"
	"reflect"
	"runtime"
	"sort"
	"testing"
	"time"
@@ -124,38 +126,45 @@ func TestWalkError(t *testing.T) {
}

func TestIgnore(t *testing.T) {
	pattern := "q\\[abc\\]y"
	// On Windows, escaping is disabled.
	// Instead, '\\' is treated as path separator.
	if runtime.GOOS == "windows" {
		pattern = "q[abc]y"
	}

	var patterns = map[string][]string{
		".": {"t2"},
		"foo": {"bar", "z*", "q[abc]x", "q\\[abc\\]y"},
		"foo/baz": {"quux", ".*"},
		".": {"t2"},
		"foo": {"bar", "z*", "q[abc]x", pattern},
		filepath.Join("foo", "baz"): {"quux", ".*"},
	}
	var tests = []struct {
		f string
		r bool
	}{
		{"foo/bar", true},
		{"foofoo", false},
		{"foo/quux", false},
		{"foo/zuux", true},
		{"foo/qzuux", false},
		{"foo/baz/t1", false},
		{"foo/baz/t2", true},
		{"foo/baz/bar", true},
		{"foo/baz/quuxa", false},
		{"foo/baz/aquux", false},
		{"foo/baz/.quux", true},
		{"foo/baz/zquux", true},
		{"foo/baz/quux", true},
		{"foo/bazz/quux", false},
		{"foo/bazz/q[abc]x", false},
		{"foo/bazz/qax", true},
		{"foo/bazz/q[abc]y", true},
		{filepath.Join("foo", "bar"), true},
		{filepath.Join("foofoo"), false},
		{filepath.Join("foo", "quux"), false},
		{filepath.Join("foo", "zuux"), true},
		{filepath.Join("foo", "qzuux"), false},
		{filepath.Join("foo", "baz", "t1"), false},
		{filepath.Join("foo", "baz", "t2"), true},
		{filepath.Join("foo", "baz", "bar"), true},
		{filepath.Join("foo", "baz", "quuxa"), false},
		{filepath.Join("foo", "baz", "aquux"), false},
		{filepath.Join("foo", "baz", ".quux"), true},
		{filepath.Join("foo", "baz", "zquux"), true},
		{filepath.Join("foo", "baz", "quux"), true},
		{filepath.Join("foo", "bazz", "quux"), false},
		{filepath.Join("foo", "bazz", "q[abc]x"), true},
		{filepath.Join("foo", "bazz", "qax"), true},
		{filepath.Join("foo", "bazz", "q[abc]y"), true},
	}

	w := Walker{}
	for i, tc := range tests {
		if r := w.ignoreFile(patterns, tc.f); r != tc.r {
			t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, tc.r, r)
			t.Errorf("Incorrect ignoreFile() #%d (%s); E: %v, A: %v", i, tc.f, tc.r, r)
		}
	}
}