Compare commits

...

44 Commits

Author SHA1 Message Date
Jakob Borg
921b90936b Revert "Use a monitor process to handle panics and restarts (fixes #586)"
This reverts commit 10f0713257.

Conflicts:
	cmd/syncthing/monitor.go
2014-09-06 06:50:38 +02:00
Jakob Borg
5b51f4d058 Revert "Proper signal handling in monitor process"
This reverts commit 33e9a88b56.
2014-09-06 06:49:51 +02:00
Jakob Borg
97cb3fa5a5 Translation update (add Catalan) 2014-09-05 14:24:20 +02:00
Jakob Borg
b5368db704 Update assets 2014-09-05 13:26:17 +02:00
Jakob Borg
8c442b72f3 Merge remote-tracking branch 'origin/pr/634'
* origin/pr/634:
  Removed unused `optionEditor` directive from app.js
  Removed unused `clean` filter from app.js.
  Removed unused `shortPath` filter from app.js.
  Removed unused `short` filter from app.js.
2014-09-05 13:25:53 +02:00
Jakob Borg
f8f6791d39 Add pyfisch 2014-09-05 13:25:40 +02:00
Pyfisch
0c09f077aa Removed unused optionEditor directive from app.js 2014-09-05 12:42:52 +02:00
Pyfisch
af2831d7b6 Removed unused clean filter from app.js. 2014-09-05 12:40:45 +02:00
Pyfisch
64d5d4aec7 Removed unused shortPath filter from app.js. 2014-09-05 12:39:35 +02:00
Pyfisch
619a6b2adb Removed unused short filter from app.js. 2014-09-05 12:38:21 +02:00
Jakob Borg
33a26bc0cf Merge pull request #631 from AudriusButkevicius/upnp
Check if we had successfully acquired a UPnP mapping before (fixes #627)
2014-09-05 09:09:23 +02:00
Audrius Butkevicius
b445a7c4d3 Check if we had successfully acquired a UPnP mapping before (fixes #627) 2014-09-04 23:02:10 +01:00
Jakob Borg
e6892d0c3e Autogen warning in lang dir 2014-09-04 23:37:23 +02:00
Jakob Borg
33e9a88b56 Proper signal handling in monitor process 2014-09-04 23:31:22 +02:00
Jakob Borg
df00a2251e Pesky copyright is pesky 2014-09-04 22:33:01 +02:00
Jakob Borg
92c44c8abe Rework .stignore functionality (fixes #561) (...)
- Only one .stignore is supported, at the repo root
- Negative patterns (!) are supported
- Ignore patterns affect sent and received indexes, not only scanning
2014-09-04 22:30:42 +02:00
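
The negation semantics above are easiest to see in miniature. Below is a minimal sketch in Go — not Syncthing's actual matcher; the function name and the first-match-wins rule are illustrative assumptions:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// ignored reports whether name matches a .stignore-style pattern list
// in which a leading '!' marks a negative (un-ignore) pattern. The
// first matching pattern wins; this rule is an assumption made for
// the sake of illustration.
func ignored(patterns []string, name string) bool {
	for _, p := range patterns {
		negate := strings.HasPrefix(p, "!")
		if negate {
			p = p[1:]
		}
		if ok, _ := filepath.Match(p, name); ok {
			return !negate
		}
	}
	return false
}

func main() {
	patterns := []string{"!keep.tmp", "*.tmp"}
	fmt.Println(ignored(patterns, "scratch.tmp")) // true: ignored by *.tmp
	fmt.Println(ignored(patterns, "keep.tmp"))    // false: un-ignored by the '!' pattern
}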
Jakob Borg
8e4f7bbd3e Merge pull request #626 from alex2108/master
staggered versioner: count directories as files (fixes #607)
2014-09-04 21:59:38 +02:00
Jakob Borg
a40217cf07 Trim dead bits of code 2014-09-04 22:07:59 +02:00
Jakob Borg
e586fda5f2 Woops, close the right fd 2014-09-04 22:03:25 +02:00
Alexander Graf
a58564ff88 count directories as files (fixes #607) 2014-09-04 16:48:24 +02:00
Jakob Borg
89885b9fb9 Clean up GUI directory 2014-09-04 08:53:28 +02:00
Jakob Borg
5c7d977ae0 Use woff instead of ttf font 2014-09-04 08:47:23 +02:00
Jakob Borg
2cd3ee9698 Dead code cleanup 2014-09-04 08:39:39 +02:00
Jakob Borg
dd3080e018 Copyright cleanup 2014-09-04 08:31:38 +02:00
Jakob Borg
5915e8e86a Don't trust mime.TypeByExtension for the easy stuff (fixes #598) 2014-09-04 08:26:12 +02:00
Jakob Borg
3c67c06654 Merge pull request #619 from marcindziadus/sorting-order
Change sorting order (fix #618)
2014-09-03 23:26:20 +02:00
Marcin
76232ca573 change sorting order 2014-09-03 18:41:45 +02:00
Jakob Borg
5235e82bda Limit number of open db files (fixes #587) 2014-09-02 14:47:36 +02:00
Jakob Borg
10f0713257 Use a monitor process to handle panics and restarts (fixes #586) 2014-09-02 13:24:41 +02:00
Jakob Borg
e9c7970ea4 Only create assets map on demand 2014-09-02 13:07:33 +02:00
Jakob Borg
1a6ac4aeb1 Integration tests should use v4 localhost 2014-09-02 12:10:18 +02:00
Jakob Borg
f633bdddf0 Update goleveldb 2014-09-02 09:44:07 +02:00
Jakob Borg
de0b91d157 Show IPv6 GUI URL correctly 2014-09-01 20:04:22 +02:00
Jakob Borg
2e77e498f5 Use more compact base64 encoding for assets 2014-09-01 20:04:22 +02:00
Jakob Borg
4ac67eb1f9 Merge pull request #589 from AudriusButkevicius/include
Add #include directive to .stignore (fixes #424)
2014-09-01 18:08:53 +02:00
Jakob Borg
2b536de37f Don't fake indexes for stopped repos 2014-09-01 17:48:39 +02:00
Jakob Borg
2ffa92ba1b Warn on startup for stopped repositories 2014-09-01 17:47:18 +02:00
Jakob Borg
6ecddd8388 Don't fail build on Solaris 2014-09-01 17:26:28 +02:00
Jakob Borg
bd2772ea4c If all instances of the global version are invalid, the file should not be on the need list 2014-09-01 09:07:51 +02:00
Audrius Butkevicius
92bf79d53b Fix tests 2014-08-31 22:34:13 +01:00
Audrius Butkevicius
eebe0eeb71 Handle recursive includes 2014-08-31 22:33:49 +01:00
Jakob Borg
c2daedbd11 Try not to crash the box with failing tests 2014-08-31 15:36:05 +01:00
Jakob Borg
7c604beb73 Test cases for ignore #include 2014-08-31 15:35:48 +01:00
Audrius Butkevicius
8c42aea827 Add #include directive to .stignore (fixes #424)
Though this breaks #502 in a way, as .stignore is no longer the only place
where patterns get defined.

Then again, it never really was, since a .stignore can be placed in each dir. I
think we should phase that out in favor of globbing, which means we can have a
single file, which in turn means we can have a UI for editing it.

An alternative would be, as suggested, to include a .stglobalignore which is
synced as a normal file but gets included by default.

The UI would then have two editors: a local ignore and a global ignore.
2014-08-31 15:32:22 +01:00
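
A rough sketch of how #include resolution with a recursion guard can look in Go. The helper name, the seen-set approach, and the error-on-recursion behavior are assumptions for illustration, not the code from this pull request:

package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// loadIgnores reads an ignore file line by line, follows #include
// directives relative to the including file, and tracks already
// visited files so that recursive includes (a includes b includes a)
// terminate instead of looping forever.
func loadIgnores(file string, seen map[string]bool) ([]string, error) {
	if seen[file] {
		return nil, fmt.Errorf("recursive include of %q", file)
	}
	seen[file] = true

	fd, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	var patterns []string
	sc := bufio.NewScanner(fd)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if strings.HasPrefix(line, "#include ") {
			incFile := strings.TrimSpace(strings.TrimPrefix(line, "#include "))
			sub, err := loadIgnores(filepath.Join(filepath.Dir(file), incFile), seen)
			if err != nil {
				return nil, err
			}
			patterns = append(patterns, sub...)
		} else if line != "" {
			patterns = append(patterns, line)
		}
	}
	return patterns, sc.Err()
}

func main() {
	patterns, err := loadIgnores(".stignore", map[string]bool{})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(patterns)
}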
119 changed files with 1631 additions and 990 deletions

View File

@@ -9,6 +9,7 @@ Gilli Sigurdsson <gilli@vx.is>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Marcin Dziadus <dziadus.marcin@gmail.com>
Michael Tilli <pyfisch@gmail.com>
Philippe Schommers <philippe@schommers.be>
Ryan Sullivan <kayoticsully@gmail.com>
Tully Robinson <tully@tojr.org>

Godeps/Godeps.json generated
View File

@@ -49,7 +49,7 @@
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "59d87758aeaab5ab6ed289c773349500228a1557"
"Rev": "2b99e8d4757bf06eeab1b0485d80b8ae1c088874"
},
{
"ImportPath": "github.com/vitrun/qart/coding",

View File

@@ -40,10 +40,21 @@ type Cache interface {
// Size returns the total size of alive cache objects.
Size() int
// NumObjects returns the number of alive objects.
NumObjects() int
// GetNamespace gets the cache namespace with the given id.
// GetNamespace never returns nil.
GetNamespace(id uint64) Namespace
// PurgeNamespace purges the cache namespace with the given id from this cache tree.
// Also read Namespace.Purge.
PurgeNamespace(id uint64, fin PurgeFin)
// ZapNamespace detaches the cache namespace with the given id from this cache tree.
// Also read Namespace.Zap.
ZapNamespace(id uint64)
// Purge purges all cache namespaces from this cache tree.
// This behaves the same as calling the Namespace.Purge method on every cache namespace.
Purge(fin PurgeFin)

View File

@@ -15,11 +15,11 @@ import (
// lruCache represents an LRU cache state.
type lruCache struct {
mu sync.Mutex
recent lruNode
table map[uint64]*lruNs
capacity int
used, size int
mu sync.Mutex
recent lruNode
table map[uint64]*lruNs
capacity int
used, size, alive int
}
// NewLRUCache creates a new initialized LRU cache with the given capacity.
@@ -51,6 +51,12 @@ func (c *lruCache) Size() int {
return c.size
}
func (c *lruCache) NumObjects() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.alive
}
// SetCapacity sets the cache capacity.
func (c *lruCache) SetCapacity(capacity int) {
c.mu.Lock()
@@ -77,6 +83,23 @@ func (c *lruCache) GetNamespace(id uint64) Namespace {
return ns
}
func (c *lruCache) ZapNamespace(id uint64) {
c.mu.Lock()
if ns, exist := c.table[id]; exist {
ns.zapNB()
delete(c.table, id)
}
c.mu.Unlock()
}
func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
c.mu.Lock()
if ns, exist := c.table[id]; exist {
ns.purgeNB(fin)
}
c.mu.Unlock()
}
// Purge purges the entire cache.
func (c *lruCache) Purge(fin PurgeFin) {
c.mu.Lock()
@@ -158,11 +181,12 @@ func (ns *lruNs) Get(key uint64, setf SetFunc) Handle {
}
ns.table[key] = node
ns.lru.size += charge
ns.lru.alive++
if charge > 0 {
node.ref++
node.rInsert(&ns.lru.recent)
ns.lru.used += charge
ns.lru.size += charge
ns.lru.evict()
}
}
@@ -322,8 +346,10 @@ func (n *lruNode) derefNB() {
// Remove element.
delete(n.ns.table, n.key)
n.ns.lru.size -= n.charge
n.ns.lru.alive--
n.fin()
}
n.value = nil
} else if n.ref < 0 {
panic("leveldb/cache: lruCache: negative node reference")
}

View File

@@ -14,6 +14,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -35,7 +36,7 @@ type DB struct {
// MemDB.
memMu sync.RWMutex
memPool *util.Pool
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
@@ -47,6 +48,9 @@ type DB struct {
snapsMu sync.Mutex
snapsRoot snapshotElement
// Stats.
aliveSnaps, aliveIters int32
// Write.
writeC chan *Batch
writeMergedC chan bool
@@ -80,7 +84,7 @@ func openDB(s *session) (*DB, error) {
// Initial sequence
seq: s.stSeq,
// MemDB
memPool: util.NewPool(1),
memPool: make(chan *memdb.DB, 1),
// Write
writeC: make(chan *Batch),
writeMergedC: make(chan bool),
@@ -122,6 +126,7 @@ func openDB(s *session) (*DB, error) {
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
go db.mpoolDrain()
s.logf("db@open done T·%v", time.Since(start))
@@ -568,7 +573,7 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
}
defer m.decref()
mk, mv, me := m.db.Find(ikey)
mk, mv, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, t, ok := parseIkey(mk)
if ok && db.s.icmp.uCompare(ukey, key) == 0 {
@@ -657,6 +662,14 @@ func (db *DB) GetSnapshot() (*Snapshot, error) {
// Returns sstables list for each level.
// leveldb.blockpool
// Returns block pool stats.
// leveldb.cachedblock
// Returns size of cached block.
// leveldb.openedtables
// Returns number of opened tables.
// leveldb.alivesnaps
// Returns number of alive snapshots.
// leveldb.aliveiters
// Returns number of alive iterators.
func (db *DB) GetProperty(name string) (value string, err error) {
err = db.ok()
if err != nil {
@@ -712,6 +725,10 @@ func (db *DB) GetProperty(name string) (value string, err error) {
}
case p == "openedtables":
value = fmt.Sprintf("%d", db.s.tops.cache.Size())
case p == "alivesnaps":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
case p == "aliveiters":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
default:
err = errors.New("leveldb: GetProperty: unknown property: " + name)
}

View File

@@ -221,10 +221,10 @@ func (db *DB) memCompaction() {
c := newCMem(db.s)
stats := new(cStatsStaging)
db.logf("mem@flush N·%d S·%s", mem.db.Len(), shortenb(mem.db.Size()))
db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
// Don't compact empty memdb.
if mem.db.Len() == 0 {
if mem.mdb.Len() == 0 {
db.logf("mem@flush skipping")
// drop frozen mem
db.dropFrozenMem()
@@ -242,7 +242,7 @@ func (db *DB) memCompaction() {
db.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
return c.flush(mem.db, -1)
return c.flush(mem.mdb, -1)
}, func() error {
for _, r := range c.rec.addedTables {
db.logf("mem@flush rollback @%d", r.num)

View File

@@ -10,6 +10,7 @@ import (
"errors"
"runtime"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -38,11 +39,11 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
ti := v.getIterators(slice, ro)
n := len(ti) + 2
i := make([]iterator.Iterator, 0, n)
emi := em.db.NewIterator(slice)
emi := em.mdb.NewIterator(slice)
emi.SetReleaser(&memdbReleaser{m: em})
i = append(i, emi)
if fm != nil {
fmi := fm.db.NewIterator(slice)
fmi := fm.mdb.NewIterator(slice)
fmi.SetReleaser(&memdbReleaser{m: fm})
i = append(i, fmi)
}
@@ -66,6 +67,7 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
}
rawIter := db.newRawIterator(islice, ro)
iter := &dbIter{
db: db,
icmp: db.s.icmp,
iter: rawIter,
seq: seq,
@@ -73,6 +75,7 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
key: make([]byte, 0),
value: make([]byte, 0),
}
atomic.AddInt32(&db.aliveIters, 1)
runtime.SetFinalizer(iter, (*dbIter).Release)
return iter
}
@@ -89,6 +92,7 @@ const (
// dbIter represents an iterator state over a database session.
type dbIter struct {
db *DB
icmp *iComparer
iter iterator.Iterator
seq uint64
@@ -303,6 +307,7 @@ func (i *dbIter) Release() {
if i.releaser != nil {
i.releaser.Release()
i.releaser = nil
}
i.dir = dirReleased
@@ -310,6 +315,8 @@ func (i *dbIter) Release() {
i.value = nil
i.iter.Release()
i.iter = nil
atomic.AddInt32(&i.db.aliveIters, -1)
i.db = nil
}
}

View File

@@ -9,6 +9,7 @@ package leveldb
import (
"runtime"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -81,7 +82,7 @@ func (db *DB) minSeq() uint64 {
type Snapshot struct {
db *DB
elem *snapshotElement
mu sync.Mutex
mu sync.RWMutex
released bool
}
@@ -91,6 +92,7 @@ func (db *DB) newSnapshot() *Snapshot {
db: db,
elem: db.acquireSnapshot(),
}
atomic.AddInt32(&db.aliveSnaps, 1)
runtime.SetFinalizer(snap, (*Snapshot).Release)
return snap
}
@@ -105,8 +107,8 @@ func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err er
if err != nil {
return
}
snap.mu.Lock()
defer snap.mu.Unlock()
snap.mu.RLock()
defer snap.mu.RUnlock()
if snap.released {
err = ErrSnapshotReleased
return
@@ -160,6 +162,7 @@ func (snap *Snapshot) Release() {
snap.released = true
snap.db.releaseSnapshot(snap.elem)
atomic.AddInt32(&snap.db.aliveSnaps, -1)
snap.db = nil
snap.elem = nil
}

View File

@@ -8,16 +8,16 @@ package leveldb
import (
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/util"
)
type memDB struct {
pool *util.Pool
db *memdb.DB
ref int32
db *DB
mdb *memdb.DB
ref int32
}
func (m *memDB) incref() {
@@ -26,7 +26,13 @@ func (m *memDB) incref() {
func (m *memDB) decref() {
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
m.pool.Put(m)
// Only put back memdb with std capacity.
if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
m.mdb.Reset()
m.db.mpoolPut(m.mdb)
}
m.db = nil
m.mdb = nil
} else if ref < 0 {
panic("negative memdb ref")
}
@@ -42,6 +48,41 @@ func (db *DB) addSeq(delta uint64) {
atomic.AddUint64(&db.seq, delta)
}
func (db *DB) mpoolPut(mem *memdb.DB) {
defer func() {
recover()
}()
select {
case db.memPool <- mem:
default:
}
}
func (db *DB) mpoolGet() *memdb.DB {
select {
case mem := <-db.memPool:
return mem
default:
return nil
}
}
func (db *DB) mpoolDrain() {
ticker := time.NewTicker(30 * time.Second)
for {
select {
case <-ticker.C:
select {
case <-db.memPool:
default:
}
case _, _ = <-db.closeC:
close(db.memPool)
return
}
}
}
// Create a new memdb and freeze the old one; needs external synchronization.
// newMem is only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memDB, err error) {
@@ -70,18 +111,15 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
db.journalWriter = w
db.journalFile = file
db.frozenMem = db.mem
mem, ok := db.memPool.Get().(*memDB)
if ok && mem.db.Capacity() >= n {
mem.db.Reset()
mem.incref()
} else {
mem = &memDB{
pool: db.memPool,
db: memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)),
ref: 1,
}
mdb := db.mpoolGet()
if mdb == nil || mdb.Capacity() < n {
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
}
mem = &memDB{
db: db,
mdb: mdb,
ref: 2,
}
mem.incref()
db.mem = mem
// The seq is only incremented by the writer. And whoever called newMem
// should hold the write lock, so no additional synchronization is needed here.

View File

@@ -1577,7 +1577,11 @@ func TestDb_BloomFilter(t *testing.T) {
return fmt.Sprintf("key%06d", i)
}
n := 10000
const (
n = 10000
indexOverheat = 19898
filterOverheat = 19799
)
// Populate multiple layers
for i := 0; i < n; i++ {
@@ -1601,7 +1605,7 @@ func TestDb_BloomFilter(t *testing.T) {
cnt := int(h.stor.ReadCounter())
t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
if min, max := n, n+2*n/100; cnt < min || cnt > max {
if min, max := n+indexOverheat+filterOverheat, n+indexOverheat+filterOverheat+2*n/100; cnt < min || cnt > max {
t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
}
@@ -1612,7 +1616,7 @@ func TestDb_BloomFilter(t *testing.T) {
}
cnt = int(h.stor.ReadCounter())
t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
if max := 3 * n / 100; cnt > max {
if max := 3*n/100 + indexOverheat + filterOverheat; cnt > max {
t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
}

View File

@@ -75,7 +75,7 @@ func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
mem = nil
}
}()
nn = mem.db.Free()
nn = mem.mdb.Free()
switch {
case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed:
delayed = true
@@ -90,13 +90,13 @@ func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
}
default:
// Allow memdb to grow if it has no entry.
if mem.db.Len() == 0 {
if mem.mdb.Len() == 0 {
nn = n
} else {
mem.decref()
mem, err = db.rotateMem(n)
if err == nil {
nn = mem.db.Free()
nn = mem.mdb.Free()
} else {
nn = 0
}
@@ -190,7 +190,7 @@ drain:
return
case db.journalC <- b:
// Write into memdb
b.memReplay(mem.db)
b.memReplay(mem.mdb)
}
// Wait for journal writer
select {
@@ -200,7 +200,7 @@ drain:
case err = <-db.journalAckC:
if err != nil {
// Revert memdb if error detected
b.revertMemReplay(mem.db)
b.revertMemReplay(mem.mdb)
return
}
}
@@ -209,7 +209,7 @@ drain:
if err != nil {
return
}
b.memReplay(mem.db)
b.memReplay(mem.mdb)
}
// Set last seq number.
@@ -271,7 +271,7 @@ func (db *DB) CompactRange(r util.Range) error {
// Check for overlaps in memdb.
mem := db.getEffectiveMem()
defer mem.decref()
if isMemOverlaps(db.s.icmp, mem.db, r.Start, r.Limit) {
if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
// Memdb compaction.
if _, err := db.rotateMem(0); err != nil {
<-db.writeLockC

View File

@@ -30,13 +30,16 @@ const (
type noCache struct{}
func (noCache) SetCapacity(capacity int) {}
func (noCache) Capacity() int { return 0 }
func (noCache) Used() int { return 0 }
func (noCache) Size() int { return 0 }
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
func (noCache) Purge(fin cache.PurgeFin) {}
func (noCache) Zap() {}
func (noCache) SetCapacity(capacity int) {}
func (noCache) Capacity() int { return 0 }
func (noCache) Used() int { return 0 }
func (noCache) Size() int { return 0 }
func (noCache) NumObjects() int { return 0 }
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {}
func (noCache) ZapNamespace(id uint64) {}
func (noCache) Purge(fin cache.PurgeFin) {}
func (noCache) Zap() {}
var NoCache cache.Cache = noCache{}

View File

@@ -7,7 +7,6 @@
package leveldb
import (
"io"
"sort"
"sync/atomic"
@@ -323,15 +322,6 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
return
}
type tableWrapper struct {
*table.Reader
closer io.Closer
}
func (tr tableWrapper) Release() {
tr.closer.Close()
}
// Opens table. It returns a cache handle, which should
// be released after use.
func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
@@ -347,7 +337,7 @@ func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
if bc := t.s.o.GetBlockCache(); bc != nil {
bcacheNS = bc.GetNamespace(num)
}
return 1, tableWrapper{table.NewReader(r, int64(f.size), bcacheNS, t.bpool, t.s.o), r}
return 1, table.NewReader(r, int64(f.size), bcacheNS, t.bpool, t.s.o)
})
if ch == nil && err == nil {
err = ErrClosed
@@ -363,7 +353,7 @@ func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []b
return nil, nil, err
}
defer ch.Release()
return ch.Value().(tableWrapper).Find(key, ro)
return ch.Value().(*table.Reader).Find(key, ro)
}
// Returns approximate offset of the given key.
@@ -372,10 +362,9 @@ func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
if err != nil {
return
}
_offset, err := ch.Value().(tableWrapper).OffsetOf(key)
offset = uint64(_offset)
ch.Release()
return
defer ch.Release()
offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
return uint64(offset_), err
}
// Creates an iterator from the given table.
@@ -384,7 +373,7 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
if err != nil {
return iterator.NewEmptyIterator(err)
}
iter := ch.Value().(tableWrapper).NewIterator(slice, ro)
iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
iter.SetReleaser(ch)
return iter
}
@@ -401,7 +390,7 @@ func (t *tOps) remove(f *tFile) {
t.s.logf("table@remove removed @%d", num)
}
if bc := t.s.o.GetBlockCache(); bc != nil {
bc.GetNamespace(num).Zap()
bc.ZapNamespace(num)
}
}
})
@@ -411,6 +400,7 @@ func (t *tOps) remove(f *tFile) {
// regardless of whether it is still used or not.
func (t *tOps) close() {
t.cache.Zap()
t.bpool.Close()
}
// Creates new initialized table ops instance.

View File

@@ -40,7 +40,7 @@ var _ = testutil.Defer(func() {
data := bw.buf.Bytes()
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
return &block{
cmp: comparer.DefaultComparer,
tr: &Reader{cmp: comparer.DefaultComparer},
data: data,
restartsLen: restartsLen,
restartsOffset: len(data) - (restartsLen+1)*4,

View File

@@ -37,8 +37,7 @@ func max(x, y int) int {
}
type block struct {
bpool *util.BufferPool
cmp comparer.BasicComparer
tr *Reader
data []byte
restartsLen int
restartsOffset int
@@ -47,31 +46,25 @@ type block struct {
}
func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) {
n := b.restartsOffset
data := b.data
cmp := b.cmp
index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
offset := int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):]))
offset += 1 // shared always zero, since this is a restart point
v1, n1 := binary.Uvarint(data[offset:]) // key length
_, n2 := binary.Uvarint(data[offset+n1:]) // value length
offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
offset += 1 // shared always zero, since this is a restart point
v1, n1 := binary.Uvarint(b.data[offset:]) // key length
_, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
m := offset + n1 + n2
return cmp.Compare(data[m:m+int(v1)], key) > 0
return b.tr.cmp.Compare(b.data[m:m+int(v1)], key) > 0
}) + rstart - 1
if index < rstart {
// The smallest key is greater-than key sought.
index = rstart
}
offset = int(binary.LittleEndian.Uint32(data[n+4*index:]))
offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
return
}
func (b *block) restartIndex(rstart, rlimit, offset int) int {
n := b.restartsOffset
data := b.data
return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
return int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):])) > offset
return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
}) + rstart - 1
}
@@ -141,10 +134,10 @@ func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releas
}
func (b *block) Release() {
if b.bpool != nil {
b.bpool.Put(b.data)
b.bpool = nil
if b.tr.bpool != nil {
b.tr.bpool.Put(b.data)
}
b.tr = nil
b.data = nil
}
@@ -270,7 +263,7 @@ func (i *blockIter) Seek(key []byte) bool {
i.dir = dirForward
}
for i.Next() {
if i.block.cmp.Compare(i.key, key) >= 0 {
if i.block.tr.cmp.Compare(i.key, key) >= 0 {
return true
}
}
@@ -479,7 +472,7 @@ func (i *blockIter) Error() error {
}
type filterBlock struct {
filter filter.Filter
tr *Reader
data []byte
oOffset int
baseLg uint
@@ -493,7 +486,7 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
n := int(binary.LittleEndian.Uint32(o))
m := int(binary.LittleEndian.Uint32(o[4:]))
if n < m && m <= b.oOffset {
return b.filter.Contains(b.data[n:m], key)
return b.tr.filter.Contains(b.data[n:m], key)
} else if n == m {
return false
}
@@ -501,10 +494,17 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
return true
}
func (b *filterBlock) Release() {
if b.tr.bpool != nil {
b.tr.bpool.Put(b.data)
}
b.tr = nil
b.data = nil
}
type indexIter struct {
blockIter
tableReader *Reader
slice *util.Range
*blockIter
slice *util.Range
// Options
checksum bool
fillCache bool
@@ -523,7 +523,7 @@ func (i *indexIter) Get() iterator.Iterator {
if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
slice = i.slice
}
return i.tableReader.getDataIter(dataBH, slice, i.checksum, i.fillCache)
return i.blockIter.block.tr.getDataIter(dataBH, slice, i.checksum, i.fillCache)
}
// Reader is a table reader.
@@ -538,9 +538,8 @@ type Reader struct {
checksum bool
strictIter bool
dataEnd int64
indexBlock *block
filterBlock *filterBlock
dataEnd int64
indexBH, filterBH blockHandle
}
func verifyChecksum(data []byte) bool {
@@ -557,6 +556,7 @@ func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
}
if checksum || r.checksum {
if !verifyChecksum(data) {
r.bpool.Put(data)
return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")
}
}
@@ -575,6 +575,7 @@ func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
return nil, err
}
default:
r.bpool.Put(data)
return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length])
}
return data, nil
@@ -587,7 +588,7 @@ func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
}
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
b := &block{
cmp: r.cmp,
tr: r,
data: data,
restartsLen: restartsLen,
restartsOffset: len(data) - (restartsLen+1)*4,
@@ -596,7 +597,44 @@ func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
return b, nil
}
func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterBlock, error) {
func (r *Reader) readBlockCached(bh blockHandle, checksum, fillCache bool) (*block, util.Releaser, error) {
if r.cache != nil {
var err error
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
if !fillCache {
return 0, nil
}
var b *block
b, err = r.readBlock(bh, checksum)
if err != nil {
return 0, nil
}
return cap(b.data), b
})
if ch != nil {
b, ok := ch.Value().(*block)
if !ok {
ch.Release()
return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type")
}
if !b.checksum && (r.checksum || checksum) {
if !verifyChecksum(b.data) {
ch.Release()
return nil, nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")
}
b.checksum = true
}
return b, ch, err
} else if err != nil {
return nil, nil, err
}
}
b, err := r.readBlock(bh, checksum)
return b, b, err
}
func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
data, err := r.readRawBlock(bh, true)
if err != nil {
return nil, err
@@ -611,7 +649,7 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)")
}
b := &filterBlock{
filter: filter,
tr: r,
data: data,
oOffset: oOffset,
baseLg: uint(data[n-1]),
@@ -620,42 +658,42 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
return b, nil
}
func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
if r.cache != nil {
// Get/set block cache.
var err error
cache := r.cache.Get(dataBH.offset, func() (charge int, value interface{}) {
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
if !fillCache {
return 0, nil
}
var dataBlock *block
dataBlock, err = r.readBlock(dataBH, checksum)
var b *filterBlock
b, err = r.readFilterBlock(bh)
if err != nil {
return 0, nil
}
return int(dataBH.length), dataBlock
return cap(b.data), b
})
if err != nil {
return iterator.NewEmptyIterator(err)
}
if cache != nil {
dataBlock := cache.Value().(*block)
if !dataBlock.checksum && (r.checksum || checksum) {
if !verifyChecksum(dataBlock.data) {
return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid block (checksum mismatch)"))
}
dataBlock.checksum = true
if ch != nil {
b, ok := ch.Value().(*filterBlock)
if !ok {
ch.Release()
return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type")
}
iter := dataBlock.newIterator(slice, false, cache)
return iter
return b, ch, err
} else if err != nil {
return nil, nil, err
}
}
dataBlock, err := r.readBlock(dataBH, checksum)
b, err := r.readFilterBlock(bh)
return b, b, err
}
func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
b, rel, err := r.readBlockCached(dataBH, checksum, fillCache)
if err != nil {
return iterator.NewEmptyIterator(err)
}
iter := dataBlock.newIterator(slice, false, dataBlock)
return iter
return b.newIterator(slice, false, rel)
}
// NewIterator creates an iterator from the table.
@@ -669,18 +707,21 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
// when not used.
//
// Also read Iterator documentation of the leveldb/iterator package.
func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
if r.err != nil {
return iterator.NewEmptyIterator(r.err)
}
fillCache := !ro.GetDontFillCache()
b, rel, err := r.readBlockCached(r.indexBH, true, fillCache)
if err != nil {
return iterator.NewEmptyIterator(err)
}
index := &indexIter{
blockIter: *r.indexBlock.newIterator(slice, true, nil),
tableReader: r,
slice: slice,
checksum: ro.GetStrict(opt.StrictBlockChecksum),
fillCache: !ro.GetDontFillCache(),
blockIter: b.newIterator(slice, true, rel),
slice: slice,
checksum: ro.GetStrict(opt.StrictBlockChecksum),
fillCache: !ro.GetDontFillCache(),
}
return iterator.NewIndexedIterator(index, r.strictIter || ro.GetStrict(opt.StrictIterator), false)
}
@@ -697,7 +738,13 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
return
}
index := r.indexBlock.newIterator(nil, true, nil)
indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
if err != nil {
return
}
defer rel.Release()
index := indexBlock.newIterator(nil, true, nil)
defer index.Release()
if !index.Seek(key) {
err = index.Error()
@@ -711,9 +758,15 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
return
}
if r.filterBlock != nil && !r.filterBlock.contains(dataBH.offset, key) {
err = ErrNotFound
return
if r.filter != nil {
filterBlock, rel, ferr := r.readFilterBlockCached(r.filterBH, true)
if ferr == nil {
if !filterBlock.contains(dataBH.offset, key) {
rel.Release()
return nil, nil, ErrNotFound
}
rel.Release()
}
}
data := r.getDataIter(dataBH, nil, ro.GetStrict(opt.StrictBlockChecksum), !ro.GetDontFillCache())
defer data.Release()
@@ -760,7 +813,13 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
return
}
index := r.indexBlock.newIterator(nil, true, nil)
indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
if err != nil {
return
}
defer rel.Release()
index := indexBlock.newIterator(nil, true, nil)
defer index.Release()
if index.Seek(key) {
dataBH, n := decodeBlockHandle(index.Value())
@@ -778,6 +837,17 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
return
}
// Release implements util.Releaser.
// It also closes the file if it is an io.Closer.
func (r *Reader) Release() {
if closer, ok := r.reader.(io.Closer); ok {
closer.Close()
}
r.reader = nil
r.cache = nil
r.bpool = nil
}
// NewReader creates a new initialized table reader for the file.
// The cache and bpool are optional and can be nil.
//
@@ -817,16 +887,11 @@ func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.Buf
return r
}
// Decode the index block handle.
indexBH, n := decodeBlockHandle(footer[n:])
r.indexBH, n = decodeBlockHandle(footer[n:])
if n == 0 {
r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)")
return r
}
// Read index block.
r.indexBlock, r.err = r.readBlock(indexBH, true)
if r.err != nil {
return r
}
// Read metaindex block.
metaBlock, err := r.readBlock(metaBH, true)
if err != nil {
@@ -842,32 +907,28 @@ func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.Buf
continue
}
fn := key[7:]
var filter filter.Filter
if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
filter = f0
r.filter = f0
} else {
for _, f0 := range o.GetAltFilters() {
if f0.Name() == fn {
filter = f0
r.filter = f0
break
}
}
}
if filter != nil {
if r.filter != nil {
filterBH, n := decodeBlockHandle(metaIter.Value())
if n == 0 {
continue
}
r.filterBH = filterBH
// Update data end.
r.dataEnd = int64(filterBH.offset)
filterBlock, err := r.readFilterBlock(filterBH, filter)
if err != nil {
continue
}
r.filterBlock = filterBlock
break
}
}
metaIter.Release()
metaBlock.Release()
return r
}

View File

@@ -111,7 +111,9 @@ var _ = testutil.Defer(func() {
testutil.AllKeyValueTesting(nil, Build)
Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
It("should have correct blocks number", func() {
Expect(r.indexBlock.restartsLen).Should(Equal(9))
indexBlock, err := r.readBlock(r.indexBH, true)
Expect(err).To(BeNil())
Expect(indexBlock.restartsLen).Should(Equal(9))
})
}))
})

View File

@@ -19,15 +19,21 @@ type buffer struct {
// BufferPool is a 'buffer pool'.
type BufferPool struct {
pool [4]chan []byte
size [3]uint32
sizeMiss [3]uint32
baseline0 int
baseline1 int
baseline2 int
pool [6]chan []byte
size [5]uint32
sizeMiss [5]uint32
sizeHalf [5]uint32
baseline [4]int
baselinex0 int
baselinex1 int
baseline0 int
baseline1 int
baseline2 int
close chan struct{}
get uint32
put uint32
half uint32
less uint32
equal uint32
greater uint32
@@ -35,16 +41,15 @@ type BufferPool struct {
}
func (p *BufferPool) poolNum(n int) int {
switch {
case n <= p.baseline0:
if n <= p.baseline0 && n > p.baseline0/2 {
return 0
case n <= p.baseline1:
return 1
case n <= p.baseline2:
return 2
default:
return 3
}
for i, x := range p.baseline {
if n <= x {
return i + 1
}
}
return len(p.baseline) + 1
}
// Get returns buffer with length of n.
@@ -59,13 +64,22 @@ func (p *BufferPool) Get(n int) []byte {
case b := <-pool:
switch {
case cap(b) > n:
atomic.AddUint32(&p.less, 1)
return b[:n]
if cap(b)-n >= n {
atomic.AddUint32(&p.half, 1)
select {
case pool <- b:
default:
}
return make([]byte, n)
} else {
atomic.AddUint32(&p.less, 1)
return b[:n]
}
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
default:
panic("not reached")
atomic.AddUint32(&p.greater, 1)
}
default:
atomic.AddUint32(&p.miss, 1)
@@ -79,8 +93,23 @@ func (p *BufferPool) Get(n int) []byte {
case b := <-pool:
switch {
case cap(b) > n:
atomic.AddUint32(&p.less, 1)
return b[:n]
if cap(b)-n >= n {
atomic.AddUint32(&p.half, 1)
sizeHalfPtr := &p.sizeHalf[poolNum-1]
if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
atomic.StoreUint32(sizeHalfPtr, 0)
} else {
select {
case pool <- b:
default:
}
}
return make([]byte, n)
} else {
atomic.AddUint32(&p.less, 1)
return b[:n]
}
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
@@ -126,20 +155,34 @@ func (p *BufferPool) Put(b []byte) {
}
func (p *BufferPool) Close() {
select {
case p.close <- struct{}{}:
default:
}
}
func (p *BufferPool) String() string {
return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v G·%d P·%d <·%d =·%d >·%d M·%d}",
p.baseline0, p.size, p.sizeMiss, p.get, p.put, p.less, p.equal, p.greater, p.miss)
return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
}
func (p *BufferPool) drain() {
ticker := time.NewTicker(2 * time.Second)
for {
time.Sleep(1 * time.Second)
select {
case <-p.pool[0]:
case <-p.pool[1]:
case <-p.pool[2]:
case <-p.pool[3]:
default:
case <-ticker.C:
for _, ch := range p.pool {
select {
case <-ch:
default:
}
}
case <-p.close:
for _, ch := range p.pool {
close(ch)
}
return
}
}
}
@@ -151,10 +194,10 @@ func NewBufferPool(baseline int) *BufferPool {
}
p := &BufferPool{
baseline0: baseline,
baseline1: baseline * 2,
baseline2: baseline * 4,
baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
close: make(chan struct{}, 1),
}
for i, cap := range []int{6, 6, 3, 1} {
for i, cap := range []int{2, 2, 4, 4, 2, 1} {
p.pool[i] = make(chan []byte, cap)
}
go p.drain()

View File

@@ -4,4 +4,20 @@
package auto_test
// Empty test file to generate 0% coverage rather than no coverage
import (
"bytes"
"testing"
"github.com/syncthing/syncthing/auto"
)
func TestAssets(t *testing.T) {
assets := auto.Assets()
idx, ok := assets["index.html"]
if !ok {
t.Fatal("No index.html in compiled in assets")
}
if !bytes.Contains(idx, []byte("<html")) {
t.Fatal("No html in index.html")
}
}

View File

@@ -1,2 +1,6 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package auto contains auto generated files for web assets.
package auto

View File

File diff suppressed because one or more lines are too long

View File

@@ -11,11 +11,6 @@ type recv struct {
src net.Addr
}
type dst struct {
intf string
conn *net.UDPConn
}
type Interface interface {
Send(data []byte)
Recv() ([]byte, net.Addr)

View File

@@ -9,7 +9,6 @@ import "net"
type Broadcast struct {
conn *net.UDPConn
port int
conns []dst
inbox chan []byte
outbox chan recv
}

View File

@@ -9,7 +9,6 @@ import "net"
type Multicast struct {
conn *net.UDPConn
addr *net.UDPAddr
conns []dst
inbox chan []byte
outbox chan recv
}

View File

@@ -163,7 +163,7 @@ func setup() {
}
func test(pkg string) {
runPrint("godep", "go", "test", pkg)
runPrint("godep", "go", "test", "-short", "-timeout", "10s", pkg)
}
func install(pkg string) {
@@ -243,20 +243,20 @@ func xdr() {
}
func translate() {
os.Chdir("gui")
runPipe("lang-en-new.json", "go", "run", "../cmd/translate/main.go", "lang-en.json", "index.html")
os.Chdir("gui/lang")
runPipe("lang-en-new.json", "go", "run", "../../cmd/translate/main.go", "lang-en.json", "../index.html")
os.Remove("lang-en.json")
err := os.Rename("lang-en-new.json", "lang-en.json")
if err != nil {
log.Fatal(err)
}
os.Chdir("..")
os.Chdir("../..")
}
func transifex() {
os.Chdir("gui")
runPrint("go", "run", "../cmd/transifexdl/main.go")
os.Chdir("..")
os.Chdir("gui/lang")
runPrint("go", "run", "../../cmd/transifexdl/main.go")
os.Chdir("../..")
assets()
}

View File

@@ -14,7 +14,16 @@ no-docs-typos() {
grep -v f1120d7aa936c0658429edef0037792520b46334
}
for email in $(missing-contribs) ; do
git log --author="$email" --format="%H %ae %s" | no-docs-typos
done
print-missing-contribs() {
for email in $(missing-contribs) ; do
git log --author="$email" --format="%H %ae %s" | no-docs-typos
done
}
print-missing-copyright() {
find . -name \*.go | xargs grep -L 'Copyright (C)' | grep -v Godeps
}
print-missing-contribs
print-missing-copyright

View File

@@ -9,8 +9,8 @@ package main
import (
"bytes"
"compress/gzip"
"encoding/base64"
"flag"
"fmt"
"go/format"
"io"
"os"
@@ -23,27 +23,27 @@ var tpl = template.Must(template.New("assets").Parse(`package auto
import (
"bytes"
"compress/gzip"
"encoding/hex"
"encoding/base64"
"io/ioutil"
)
var Assets = make(map[string][]byte)
func init() {
func Assets() map[string][]byte {
var assets = make(map[string][]byte, {{.assets | len}})
var bs []byte
var gr *gzip.Reader
{{range $asset := .assets}}
bs, _ = hex.DecodeString("{{$asset.HexData}}")
bs, _ = base64.StdEncoding.DecodeString("{{$asset.Data}}")
gr, _ = gzip.NewReader(bytes.NewBuffer(bs))
bs, _ = ioutil.ReadAll(gr)
Assets["{{$asset.Name}}"] = bs
assets["{{$asset.Name}}"] = bs
{{end}}
return assets
}
`))
type asset struct {
Name string
HexData string
Name string
Data string
}
var assets []asset
@@ -69,8 +69,8 @@ func walkerFor(basePath string) filepath.WalkFunc {
name, _ = filepath.Rel(basePath, name)
assets = append(assets, asset{
Name: filepath.ToSlash(name),
HexData: fmt.Sprintf("%x", buf.Bytes()),
Name: filepath.ToSlash(name),
Data: base64.StdEncoding.EncodeToString(buf.Bytes()),
})
}

View File

@@ -11,7 +11,6 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"math/rand"
"mime"
"net"
@@ -45,7 +44,6 @@ var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
static func(http.ResponseWriter, *http.Request, *log.Logger)
apiKey string
modt = time.Now().UTC().Format(http.TimeFormat)
eventSub *events.BufferedSubscription
@@ -257,7 +255,7 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
m.Override(repo)
go m.Override(repo)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
@@ -652,6 +650,8 @@ func validAPIKey(k string) bool {
}
func embeddedStatic(assetDir string) http.Handler {
assets := auto.Assets()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
file := r.URL.Path
@@ -672,13 +672,13 @@ func embeddedStatic(assetDir string) http.Handler {
}
}
bs, ok := auto.Assets[file]
bs, ok := assets[file]
if !ok {
http.NotFound(w, r)
return
}
mtype := mime.TypeByExtension(filepath.Ext(r.URL.Path))
mtype := mimeTypeForFile(file)
if len(mtype) != 0 {
w.Header().Set("Content-Type", mtype)
}
@@ -688,3 +688,28 @@ func embeddedStatic(assetDir string) http.Handler {
w.Write(bs)
})
}
func mimeTypeForFile(file string) string {
// We use a built-in table of the common types since the system
// TypeByExtension might be unreliable. But if we don't know, we delegate
// to the system.
ext := filepath.Ext(file)
switch ext {
case ".htm", ".html":
return "text/html"
case ".css":
return "text/css"
case ".js":
return "application/javascript"
case ".json":
return "application/json"
case ".png":
return "image/png"
case ".ttf":
return "application/x-font-ttf"
case ".woff":
return "application/x-font-woff"
default:
return mime.TypeByExtension(ext)
}
}

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -39,6 +39,7 @@ import (
"github.com/syncthing/syncthing/upgrade"
"github.com/syncthing/syncthing/upnp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
var (
@@ -389,7 +390,7 @@ func main() {
// If this is the first time the user runs v0.9, archive the old indexes and config.
archiveLegacyConfig()
db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), nil)
db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{MaxOpenFiles: 100})
if err != nil {
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}
@@ -420,6 +421,7 @@ nextRepo:
// that all files have been deleted which might not be the case,
// so mark it as invalid instead.
if err != nil || !fi.IsDir() {
l.Warnf("Stopping repository %q - directory missing, but has files in index", repo.ID)
cfg.Repositories[i].Invalid = "repo directory missing"
continue nextRepo
}
@@ -432,6 +434,7 @@ nextRepo:
if err != nil {
// If there was another error or we could not create the
// directory, the repository is invalid.
l.Warnf("Stopping repository %q - %v", err)
cfg.Repositories[i].Invalid = err.Error()
continue nextRepo
}
@@ -466,7 +469,7 @@ nextRepo:
proto = "https"
}
l.Infof("Starting web GUI on %s://%s:%d/", proto, hostShow, addr.Port)
l.Infof("Starting web GUI on %s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
err := startGUI(guiCfg, os.Getenv("STGUIASSETS"), m)
if err != nil {
l.Fatalln("Cannot start GUI:", err)
@@ -481,7 +484,13 @@ nextRepo:
// start needing a bunch of files which are nowhere to be found. This
// needs to be changed when we correctly do persistent indexes.
for _, repoCfg := range cfg.Repositories {
if repoCfg.Invalid != "" {
continue
}
for _, node := range repoCfg.NodeIDs() {
if node == myID {
continue
}
m.Index(node, repoCfg.ID, nil)
}
}
@@ -663,13 +672,16 @@ func renewUPnP(port int) {
}
// Just renew the same port that we already have
err = igd.AddPortMapping(upnp.TCP, externalPort, port, "syncthing", cfg.Options.UPnPLease*60)
if err == nil {
l.Infoln("Renewed UPnP port mapping - external port", externalPort)
continue
if externalPort != 0 {
err = igd.AddPortMapping(upnp.TCP, externalPort, port, "syncthing", cfg.Options.UPnPLease*60)
if err == nil {
l.Infoln("Renewed UPnP port mapping - external port", externalPort)
continue
}
}
// Something strange has happened. Perhaps the gateway has changed?
// Something strange has happened. We didn't have an external port before?
// Or perhaps the gateway has changed?
// Retry the same port sequence from the beginning.
r := setupExternalPort(igd, port)
if r != 0 {
@@ -725,7 +737,7 @@ func archiveLegacyConfig() {
l.Warnf("Cannot archive config:", err)
return
}
defer src.Close()
defer dst.Close()
l.Infoln("Archiving config.xml")
io.Copy(dst, src)

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build solaris
package main

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build freebsd
package main

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -2,7 +2,7 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows
// +build !solaris,!windows
package main

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -38,7 +38,6 @@ type Discoverer struct {
forcedBcastTick chan time.Time
extAnnounceOK bool
extAnnounceOKmut sync.Mutex
globalBcastStop chan bool
}
type cacheEntry struct {
@@ -50,11 +49,6 @@ var (
ErrIncorrectMagic = errors.New("incorrect magic number")
)
// We tolerate a certain amount of errors because we might be running on
// laptops that sleep and wake, have intermittent network connectivity, etc.
// When we hit this many errors in succession, we stop.
const maxErrors = 30
func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
return &Discoverer{
myID: id,

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package files
import "code.google.com/p/go.text/unicode/norm"

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows,!darwin
package files

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package files
import (

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package files
import (
@@ -182,18 +186,28 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
}
fsi++
case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions.
// File exists on both sides - compare versions. We might get an
// update with the same version and different flags if a node has
// marked a file as invalid, so handle that too.
var ef protocol.FileInfoTruncated
ef.UnmarshalXDR(dbi.Value())
if fs[fsi].Version > ef.Version {
if fs[fsi].Version > ef.Version || fs[fsi].Version != ef.Version {
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
}
}
// Iterate both sides.
fsi++
@@ -276,7 +290,11 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
}
continue
}
@@ -285,11 +303,17 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
if err != nil {
panic(err)
}
if ef.Version != f.Version {
// Flags might change without the version being bumped when we set the
// invalid flag on an existing file.
if ef.Version != f.Version || ef.Flags != f.Flags {
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
}
}
}
@@ -381,7 +405,9 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
gk := globalKey(repo, file)
svl, err := db.Get(gk, nil)
if err != nil {
panic(err)
// We might be called to "remove" a global version that doesn't exist
// if the first update for the file is already marked invalid.
return
}
var fl versionList
@@ -591,6 +617,7 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterat
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
outer:
for dbi.Next() {
var vl versionList
err := vl.UnmarshalXDR(dbi.Value())
@@ -616,28 +643,41 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterat
if need || !have {
name := globalKeyName(dbi.Key())
fk := nodeKey(repo, vl.versions[0].node, name)
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
}
needVersion := vl.versions[0].version
inner:
for i := range vl.versions {
if vl.versions[i].version != needVersion {
// We haven't found a valid copy of the file with the needed version.
continue outer
}
fk := nodeKey(repo, vl.versions[i].node, name)
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
}
gf, err := unmarshalTrunc(bs, truncate)
if err != nil {
panic(err)
}
gf, err := unmarshalTrunc(bs, truncate)
if err != nil {
panic(err)
}
if gf.IsDeleted() && !have {
// We don't need deleted files that we don't have
continue
}
if gf.IsInvalid() {
// The file is marked invalid for whatever reason, don't use it.
continue inner
}
if debug {
l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
}
if gf.IsDeleted() && !have {
// We don't need deleted files that we don't have
continue outer
}
if cont := fn(gf); !cont {
return
if debug {
l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
}
if cont := fn(gf); !cont {
return
}
}
}
}

View File

@@ -18,10 +18,11 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
var remoteNode protocol.NodeID
var remoteNode0, remoteNode1 protocol.NodeID
func init() {
remoteNode, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteNode0, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteNode1, _ = protocol.NodeIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
}
func genBlocks(n int) []protocol.BlockInfo {
@@ -81,6 +82,16 @@ func (l fileList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l fileList) String() string {
var b bytes.Buffer
b.WriteString("[]protocol.FileList{\n")
for _, f := range l {
fmt.Fprintf(&b, " %q: #%d, %d bytes, %d blocks, flags=%o\n", f.Name, f.Version, f.Size(), len(f.Blocks), f.Flags)
}
b.WriteString("}")
return b.String()
}
func TestGlobalSet(t *testing.T) {
lamport.Default = lamport.Clock{}
@@ -91,20 +102,20 @@ func TestGlobalSet(t *testing.T) {
m := files.NewSet("test", db)
local0 := []protocol.FileInfo{
local0 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1000, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: 1000, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "z", Version: 1000, Blocks: genBlocks(8)},
}
local1 := []protocol.FileInfo{
local1 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1000, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: 1000, Blocks: genBlocks(4)},
}
localTot := []protocol.FileInfo{
localTot := fileList{
local0[0],
local0[1],
local0[2],
@@ -112,76 +123,76 @@ func TestGlobalSet(t *testing.T) {
protocol.FileInfo{Name: "z", Version: 1001, Flags: protocol.FlagDeleted},
}
remote0 := []protocol.FileInfo{
remote0 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5)},
}
remote1 := []protocol.FileInfo{
remote1 := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(6)},
protocol.FileInfo{Name: "e", Version: 1000, Blocks: genBlocks(7)},
}
remoteTot := []protocol.FileInfo{
remoteTot := fileList{
remote0[0],
remote1[0],
remote0[2],
remote1[1],
}
expectedGlobal := []protocol.FileInfo{
remote0[0],
remote1[0],
remote0[2],
localTot[3],
remote1[1],
localTot[4],
expectedGlobal := fileList{
remote0[0], // a
remote1[0], // b
remote0[2], // c
localTot[3], // d
remote1[1], // e
localTot[4], // z
}
expectedLocalNeed := []protocol.FileInfo{
expectedLocalNeed := fileList{
remote1[0],
remote0[2],
remote1[1],
}
expectedRemoteNeed := []protocol.FileInfo{
expectedRemoteNeed := fileList{
local0[3],
}
m.ReplaceWithDelete(protocol.LocalNodeID, local0)
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
m.Replace(remoteNode, remote0)
m.Update(remoteNode, remote1)
m.Replace(remoteNode0, remote0)
m.Update(remoteNode0, remote1)
g := globalList(m)
sort.Sort(fileList(g))
g := fileList(globalList(m))
sort.Sort(g)
if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
}
h := haveList(m, protocol.LocalNodeID)
sort.Sort(fileList(h))
h := fileList(haveList(m, protocol.LocalNodeID))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(localTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, localTot)
}
h = haveList(m, remoteNode)
sort.Sort(fileList(h))
h = fileList(haveList(m, remoteNode0))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
}
- n := needList(m, protocol.LocalNodeID)
- sort.Sort(fileList(n))
+ n := fileList(needList(m, protocol.LocalNodeID))
+ sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
}
- n = needList(m, remoteNode)
- sort.Sort(fileList(n))
+ n = fileList(needList(m, remoteNode0))
+ sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
@@ -192,7 +203,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
}
- f = m.Get(remoteNode, "b")
+ f = m.Get(remoteNode0, "b")
if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
@@ -212,14 +223,14 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
}
- av := []protocol.NodeID{protocol.LocalNodeID, remoteNode}
+ av := []protocol.NodeID{protocol.LocalNodeID, remoteNode0}
a := m.Availability("a")
if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
}
a = m.Availability("b")
- if len(a) != 1 || a[0] != remoteNode {
- t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode)
+ if len(a) != 1 || a[0] != remoteNode0 {
+ t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode0)
}
a = m.Availability("d")
if len(a) != 1 || a[0] != protocol.LocalNodeID {
@@ -227,6 +238,128 @@ func TestGlobalSet(t *testing.T) {
}
}
func TestNeedWithInvalid(t *testing.T) {
lamport.Default = lamport.Clock{}
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test", db)
localHave := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
}
remote0Have := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
remote1Have := fileList{
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "e", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
expectedNeed := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
need := fileList(needList(s, protocol.LocalNodeID))
sort.Sort(need)
if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, expectedNeed)
}
}
func TestUpdateToInvalid(t *testing.T) {
lamport.Default = lamport.Clock{}
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test", db)
localHave := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
have := fileList(haveList(s, protocol.LocalNodeID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
}
localHave[1] = protocol.FileInfo{Name: "b", Version: 1001, Flags: protocol.FlagInvalid}
s.Update(protocol.LocalNodeID, localHave[1:2])
have = fileList(haveList(s, protocol.LocalNodeID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
}
}
func TestInvalidAvailability(t *testing.T) {
lamport.Default = lamport.Clock{}
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test", db)
remote0Have := fileList{
protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
remote1Have := fileList{
protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
if av := s.Availability("both"); len(av) != 2 {
t.Error("Incorrect availability for 'both':", av)
}
if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteNode0 {
t.Error("Incorrect availability for 'r0only':", av)
}
if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteNode1 {
t.Error("Incorrect availability for 'r1only':", av)
}
if av := s.Availability("none"); len(av) != 0 {
t.Error("Incorrect availability for 'none':", av)
}
}
func TestLocalDeleted(t *testing.T) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
@@ -332,7 +465,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
}
m := files.NewSet("test", db)
- m.Replace(remoteNode, remote)
+ m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
@@ -363,7 +496,7 @@ func Benchmark10kUpdateSme(b *testing.B) {
b.Fatal(err)
}
m := files.NewSet("test", db)
- m.Replace(remoteNode, remote)
+ m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
@@ -390,7 +523,7 @@ func Benchmark10kNeed2k(b *testing.B) {
}
m := files.NewSet("test", db)
- m.Replace(remoteNode, remote)
+ m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 8000; i++ {
@@ -423,7 +556,7 @@ func Benchmark10kHaveFullList(b *testing.B) {
}
m := files.NewSet("test", db)
- m.Replace(remoteNode, remote)
+ m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@@ -456,7 +589,7 @@ func Benchmark10kGlobal(b *testing.B) {
}
m := files.NewSet("test", db)
- m.Replace(remoteNode, remote)
+ m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@@ -507,8 +640,8 @@ func TestGlobalReset(t *testing.T) {
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
}
- m.Replace(remoteNode, remote)
- m.Replace(remoteNode, nil)
+ m.Replace(remoteNode0, remote)
+ m.Replace(remoteNode0, nil)
g = globalList(m)
sort.Sort(fileList(g))
@@ -547,7 +680,7 @@ func TestNeed(t *testing.T) {
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
- m.Replace(remoteNode, remote)
+ m.Replace(remoteNode0, remote)
need := needList(m, protocol.LocalNodeID)
@@ -618,7 +751,7 @@ func TestListDropRepo(t *testing.T) {
protocol.FileInfo{Name: "e", Version: 1002},
protocol.FileInfo{Name: "f", Version: 1002},
}
- s1.Replace(remoteNode, local2)
+ s1.Replace(remoteNode0, local2)
// Check that we have both repos and their data is in the global list
@@ -704,7 +837,7 @@ func TestStressGlobalVersion(t *testing.T) {
m := files.NewSet("test", db)
done := make(chan struct{})
- go stressWriter(m, remoteNode, set1, nil, done)
+ go stressWriter(m, remoteNode0, set1, nil, done)
go stressWriter(m, protocol.LocalNodeID, set2, nil, done)
t0 := time.Now()


@@ -15,7 +15,7 @@ syncthing.config(function ($httpProvider, $translateProvider) {
$httpProvider.defaults.xsrfCookieName = 'CSRF-Token';
$translateProvider.useStaticFilesLoader({
- prefix: 'lang-',
+ prefix: 'lang/lang-',
suffix: '.json'
});
});
@@ -906,10 +906,10 @@ function nodeCompare(a, b) {
}
function repoCompare(a, b) {
- if (a.Directory < b.Directory) {
+ if (a.ID < b.ID) {
return -1;
}
- return a.Directory > b.Directory;
+ return a.ID > b.ID;
}
function repoMap(l) {
@@ -971,9 +971,9 @@ function debounce(func, wait) {
} else {
timeout = null;
if (again) {
+ again = false;
result = func.apply(context, args);
context = args = null;
- again = false;
}
}
};
@@ -1043,12 +1043,6 @@ syncthing.filter('metric', function () {
};
});
- syncthing.filter('short', function () {
- return function (input) {
- return input.substr(0, 6);
- };
- });
syncthing.filter('alwaysNumber', function () {
return function (input) {
if (input === undefined) {
@@ -1058,18 +1052,6 @@ syncthing.filter('alwaysNumber', function () {
};
});
- syncthing.filter('shortPath', function () {
- return function (input) {
- if (input === undefined)
- return "";
- var parts = input.split(/[\/\\]/);
- if (!parts || parts.length <= 3) {
- return input;
- }
- return ".../" + parts.slice(parts.length-2).join("/");
- };
- });
syncthing.filter('basename', function () {
return function (input) {
if (input === undefined)
@@ -1082,24 +1064,6 @@ syncthing.filter('basename', function () {
};
});
- syncthing.filter('clean', function () {
- return function (input) {
- return encodeURIComponent(input).replace(/%/g, '');
- };
- });
- syncthing.directive('optionEditor', function () {
- return {
- restrict: 'C',
- replace: true,
- transclude: true,
- scope: {
- setting: '=setting',
- },
- template: '<input type="text" ng-model="config.Options[setting.id]"></input>',
- };
- });
syncthing.directive('uniqueRepo', function() {
return {
require: 'ngModel',

gui/font/raleway-500.woff (new binary file, contents not shown)


@@ -2,5 +2,5 @@
font-family: 'Raleway';
font-style: normal;
font-weight: 500;
- src: local('Raleway'), url(raleway-500.ttf) format('truetype');
+ src: local('Raleway'), url(raleway-500.woff) format('woff');
}

(three image files, binary contents unchanged: 6.4 KiB, 47 KiB, 12 KiB)

@@ -11,93 +11,12 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="">
- <link rel="shortcut icon" href="favicon.png">
+ <link rel="shortcut icon" href="img/favicon.png">
<title>Syncthing | {{thisNodeName()}}</title>
<link href="bootstrap/css/bootstrap.min.css" rel="stylesheet">
- <link href="raleway.css" rel="stylesheet">
- <style type="text/css"> (inline CSS removed; the 80 lines moved verbatim to the new gui/overrides.css shown later in this diff) </style>
+ <link href="font/raleway.css" rel="stylesheet">
+ <link href="overrides.css" rel="stylesheet">
</head>
<body>
@@ -107,7 +26,7 @@
<nav class="navbar navbar-top navbar-default" role="navigation">
<div class="container">
- <span class="navbar-brand"><img class="logo" src="logo-text-64.png" height="32" width="117"/></span>
+ <span class="navbar-brand"><img class="logo" src="img/logo-text-64.png" height="32" width="117"/></span>
<p class="navbar-text hidden-xs">{{thisNodeName()}}</p>
<ul class="nav navbar-nav navbar-right">
<li ng-if="upgradeInfo.newer">
@@ -757,7 +676,7 @@
<!-- About modal -->
<modal id="about" large="yes" close="yes" status="info" title="About">
- <h1 class="text-center"><img alt="Syncthing" title="Syncthing" src="logo-text-256.png" style="vertical-align: -16px" height="100" width="366"/><br/><small>{{version}}</small></h1>
+ <h1 class="text-center"><img alt="Syncthing" title="Syncthing" src="img/logo-text-256.png" style="vertical-align: -16px" height="100" width="366"/><br/><small>{{version}}</small></h1>
<hr/>
<p translate>Copyright &copy; 2014 Jakob Borg and the following Contributors:</p>
@@ -779,6 +698,7 @@
<li>James Patterson</li>
<li>Jens Diemer</li>
<li>Marcin Dziadus</li>
<li>Michael Tilli</li>
<li>Philippe Schommers</li>
<li>Ryan Sullivan</li>
<li>Tully Robinson</li>
@@ -803,12 +723,12 @@
</modal>
- <script src="angular.min.js"></script>
- <script src="angular-translate.min.js"></script>
- <script src="angular-translate-loader.min.js"></script>
- <script src="jquery-2.0.3.min.js"></script>
+ <script src="angular/angular.min.js"></script>
+ <script src="angular/angular-translate.min.js"></script>
+ <script src="angular/angular-translate-loader.min.js"></script>
+ <script src="jquery/jquery-2.0.3.min.js"></script>
<script src="bootstrap/js/bootstrap.min.js"></script>
- <script src="valid-langs.js"></script>
+ <script src="lang/valid-langs.js"></script>
<script src="app.js"></script>
</body>
</html>


@@ -0,0 +1,7 @@
All files in this directory are auto generated. Do not change any of
them. To contribute translations, please head over to
https://www.transifex.com/projects/p/syncthing/
Any updates made on Transifex will be automatically pulled into these
files.

gui/lang/lang-ca.json (new file, 137 lines)

@@ -0,0 +1,137 @@
{
"API Key": "Clau API",
"About": "Sobre",
"Add Node": "Afegir Node",
"Add Repository": "Afegir Repositori",
"Address": "Adreça",
"Addresses": "Adreces",
"Allow Anonymous Usage Reporting?": "Permetre l'enviament anònim d'informes d'ús?",
"Announce Server": "Servidor d'anunciament",
"Anonymous Usage Reporting": "Informe anònim d'ús",
"Bugs": "Bugs",
"CPU Utilization": "Utilització del CPU",
"Close": "Tancar",
"Connection Error": "Error de connexió",
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg i els següents contribuïdors",
"Delete": "Esborrar",
"Disconnected": "Desconnectat",
"Documentation": "Documentació",
"Download Rate": "Tasa de descarrega",
"Edit": "Editar",
"Edit Node": "Editar Node",
"Edit Repository": "Editar Repositori",
"Enable UPnP": "Habilitat UPnP",
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Introduir, separat per comes, adreces \"ip:port\" o \"dynamic\" per descobrir automàticament les adreces.",
"Error": "Error",
"File Versioning": "Versionat de Fitxers",
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Els bits de permisos dels fitxers son ignorats quan es cerquen canvis. Utilitzar en sistemes de fitxers FAT.",
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by syncthing.": "Els fitxers es mouen amb l'estampat de la data a la carpeta .stversions quan son substituïts o esborrats per syncthing.",
"Files are protected from changes made on other nodes, but changes made on this node will be sent to the rest of the cluster.": "Els fitxers estan protegits de canvis fets per altres nodes, però els canvis fets en aquest node seran enviats a la resta del cluster.",
"Folder": "Carpeta",
"GUI Authentication Password": "Contrasenya d'autenticació GUI",
"GUI Authentication User": "Usuari d'autenticació GUI",
"GUI Listen Addresses": "Adreça d'escolta del GUI",
"Generate": "Generar",
"Global Discovery": "Descobriment Global",
"Global Discovery Server": "Servidor de Descobriment Global",
"Global Repository": "Repositori Global",
"Idle": "Inactiu",
"Ignore Permissions": "Ignora Permisos",
"Keep Versions": "Mantenir Versions",
"Last seen": "Vist per última vegada",
"Latest Release": "Última publicació",
"Local Discovery": "Descobriment Local",
"Local Discovery Port": "Port de Descobriment Local",
"Local Repository": "Repositori Local",
"Master Repo": "Rep Master",
"Max File Change Rate (KiB/s)": "Tasa Màxima d'intercanvi de fitxer (KiB/s)",
"Max Outstanding Requests": "Màxim de Peticions Pendents",
"Maximum Age": "Antiguitat Màxima",
"Never": "Mai",
"No": "No",
"No File Versioning": "Sense Versionat de Fitxer",
"Node ID": "ID del Node",
"Node Identification": "Identificació del Node",
"Node Name": "Nom Del Node",
"Notice": "Avís",
"OK": "OK",
"Offline": "Desconnectat",
"Online": "Connectat",
"Out Of Sync": "Fora de la Sincronització",
"Outgoing Rate Limit (KiB/s)": "Tasa Límit de Sortida (KiB/s)",
"Override Changes": "Sobreescriure Canvis",
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Ruta del repositori a l'equip local. Si no existeix serà creada. El caràcter (~) es pot fer servir com a drecera de",
"Path where versions should be stored (leave empty for the default .stversions folder in the repository).": "Ruta on les versions s'haurien de guardar (deixa-ho buit per fer servir el directori .stversions per defecte al repositori)",
"Please wait": "Si-us-plau espera",
"Preview Usage Report": "Vista Prèvia de l'Informe d'Ús",
"RAM Utilization": "Utilització de la RAM",
"Reconnect Interval (s)": "Interval de Reconnexió (s)",
"Repository ID": "ID del Repositori",
"Repository Master": "Repositori Mestre",
"Repository Path": "Ruta del Repositori",
"Rescan": "Re-escanejar",
"Rescan Interval": "Interval de re-escaneig",
"Rescan Interval (s)": "Interval de re-escaneig (s)",
"Restart": "Reiniciar",
"Restart Needed": "És Necessari Reiniciar",
"Restarting": "Reiniciant",
"Save": "Guardar",
"Scanning": "Escanejant",
"Select the nodes to share this repository with.": "Seleccionar els nodes amb els que es comparteix el repositori.",
"Settings": "Preferències",
"Share With Nodes": "Compartir Amb Els Nodes",
"Shared With": "Compartir Amb",
"Short identifier for the repository. Must be the same on all cluster nodes.": "Identificador curt pel repositori. Ha de ser el mateix per tots els nodes del cluster.",
"Show ID": "Mostrar ID",
"Shown instead of Node ID in the cluster status.": "Mostrat en comptes del ID del Node en l'estat del cluster.",
"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Mostrat en comptes del ID del Node en l'estat del cluster. Serà advertit als altres nodes com un nom opcional per defecte.",
"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Mostrat en comptes del ID del Node en l'estat del cluster. S'actualitzara al nom del node si es deixa buit.",
"Shutdown": "Apagar",
"Simple File Versioning": "Versionat de Fitxers Senzill",
"Source Code": "Codi Font",
"Staggered File Versioning": "Versionat de Fitxers Esglaonat",
"Start Browser": "Arrancar Navegador",
"Stopped": "Aturat",
"Support / Forum": "Suport / Fòrum",
"Sync Protocol Listen Addresses": "Adreça d'escolta del Protocol Sync",
"Synchronization": "Sincronització",
"Syncing": "Synthing",
"Syncthing has been shut down.": "S'ha aturat el synthing.",
"Syncthing includes the following software or portions thereof:": "Syncthing inclou el següent programari o parts dels mateixos:",
"Syncthing is restarting.": "Reiniciant syncthing.",
"Syncthing is upgrading.": "Actualitzant syncthing.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Synthing sembla parat, o hi ha algun problema amb la connexió a Internet. Reintentant...",
"The aggregated statistics are publicly available at {%url%}.": "Les estadístiques agregades estan públicament disponibles a {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configuració s'ha guardar però no s'ha activat. S'ha de reiniciar el synthing per activar la nova configuració.",
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "L'informe d'ús encriptat s'envia diàriament. Es fa servir per rastrejar plataformes habituals, mides de repositoris i versions de l'aplicació. Si es canvia el conjunt de dades reportades es demanarà amb aquest diàleg de nou.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "El ID del Node introduït no sembla vàlid. Hauria de tenir 52 caràcters amb lletres i números, els espais i les barres son opcionals.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "El ID del Node introduït no sembla vàlid. Hauria de tenir 52 o 56 caràcters amb lletres i números, els espais i les barres son opcionals.",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Es fan servir els següents intervals: per la primera hora es manté una versió cada 30 segons, pel primer dia es manté una versió cada hora, pel primer cada 30 dies es manté una versió cada dia, fins el màxim d'antiguitat es manté una versió cada setmana.",
"The maximum age must be a number and cannot be blank.": "La màxima antiguitat ha de ser un número i no pot estar en blanc.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Temps màxim en mantenir una versió (en dies, si es deixa en 0 es mantenen les versions per sempre).",
"The node ID cannot be blank.": "El ID del node no pot estar en blanc.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "El ID del node per introduir aquí es pot trobar al diàleg \"Editar > Mostrar ID\" en l'altre node. Els espais i les barres son opcionals (s'ignoren).",
"The number of old versions to keep, per file.": "El nombre de versions antigues que es mantenen per fitxer.",
"The number of versions must be a number and cannot be blank.": "El nombre de versions ha de ser un número i no es pot deixar en blanc.",
"The repository ID cannot be blank.": "El ID del repositori no pot estar en blanc.",
"The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the the dot (.), dash (-) and underscode (_) characters only.": "El ID del repositori ha de ser un identificador curt (64 caràcters o menys) format només per lletres, nombres i el punt (.), barra (-) i barra baixa (_).",
"The repository ID must be unique.": "El ID del repositori ha de ser únic",
"The repository path cannot be blank.": "La carpeta del repositori no pot estar en blanc.",
"The rescan interval must be at least 5 seconds.": "El interval de re-escaneig ha de ser com a mínim de 5 segons.",
"Unknown": "Desconegut",
"Up to Date": "Actualitzat",
"Upgrade To {%version%}": "Actualitzar a {{version}}",
"Upgrading": "Actualitzant",
"Upload Rate": "Tasa de Pujada",
"Usage": "Ús",
"Use Compression": "Utilitza compressió",
"Use HTTPS for GUI": "Utilitzar HTTPS pel GUI",
"Version": "Versió",
"Versions Path": "Carpeta de les Versions",
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Les versions son automàticament eliminades si son més antigues que el màxim d'antiguitat o si excedeixen del nombre de fitxers permesos en un interval.",
"When adding a new node, keep in mind that this node must be added on the other side too.": "Quan s'afegeix un nou node recorda que aquest node s'ha d'afegir tambe a l'altre banda.",
"When adding a new repository, keep in mind that the Repository ID is used to tie repositories together between nodes. They are case sensitive and must match exactly between all nodes.": "Quan s'afegeix un nou repositori recorda que el ID del repositori s'utilitza per lligar repositoris entre nodes. Es distingeix entre majúscules i minúscules i ha de ser exactament iguals entre tots els nodes.",
"Yes": "Si",
"You must keep at least one version.": "Has de mantenir com a mínim una versió.",
"items": "Elements"
}


@@ -38,7 +38,7 @@
"Idle": "Au repos",
"Ignore Permissions": "Ignorer les permissions",
"Keep Versions": "Conserver les versions",
"Last seen": "Dernière appartition",
"Last seen": "Dernière apparition",
"Latest Release": "Dernière version",
"Local Discovery": "Recherche locale",
"Local Discovery Port": "Port de recherche locale",
@@ -61,7 +61,7 @@
"Outgoing Rate Limit (KiB/s)": "Limite du débit sortant (KiB/s)",
"Override Changes": "Écraser les changements",
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Chemin du répertoire sur l'ordinateur local. Il sera créé si il n'existe pas. Le caractère tilde (~) peut être utilisé comme raccourci vers",
"Path where versions should be stored (leave empty for the default .stversions folder in the repository).": "Path where versions should be stored (leave empty for the default .stversions folder in the repository).",
"Path where versions should be stored (leave empty for the default .stversions folder in the repository).": "Chemin où les versions doivent être conservées (laisser vide pour le chemin par défaut de .stversions dans le répertoire)",
"Please wait": "Merci de patienter",
"Preview Usage Report": "Aperçu du rapport de statistiques d'utilisation",
"RAM Utilization": "Utilisation de la RAM",
@@ -84,8 +84,8 @@
"Short identifier for the repository. Must be the same on all cluster nodes.": "Identifiant court pour le répertoire. Il doit être le même sur l'ensemble des nœuds du cluster.",
"Show ID": "Montrer l'ID",
"Shown instead of Node ID in the cluster status.": "Affiché à la place de l'ID du nœud au sein du cluster.",
"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.",
"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.",
"Shown instead of Node ID in the cluster status. Will be advertised to other nodes as an optional default name.": "Affiché à la place de l'ID du nœud dans le statut du cluster. Sera annoncé aux autres nœuds comme un nom par défaut optionnel.",
"Shown instead of Node ID in the cluster status. Will be updated to the name the node advertises if left empty.": "Affiché à la place de l'ID du nœud dans le statut du cluster. Sera mis à jour par le nom que le nœud annonce si laissé vide.",
"Shutdown": "Éteindre",
"Simple File Versioning": "Versions simples de fichier",
"Source Code": "Code source",
@@ -97,7 +97,7 @@
"Synchronization": "Synchronisation",
"Syncing": "En cours de synchronisation",
"Syncthing has been shut down.": "Syncthing a été éteint.",
"Syncthing includes the following software or portions thereof:": "Syncthing inclut les logiciels, ou portion de ceux-ci, suivants:",
"Syncthing includes the following software or portions thereof:": "Syncthing intègre les logiciels suivants (ou des éléments provenant de ces logiciels) :",
"Syncthing is restarting.": "Syncthing est cours de redémarrage.",
"Syncthing is upgrading.": "Syncthing est cours de mise à jour.",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing semble être éteint, ou il y a un problème avec votre connexion Internet. Nouvelle tentative ...",
@@ -106,9 +106,9 @@
"The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Le rapport d'utilisation chiffré est envoyé quotidiennement. Il sert à répertorier les plateformes utilisées, la taille des répertoires et les versions de l'application. Si le jeu de données rapportées devait être changé, il vous sera demandé de le valider de nouveau via ce dialogue.",
"The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID du nœud ne semble pas être valide. Il devrait ressembler à une chaine de 52 caractères comprenant lettres et chiffres, avec des espaces et des traits d'union optionnels.",
"The entered node ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID du nœud inséré ne semble pas être valide. Il devrait ressembler à une chaîne de 52 ou 56 comprenant lettres et chiffres, avec des espaces et des traits d'union optionnels.",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.",
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Les intervalles suivant sont utilisés: la première heure une version est conservée chaque 30 secondes, le premier jour une version est conservée chaque heure, les premiers 30 jours une version est conservée chaque jour, jusqu'à la limite d'âge maximum une version est conservée chaque semaine.",
"The maximum age must be a number and cannot be blank.": "L'ancienneté maximum doit être un nombre et ne peut être vide.",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "The maximum time to keep a version (in days, set to 0 to keep versions forever).",
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Le temps maximum de conservation d'une version (en jours, mettre à 0 pour conserver les versions pour toujours)",
"The node ID cannot be blank.": "L'ID du nœud ne peut être vide.",
"The node ID to enter here can be found in the \"Edit > Show ID\" dialog on the other node. Spaces and dashes are optional (ignored).": "L'ID du nœud à insérer peut être trouvé à travers le menu \"Éditer > Montrer l'ID\" des autres nœuds. Les espaces et les traits d'union sont optionnels (ils seront ignorés).",
"The number of old versions to keep, per file.": "Le nombre d'anciennes versions à garder, par fichier.",
@@ -120,7 +120,7 @@
"The rescan interval must be at least 5 seconds.": "L'intervalle de scan doit être d'au minimum 5 secondes.",
"Unknown": "Inconnu",
"Up to Date": "Synchronisation à jour",
"Upgrade To {%version%}": "Upgrader vers {{version}}",
"Upgrade To {%version%}": "Mettre à jour vers {{version}}",
"Upgrading": "Mise à jour de Syncthing",
"Upload Rate": "Débit d'envoi",
"Usage": "Utilisation",


@@ -61,7 +61,7 @@
"Outgoing Rate Limit (KiB/s)": "Limite di Velocità in Uscita (KiB/s)",
"Override Changes": "Ignora Modifiche",
"Path to the repository on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Percorso del deposito nel computer locale. Verrà creato se non esiste già. Il carattere tilde (~) può essere utilizzato come scorciatoia per",
"Path where versions should be stored (leave empty for the default .stversions folder in the repository).": "Path where versions should be stored (leave empty for the default .stversions folder in the repository).",
"Path where versions should be stored (leave empty for the default .stversions folder in the repository).": "Percorso di salvataggio delle versioni (lasciare vuoto per utilizzare la cartella predefinita .stversions nel deposito).",
"Please wait": "Attendere prego",
"Preview Usage Report": "Anteprima Statistiche di Utilizzo",
"RAM Utilization": "Utilizzo RAM",


gui/lang/valid-langs.js (new file, 1 line)

@@ -0,0 +1 @@
var validLangs = ["bg","ca","da","de","el","en","es","fr","hu","it","lt","nl","pt-PT","ru","sv","tr","uk","zh-CN","zh-TW"]

gui/overrides.css (new file, 80 lines)

@@ -0,0 +1,80 @@
body {
padding-bottom: 70px;
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
h1, h2, h3, h4, h5 {
font-family: "Raleway", "Helvetica Neue", Helvetica, Arial, sans-serif;
}
ul+h5 {
margin-top: 1.5em;
}
.text-monospace {
font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
}
.table-condensed>thead>tr>th, .table-condensed>tbody>tr>th, .table-condensed>tfoot>tr>th, .table-condensed>thead>tr>td, .table-condensed>tbody>tr>td, .table-condensed>tfoot>tr>td {
border-top: none;
}
.logo {
margin: 0;
padding: 0;
top: -5px;
position: relative;
}
.list-no-bullet {
list-style-type: none
}
.li-column {
display: inline-block;
min-width: 7em;
margin-right: 1em;
background-color: rgb(236, 240, 241);
border-radius: 3px;
padding: 1px 4px;
margin: 2px 2px;
}
.li-column span.data {
margin-left: 0.5em;
min-width: 10em;
text-align: right;
display: inline-block;
}
.ng-cloak {
display: none !important;
}
.table th {
white-space: nowrap;
font-weight: 400;
}
.table td {
padding-left: 20px !important;
}
.table td.small-data {
white-space: nowrap;
}
table.table-condensed {
table-layout: fixed;
}
table.table-condensed td {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
@media (max-width:767px) {
table.table-condensed td {
/* for mobile phones to allow linebreaks in long repro folder/shared with
* columns. */
white-space: normal;
}
}

(deleted binary file, contents not shown)

@@ -1 +0,0 @@
var validLangs = ["bg","da","de","el","en","es","fr","hu","it","lt","nl","pt-PT","ru","sv","tr","uk","zh-CN","zh-TW"]

ignore/ignore.go (new file, 146 lines)

@@ -0,0 +1,146 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package ignore
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/syncthing/syncthing/fnmatch"
)
type Pattern struct {
match *regexp.Regexp
include bool
}
type Patterns []Pattern
func Load(file string) (Patterns, error) {
seen := make(map[string]bool)
return loadIgnoreFile(file, seen)
}
func Parse(r io.Reader, file string) (Patterns, error) {
seen := map[string]bool{
file: true,
}
return parseIgnoreFile(r, file, seen)
}
func (l Patterns) Match(file string) bool {
for _, pattern := range l {
if pattern.match.MatchString(file) {
return pattern.include
}
}
return false
}
func loadIgnoreFile(file string, seen map[string]bool) (Patterns, error) {
if seen[file] {
return nil, fmt.Errorf("Multiple include of ignore file %q", file)
}
seen[file] = true
fd, err := os.Open(file)
if err != nil {
return nil, err
}
defer fd.Close()
return parseIgnoreFile(fd, file, seen)
}
func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) (Patterns, error) {
var exps Patterns
addPattern := func(line string) error {
include := true
if strings.HasPrefix(line, "!") {
line = line[1:]
include = false
}
if strings.HasPrefix(line, "/") {
// Pattern is rooted in the current dir only
exp, err := fnmatch.Convert(line[1:], fnmatch.FNM_PATHNAME)
if err != nil {
return fmt.Errorf("Invalid pattern %q in ignore file", line)
}
exps = append(exps, Pattern{exp, include})
} else if strings.HasPrefix(line, "**/") {
// Add the pattern as is, and without **/ so it matches in current dir
exp, err := fnmatch.Convert(line, fnmatch.FNM_PATHNAME)
if err != nil {
return fmt.Errorf("Invalid pattern %q in ignore file", line)
}
exps = append(exps, Pattern{exp, include})
exp, err = fnmatch.Convert(line[3:], fnmatch.FNM_PATHNAME)
if err != nil {
return fmt.Errorf("Invalid pattern %q in ignore file", line)
}
exps = append(exps, Pattern{exp, include})
} else if strings.HasPrefix(line, "#include ") {
includeFile := filepath.Join(filepath.Dir(currentFile), line[len("#include "):])
includes, err := loadIgnoreFile(includeFile, seen)
if err != nil {
return err
} else {
exps = append(exps, includes...)
}
} else {
// Path name or pattern, add it so it matches files both in
// current directory and subdirs.
exp, err := fnmatch.Convert(line, fnmatch.FNM_PATHNAME)
if err != nil {
return fmt.Errorf("Invalid pattern %q in ignore file", line)
}
exps = append(exps, Pattern{exp, include})
exp, err = fnmatch.Convert("**/"+line, fnmatch.FNM_PATHNAME)
if err != nil {
return fmt.Errorf("Invalid pattern %q in ignore file", line)
}
exps = append(exps, Pattern{exp, include})
}
return nil
}
scanner := bufio.NewScanner(fd)
var err error
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
switch {
case line == "":
continue
case strings.HasPrefix(line, "#"):
err = addPattern(line)
case strings.HasSuffix(line, "/**"):
err = addPattern(line)
case strings.HasSuffix(line, "/"):
err = addPattern(line)
if err == nil {
err = addPattern(line + "**")
}
default:
err = addPattern(line)
if err == nil {
err = addPattern(line + "/**")
}
}
if err != nil {
return nil, err
}
}
return exps, nil
}

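The Patterns.Match method above is first-match-wins: the first pattern that matches the name decides the outcome, so a leading "!" entry can exempt a name from a broader ignore that follows it, and non-rooted patterns are registered twice (bare and with a "**/" prefix) so the same rules apply in subdirectories. A minimal usage sketch, assuming only the Parse and Match API added above; the program, file name, and patterns are hypothetical:

package main

import (
	"bytes"
	"fmt"

	"github.com/syncthing/syncthing/ignore"
)

func main() {
	// "!" exempts a name; the later "*.log" catches the rest.
	stignore := "!important.log\n*.log\n"
	pats, err := ignore.Parse(bytes.NewBufferString(stignore), ".stignore")
	if err != nil {
		panic(err)
	}
	fmt.Println(pats.Match("debug.log"))     // true: ignored
	fmt.Println(pats.Match("important.log")) // false: exempted
}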
ignore/ignore_test.go (new file, 108 lines)

@@ -0,0 +1,108 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package ignore_test
import (
"bytes"
"path/filepath"
"testing"
"github.com/syncthing/syncthing/ignore"
)
func TestIgnore(t *testing.T) {
pats, err := ignore.Load("testdata/.stignore")
if err != nil {
t.Fatal(err)
}
var tests = []struct {
f string
r bool
}{
{"afile", false},
{"bfile", true},
{"cfile", false},
{"dfile", false},
{"efile", true},
{"ffile", true},
{"dir1", false},
{filepath.Join("dir1", "cfile"), true},
{filepath.Join("dir1", "dfile"), false},
{filepath.Join("dir1", "efile"), true},
{filepath.Join("dir1", "ffile"), false},
{"dir2", false},
{filepath.Join("dir2", "cfile"), false},
{filepath.Join("dir2", "dfile"), true},
{filepath.Join("dir2", "efile"), true},
{filepath.Join("dir2", "ffile"), false},
{filepath.Join("dir3"), true},
{filepath.Join("dir3", "afile"), true},
}
for i, tc := range tests {
if r := pats.Match(tc.f); r != tc.r {
t.Errorf("Incorrect ignoreFile() #%d (%s); E: %v, A: %v", i, tc.f, tc.r, r)
}
}
}
func TestExcludes(t *testing.T) {
stignore := `
!iex2
!ign1/ex
ign1
i*2
!ign2
`
pats, err := ignore.Parse(bytes.NewBufferString(stignore), ".stignore")
if err != nil {
t.Fatal(err)
}
var tests = []struct {
f string
r bool
}{
{"ign1", true},
{"ign2", true},
{"ibla2", true},
{"iex2", false},
{"ign1/ign", true},
{"ign1/ex", false},
{"ign1/iex2", false},
{"iex2/ign", false},
{"foo/bar/ign1", true},
{"foo/bar/ign2", true},
{"foo/bar/iex2", false},
}
for _, tc := range tests {
if r := pats.Match(tc.f); r != tc.r {
t.Errorf("Incorrect match for %s: %v != %v", tc.f, r, tc.r)
}
}
}
func TestBadPatterns(t *testing.T) {
var badPatterns = []string{
"[",
"/[",
"**/[",
"#include nonexistent",
"#include .stignore",
"!#include makesnosense",
}
for _, pat := range badPatterns {
parsed, err := ignore.Parse(bytes.NewBufferString(pat), ".stignore")
if err == nil {
t.Errorf("No error for pattern %q: %v", pat, parsed)
}
}
}

ignore/testdata/.stignore (new vendored file, 6 lines)

@@ -0,0 +1,6 @@
#include excludes
bfile
dir1/cfile
**/efile
/ffile

ignore/testdata/dir3/cfile (new vendored file, 1 line)

@@ -0,0 +1 @@
baz

ignore/testdata/dir3/dfile (new vendored file, 1 line)

@@ -0,0 +1 @@
quux

ignore/testdata/excludes (new vendored file, 2 lines)

@@ -0,0 +1,2 @@
dir2/dfile
#include further-excludes

ignore/testdata/further-excludes (new vendored file, 1 line)

@@ -0,0 +1 @@
dir3


@@ -2,7 +2,7 @@
set -euo pipefail
IFS=$'\n\t'
- go test -tags integration -v
+ #go test -tags integration -v
./test-http.sh
./test-merge.sh
./test-delupd.sh


@@ -59,7 +59,7 @@ func (p *syncthingProcess) stop() {
}
func (p *syncthingProcess) peerCompletion() (map[string]int, error) {
- resp, err := http.Get(fmt.Sprintf("http://localhost:%d/rest/debug/peerCompletion", p.port))
+ resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/rest/debug/peerCompletion", p.port))
if err != nil {
return nil, err
}


@@ -23,7 +23,7 @@ start() {
stop() {
for i in 1 2 3 ; do
- curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
+ curl -HX-API-Key:abc123 -X POST "http://127.0.0.1:808$i/rest/shutdown"
done
exit $1
}
@@ -31,9 +31,9 @@ stop() {
testConvergence() {
while true ; do
sleep 5
- s1comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8082/rest/debug/peerCompletion" | ./json "$id1")
- s2comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8083/rest/debug/peerCompletion" | ./json "$id2")
- s3comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/debug/peerCompletion" | ./json "$id3")
+ s1comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8082/rest/debug/peerCompletion" | ./json "$id1")
+ s2comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8083/rest/debug/peerCompletion" | ./json "$id2")
+ s3comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8081/rest/debug/peerCompletion" | ./json "$id3")
s1comp=${s1comp:-0}
s2comp=${s2comp:-0}
s3comp=${s3comp:-0}
@@ -119,7 +119,7 @@ alterFiles() {
pkill -CONT syncthing
echo "Restarting instance 2"
- curl -HX-API-Key:abc123 -X POST "http://localhost:8082/rest/restart"
+ curl -HX-API-Key:abc123 -X POST "http://127.0.0.1:8082/rest/restart"
}
rm -rf h?/*.idx.gz h?/index


@@ -21,7 +21,7 @@ start() {
stop() {
echo "Stopping..."
for i in 1 2 ; do
- curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
+ curl -HX-API-Key:abc123 -X POST "http://127.0.0.1:808$i/rest/shutdown"
done
}
@@ -46,8 +46,8 @@ setup() {
testConvergence() {
while true ; do
sleep 5
- s1comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8082/rest/debug/peerCompletion" | ./json "$id1")
- s2comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/debug/peerCompletion" | ./json "$id2")
+ s1comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8082/rest/debug/peerCompletion" | ./json "$id1")
+ s2comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8081/rest/debug/peerCompletion" | ./json "$id2")
s1comp=${s1comp:-0}
s2comp=${s2comp:-0}
tot=$(($s1comp + $s2comp))


@@ -10,8 +10,8 @@ id3=373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU
stop() {
echo Stopping
- curl -s -o/dev/null -HX-API-Key:abc123 -X POST http://localhost:8081/rest/shutdown
- curl -s -o/dev/null -HX-API-Key:abc123 -X POST http://localhost:8082/rest/shutdown
+ curl -s -o/dev/null -HX-API-Key:abc123 -X POST http://127.0.0.1:8081/rest/shutdown
+ curl -s -o/dev/null -HX-API-Key:abc123 -X POST http://127.0.0.1:8082/rest/shutdown
exit $1
}
@@ -26,14 +26,14 @@ syncthing -home h2 > 2.out 2>&1 &
sleep 1
echo Fetching CSRF tokens
- curl -s -o /dev/null http://testuser:testpass@localhost:8081/index.html
- curl -s -o /dev/null http://localhost:8082/index.html
+ curl -s -o /dev/null http://testuser:testpass@127.0.0.1:8081/index.html
+ curl -s -o /dev/null http://127.0.0.1:8082/index.html
sleep 1
echo Testing
- ./http -target localhost:8081 -user testuser -pass testpass -csrf h1/csrftokens.txt || stop 1
- ./http -target localhost:8081 -api abc123 || stop 1
- ./http -target localhost:8082 -csrf h2/csrftokens.txt || stop 1
- ./http -target localhost:8082 -api abc123 || stop 1
+ ./http -target 127.0.0.1:8081 -user testuser -pass testpass -csrf h1/csrftokens.txt || stop 1
+ ./http -target 127.0.0.1:8081 -api abc123 || stop 1
+ ./http -target 127.0.0.1:8082 -csrf h2/csrftokens.txt || stop 1
+ ./http -target 127.0.0.1:8082 -api abc123 || stop 1
stop 0


@@ -23,7 +23,7 @@ start() {
stop() {
for i in 1 2 3 4 ; do
- curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
+ curl -HX-API-Key:abc123 -X POST "http://127.0.0.1:808$i/rest/shutdown"
done
exit $1
}
@@ -40,9 +40,9 @@ clean() {
testConvergence() {
while true ; do
sleep 5
- s1comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8082/rest/debug/peerCompletion" | ./json "$id1")
- s2comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8083/rest/debug/peerCompletion" | ./json "$id2")
- s3comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/debug/peerCompletion" | ./json "$id3")
+ s1comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8082/rest/debug/peerCompletion" | ./json "$id1")
+ s2comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8083/rest/debug/peerCompletion" | ./json "$id2")
+ s3comp=$(curl -HX-API-Key:abc123 -s "http://127.0.0.1:8081/rest/debug/peerCompletion" | ./json "$id3")
s1comp=${s1comp:-0}
s2comp=${s2comp:-0}
s3comp=${s3comp:-0}


@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package luhn generates and validates Luhn mod N check digits.
package luhn

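That doc comment is all this diff shows of the luhn package. For background, here is a self-contained sketch of plain Luhn mod 10 validation; it is independent of the package's actual API (which is not shown here), and the package generalizes the idea to mod N alphabets:

package main

import "fmt"

// luhnValid reports whether s (ASCII digits only) ends in a correct
// Luhn mod 10 check digit: every second digit from the right is
// doubled, digit values above 9 are reduced by 9, and the total
// must be 0 mod 10.
func luhnValid(s string) bool {
	sum := 0
	double := false
	for i := len(s) - 1; i >= 0; i-- {
		d := int(s[i] - '0')
		if d < 0 || d > 9 {
			return false
		}
		if double {
			d *= 2
			if d > 9 {
				d -= 9
			}
		}
		sum += d
		double = !double
	}
	return sum%10 == 0
}

func main() {
	fmt.Println(luhnValid("79927398713")) // true, the textbook example
	fmt.Println(luhnValid("79927398710")) // false
}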

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package luhn_test
import (


@@ -19,6 +19,7 @@ import (
"github.com/syncthing/syncthing/config"
"github.com/syncthing/syncthing/events"
"github.com/syncthing/syncthing/files"
"github.com/syncthing/syncthing/ignore"
"github.com/syncthing/syncthing/lamport"
"github.com/syncthing/syncthing/protocol"
"github.com/syncthing/syncthing/scanner"
@@ -50,12 +51,6 @@ func (s repoState) String() string {
}
}
- // Somewhat arbitrary amount of bytes that we choose to let represent the size
- // of an unsynchronized directory entry or a deleted file. We need it to be
- // larger than zero so that it's visible that there is some amount of bytes to
- // transfer to bring the systems into synchronization.
- const zeroEntrySize = 128
// How many files to send in each Index/IndexUpdate message.
const (
indexTargetSize = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
@@ -78,6 +73,7 @@ type Model struct {
repoNodes map[string][]protocol.NodeID // repo -> nodeIDs
nodeRepos map[protocol.NodeID][]string // nodeID -> repos
nodeStatRefs map[protocol.NodeID]*stats.NodeStatisticsReference // nodeID -> statsRef
repoIgnores map[string]ignore.Patterns // repo -> list of ignore patterns
rmut sync.RWMutex // protects the above
repoState map[string]repoState // repo -> state
@@ -89,9 +85,6 @@ type Model struct {
nodeVer map[protocol.NodeID]string
pmut sync.RWMutex // protects protoConn and rawConn
- sentLocalVer map[protocol.NodeID]map[string]uint64
- slMut sync.Mutex
addedRepo bool
started bool
}
@@ -117,12 +110,12 @@ func NewModel(indexDir string, cfg *config.Configuration, nodeName, clientName,
repoNodes: make(map[string][]protocol.NodeID),
nodeRepos: make(map[protocol.NodeID][]string),
nodeStatRefs: make(map[protocol.NodeID]*stats.NodeStatisticsReference),
repoIgnores: make(map[string]ignore.Patterns),
repoState: make(map[string]repoState),
repoStateChanged: make(map[string]time.Time),
protoConn: make(map[protocol.NodeID]protocol.Connection),
rawConn: make(map[protocol.NodeID]io.Closer),
nodeVer: make(map[protocol.NodeID]string),
- sentLocalVer: make(map[protocol.NodeID]map[string]uint64),
}
for _, node := range cfg.Nodes {
@@ -299,6 +292,9 @@ func (m *Model) LocalSize(repo string) (files, deleted int, bytes int64) {
defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok {
rf.WithHaveTruncated(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
if f.IsInvalid() {
return true
}
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
@@ -358,24 +354,32 @@ func (m *Model) Index(nodeID protocol.NodeID, repo string, fs []protocol.FileInf
return
}
- for i := range fs {
- lamport.Default.Tick(fs[i].Version)
- }
m.rmut.RLock()
- r, ok := m.repoFiles[repo]
+ files, ok := m.repoFiles[repo]
+ ignores, _ := m.repoIgnores[repo]
m.rmut.RUnlock()
- if ok {
- r.Replace(nodeID, fs)
- } else {
+ if !ok {
l.Fatalf("Index for nonexistant repo %q", repo)
}
+ for i := 0; i < len(fs); {
+ lamport.Default.Tick(fs[i].Version)
+ if ignores.Match(fs[i].Name) {
+ fs[i] = fs[len(fs)-1]
+ fs = fs[:len(fs)-1]
+ } else {
+ i++
+ }
+ }
+ files.Replace(nodeID, fs)
events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
"node": nodeID.String(),
"repo": repo,
"items": len(fs),
- "version": r.LocalVersion(nodeID),
+ "version": files.LocalVersion(nodeID),
})
}
@@ -391,24 +395,32 @@ func (m *Model) IndexUpdate(nodeID protocol.NodeID, repo string, fs []protocol.F
return
}
- for i := range fs {
- lamport.Default.Tick(fs[i].Version)
- }
m.rmut.RLock()
- r, ok := m.repoFiles[repo]
+ files, ok := m.repoFiles[repo]
+ ignores, _ := m.repoIgnores[repo]
m.rmut.RUnlock()
- if ok {
- r.Update(nodeID, fs)
- } else {
+ if !ok {
l.Fatalf("IndexUpdate for nonexistant repo %q", repo)
}
+ for i := 0; i < len(fs); {
+ lamport.Default.Tick(fs[i].Version)
+ if ignores.Match(fs[i].Name) {
+ fs[i] = fs[len(fs)-1]
+ fs = fs[:len(fs)-1]
+ } else {
+ i++
+ }
+ }
+ files.Update(nodeID, fs)
events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
"node": nodeID.String(),
"repo": repo,
"items": len(fs),
- "version": r.LocalVersion(nodeID),
+ "version": files.LocalVersion(nodeID),
})
}
@@ -582,7 +594,7 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
m.rmut.RLock()
for _, repo := range m.nodeRepos[nodeID] {
fs := m.repoFiles[repo]
- go sendIndexes(protoConn, repo, fs)
+ go sendIndexes(protoConn, repo, fs, m.repoIgnores[repo])
}
if statRef, ok := m.nodeStatRefs[nodeID]; ok {
statRef.WasSeen()
@@ -593,7 +605,7 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
m.pmut.Unlock()
}
- func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
+ func sendIndexes(conn protocol.Connection, repo string, fs *files.Set, ignores ignore.Patterns) {
nodeID := conn.ID()
name := conn.Name()
var err error
@@ -608,7 +620,7 @@ func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
}
}()
- minLocalVer, err := sendIndexTo(true, 0, conn, repo, fs)
+ minLocalVer, err := sendIndexTo(true, 0, conn, repo, fs, ignores)
for err == nil {
time.Sleep(5 * time.Second)
@@ -616,11 +628,11 @@ func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
continue
}
- minLocalVer, err = sendIndexTo(false, minLocalVer, conn, repo, fs)
+ minLocalVer, err = sendIndexTo(false, minLocalVer, conn, repo, fs, ignores)
}
}
- func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, repo string, fs *files.Set) (uint64, error) {
+ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, repo string, fs *files.Set, ignores ignore.Patterns) (uint64, error) {
nodeID := conn.ID()
name := conn.Name()
batch := make([]protocol.FileInfo, 0, indexBatchSize)
@@ -638,6 +650,10 @@ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, rep
maxLocalVer = f.LocalVersion
}
if ignores.Match(f.Name) {
return true
}
if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
if initial {
if err = conn.Index(repo, batch); err != nil {
@@ -791,10 +807,13 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
fs, ok := m.repoFiles[repo]
dir := m.repoCfgs[repo].Directory
ignores, _ := ignore.Load(filepath.Join(dir, ".stignore"))
m.repoIgnores[repo] = ignores
w := &scanner.Walker{
Dir: dir,
Sub: sub,
- IgnoreFile: ".stignore",
+ Ignores: ignores,
BlockSize: scanner.StandardBlockSize,
TempNamer: defTempNamer,
CurrentFiler: cFiler{m, repo},
@@ -837,15 +856,40 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
fs.WithHaveTruncated(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
if !strings.HasPrefix(f.Name, sub) {
// Return true so that we keep iterating, until we get to the part
// of the tree we are interested in. Then return false so we stop
// iterating when we've passed the end of the subtree.
return !seenPrefix
}
seenPrefix = true
if !protocol.IsDeleted(f.Flags) {
if f.IsInvalid() {
return true
}
if len(batch) == batchSize {
fs.Update(protocol.LocalNodeID, batch)
batch = batch[:0]
}
- if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
+ if ignores.Match(f.Name) {
+ // File has been ignored. Set invalid bit.
+ nf := protocol.FileInfo{
+ Name: f.Name,
+ Flags: f.Flags | protocol.FlagInvalid,
+ Modified: f.Modified,
+ Version: f.Version, // The file is still the same, so don't bump version
+ }
+ events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
+ "repo": repo,
+ "name": f.Name,
+ "modified": time.Unix(f.Modified, 0),
+ "flags": fmt.Sprintf("0%o", f.Flags),
+ "size": f.Size(),
+ })
+ batch = append(batch, nf)
+ } else if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
// File has been deleted
nf := protocol.FileInfo{
Name: f.Name,
@@ -938,6 +982,7 @@ func (m *Model) Override(repo string) {
fs := m.repoFiles[repo]
m.rmut.RUnlock()
m.setState(repo, RepoScanning)
batch := make([]protocol.FileInfo, 0, indexBatchSize)
fs.WithNeed(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
need := fi.(protocol.FileInfo)
@@ -963,6 +1008,7 @@ func (m *Model) Override(repo string) {
if len(batch) > 0 {
fs.Update(protocol.LocalNodeID, batch)
}
m.setState(repo, RepoIdle)
}
// Version returns the change version for the given repository. This is

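Index and IndexUpdate above filter ignored entries with an unordered swap-with-last delete: a matching element is overwritten by the final element and the slice is shortened, avoiding a second allocation at the cost of ordering, which the index batch does not need. A standalone sketch of that idiom, with hypothetical names rather than code from this diff:

package main

import (
	"fmt"
	"strings"
)

// filterInPlace drops elements for which drop returns true by swapping
// each hit with the current last element; order is not preserved.
func filterInPlace(fs []string, drop func(string) bool) []string {
	for i := 0; i < len(fs); {
		if drop(fs[i]) {
			fs[i] = fs[len(fs)-1]
			fs = fs[:len(fs)-1]
		} else {
			i++
		}
	}
	return fs
}

func main() {
	files := []string{"a.txt", "b.tmp", "c.txt", "d.tmp"}
	files = filterInPlace(files, func(name string) bool {
		return strings.HasSuffix(name, ".tmp")
	})
	fmt.Println(files) // [a.txt c.txt], order not guaranteed
}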

@@ -1,119 +0,0 @@
package protocol
import (
"bytes"
"encoding/binary"
"errors"
"io"
"sync"
lz4 "github.com/bkaradzic/go-lz4"
)
const lz4Magic = 0x5e63b278
type lz4Writer struct {
wr io.Writer
mut sync.Mutex
buf []byte
}
func newLZ4Writer(w io.Writer) *lz4Writer {
return &lz4Writer{wr: w}
}
func (w *lz4Writer) Write(bs []byte) (int, error) {
w.mut.Lock()
defer w.mut.Unlock()
var err error
w.buf, err = lz4.Encode(w.buf[:cap(w.buf)], bs)
if err != nil {
return 0, err
}
var hdr [8]byte
binary.BigEndian.PutUint32(hdr[0:], lz4Magic)
binary.BigEndian.PutUint32(hdr[4:], uint32(len(w.buf)))
_, err = w.wr.Write(hdr[:])
if err != nil {
return 0, err
}
_, err = w.wr.Write(w.buf)
if err != nil {
return 0, err
}
if debug {
l.Debugf("lz4 write; %d / %d bytes", len(bs), 8+len(w.buf))
}
return len(bs), nil
}
type lz4Reader struct {
rd io.Reader
mut sync.Mutex
buf []byte
ebuf []byte
obuf *bytes.Buffer
ibytes uint64
obytes uint64
}
func newLZ4Reader(r io.Reader) *lz4Reader {
return &lz4Reader{rd: r}
}
func (r *lz4Reader) Read(bs []byte) (int, error) {
r.mut.Lock()
defer r.mut.Unlock()
if r.obuf == nil {
r.obuf = bytes.NewBuffer(nil)
}
if r.obuf.Len() == 0 {
if err := r.moreBits(); err != nil {
return 0, err
}
}
n, err := r.obuf.Read(bs)
if debug {
l.Debugf("lz4 read; %d bytes", n)
}
return n, err
}
func (r *lz4Reader) moreBits() error {
var hdr [8]byte
_, err := io.ReadFull(r.rd, hdr[:])
if binary.BigEndian.Uint32(hdr[0:]) != lz4Magic {
return errors.New("bad magic")
}
ln := int(binary.BigEndian.Uint32(hdr[4:]))
if len(r.buf) < ln {
r.buf = make([]byte, int(ln))
} else {
r.buf = r.buf[:ln]
}
_, err = io.ReadFull(r.rd, r.buf)
if err != nil {
return err
}
r.ebuf, err = lz4.Decode(r.ebuf[:cap(r.ebuf)], r.buf)
if err != nil {
return err
}
if debug {
l.Debugf("lz4 moreBits: %d / %d bytes", ln+8, len(r.ebuf))
}
_, err = r.obuf.Write(r.ebuf)
return err
}


@@ -1,60 +0,0 @@
package protocol
import (
"bytes"
"crypto/rand"
"io"
"testing"
)
var toWrite = [][]byte{
[]byte("this is a short byte string that should pass through somewhat compressed this is a short byte string that should pass through somewhat compressed this is a short byte string that should pass through somewhat compressed this is a short byte string that should pass through somewhat compressed this is a short byte string that should pass through somewhat compressed this is a short byte string that should pass through somewhat compressed"),
[]byte("this is another short byte string that should pass through uncompressed"),
[]byte{0, 1, 2, 3, 4, 5},
}
func TestLZ4Stream(t *testing.T) {
tb := make([]byte, 128*1024)
rand.Reader.Read(tb)
toWrite = append(toWrite, tb)
tb = make([]byte, 512*1024)
rand.Reader.Read(tb)
toWrite = append(toWrite, tb)
toWrite = append(toWrite, toWrite[0])
toWrite = append(toWrite, toWrite[1])
rd, wr := io.Pipe()
lz4r := newLZ4Reader(rd)
lz4w := newLZ4Writer(wr)
go func() {
for i := 0; i < 5; i++ {
for _, bs := range toWrite {
n, err := lz4w.Write(bs)
if err != nil {
t.Error(err)
}
if n != len(bs) {
t.Errorf("weird write length; %d != %d", n, len(bs))
}
}
}
}()
buf := make([]byte, 512*1024)
for i := 0; i < 5; i++ {
for _, bs := range toWrite {
n, err := lz4r.Read(buf)
if err != nil {
t.Fatal(err)
}
if n != len(bs) {
t.Errorf("Unexpected len %d != %d", n, len(bs))
}
if bytes.Compare(bs, buf[:n]) != 0 {
t.Error("Unexpected data")
}
}
}
}


@@ -39,6 +39,10 @@ func (f FileInfo) IsDeleted() bool {
return IsDeleted(f.Flags)
}
func (f FileInfo) IsInvalid() bool {
return IsInvalid(f.Flags)
}
// Used for unmarshalling a FileInfo structure but skipping the actual block list
type FileInfoTruncated struct {
Name string // max:8192
@@ -65,9 +69,14 @@ func (f FileInfoTruncated) IsDeleted() bool {
return IsDeleted(f.Flags)
}
func (f FileInfoTruncated) IsInvalid() bool {
return IsInvalid(f.Flags)
}
type FileIntf interface {
Size() int64
IsDeleted() bool
IsInvalid() bool
}
type BlockInfo struct {

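The FileIntf addition lets callers handle full and truncated FileInfo values uniformly, with validity carried as a flag bit (the scanner change above sets f.Flags | protocol.FlagInvalid). A toy sketch of the pattern; the interface shape mirrors the diff, but the flag value and types are made up for illustration:

package main

import "fmt"

const flagInvalid = 1 << 7 // illustrative bit only, not protocol's real constant

// fileIntf mirrors the shape of protocol.FileIntf above.
type fileIntf interface {
	Size() int64
	IsDeleted() bool
	IsInvalid() bool
}

// truncated stands in for a FileInfoTruncated-like record.
type truncated struct {
	flags uint32
}

func (t truncated) Size() int64     { return 0 }
func (t truncated) IsDeleted() bool { return false }
func (t truncated) IsInvalid() bool { return t.flags&flagInvalid != 0 }

func main() {
	var f fileIntf = truncated{flags: flagInvalid}
	fmt.Println(f.IsInvalid()) // true: such entries are skipped by LocalSize and the scanner
}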

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package protocol
import (


@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package protocol
import "testing"


@@ -5,7 +5,6 @@
package protocol
import (
"bufio"
"encoding/binary"
"encoding/hex"
"errors"
@@ -86,9 +85,7 @@ type rawConnection struct {
state int
cr *countingReader
cw *countingWriter
- wb *bufio.Writer
awaiting [4096]chan asyncResult
awaitingMut sync.Mutex


Some files were not shown because too many files have changed in this diff.