Mirror of https://github.com/syncthing/syncthing.git, synced 2026-01-04 11:59:12 -05:00
Compare commits
54 Commits
Commits in this comparison (SHA1 prefixes only):

ed747a2d3d, 3a8ee4ce2e, 5ac01a3af4, 46343f2f9e, 56ccb5b2ab, 9a946eed80, 9c6cb0f630, 1b066d6965,
54c3caad53, 9b5e8aaf83, 5143c09bcf, 2496185629, 34deb82aea, 8f72ae9da2, b753f01ac1, fd0a147ae6,
e94bd90782, ce4b897d0e, a7694029e2, 1e9110b763, 6f3fbbbe49, d346ec7bfe, 26a3613397, e6318bddf3,
514bb0beda, 41b1bd2f05, bf40dadf04, cb1678ebec, 0c1ac568b5, 0f9550c747, b13ae17a47, f762a12d18,
20d30a80be, 229b218203, 4b668aaca8, 8c7f1421c6, d90b2c1d52, 22f39be197, 2fa45436c2, cadbb6bbce,
2c89f04be7, 597011e3a9, 0d433b58ba, cde8ef56e5, 110816c7aa, fbb1e168f7, 23085eb5ae, 7344a6205f,
4b76ec40c0, 90101d0269, 7ac84c0660, 2090530bbb, b6cb7ddbaf, 3422d9335c
.gitignore (vendored): 2 changes

@@ -13,3 +13,5 @@ perfstats*.csv
 coverage.xml
 !gui/scripts/syncthing
 .DS_Store
+syncthing.md5
+syncthing.exe.md5
Godeps/Godeps.json (generated): 16 changes

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/syncthing/syncthing",
-	"GoVersion": "go1.4rc1",
+	"GoVersion": "go1.4",
 	"Packages": [
 		"./cmd/..."
 	],
@@ -31,7 +31,7 @@
 	},
 	{
 		"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-		"Rev": "97e257099d2ab9578151ba85e2641e2cd14d3ca8"
+		"Rev": "63c9e642efad852f49e20a6f90194cae112fd2ac"
 	},
 	{
 		"ImportPath": "github.com/syndtr/gosnappy/snappy",
@@ -51,23 +51,19 @@
 	},
 	{
 		"ImportPath": "golang.org/x/crypto/bcrypt",
-		"Comment": "null-236",
-		"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
+		"Rev": "731db29863ea7213d9556d0170afb38987f401d4"
 	},
 	{
 		"ImportPath": "golang.org/x/crypto/blowfish",
-		"Comment": "null-236",
-		"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
+		"Rev": "731db29863ea7213d9556d0170afb38987f401d4"
 	},
 	{
 		"ImportPath": "golang.org/x/text/transform",
-		"Comment": "null-112",
-		"Rev": "2f707e0ad64637ca1318279be7201f5ed19c4050"
+		"Rev": "985ee5acfaf1ff6712c7c99438752f8e09416ccb"
 	},
 	{
 		"ImportPath": "golang.org/x/text/unicode/norm",
-		"Comment": "null-112",
-		"Rev": "2f707e0ad64637ca1318279be7201f5ed19c4050"
+		"Rev": "985ee5acfaf1ff6712c7c99438752f8e09416ccb"
 	}
 ]
 }
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go (generated, vendored): 753 changes

@@ -8,152 +8,669 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// SetFunc is the function that will be called by Namespace.Get to create
|
||||
// a cache object, if charge is less than one than the cache object will
|
||||
// not be registered to cache tree, if value is nil then the cache object
|
||||
// will not be created.
|
||||
type SetFunc func() (charge int, value interface{})
|
||||
|
||||
// DelFin is the function that will be called as the result of a delete operation.
|
||||
// Exist == true is indication that the object is exist, and pending == true is
|
||||
// indication of deletion already happen but haven't done yet (wait for all handles
|
||||
// to be released). And exist == false means the object doesn't exist.
|
||||
type DelFin func(exist, pending bool)
|
||||
|
||||
// PurgeFin is the function that will be called as the result of a purge operation.
|
||||
type PurgeFin func(ns, key uint64)
|
||||
|
||||
// Cache is a cache tree. A cache instance must be goroutine-safe.
|
||||
type Cache interface {
|
||||
// SetCapacity sets cache tree capacity.
|
||||
SetCapacity(capacity int)
|
||||
|
||||
// Capacity returns cache tree capacity.
|
||||
// Cacher provides interface to implements a caching functionality.
|
||||
// An implementation must be goroutine-safe.
|
||||
type Cacher interface {
|
||||
// Capacity returns cache capacity.
|
||||
Capacity() int
|
||||
|
||||
// Used returns used cache tree capacity.
|
||||
Used() int
|
||||
// SetCapacity sets cache capacity.
|
||||
SetCapacity(capacity int)
|
||||
|
||||
// Size returns entire alive cache objects size.
|
||||
Size() int
|
||||
// Promote promotes the 'cache node'.
|
||||
Promote(n *Node)
|
||||
|
||||
// NumObjects returns number of alive objects.
|
||||
NumObjects() int
|
||||
// Ban evicts the 'cache node' and prevent subsequent 'promote'.
|
||||
Ban(n *Node)
|
||||
|
||||
// GetNamespace gets cache namespace with the given id.
|
||||
// GetNamespace is never return nil.
|
||||
GetNamespace(id uint64) Namespace
|
||||
// Evict evicts the 'cache node'.
|
||||
Evict(n *Node)
|
||||
|
||||
// PurgeNamespace purges cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Purge.
|
||||
PurgeNamespace(id uint64, fin PurgeFin)
|
||||
// EvictNS evicts 'cache node' with the given namespace.
|
||||
EvictNS(ns uint64)
|
||||
|
||||
// ZapNamespace detaches cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Zap.
|
||||
ZapNamespace(id uint64)
|
||||
// EvictAll evicts all 'cache node'.
|
||||
EvictAll()
|
||||
|
||||
// Purge purges all cache namespace from this cache tree.
|
||||
// This is behave the same as calling Namespace.Purge method on all cache namespace.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches all cache namespace from this cache tree.
|
||||
// This is behave the same as calling Namespace.Zap method on all cache namespace.
|
||||
Zap()
|
||||
// Close closes the 'cache tree'
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
|
||||
type Namespace interface {
|
||||
// Get gets cache object with the given key.
|
||||
// If cache object is not found and setf is not nil, Get will atomically creates
|
||||
// the cache object by calling setf. Otherwise Get will returns nil.
|
||||
//
|
||||
// The returned cache handle should be released after use by calling Release
|
||||
// method.
|
||||
Get(key uint64, setf SetFunc) Handle
|
||||
// Value is a 'cacheable object'. It may implements util.Releaser, if
|
||||
// so the the Release method will be called once object is released.
|
||||
type Value interface{}
|
||||
|
||||
// Delete removes cache object with the given key from cache tree.
|
||||
// A deleted cache object will be released as soon as all of its handles have
|
||||
// been released.
|
||||
// Delete only happen once, subsequent delete will consider cache object doesn't
|
||||
// exist, even if the cache object ins't released yet.
|
||||
//
|
||||
// If not nil, fin will be called if the cache object doesn't exist or when
|
||||
// finally be released.
|
||||
//
|
||||
// Delete returns true if such cache object exist and never been deleted.
|
||||
Delete(key uint64, fin DelFin) bool
|
||||
|
||||
// Purge removes all cache objects within this namespace from cache tree.
|
||||
// This is the same as doing delete on all cache objects.
|
||||
//
|
||||
// If not nil, fin will be called on all cache objects when its finally be
|
||||
// released.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches namespace from cache tree and release all its cache objects.
|
||||
// A zapped namespace can never be filled again.
|
||||
// Calling Get on zapped namespace will always return nil.
|
||||
Zap()
|
||||
type CacheGetter struct {
|
||||
Cache *Cache
|
||||
NS uint64
|
||||
}
|
||||
|
||||
// Handle is a cache handle.
|
||||
type Handle interface {
|
||||
// Release releases this cache handle. This method can be safely called mutiple
|
||||
// times.
|
||||
Release()
|
||||
|
||||
// Value returns value of this cache handle.
|
||||
// Value will returns nil after this cache handle have be released.
|
||||
Value() interface{}
|
||||
func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
|
||||
return g.Cache.Get(g.NS, key, setFunc)
|
||||
}
|
||||
|
||||
// The hash tables implementation is based on:
|
||||
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
|
||||
|
||||
const (
|
||||
DelNotExist = iota
|
||||
DelExist
|
||||
DelPendig
|
||||
mInitialSize = 1 << 4
|
||||
mOverflowThreshold = 1 << 5
|
||||
mOverflowGrowThreshold = 1 << 7
|
||||
)
|
||||
|
||||
// Namespace state.
|
||||
type nsState int
|
||||
|
||||
const (
|
||||
nsEffective nsState = iota
|
||||
nsZapped
|
||||
)
|
||||
|
||||
// Node state.
|
||||
type nodeState int
|
||||
|
||||
const (
|
||||
nodeZero nodeState = iota
|
||||
nodeEffective
|
||||
nodeEvicted
|
||||
nodeDeleted
|
||||
)
|
||||
|
||||
// Fake handle.
|
||||
type fakeHandle struct {
|
||||
value interface{}
|
||||
fin func()
|
||||
once uint32
|
||||
type mBucket struct {
|
||||
mu sync.Mutex
|
||||
node []*Node
|
||||
frozen bool
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.value
|
||||
func (b *mBucket) freeze() []*Node {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if !b.frozen {
|
||||
b.frozen = true
|
||||
}
|
||||
return b.node
|
||||
}
|
||||
|
||||
func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
|
||||
b.mu.Lock()
|
||||
|
||||
if b.frozen {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Scan the node.
|
||||
for _, n := range b.node {
|
||||
if n.hash == hash && n.ns == ns && n.key == key {
|
||||
atomic.AddInt32(&n.ref, 1)
|
||||
b.mu.Unlock()
|
||||
return true, false, n
|
||||
}
|
||||
}
|
||||
|
||||
// Get only.
|
||||
if noset {
|
||||
b.mu.Unlock()
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
// Create node.
|
||||
n = &Node{
|
||||
r: r,
|
||||
hash: hash,
|
||||
ns: ns,
|
||||
key: key,
|
||||
ref: 1,
|
||||
}
|
||||
// Add node to bucket.
|
||||
b.node = append(b.node, n)
|
||||
bLen := len(b.node)
|
||||
b.mu.Unlock()
|
||||
|
||||
// Update counter.
|
||||
grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
|
||||
if bLen > mOverflowThreshold {
|
||||
grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
|
||||
}
|
||||
|
||||
// Grow.
|
||||
if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
|
||||
nhLen := len(h.buckets) << 1
|
||||
nh := &mNode{
|
||||
buckets: make([]unsafe.Pointer, nhLen),
|
||||
mask: uint32(nhLen) - 1,
|
||||
pred: unsafe.Pointer(h),
|
||||
growThreshold: int32(nhLen * mOverflowThreshold),
|
||||
shrinkThreshold: int32(nhLen >> 1),
|
||||
}
|
||||
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
|
||||
if !ok {
|
||||
panic("BUG: failed swapping head")
|
||||
}
|
||||
go nh.initBuckets()
|
||||
}
|
||||
|
||||
return true, true, n
|
||||
}
|
||||
|
||||
func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
|
||||
b.mu.Lock()
|
||||
|
||||
if b.frozen {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Scan the node.
|
||||
var (
|
||||
n *Node
|
||||
bLen int
|
||||
)
|
||||
for i := range b.node {
|
||||
n = b.node[i]
|
||||
if n.ns == ns && n.key == key {
|
||||
if atomic.LoadInt32(&n.ref) == 0 {
|
||||
deleted = true
|
||||
|
||||
// Call releaser.
|
||||
if n.value != nil {
|
||||
if r, ok := n.value.(util.Releaser); ok {
|
||||
r.Release()
|
||||
}
|
||||
n.value = nil
|
||||
}
|
||||
|
||||
// Remove node from bucket.
|
||||
b.node = append(b.node[:i], b.node[i+1:]...)
|
||||
bLen = len(b.node)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
if deleted {
|
||||
// Call OnDel.
|
||||
for _, f := range n.onDel {
|
||||
f()
|
||||
}
|
||||
|
||||
// Update counter.
|
||||
atomic.AddInt32(&r.size, int32(n.size)*-1)
|
||||
shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
|
||||
if bLen >= mOverflowThreshold {
|
||||
atomic.AddInt32(&h.overflow, -1)
|
||||
}
|
||||
|
||||
// Shrink.
|
||||
if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
|
||||
nhLen := len(h.buckets) >> 1
|
||||
nh := &mNode{
|
||||
buckets: make([]unsafe.Pointer, nhLen),
|
||||
mask: uint32(nhLen) - 1,
|
||||
pred: unsafe.Pointer(h),
|
||||
growThreshold: int32(nhLen * mOverflowThreshold),
|
||||
shrinkThreshold: int32(nhLen >> 1),
|
||||
}
|
||||
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
|
||||
if !ok {
|
||||
panic("BUG: failed swapping head")
|
||||
}
|
||||
go nh.initBuckets()
|
||||
}
|
||||
}
|
||||
|
||||
return true, deleted
|
||||
}
|
||||
|
||||
type mNode struct {
|
||||
buckets []unsafe.Pointer // []*mBucket
|
||||
mask uint32
|
||||
pred unsafe.Pointer // *mNode
|
||||
resizeInProgess int32
|
||||
|
||||
overflow int32
|
||||
growThreshold int32
|
||||
shrinkThreshold int32
|
||||
}
|
||||
|
||||
func (n *mNode) initBucket(i uint32) *mBucket {
|
||||
if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
p := (*mNode)(atomic.LoadPointer(&n.pred))
|
||||
if p != nil {
|
||||
var node []*Node
|
||||
if n.mask > p.mask {
|
||||
// Grow.
|
||||
pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
|
||||
if pb == nil {
|
||||
pb = p.initBucket(i & p.mask)
|
||||
}
|
||||
m := pb.freeze()
|
||||
// Split nodes.
|
||||
for _, x := range m {
|
||||
if x.hash&n.mask == i {
|
||||
node = append(node, x)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Shrink.
|
||||
pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
|
||||
if pb0 == nil {
|
||||
pb0 = p.initBucket(i)
|
||||
}
|
||||
pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
|
||||
if pb1 == nil {
|
||||
pb1 = p.initBucket(i + uint32(len(n.buckets)))
|
||||
}
|
||||
m0 := pb0.freeze()
|
||||
m1 := pb1.freeze()
|
||||
// Merge nodes.
|
||||
node = make([]*Node, 0, len(m0)+len(m1))
|
||||
node = append(node, m0...)
|
||||
node = append(node, m1...)
|
||||
}
|
||||
b := &mBucket{node: node}
|
||||
if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
|
||||
if len(node) > mOverflowThreshold {
|
||||
atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
|
||||
}
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
|
||||
}
|
||||
|
||||
func (n *mNode) initBuckets() {
|
||||
for i := range n.buckets {
|
||||
n.initBucket(uint32(i))
|
||||
}
|
||||
atomic.StorePointer(&n.pred, nil)
|
||||
}
|
||||
|
||||
// Cache is a 'cache map'.
|
||||
type Cache struct {
|
||||
mu sync.RWMutex
|
||||
mHead unsafe.Pointer // *mNode
|
||||
nodes int32
|
||||
size int32
|
||||
cacher Cacher
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewCache creates a new 'cache map'. The cacher is optional and
|
||||
// may be nil.
|
||||
func NewCache(cacher Cacher) *Cache {
|
||||
h := &mNode{
|
||||
buckets: make([]unsafe.Pointer, mInitialSize),
|
||||
mask: mInitialSize - 1,
|
||||
growThreshold: int32(mInitialSize * mOverflowThreshold),
|
||||
shrinkThreshold: 0,
|
||||
}
|
||||
for i := range h.buckets {
|
||||
h.buckets[i] = unsafe.Pointer(&mBucket{})
|
||||
}
|
||||
r := &Cache{
|
||||
mHead: unsafe.Pointer(h),
|
||||
cacher: cacher,
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
|
||||
h := (*mNode)(atomic.LoadPointer(&r.mHead))
|
||||
i := hash & h.mask
|
||||
b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
|
||||
if b == nil {
|
||||
b = h.initBucket(i)
|
||||
}
|
||||
return h, b
|
||||
}
|
||||
|
||||
func (r *Cache) delete(n *Node) bool {
|
||||
for {
|
||||
h, b := r.getBucket(n.hash)
|
||||
done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
|
||||
if done {
|
||||
return deleted
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Nodes returns number of 'cache node' in the map.
|
||||
func (r *Cache) Nodes() int {
|
||||
return int(atomic.LoadInt32(&r.nodes))
|
||||
}
|
||||
|
||||
// Size returns sums of 'cache node' size in the map.
|
||||
func (r *Cache) Size() int {
|
||||
return int(atomic.LoadInt32(&r.size))
|
||||
}
|
||||
|
||||
// Capacity returns cache capacity.
|
||||
func (r *Cache) Capacity() int {
|
||||
if r.cacher == nil {
|
||||
return 0
|
||||
}
|
||||
return r.cacher.Capacity()
|
||||
}
|
||||
|
||||
// SetCapacity sets cache capacity.
|
||||
func (r *Cache) SetCapacity(capacity int) {
|
||||
if r.cacher != nil {
|
||||
r.cacher.SetCapacity(capacity)
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets 'cache node' with the given namespace and key.
|
||||
// If cache node is not found and setFunc is not nil, Get will atomically creates
|
||||
// the 'cache node' by calling setFunc. Otherwise Get will returns nil.
|
||||
//
|
||||
// The returned 'cache handle' should be released after use by calling Release
|
||||
// method.
|
||||
func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
hash := murmur32(ns, key, 0xf00)
|
||||
for {
|
||||
h, b := r.getBucket(hash)
|
||||
done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
|
||||
if done {
|
||||
if n != nil {
|
||||
n.mu.Lock()
|
||||
if n.value == nil {
|
||||
if setFunc == nil {
|
||||
n.mu.Unlock()
|
||||
n.unref()
|
||||
return nil
|
||||
}
|
||||
|
||||
n.size, n.value = setFunc()
|
||||
if n.value == nil {
|
||||
n.size = 0
|
||||
n.mu.Unlock()
|
||||
n.unref()
|
||||
return nil
|
||||
}
|
||||
atomic.AddInt32(&r.size, int32(n.size))
|
||||
}
|
||||
n.mu.Unlock()
|
||||
if r.cacher != nil {
|
||||
r.cacher.Promote(n)
|
||||
}
|
||||
return &Handle{unsafe.Pointer(n)}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
// Delete removes and ban 'cache node' with the given namespace and key.
|
||||
// A banned 'cache node' will never inserted into the 'cache tree'. Ban
|
||||
// only attributed to the particular 'cache node', so when a 'cache node'
|
||||
// is recreated it will not be banned.
|
||||
//
|
||||
// If onDel is not nil, then it will be executed if such 'cache node'
|
||||
// doesn't exist or once the 'cache node' is released.
|
||||
//
|
||||
// Delete return true is such 'cache node' exist.
|
||||
func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return false
|
||||
}
|
||||
|
||||
hash := murmur32(ns, key, 0xf00)
|
||||
for {
|
||||
h, b := r.getBucket(hash)
|
||||
done, _, n := b.get(r, h, hash, ns, key, true)
|
||||
if done {
|
||||
if n != nil {
|
||||
if onDel != nil {
|
||||
n.mu.Lock()
|
||||
n.onDel = append(n.onDel, onDel)
|
||||
n.mu.Unlock()
|
||||
}
|
||||
if r.cacher != nil {
|
||||
r.cacher.Ban(n)
|
||||
}
|
||||
n.unref()
|
||||
return true
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if onDel != nil {
|
||||
onDel()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Evict evicts 'cache node' with the given namespace and key. This will
|
||||
// simply call Cacher.Evict.
|
||||
//
|
||||
// Evict return true is such 'cache node' exist.
|
||||
func (r *Cache) Evict(ns, key uint64) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return false
|
||||
}
|
||||
|
||||
hash := murmur32(ns, key, 0xf00)
|
||||
for {
|
||||
h, b := r.getBucket(hash)
|
||||
done, _, n := b.get(r, h, hash, ns, key, true)
|
||||
if done {
|
||||
if n != nil {
|
||||
if r.cacher != nil {
|
||||
r.cacher.Evict(n)
|
||||
}
|
||||
n.unref()
|
||||
return true
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// EvictNS evicts 'cache node' with the given namespace. This will
|
||||
// simply call Cacher.EvictNS.
|
||||
func (r *Cache) EvictNS(ns uint64) {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return
|
||||
}
|
||||
if h.fin != nil {
|
||||
h.fin()
|
||||
h.fin = nil
|
||||
|
||||
if r.cacher != nil {
|
||||
r.cacher.EvictNS(ns)
|
||||
}
|
||||
}
|
||||
|
||||
// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
|
||||
func (r *Cache) EvictAll() {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return
|
||||
}
|
||||
|
||||
if r.cacher != nil {
|
||||
r.cacher.EvictAll()
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the 'cache map' and releases all 'cache node'.
|
||||
func (r *Cache) Close() error {
|
||||
r.mu.Lock()
|
||||
if !r.closed {
|
||||
r.closed = true
|
||||
|
||||
if r.cacher != nil {
|
||||
if err := r.cacher.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
h := (*mNode)(r.mHead)
|
||||
h.initBuckets()
|
||||
|
||||
for i := range h.buckets {
|
||||
b := (*mBucket)(h.buckets[i])
|
||||
for _, n := range b.node {
|
||||
// Call releaser.
|
||||
if n.value != nil {
|
||||
if r, ok := n.value.(util.Releaser); ok {
|
||||
r.Release()
|
||||
}
|
||||
n.value = nil
|
||||
}
|
||||
|
||||
// Call OnDel.
|
||||
for _, f := range n.onDel {
|
||||
f()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
r.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Node is a 'cache node'.
|
||||
type Node struct {
|
||||
r *Cache
|
||||
|
||||
hash uint32
|
||||
ns, key uint64
|
||||
|
||||
mu sync.Mutex
|
||||
size int
|
||||
value Value
|
||||
|
||||
ref int32
|
||||
onDel []func()
|
||||
|
||||
CacheData unsafe.Pointer
|
||||
}
|
||||
|
||||
// NS returns this 'cache node' namespace.
|
||||
func (n *Node) NS() uint64 {
|
||||
return n.ns
|
||||
}
|
||||
|
||||
// Key returns this 'cache node' key.
|
||||
func (n *Node) Key() uint64 {
|
||||
return n.key
|
||||
}
|
||||
|
||||
// Size returns this 'cache node' size.
|
||||
func (n *Node) Size() int {
|
||||
return n.size
|
||||
}
|
||||
|
||||
// Value returns this 'cache node' value.
|
||||
func (n *Node) Value() Value {
|
||||
return n.value
|
||||
}
|
||||
|
||||
// Ref returns this 'cache node' ref counter.
|
||||
func (n *Node) Ref() int32 {
|
||||
return atomic.LoadInt32(&n.ref)
|
||||
}
|
||||
|
||||
// GetHandle returns an handle for this 'cache node'.
|
||||
func (n *Node) GetHandle() *Handle {
|
||||
if atomic.AddInt32(&n.ref, 1) <= 1 {
|
||||
panic("BUG: Node.GetHandle on zero ref")
|
||||
}
|
||||
return &Handle{unsafe.Pointer(n)}
|
||||
}
|
||||
|
||||
func (n *Node) unref() {
|
||||
if atomic.AddInt32(&n.ref, -1) == 0 {
|
||||
n.r.delete(n)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) unrefLocked() {
|
||||
if atomic.AddInt32(&n.ref, -1) == 0 {
|
||||
n.r.mu.RLock()
|
||||
if !n.r.closed {
|
||||
n.r.delete(n)
|
||||
}
|
||||
n.r.mu.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
type Handle struct {
|
||||
n unsafe.Pointer // *Node
|
||||
}
|
||||
|
||||
func (h *Handle) Value() Value {
|
||||
n := (*Node)(atomic.LoadPointer(&h.n))
|
||||
if n != nil {
|
||||
return n.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Handle) Release() {
|
||||
nPtr := atomic.LoadPointer(&h.n)
|
||||
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
|
||||
n := (*Node)(nPtr)
|
||||
n.unrefLocked()
|
||||
}
|
||||
}
|
||||
|
||||
func murmur32(ns, key uint64, seed uint32) uint32 {
|
||||
const (
|
||||
m = uint32(0x5bd1e995)
|
||||
r = 24
|
||||
)
|
||||
|
||||
k1 := uint32(ns >> 32)
|
||||
k2 := uint32(ns)
|
||||
k3 := uint32(key >> 32)
|
||||
k4 := uint32(key)
|
||||
|
||||
k1 *= m
|
||||
k1 ^= k1 >> r
|
||||
k1 *= m
|
||||
|
||||
k2 *= m
|
||||
k2 ^= k2 >> r
|
||||
k2 *= m
|
||||
|
||||
k3 *= m
|
||||
k3 ^= k3 >> r
|
||||
k3 *= m
|
||||
|
||||
k4 *= m
|
||||
k4 ^= k4 >> r
|
||||
k4 *= m
|
||||
|
||||
h := seed
|
||||
|
||||
h *= m
|
||||
h ^= k1
|
||||
h *= m
|
||||
h ^= k2
|
||||
h *= m
|
||||
h ^= k3
|
||||
h *= m
|
||||
h ^= k4
|
||||
|
||||
h ^= h >> 13
|
||||
h *= m
|
||||
h ^= h >> 15
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
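The hunk above replaces the old Namespace/Handle interfaces with a concrete Cache map plus a pluggable Cacher. A minimal usage sketch of the new API, assuming the vendored import path github.com/syndtr/goleveldb/leveldb/cache; the namespace, key, and value choices are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// A cache map backed by the LRU cacher from lru.go, capacity 10 size units.
	c := cache.NewCache(cache.NewLRU(10))
	defer c.Close()

	// Get returns an existing node or atomically creates one via the set
	// function; the returned *Handle pins the node until Release is called.
	h := c.Get(1, 42, func() (size int, value cache.Value) {
		return 1, "hello" // the size charges against the cacher's capacity
	})
	if h != nil {
		fmt.Println(h.Value()) // "hello"
		h.Release()
	}

	// A nil set function turns Get into a lookup-only operation.
	if miss := c.Get(1, 99, nil); miss == nil {
		fmt.Println("miss on namespace 1, key 99")
	}

	// Delete bans the node and runs the callback once it is finally released.
	c.Delete(1, 42, func() { fmt.Println("node 1/42 gone") })
}
```

Passing nil as the set function is how the read-only paths in the tests below probe for hits and misses without creating nodes.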
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go (generated, vendored): 907 changes
@@ -13,11 +13,26 @@ import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type int32o int32
|
||||
|
||||
func (o *int32o) acquire() {
|
||||
if atomic.AddInt32((*int32)(o), 1) != 1 {
|
||||
panic("BUG: invalid ref")
|
||||
}
|
||||
}
|
||||
|
||||
func (o *int32o) Release() {
|
||||
if atomic.AddInt32((*int32)(o), -1) != 0 {
|
||||
panic("BUG: invalid ref")
|
||||
}
|
||||
}
|
||||
|
||||
type releaserFunc struct {
|
||||
fn func()
|
||||
value interface{}
|
||||
value Value
|
||||
}
|
||||
|
||||
func (r releaserFunc) Release() {
|
||||
@@ -26,8 +41,8 @@ func (r releaserFunc) Release() {
|
||||
}
|
||||
}
|
||||
|
||||
func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle {
|
||||
return ns.Get(key, func() (int, interface{}) {
|
||||
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
|
||||
return c.Get(ns, key, func() (int, Value) {
|
||||
if relf != nil {
|
||||
return charge, releaserFunc{relf, value}
|
||||
} else {
|
||||
@@ -36,7 +51,246 @@ func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) H
|
||||
})
|
||||
}
|
||||
|
||||
func TestCache_HitMiss(t *testing.T) {
|
||||
func TestCacheMap(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
nsx := []struct {
|
||||
nobjects, nhandles, concurrent, repeat int
|
||||
}{
|
||||
{10000, 400, 50, 3},
|
||||
{100000, 1000, 100, 10},
|
||||
}
|
||||
|
||||
var (
|
||||
objects [][]int32o
|
||||
handles [][]unsafe.Pointer
|
||||
)
|
||||
|
||||
for _, x := range nsx {
|
||||
objects = append(objects, make([]int32o, x.nobjects))
|
||||
handles = append(handles, make([]unsafe.Pointer, x.nhandles))
|
||||
}
|
||||
|
||||
c := NewCache(nil)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
var done int32
|
||||
|
||||
for ns, x := range nsx {
|
||||
for i := 0; i < x.concurrent; i++ {
|
||||
wg.Add(1)
|
||||
go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for j := len(objects) * repeat; j >= 0; j-- {
|
||||
key := uint64(r.Intn(len(objects)))
|
||||
h := c.Get(uint64(ns), key, func() (int, Value) {
|
||||
o := &objects[key]
|
||||
o.acquire()
|
||||
return 1, o
|
||||
})
|
||||
if v := h.Value().(*int32o); v != &objects[key] {
|
||||
t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
|
||||
}
|
||||
if objects[key] != 1 {
|
||||
t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
|
||||
}
|
||||
if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}(ns, i, x.repeat, objects[ns], handles[ns])
|
||||
}
|
||||
|
||||
go func(handles []unsafe.Pointer) {
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for atomic.LoadInt32(&done) == 0 {
|
||||
i := r.Intn(len(handles))
|
||||
h := (*Handle)(atomic.LoadPointer(&handles[i]))
|
||||
if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
|
||||
h.Release()
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
}(handles[ns])
|
||||
}
|
||||
|
||||
go func() {
|
||||
handles := make([]*Handle, 100000)
|
||||
for atomic.LoadInt32(&done) == 0 {
|
||||
for i := range handles {
|
||||
handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
|
||||
return 1, 1
|
||||
})
|
||||
}
|
||||
for _, h := range handles {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
atomic.StoreInt32(&done, 1)
|
||||
|
||||
for _, handles0 := range handles {
|
||||
for i := range handles0 {
|
||||
h := (*Handle)(atomic.LoadPointer(&handles0[i]))
|
||||
if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for ns, objects0 := range objects {
|
||||
for i, o := range objects0 {
|
||||
if o != 0 {
|
||||
t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMap_NodesAndSize(t *testing.T) {
|
||||
c := NewCache(nil)
|
||||
if c.Nodes() != 0 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
|
||||
}
|
||||
if c.Size() != 0 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
|
||||
}
|
||||
set(c, 0, 1, 1, 1, nil)
|
||||
set(c, 0, 2, 2, 2, nil)
|
||||
set(c, 1, 1, 3, 3, nil)
|
||||
set(c, 2, 1, 4, 1, nil)
|
||||
if c.Nodes() != 4 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
|
||||
}
|
||||
if c.Size() != 7 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Capacity(t *testing.T) {
|
||||
c := NewCache(NewLRU(10))
|
||||
if c.Capacity() != 10 {
|
||||
t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
|
||||
}
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 2, nil).Release()
|
||||
set(c, 1, 1, 3, 3, nil).Release()
|
||||
set(c, 2, 1, 4, 1, nil).Release()
|
||||
set(c, 2, 2, 5, 1, nil).Release()
|
||||
set(c, 2, 3, 6, 1, nil).Release()
|
||||
set(c, 2, 4, 7, 1, nil).Release()
|
||||
set(c, 2, 5, 8, 1, nil).Release()
|
||||
if c.Nodes() != 7 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
|
||||
}
|
||||
if c.Size() != 10 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
|
||||
}
|
||||
c.SetCapacity(9)
|
||||
if c.Capacity() != 9 {
|
||||
t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
|
||||
}
|
||||
if c.Nodes() != 6 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
|
||||
}
|
||||
if c.Size() != 8 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMap_NilValue(t *testing.T) {
|
||||
c := NewCache(NewLRU(10))
|
||||
h := c.Get(0, 0, func() (size int, value Value) {
|
||||
return 1, nil
|
||||
})
|
||||
if h != nil {
|
||||
t.Error("cache handle is non-nil")
|
||||
}
|
||||
if c.Nodes() != 0 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
|
||||
}
|
||||
if c.Size() != 0 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_GetLatency(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
const (
|
||||
concurrentSet = 30
|
||||
concurrentGet = 3
|
||||
duration = 3 * time.Second
|
||||
delay = 3 * time.Millisecond
|
||||
maxkey = 100000
|
||||
)
|
||||
|
||||
var (
|
||||
set, getHit, getAll int32
|
||||
getMaxLatency, getDuration int64
|
||||
)
|
||||
|
||||
c := NewCache(NewLRU(5000))
|
||||
wg := &sync.WaitGroup{}
|
||||
until := time.Now().Add(duration)
|
||||
for i := 0; i < concurrentSet; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for time.Now().Before(until) {
|
||||
c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
|
||||
time.Sleep(delay)
|
||||
atomic.AddInt32(&set, 1)
|
||||
return 1, 1
|
||||
}).Release()
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
for i := 0; i < concurrentGet; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for {
|
||||
mark := time.Now()
|
||||
if mark.Before(until) {
|
||||
h := c.Get(0, uint64(r.Intn(maxkey)), nil)
|
||||
latency := int64(time.Now().Sub(mark))
|
||||
m := atomic.LoadInt64(&getMaxLatency)
|
||||
if latency > m {
|
||||
atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
|
||||
}
|
||||
atomic.AddInt64(&getDuration, latency)
|
||||
if h != nil {
|
||||
atomic.AddInt32(&getHit, 1)
|
||||
h.Release()
|
||||
}
|
||||
atomic.AddInt32(&getAll, 1)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
|
||||
t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
|
||||
set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
|
||||
|
||||
if getAvglatency > delay/3 {
|
||||
t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_HitMiss(t *testing.T) {
|
||||
cases := []struct {
|
||||
key uint64
|
||||
value string
|
||||
@@ -54,14 +308,13 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
}
|
||||
|
||||
setfin := 0
|
||||
c := NewLRUCache(1000)
|
||||
ns := c.GetNamespace(0)
|
||||
c := NewCache(NewLRU(1000))
|
||||
for i, x := range cases {
|
||||
set(ns, x.key, x.value, len(x.value), func() {
|
||||
set(c, 0, x.key, x.value, len(x.value), func() {
|
||||
setfin++
|
||||
}).Release()
|
||||
for j, y := range cases {
|
||||
h := ns.Get(y.key, nil)
|
||||
h := c.Get(0, y.key, nil)
|
||||
if j <= i {
|
||||
// should hit
|
||||
if h == nil {
|
||||
@@ -85,7 +338,7 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
|
||||
for i, x := range cases {
|
||||
finalizerOk := false
|
||||
ns.Delete(x.key, func(exist, pending bool) {
|
||||
c.Delete(0, x.key, func() {
|
||||
finalizerOk = true
|
||||
})
|
||||
|
||||
@@ -94,7 +347,7 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
}
|
||||
|
||||
for j, y := range cases {
|
||||
h := ns.Get(y.key, nil)
|
||||
h := c.Get(0, y.key, nil)
|
||||
if j > i {
|
||||
// should hit
|
||||
if h == nil {
|
||||
@@ -122,20 +375,19 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLRUCache_Eviction(t *testing.T) {
|
||||
c := NewLRUCache(12)
|
||||
ns := c.GetNamespace(0)
|
||||
o1 := set(ns, 1, 1, 1, nil)
|
||||
set(ns, 2, 2, 1, nil).Release()
|
||||
set(ns, 3, 3, 1, nil).Release()
|
||||
set(ns, 4, 4, 1, nil).Release()
|
||||
set(ns, 5, 5, 1, nil).Release()
|
||||
if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2
|
||||
c := NewCache(NewLRU(12))
|
||||
o1 := set(c, 0, 1, 1, 1, nil)
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
set(c, 0, 3, 3, 1, nil).Release()
|
||||
set(c, 0, 4, 4, 1, nil).Release()
|
||||
set(c, 0, 5, 5, 1, nil).Release()
|
||||
if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
|
||||
h.Release()
|
||||
}
|
||||
set(ns, 9, 9, 10, nil).Release() // 5,2,9
|
||||
set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
|
||||
|
||||
for _, key := range []uint64{9, 2, 5, 1} {
|
||||
h := ns.Get(key, nil)
|
||||
h := c.Get(0, key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
@@ -147,7 +399,7 @@ func TestLRUCache_Eviction(t *testing.T) {
|
||||
}
|
||||
o1.Release()
|
||||
for _, key := range []uint64{1, 2, 5} {
|
||||
h := ns.Get(key, nil)
|
||||
h := c.Get(0, key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
@@ -158,7 +410,7 @@ func TestLRUCache_Eviction(t *testing.T) {
|
||||
}
|
||||
}
|
||||
for _, key := range []uint64{3, 4, 9} {
|
||||
h := ns.Get(key, nil)
|
||||
h := c.Get(0, key, nil)
|
||||
if h != nil {
|
||||
t.Errorf("hit for key '%d'", key)
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
@@ -169,487 +421,150 @@ func TestLRUCache_Eviction(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_SetGet(t *testing.T) {
|
||||
c := NewLRUCache(13)
|
||||
ns := c.GetNamespace(0)
|
||||
for i := 0; i < 200; i++ {
|
||||
n := uint64(rand.Intn(99999) % 20)
|
||||
set(ns, n, n, 1, nil).Release()
|
||||
if h := ns.Get(n, nil); h != nil {
|
||||
if h.Value() == nil {
|
||||
t.Errorf("key '%d' contains nil value", n)
|
||||
func TestLRUCache_Evict(t *testing.T) {
|
||||
c := NewCache(NewLRU(6))
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
set(c, 1, 1, 4, 1, nil).Release()
|
||||
set(c, 1, 2, 5, 1, nil).Release()
|
||||
set(c, 2, 1, 6, 1, nil).Release()
|
||||
set(c, 2, 2, 7, 1, nil).Release()
|
||||
|
||||
for ns := 0; ns < 3; ns++ {
|
||||
for key := 1; key < 3; key++ {
|
||||
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
|
||||
h.Release()
|
||||
} else {
|
||||
if x := h.Value().(uint64); x != n {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x)
|
||||
}
|
||||
t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ok := c.Evict(0, 1); !ok {
|
||||
t.Error("first Cache.Evict on #0.1 return false")
|
||||
}
|
||||
if ok := c.Evict(0, 1); ok {
|
||||
t.Error("second Cache.Evict on #0.1 return true")
|
||||
}
|
||||
if h := c.Get(0, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
c.EvictNS(1)
|
||||
if h := c.Get(1, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
|
||||
}
|
||||
if h := c.Get(1, 2, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
c.EvictAll()
|
||||
for ns := 0; ns < 3; ns++ {
|
||||
for key := 1; key < 3; key++ {
|
||||
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
|
||||
t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Delete(t *testing.T) {
|
||||
delFuncCalled := 0
|
||||
delFunc := func() {
|
||||
delFuncCalled++
|
||||
}
|
||||
|
||||
c := NewCache(NewLRU(2))
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
|
||||
if ok := c.Delete(0, 1, delFunc); !ok {
|
||||
t.Error("Cache.Delete on #1 return false")
|
||||
}
|
||||
if h := c.Get(0, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
|
||||
}
|
||||
if ok := c.Delete(0, 1, delFunc); ok {
|
||||
t.Error("Cache.Delete on #1 return true")
|
||||
}
|
||||
|
||||
h2 := c.Get(0, 2, nil)
|
||||
if h2 == nil {
|
||||
t.Error("Cache.Get on #2 return nil")
|
||||
}
|
||||
if ok := c.Delete(0, 2, delFunc); !ok {
|
||||
t.Error("(1) Cache.Delete on #2 return false")
|
||||
}
|
||||
if ok := c.Delete(0, 2, delFunc); !ok {
|
||||
t.Error("(2) Cache.Delete on #2 return false")
|
||||
}
|
||||
|
||||
set(c, 0, 3, 3, 1, nil).Release()
|
||||
set(c, 0, 4, 4, 1, nil).Release()
|
||||
c.Get(0, 2, nil).Release()
|
||||
|
||||
for key := 2; key <= 4; key++ {
|
||||
if h := c.Get(0, uint64(key), nil); h != nil {
|
||||
h.Release()
|
||||
} else {
|
||||
t.Errorf("key '%d' doesn't exist", n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Purge(t *testing.T) {
|
||||
c := NewLRUCache(3)
|
||||
ns1 := c.GetNamespace(0)
|
||||
o1 := set(ns1, 1, 1, 1, nil)
|
||||
o2 := set(ns1, 2, 2, 1, nil)
|
||||
ns1.Purge(nil)
|
||||
set(ns1, 3, 3, 1, nil).Release()
|
||||
for _, key := range []uint64{1, 2, 3} {
|
||||
h := ns1.Get(key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
o1.Release()
|
||||
o2.Release()
|
||||
for _, key := range []uint64{1, 2} {
|
||||
h := ns1.Get(key, nil)
|
||||
if h != nil {
|
||||
t.Errorf("hit for key '%d'", key)
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testingCacheObjectCounter struct {
|
||||
created uint
|
||||
released uint
|
||||
}
|
||||
|
||||
func (c *testingCacheObjectCounter) createOne() {
|
||||
c.created++
|
||||
}
|
||||
|
||||
func (c *testingCacheObjectCounter) releaseOne() {
|
||||
c.released++
|
||||
}
|
||||
|
||||
type testingCacheObject struct {
|
||||
t *testing.T
|
||||
cnt *testingCacheObjectCounter
|
||||
|
||||
ns, key uint64
|
||||
|
||||
releaseCalled bool
|
||||
}
|
||||
|
||||
func (x *testingCacheObject) Release() {
|
||||
if !x.releaseCalled {
|
||||
x.releaseCalled = true
|
||||
x.cnt.releaseOne()
|
||||
} else {
|
||||
x.t.Errorf("duplicate setfin NS#%d KEY#%d", x.ns, x.key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_ConcurrentSetGet(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
seed := time.Now().UnixNano()
|
||||
t.Logf("seed=%d", seed)
|
||||
|
||||
const (
|
||||
N = 2000000
|
||||
M = 4000
|
||||
C = 3
|
||||
)
|
||||
|
||||
var set, get uint32
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
c := NewLRUCache(M / 4)
|
||||
for ni := uint64(0); ni < C; ni++ {
|
||||
r0 := rand.New(rand.NewSource(seed + int64(ni)))
|
||||
r1 := rand.New(rand.NewSource(seed + int64(ni) + 1))
|
||||
ns := c.GetNamespace(ni)
|
||||
|
||||
wg.Add(2)
|
||||
go func(ns Namespace, r *rand.Rand) {
|
||||
for i := 0; i < N; i++ {
|
||||
x := uint64(r.Int63n(M))
|
||||
o := ns.Get(x, func() (int, interface{}) {
|
||||
atomic.AddUint32(&set, 1)
|
||||
return 1, x
|
||||
})
|
||||
if v := o.Value().(uint64); v != x {
|
||||
t.Errorf("#%d invalid value, got=%d", x, v)
|
||||
}
|
||||
o.Release()
|
||||
}
|
||||
wg.Done()
|
||||
}(ns, r0)
|
||||
go func(ns Namespace, r *rand.Rand) {
|
||||
for i := 0; i < N; i++ {
|
||||
x := uint64(r.Int63n(M))
|
||||
o := ns.Get(x, nil)
|
||||
if o != nil {
|
||||
atomic.AddUint32(&get, 1)
|
||||
if v := o.Value().(uint64); v != x {
|
||||
t.Errorf("#%d invalid value, got=%d", x, v)
|
||||
}
|
||||
o.Release()
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}(ns, r1)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
t.Logf("set=%d get=%d", set, get)
|
||||
}
|
||||
|
||||
func TestLRUCache_Finalizer(t *testing.T) {
|
||||
const (
|
||||
capacity = 100
|
||||
goroutines = 100
|
||||
iterations = 10000
|
||||
keymax = 8000
|
||||
)
|
||||
|
||||
cnt := &testingCacheObjectCounter{}
|
||||
|
||||
c := NewLRUCache(capacity)
|
||||
|
||||
type instance struct {
|
||||
seed int64
|
||||
rnd *rand.Rand
|
||||
nsid uint64
|
||||
ns Namespace
|
||||
effective int
|
||||
handles []Handle
|
||||
handlesMap map[uint64]int
|
||||
|
||||
delete bool
|
||||
purge bool
|
||||
zap bool
|
||||
wantDel int
|
||||
delfinCalled int
|
||||
delfinCalledAll int
|
||||
delfinCalledEff int
|
||||
purgefinCalled int
|
||||
}
|
||||
|
||||
instanceGet := func(p *instance, key uint64) {
|
||||
h := p.ns.Get(key, func() (charge int, value interface{}) {
|
||||
to := &testingCacheObject{
|
||||
t: t, cnt: cnt,
|
||||
ns: p.nsid,
|
||||
key: key,
|
||||
}
|
||||
p.effective++
|
||||
cnt.createOne()
|
||||
return 1, releaserFunc{func() {
|
||||
to.Release()
|
||||
p.effective--
|
||||
}, to}
|
||||
})
|
||||
p.handles = append(p.handles, h)
|
||||
p.handlesMap[key] = p.handlesMap[key] + 1
|
||||
}
|
||||
instanceRelease := func(p *instance, i int) {
|
||||
h := p.handles[i]
|
||||
key := h.Value().(releaserFunc).value.(*testingCacheObject).key
|
||||
if n := p.handlesMap[key]; n == 0 {
|
||||
t.Fatal("key ref == 0")
|
||||
} else if n > 1 {
|
||||
p.handlesMap[key] = n - 1
|
||||
} else {
|
||||
delete(p.handlesMap, key)
|
||||
}
|
||||
h.Release()
|
||||
p.handles = append(p.handles[:i], p.handles[i+1:]...)
|
||||
p.handles[len(p.handles) : len(p.handles)+1][0] = nil
|
||||
}
|
||||
|
||||
seed := time.Now().UnixNano()
|
||||
t.Logf("seed=%d", seed)
|
||||
|
||||
instances := make([]*instance, goroutines)
|
||||
for i := range instances {
|
||||
p := &instance{}
|
||||
p.handlesMap = make(map[uint64]int)
|
||||
p.seed = seed + int64(i)
|
||||
p.rnd = rand.New(rand.NewSource(p.seed))
|
||||
p.nsid = uint64(i)
|
||||
p.ns = c.GetNamespace(p.nsid)
|
||||
p.delete = i%6 == 0
|
||||
p.purge = i%8 == 0
|
||||
p.zap = i%12 == 0 || i%3 == 0
|
||||
instances[i] = p
|
||||
}
|
||||
|
||||
runr := rand.New(rand.NewSource(seed - 1))
|
||||
run := func(rnd *rand.Rand, x []*instance, init func(p *instance) bool, fn func(p *instance, i int) bool) {
|
||||
var (
|
||||
rx []*instance
|
||||
rn []int
|
||||
)
|
||||
if init == nil {
|
||||
rx = append([]*instance{}, x...)
|
||||
rn = make([]int, len(x))
|
||||
} else {
|
||||
for _, p := range x {
|
||||
if init(p) {
|
||||
rx = append(rx, p)
|
||||
rn = append(rn, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
for len(rx) > 0 {
|
||||
i := rand.Intn(len(rx))
|
||||
if fn(rx[i], rn[i]) {
|
||||
rn[i]++
|
||||
} else {
|
||||
rx = append(rx[:i], rx[i+1:]...)
|
||||
rn = append(rn[:i], rn[i+1:]...)
|
||||
}
|
||||
t.Errorf("Cache.Get on #%d return nil", key)
|
||||
}
|
||||
}
|
||||
|
||||
// Get and release.
|
||||
run(runr, instances, nil, func(p *instance, i int) bool {
|
||||
if i < iterations {
|
||||
if len(p.handles) == 0 || p.rnd.Int()%2 == 0 {
|
||||
instanceGet(p, uint64(p.rnd.Intn(keymax)))
|
||||
} else {
|
||||
instanceRelease(p, p.rnd.Intn(len(p.handles)))
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
h2.Release()
|
||||
if h := c.Get(0, 2, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
if delFuncCalled != 4 {
|
||||
t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Close(t *testing.T) {
|
||||
relFuncCalled := 0
|
||||
relFunc := func() {
|
||||
relFuncCalled++
|
||||
}
|
||||
delFuncCalled := 0
|
||||
delFunc := func() {
|
||||
delFuncCalled++
|
||||
}
|
||||
|
||||
c := NewCache(NewLRU(2))
|
||||
set(c, 0, 1, 1, 1, relFunc).Release()
|
||||
set(c, 0, 2, 2, 1, relFunc).Release()
|
||||
|
||||
h3 := set(c, 0, 3, 3, 1, relFunc)
|
||||
if h3 == nil {
|
||||
t.Error("Cache.Get on #3 return nil")
|
||||
}
|
||||
if ok := c.Delete(0, 3, delFunc); !ok {
|
||||
t.Error("Cache.Delete on #3 return false")
|
||||
}
|
||||
|
||||
c.Close()
|
||||
|
||||
if relFuncCalled != 3 {
|
||||
t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
|
||||
}
|
||||
if delFuncCalled != 1 {
|
||||
t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache(b *testing.B) {
|
||||
c := NewCache(NewLRU(10000))
|
||||
|
||||
b.SetParallelism(10)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for pb.Next() {
|
||||
key := uint64(r.Intn(1000000))
|
||||
c.Get(0, key, func() (int, Value) {
|
||||
return 1, key
|
||||
}).Release()
|
||||
}
|
||||
})
|
||||
|
||||
if used, cap := c.Used(), c.Capacity(); used > cap {
|
||||
t.Errorf("Used > capacity, used=%d cap=%d", used, cap)
|
||||
}
|
||||
|
||||
// Check effective objects.
|
||||
for i, p := range instances {
|
||||
if int(p.effective) < len(p.handlesMap) {
|
||||
t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap))
|
||||
}
|
||||
}
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// First delete.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
p.wantDel = p.effective
|
||||
return p.delete
|
||||
}, func(p *instance, i int) bool {
|
||||
key := uint64(i)
|
||||
if key < keymax {
|
||||
_, wantExist := p.handlesMap[key]
|
||||
gotExist := p.ns.Delete(key, func(exist, pending bool) {
|
||||
p.delfinCalledAll++
|
||||
if exist {
|
||||
p.delfinCalledEff++
|
||||
}
|
||||
})
|
||||
if !gotExist && wantExist {
|
||||
t.Errorf("delete on NS#%d KEY#%d not found", p.nsid, key)
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
// Second delete.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
p.delfinCalled = 0
|
||||
return p.delete
|
||||
}, func(p *instance, i int) bool {
|
||||
key := uint64(i)
|
||||
if key < keymax {
|
||||
gotExist := p.ns.Delete(key, func(exist, pending bool) {
|
||||
if exist && !pending {
|
||||
t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.nsid, key)
|
||||
}
|
||||
p.delfinCalled++
|
||||
})
|
||||
if gotExist {
|
||||
t.Errorf("delete on NS#%d KEY#%d found", p.nsid, key)
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
if p.delfinCalled != keymax {
|
||||
t.Errorf("(2) NS#%d not all delete fin called, diff=%d", p.nsid, keymax-p.delfinCalled)
|
||||
}
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
// Purge.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return p.purge
|
||||
}, func(p *instance, i int) bool {
|
||||
p.ns.Purge(func(ns, key uint64) {
|
||||
p.purgefinCalled++
|
||||
})
|
||||
return false
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// Release.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return !p.zap
|
||||
}, func(p *instance, i int) bool {
|
||||
if len(p.handles) > 0 {
|
||||
instanceRelease(p, len(p.handles)-1)
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// Zap.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return p.zap
|
||||
}, func(p *instance, i int) bool {
|
||||
p.ns.Zap()
|
||||
p.handles = nil
|
||||
p.handlesMap = nil
|
||||
return false
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used {
|
||||
t.Errorf("Invalid used value, want=%d got=%d", notrel, used)
|
||||
}
|
||||
|
||||
c.Purge(nil)
|
||||
|
||||
for _, p := range instances {
|
||||
if p.delete {
|
||||
if p.delfinCalledAll != keymax {
|
||||
t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.nsid, p.purge, p.zap, keymax-p.delfinCalledAll)
|
||||
}
|
||||
if p.delfinCalledEff != p.wantDel {
|
||||
t.Errorf("#%d not all effective delete fin called, diff=%d", p.nsid, p.wantDel-p.delfinCalledEff)
|
||||
}
|
||||
if p.purge && p.purgefinCalled > 0 {
|
||||
t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.nsid, p.delete, p.zap, p.purgefinCalled)
|
||||
}
|
||||
} else {
|
||||
if p.purge {
|
||||
if p.purgefinCalled != p.wantDel {
|
||||
t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.nsid, p.delete, p.zap, p.wantDel-p.purgefinCalled)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cnt.created != cnt.released {
|
||||
t.Errorf("Some cache object weren't released, created=%d released=%d", cnt.created, cnt.released)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Set(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Get(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
ns.Get(i, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Get2(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
ns.Get(i, func() (charge int, value interface{}) {
|
||||
return 0, nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Release(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
handles := make([]Handle, b.N)
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
handles[i] = set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for _, h := range handles {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_SetRelease(b *testing.B) {
|
||||
capacity := b.N / 100
|
||||
if capacity <= 0 {
|
||||
capacity = 10
|
||||
}
|
||||
c := NewLRUCache(capacity)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
|
||||
capacity := b.N / 100
|
||||
if capacity <= 0 {
|
||||
capacity = 10
|
||||
}
|
||||
c := NewLRUCache(capacity)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
|
||||
na := b.N / 2
|
||||
nb := b.N - na
|
||||
|
||||
for i := uint64(0); i < uint64(na); i++ {
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
|
||||
for i := uint64(0); i < uint64(nb); i++ {
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
}
|
||||
|
||||
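The rewritten tests wrap values in releaserFunc so they can observe when the cache finally drops an object: per the comment in cache.go above, a value that implements util.Releaser has its Release method called once its node is deleted, evicted with no outstanding handles, or the cache is closed. A small sketch of the same mechanism, assuming the vendored import path; countingValue and the released counter are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

// countingValue stands in for a cached object; because it has a Release
// method the cache treats it as a util.Releaser and calls Release when the
// node carrying it is finally dropped.
type countingValue struct {
	released *int
}

func (v *countingValue) Release() { *v.released = *v.released + 1 }

func main() {
	released := 0
	c := cache.NewCache(cache.NewLRU(1)) // capacity 1 forces evictions

	for i := 0; i < 3; i++ {
		h := c.Get(0, uint64(i), func() (int, cache.Value) {
			return 1, &countingValue{released: &released}
		})
		h.Release()
	}

	c.EvictAll() // drop whatever the LRU still retains
	c.Close()

	// Each value should have been released exactly once, so this should print 3.
	fmt.Println("released:", released)
}
```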
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go (generated, vendored, new file): 195 lines
@@ -0,0 +1,195 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type lruNode struct {
|
||||
n *Node
|
||||
h *Handle
|
||||
ban bool
|
||||
|
||||
next, prev *lruNode
|
||||
}
|
||||
|
||||
func (n *lruNode) insert(at *lruNode) {
|
||||
x := at.next
|
||||
at.next = n
|
||||
n.prev = at
|
||||
n.next = x
|
||||
x.prev = n
|
||||
}
|
||||
|
||||
func (n *lruNode) remove() {
|
||||
if n.prev != nil {
|
||||
n.prev.next = n.next
|
||||
n.next.prev = n.prev
|
||||
n.prev = nil
|
||||
n.next = nil
|
||||
} else {
|
||||
panic("BUG: removing removed node")
|
||||
}
|
||||
}
|
||||
|
||||
type lru struct {
|
||||
mu sync.Mutex
|
||||
capacity int
|
||||
used int
|
||||
recent lruNode
|
||||
}
|
||||
|
||||
func (r *lru) reset() {
|
||||
r.recent.next = &r.recent
|
||||
r.recent.prev = &r.recent
|
||||
r.used = 0
|
||||
}
|
||||
|
||||
func (r *lru) Capacity() int {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
return r.capacity
|
||||
}
|
||||
|
||||
func (r *lru) SetCapacity(capacity int) {
|
||||
var evicted []*lruNode
|
||||
|
||||
r.mu.Lock()
|
||||
r.capacity = capacity
|
||||
for r.used > r.capacity {
|
||||
rn := r.recent.prev
|
||||
if rn == nil {
|
||||
panic("BUG: invalid LRU used or capacity counter")
|
||||
}
|
||||
rn.remove()
|
||||
rn.n.CacheData = nil
|
||||
r.used -= rn.n.Size()
|
||||
evicted = append(evicted, rn)
|
||||
}
|
||||
r.mu.Unlock()
|
||||
|
||||
for _, rn := range evicted {
|
||||
rn.h.Release()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *lru) Promote(n *Node) {
|
||||
var evicted []*lruNode
|
||||
|
||||
r.mu.Lock()
|
||||
if n.CacheData == nil {
|
||||
if n.Size() <= r.capacity {
|
||||
rn := &lruNode{n: n, h: n.GetHandle()}
|
||||
rn.insert(&r.recent)
|
||||
n.CacheData = unsafe.Pointer(rn)
|
||||
r.used += n.Size()
|
||||
|
||||
for r.used > r.capacity {
|
||||
rn := r.recent.prev
|
||||
if rn == nil {
|
||||
panic("BUG: invalid LRU used or capacity counter")
|
||||
}
|
||||
rn.remove()
|
||||
rn.n.CacheData = nil
|
||||
r.used -= rn.n.Size()
|
||||
evicted = append(evicted, rn)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
rn := (*lruNode)(n.CacheData)
|
||||
if !rn.ban {
|
||||
rn.remove()
|
||||
rn.insert(&r.recent)
|
||||
}
|
||||
}
|
||||
r.mu.Unlock()
|
||||
|
||||
for _, rn := range evicted {
|
||||
rn.h.Release()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *lru) Ban(n *Node) {
|
||||
r.mu.Lock()
|
||||
if n.CacheData == nil {
|
||||
n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
|
||||
} else {
|
||||
rn := (*lruNode)(n.CacheData)
|
||||
if !rn.ban {
|
||||
rn.remove()
|
||||
rn.ban = true
|
||||
r.used -= rn.n.Size()
|
||||
r.mu.Unlock()
|
||||
|
||||
rn.h.Release()
|
||||
rn.h = nil
|
||||
return
|
||||
}
|
||||
}
|
||||
r.mu.Unlock()
|
||||
}
|
||||
|
||||
func (r *lru) Evict(n *Node) {
|
||||
r.mu.Lock()
|
||||
rn := (*lruNode)(n.CacheData)
|
||||
if rn == nil || rn.ban {
|
||||
r.mu.Unlock()
|
||||
return
|
||||
}
|
||||
n.CacheData = nil
|
||||
r.mu.Unlock()
|
||||
|
||||
rn.h.Release()
|
||||
}
|
||||
|
||||
func (r *lru) EvictNS(ns uint64) {
|
||||
var evicted []*lruNode
|
||||
|
||||
r.mu.Lock()
|
||||
for e := r.recent.prev; e != &r.recent; {
|
||||
rn := e
|
||||
e = e.prev
|
||||
if rn.n.NS() == ns {
|
||||
rn.remove()
|
||||
rn.n.CacheData = nil
|
||||
r.used -= rn.n.Size()
|
||||
evicted = append(evicted, rn)
|
||||
}
|
||||
}
|
||||
r.mu.Unlock()
|
||||
|
||||
for _, rn := range evicted {
|
||||
rn.h.Release()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *lru) EvictAll() {
|
||||
r.mu.Lock()
|
||||
back := r.recent.prev
|
||||
for rn := back; rn != &r.recent; rn = rn.prev {
|
||||
rn.n.CacheData = nil
|
||||
}
|
||||
r.reset()
|
||||
r.mu.Unlock()
|
||||
|
||||
for rn := back; rn != &r.recent; rn = rn.prev {
|
||||
rn.h.Release()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *lru) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewLRU create a new LRU-cache.
|
||||
func NewLRU(capacity int) Cacher {
|
||||
r := &lru{capacity: capacity}
|
||||
r.reset()
|
||||
return r
|
||||
}
|
||||
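The Cacher implementation above is consumed through the new generic Cache type exercised later in this diff (table.go, reader.go). As orientation, here is a minimal usage sketch; it is an assumption-level example built only from the calls visible in those hunks (NewCache, NewLRU, Get, Value, Release, EvictNS, Close), not from upstream documentation.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// Wrap an LRU cacher (capacity counted in "size" units) in the generic Cache.
	c := cache.NewCache(cache.NewLRU(16))
	defer c.Close()

	// Get returns an existing handle, or runs the setter to create the entry;
	// the returned size is what gets charged against the LRU capacity.
	h := c.Get(0, 42, func() (size int, value cache.Value) {
		return 1, "hello"
	})
	if h != nil {
		fmt.Println(h.Value()) // "hello"
		h.Release()            // handles must be released after use
	}

	// Drop everything cached under namespace 0, as table.go does per table file.
	c.EvictNS(0)
}
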
622 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go generated vendored
@@ -1,622 +0,0 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// The LLRB implementation were taken from https://github.com/petar/GoLLRB.
|
||||
// Which contains the following header:
|
||||
//
|
||||
// Copyright 2010 Petar Maymounkov. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// lruCache represent a LRU cache state.
|
||||
type lruCache struct {
|
||||
mu sync.Mutex
|
||||
recent lruNode
|
||||
table map[uint64]*lruNs
|
||||
capacity int
|
||||
used, size, alive int
|
||||
}
|
||||
|
||||
// NewLRUCache creates a new initialized LRU cache with the given capacity.
|
||||
func NewLRUCache(capacity int) Cache {
|
||||
c := &lruCache{
|
||||
table: make(map[uint64]*lruNs),
|
||||
capacity: capacity,
|
||||
}
|
||||
c.recent.rNext = &c.recent
|
||||
c.recent.rPrev = &c.recent
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *lruCache) Capacity() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.capacity
|
||||
}
|
||||
|
||||
func (c *lruCache) Used() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.used
|
||||
}
|
||||
|
||||
func (c *lruCache) Size() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.size
|
||||
}
|
||||
|
||||
func (c *lruCache) NumObjects() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.alive
|
||||
}
|
||||
|
||||
// SetCapacity set cache capacity.
|
||||
func (c *lruCache) SetCapacity(capacity int) {
|
||||
c.mu.Lock()
|
||||
c.capacity = capacity
|
||||
c.evict()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// GetNamespace return namespace object for given id.
|
||||
func (c *lruCache) GetNamespace(id uint64) Namespace {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if ns, ok := c.table[id]; ok {
|
||||
return ns
|
||||
}
|
||||
|
||||
ns := &lruNs{lru: c, id: id}
|
||||
c.table[id] = ns
|
||||
return ns
|
||||
}
|
||||
|
||||
func (c *lruCache) ZapNamespace(id uint64) {
|
||||
c.mu.Lock()
|
||||
if ns, exist := c.table[id]; exist {
|
||||
ns.zapNB()
|
||||
delete(c.table, id)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
|
||||
c.mu.Lock()
|
||||
if ns, exist := c.table[id]; exist {
|
||||
ns.purgeNB(fin)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Purge purge entire cache.
|
||||
func (c *lruCache) Purge(fin PurgeFin) {
|
||||
c.mu.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.purgeNB(fin)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) Zap() {
|
||||
c.mu.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.zapNB()
|
||||
}
|
||||
c.table = make(map[uint64]*lruNs)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) evict() {
|
||||
top := &c.recent
|
||||
for n := c.recent.rPrev; c.used > c.capacity && n != top; {
|
||||
if n.state != nodeEffective {
|
||||
panic("evicting non effective node")
|
||||
}
|
||||
n.state = nodeEvicted
|
||||
n.rRemove()
|
||||
n.derefNB()
|
||||
c.used -= n.charge
|
||||
n = c.recent.rPrev
|
||||
}
|
||||
}
|
||||
|
||||
type lruNs struct {
|
||||
lru *lruCache
|
||||
id uint64
|
||||
rbRoot *lruNode
|
||||
state nsState
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbGetOrCreateNode(h *lruNode, key uint64) (hn, n *lruNode) {
|
||||
if h == nil {
|
||||
n = &lruNode{ns: ns, key: key}
|
||||
return n, n
|
||||
}
|
||||
|
||||
if key < h.key {
|
||||
hn, n = ns.rbGetOrCreateNode(h.rbLeft, key)
|
||||
if hn != nil {
|
||||
h.rbLeft = hn
|
||||
} else {
|
||||
return nil, n
|
||||
}
|
||||
} else if key > h.key {
|
||||
hn, n = ns.rbGetOrCreateNode(h.rbRight, key)
|
||||
if hn != nil {
|
||||
h.rbRight = hn
|
||||
} else {
|
||||
return nil, n
|
||||
}
|
||||
} else {
|
||||
return nil, h
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbRight) && !rbIsRed(h.rbLeft) {
|
||||
h = rbRotLeft(h)
|
||||
}
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
|
||||
rbFlip(h)
|
||||
}
|
||||
return h, n
|
||||
}
|
||||
|
||||
func (ns *lruNs) getOrCreateNode(key uint64) *lruNode {
|
||||
hn, n := ns.rbGetOrCreateNode(ns.rbRoot, key)
|
||||
if hn != nil {
|
||||
ns.rbRoot = hn
|
||||
ns.rbRoot.rbBlack = true
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbGetNode(key uint64) *lruNode {
|
||||
h := ns.rbRoot
|
||||
for h != nil {
|
||||
switch {
|
||||
case key < h.key:
|
||||
h = h.rbLeft
|
||||
case key > h.key:
|
||||
h = h.rbRight
|
||||
default:
|
||||
return h
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ns *lruNs) getNode(key uint64) *lruNode {
|
||||
return ns.rbGetNode(key)
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbDeleteNode(h *lruNode, key uint64) *lruNode {
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if key < h.key {
|
||||
if h.rbLeft == nil { // key not present. Nothing to delete
|
||||
return h
|
||||
}
|
||||
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbMoveLeft(h)
|
||||
}
|
||||
h.rbLeft = ns.rbDeleteNode(h.rbLeft, key)
|
||||
} else {
|
||||
if rbIsRed(h.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
// If @key equals @h.key and no right children at @h
|
||||
if h.key == key && h.rbRight == nil {
|
||||
return nil
|
||||
}
|
||||
if h.rbRight != nil && !rbIsRed(h.rbRight) && !rbIsRed(h.rbRight.rbLeft) {
|
||||
h = rbMoveRight(h)
|
||||
}
|
||||
// If @key equals @h.key, and (from above) 'h.Right != nil'
|
||||
if h.key == key {
|
||||
var x *lruNode
|
||||
h.rbRight, x = rbDeleteMin(h.rbRight)
|
||||
if x == nil {
|
||||
panic("logic")
|
||||
}
|
||||
x.rbLeft, h.rbLeft = h.rbLeft, nil
|
||||
x.rbRight, h.rbRight = h.rbRight, nil
|
||||
x.rbBlack = h.rbBlack
|
||||
h = x
|
||||
} else { // Else, @key is bigger than @h.key
|
||||
h.rbRight = ns.rbDeleteNode(h.rbRight, key)
|
||||
}
|
||||
}
|
||||
|
||||
return rbFixup(h)
|
||||
}
|
||||
|
||||
func (ns *lruNs) deleteNode(key uint64) {
|
||||
ns.rbRoot = ns.rbDeleteNode(ns.rbRoot, key)
|
||||
if ns.rbRoot != nil {
|
||||
ns.rbRoot.rbBlack = true
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbIterateNodes(h *lruNode, pivot uint64, iter func(n *lruNode) bool) bool {
|
||||
if h == nil {
|
||||
return true
|
||||
}
|
||||
if h.key >= pivot {
|
||||
if !ns.rbIterateNodes(h.rbLeft, pivot, iter) {
|
||||
return false
|
||||
}
|
||||
if !iter(h) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return ns.rbIterateNodes(h.rbRight, pivot, iter)
|
||||
}
|
||||
|
||||
func (ns *lruNs) iterateNodes(iter func(n *lruNode) bool) {
|
||||
ns.rbIterateNodes(ns.rbRoot, 0, iter)
|
||||
}
|
||||
|
||||
func (ns *lruNs) Get(key uint64, setf SetFunc) Handle {
|
||||
ns.lru.mu.Lock()
|
||||
defer ns.lru.mu.Unlock()
|
||||
|
||||
if ns.state != nsEffective {
|
||||
return nil
|
||||
}
|
||||
|
||||
var n *lruNode
|
||||
if setf == nil {
|
||||
n = ns.getNode(key)
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
n = ns.getOrCreateNode(key)
|
||||
}
|
||||
switch n.state {
|
||||
case nodeZero:
|
||||
charge, value := setf()
|
||||
if value == nil {
|
||||
ns.deleteNode(key)
|
||||
return nil
|
||||
}
|
||||
if charge < 0 {
|
||||
charge = 0
|
||||
}
|
||||
|
||||
n.value = value
|
||||
n.charge = charge
|
||||
n.state = nodeEvicted
|
||||
|
||||
ns.lru.size += charge
|
||||
ns.lru.alive++
|
||||
|
||||
fallthrough
|
||||
case nodeEvicted:
|
||||
if n.charge == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Insert to recent list.
|
||||
n.state = nodeEffective
|
||||
n.ref++
|
||||
ns.lru.used += n.charge
|
||||
ns.lru.evict()
|
||||
|
||||
fallthrough
|
||||
case nodeEffective:
|
||||
// Bump to front.
|
||||
n.rRemove()
|
||||
n.rInsert(&ns.lru.recent)
|
||||
case nodeDeleted:
|
||||
// Do nothing.
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
n.ref++
|
||||
|
||||
return &lruHandle{node: n}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
|
||||
ns.lru.mu.Lock()
|
||||
defer ns.lru.mu.Unlock()
|
||||
|
||||
if ns.state != nsEffective {
|
||||
if fin != nil {
|
||||
fin(false, false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
n := ns.getNode(key)
|
||||
if n == nil {
|
||||
if fin != nil {
|
||||
fin(false, false)
|
||||
}
|
||||
return false
|
||||
|
||||
}
|
||||
|
||||
switch n.state {
|
||||
case nodeEffective:
|
||||
ns.lru.used -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.delfin = fin
|
||||
n.rRemove()
|
||||
n.derefNB()
|
||||
case nodeEvicted:
|
||||
n.state = nodeDeleted
|
||||
n.delfin = fin
|
||||
case nodeDeleted:
|
||||
if fin != nil {
|
||||
fin(true, true)
|
||||
}
|
||||
return false
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (ns *lruNs) purgeNB(fin PurgeFin) {
|
||||
if ns.state == nsEffective {
|
||||
var nodes []*lruNode
|
||||
ns.iterateNodes(func(n *lruNode) bool {
|
||||
nodes = append(nodes, n)
|
||||
return true
|
||||
})
|
||||
for _, n := range nodes {
|
||||
switch n.state {
|
||||
case nodeEffective:
|
||||
ns.lru.used -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.purgefin = fin
|
||||
n.rRemove()
|
||||
n.derefNB()
|
||||
case nodeEvicted:
|
||||
n.state = nodeDeleted
|
||||
n.purgefin = fin
|
||||
case nodeDeleted:
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Purge(fin PurgeFin) {
|
||||
ns.lru.mu.Lock()
|
||||
ns.purgeNB(fin)
|
||||
ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ns *lruNs) zapNB() {
|
||||
if ns.state == nsEffective {
|
||||
ns.state = nsZapped
|
||||
|
||||
ns.iterateNodes(func(n *lruNode) bool {
|
||||
if n.state == nodeEffective {
|
||||
ns.lru.used -= n.charge
|
||||
n.rRemove()
|
||||
}
|
||||
ns.lru.size -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.fin()
|
||||
|
||||
return true
|
||||
})
|
||||
ns.rbRoot = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Zap() {
|
||||
ns.lru.mu.Lock()
|
||||
ns.zapNB()
|
||||
delete(ns.lru.table, ns.id)
|
||||
ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
type lruNode struct {
|
||||
ns *lruNs
|
||||
|
||||
rNext, rPrev *lruNode
|
||||
rbLeft, rbRight *lruNode
|
||||
rbBlack bool
|
||||
|
||||
key uint64
|
||||
value interface{}
|
||||
charge int
|
||||
ref int
|
||||
state nodeState
|
||||
delfin DelFin
|
||||
purgefin PurgeFin
|
||||
}
|
||||
|
||||
func (n *lruNode) rInsert(at *lruNode) {
|
||||
x := at.rNext
|
||||
at.rNext = n
|
||||
n.rPrev = at
|
||||
n.rNext = x
|
||||
x.rPrev = n
|
||||
}
|
||||
|
||||
func (n *lruNode) rRemove() bool {
|
||||
if n.rPrev == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.rPrev.rNext = n.rNext
|
||||
n.rNext.rPrev = n.rPrev
|
||||
n.rPrev = nil
|
||||
n.rNext = nil
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lruNode) fin() {
|
||||
if r, ok := n.value.(util.Releaser); ok {
|
||||
r.Release()
|
||||
}
|
||||
if n.purgefin != nil {
|
||||
if n.delfin != nil {
|
||||
panic("conflicting delete and purge fin")
|
||||
}
|
||||
n.purgefin(n.ns.id, n.key)
|
||||
n.purgefin = nil
|
||||
} else if n.delfin != nil {
|
||||
n.delfin(true, false)
|
||||
n.delfin = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lruNode) derefNB() {
|
||||
n.ref--
|
||||
if n.ref == 0 {
|
||||
if n.ns.state == nsEffective {
|
||||
// Remove elemement.
|
||||
n.ns.deleteNode(n.key)
|
||||
n.ns.lru.size -= n.charge
|
||||
n.ns.lru.alive--
|
||||
n.fin()
|
||||
}
|
||||
n.value = nil
|
||||
} else if n.ref < 0 {
|
||||
panic("leveldb/cache: lruCache: negative node reference")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lruNode) deref() {
|
||||
n.ns.lru.mu.Lock()
|
||||
n.derefNB()
|
||||
n.ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
type lruHandle struct {
|
||||
node *lruNode
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (h *lruHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.node.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *lruHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
return
|
||||
}
|
||||
h.node.deref()
|
||||
h.node = nil
|
||||
}
|
||||
|
||||
func rbIsRed(h *lruNode) bool {
|
||||
if h == nil {
|
||||
return false
|
||||
}
|
||||
return !h.rbBlack
|
||||
}
|
||||
|
||||
func rbRotLeft(h *lruNode) *lruNode {
|
||||
x := h.rbRight
|
||||
if x.rbBlack {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.rbRight = x.rbLeft
|
||||
x.rbLeft = h
|
||||
x.rbBlack = h.rbBlack
|
||||
h.rbBlack = false
|
||||
return x
|
||||
}
|
||||
|
||||
func rbRotRight(h *lruNode) *lruNode {
|
||||
x := h.rbLeft
|
||||
if x.rbBlack {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.rbLeft = x.rbRight
|
||||
x.rbRight = h
|
||||
x.rbBlack = h.rbBlack
|
||||
h.rbBlack = false
|
||||
return x
|
||||
}
|
||||
|
||||
func rbFlip(h *lruNode) {
|
||||
h.rbBlack = !h.rbBlack
|
||||
h.rbLeft.rbBlack = !h.rbLeft.rbBlack
|
||||
h.rbRight.rbBlack = !h.rbRight.rbBlack
|
||||
}
|
||||
|
||||
func rbMoveLeft(h *lruNode) *lruNode {
|
||||
rbFlip(h)
|
||||
if rbIsRed(h.rbRight.rbLeft) {
|
||||
h.rbRight = rbRotRight(h.rbRight)
|
||||
h = rbRotLeft(h)
|
||||
rbFlip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func rbMoveRight(h *lruNode) *lruNode {
|
||||
rbFlip(h)
|
||||
if rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
rbFlip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func rbFixup(h *lruNode) *lruNode {
|
||||
if rbIsRed(h.rbRight) {
|
||||
h = rbRotLeft(h)
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
|
||||
rbFlip(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func rbDeleteMin(h *lruNode) (hn, n *lruNode) {
|
||||
if h == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if h.rbLeft == nil {
|
||||
return nil, h
|
||||
}
|
||||
|
||||
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbMoveLeft(h)
|
||||
}
|
||||
|
||||
h.rbLeft, n = rbDeleteMin(h.rbLeft)
|
||||
|
||||
return rbFixup(h), n
|
||||
}
|
||||
18 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go generated vendored
@@ -9,14 +9,12 @@ package leveldb
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/cache"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const ctValSize = 1000
|
||||
@@ -33,8 +31,8 @@ func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
|
||||
|
||||
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
|
||||
return newDbCorruptHarnessWopt(t, &opt.Options{
|
||||
BlockCache: cache.NewLRUCache(100),
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
BlockCacheCapacity: 100,
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -269,9 +267,9 @@ func TestCorruptDB_TableIndex(t *testing.T) {
|
||||
func TestCorruptDB_MissingManifest(t *testing.T) {
|
||||
rnd := rand.New(rand.NewSource(0x0badda7a))
|
||||
h := newDbCorruptHarnessWopt(t, &opt.Options{
|
||||
BlockCache: cache.NewLRUCache(100),
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
WriteBuffer: 1000 * 60,
|
||||
BlockCacheCapacity: 100,
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
WriteBuffer: 1000 * 60,
|
||||
})
|
||||
|
||||
h.build(1000)
|
||||
|
||||
4 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go generated vendored
@@ -823,8 +823,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
case p == "blockpool":
value = fmt.Sprintf("%v", db.s.tops.bpool)
case p == "cachedblock":
if bc := db.s.o.GetBlockCache(); bc != nil {
value = fmt.Sprintf("%d", bc.Size())
if db.s.tops.bcache != nil {
value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
} else {
value = "<nil>"
}

5 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go generated vendored
@@ -8,6 +8,7 @@ package leveldb
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -89,6 +90,10 @@ func (db *DB) newSnapshot() *Snapshot {
|
||||
return snap
|
||||
}
|
||||
|
||||
func (snap *Snapshot) String() string {
|
||||
return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
|
||||
}
|
||||
|
||||
// Get gets the value for the given key. It returns ErrNotFound if
|
||||
// the DB does not contains the key.
|
||||
//
|
||||
|
||||
14 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go generated vendored
@@ -1271,7 +1271,7 @@ func TestDB_DeletionMarkers2(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDB_CompactionTableOpenError(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1})
|
||||
h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
|
||||
defer h.close()
|
||||
|
||||
im := 10
|
||||
@@ -1629,8 +1629,8 @@ func TestDB_ManualCompaction(t *testing.T) {
|
||||
|
||||
func TestDB_BloomFilter(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
BlockCache: opt.NoCache,
|
||||
Filter: filter.NewBloomFilter(10),
|
||||
DisableBlockCache: true,
|
||||
Filter: filter.NewBloomFilter(10),
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
@@ -2066,8 +2066,8 @@ func TestDB_GetProperties(t *testing.T) {
|
||||
|
||||
func TestDB_GoleveldbIssue72and83(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 1 * opt.MiB,
|
||||
CachedOpenFiles: 3,
|
||||
WriteBuffer: 1 * opt.MiB,
|
||||
OpenFilesCacheCapacity: 3,
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
@@ -2200,7 +2200,7 @@ func TestDB_GoleveldbIssue72and83(t *testing.T) {
|
||||
func TestDB_TransientError(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 128 * opt.KiB,
|
||||
CachedOpenFiles: 3,
|
||||
OpenFilesCacheCapacity: 3,
|
||||
DisableCompactionBackoff: true,
|
||||
})
|
||||
defer h.close()
|
||||
@@ -2410,7 +2410,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
|
||||
CompactionTableSize: 43 * opt.KiB,
|
||||
CompactionExpandLimitFactor: 1,
|
||||
CompactionGPOverlapsFactor: 1,
|
||||
BlockCache: opt.NoCache,
|
||||
DisableBlockCache: true,
|
||||
}
|
||||
s, err := newSession(stor, o)
|
||||
if err != nil {
|
||||
|
||||
2 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go generated vendored
@@ -112,9 +112,9 @@ func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
db.writeDelay += time.Since(start)
db.writeDelayN++
} else if db.writeDelayN > 0 {
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
db.writeDelay = 0
db.writeDelayN = 0
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
}
return
}

16 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go generated vendored
@@ -17,14 +17,14 @@ import (
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Leveldb external", func() {
|
||||
o := &opt.Options{
|
||||
BlockCache: opt.NoCache,
|
||||
BlockRestartInterval: 5,
|
||||
BlockSize: 80,
|
||||
Compression: opt.NoCompression,
|
||||
CachedOpenFiles: -1,
|
||||
Strict: opt.StrictAll,
|
||||
WriteBuffer: 1000,
|
||||
CompactionTableSize: 2000,
|
||||
DisableBlockCache: true,
|
||||
BlockRestartInterval: 5,
|
||||
BlockSize: 80,
|
||||
Compression: opt.NoCompression,
|
||||
OpenFilesCacheCapacity: -1,
|
||||
Strict: opt.StrictAll,
|
||||
WriteBuffer: 1000,
|
||||
CompactionTableSize: 2000,
|
||||
}
|
||||
|
||||
Describe("write test", func() {
|
||||
|
||||
4 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go generated vendored
@@ -106,7 +106,7 @@ func (ik iKey) assert() {
panic("leveldb: nil iKey")
}
if len(ik) < 8 {
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", ik, len(ik)))
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
}
}

@@ -124,7 +124,7 @@ func (ik iKey) parseNum() (seq uint64, kt kType) {
num := ik.num()
seq, kt = uint64(num>>8), kType(num&0xff)
if kt > ktVal {
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", ik, len(ik), kt))
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
}
return
}

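The only change in key.go is that the panic messages format []byte(ik) rather than ik itself. The diff does not say why; one plausible reading (an assumption) is that %q on the named type would route through any String method the type defines, whereas the plain byte slice always prints the escaped raw bytes. The toy program below only demonstrates that general fmt behaviour; the type and method names in it are made up for illustration.

package main

import "fmt"

// key is a hypothetical named byte-slice type with its own String method,
// standing in for something like iKey; the names are illustrative only.
type key []byte

func (k key) String() string { return "pretty key" }

func main() {
	k := key{0x01, 0x02}

	fmt.Printf("%q\n", k)         // prints "pretty key": %q consults the Stringer
	fmt.Printf("%q\n", []byte(k)) // prints "\x01\x02": raw escaped bytes
}
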
131 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go generated vendored
@@ -20,8 +20,9 @@ const (
|
||||
GiB = MiB * 1024
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultBlockCacheSize = 8 * MiB
|
||||
var (
|
||||
DefaultBlockCacher = LRUCacher
|
||||
DefaultBlockCacheCapacity = 8 * MiB
|
||||
DefaultBlockRestartInterval = 16
|
||||
DefaultBlockSize = 4 * KiB
|
||||
DefaultCompactionExpandLimitFactor = 25
|
||||
@@ -33,7 +34,8 @@ const (
|
||||
DefaultCompactionTotalSize = 10 * MiB
|
||||
DefaultCompactionTotalSizeMultiplier = 10.0
|
||||
DefaultCompressionType = SnappyCompression
|
||||
DefaultCachedOpenFiles = 500
|
||||
DefaultOpenFilesCacher = LRUCacher
|
||||
DefaultOpenFilesCacheCapacity = 500
|
||||
DefaultMaxMemCompationLevel = 2
|
||||
DefaultNumLevel = 7
|
||||
DefaultWriteBuffer = 4 * MiB
|
||||
@@ -41,22 +43,33 @@ const (
|
||||
DefaultWriteL0SlowdownTrigger = 8
|
||||
)
|
||||
|
||||
type noCache struct{}
|
||||
// Cacher is a caching algorithm.
|
||||
type Cacher interface {
|
||||
New(capacity int) cache.Cacher
|
||||
}
|
||||
|
||||
func (noCache) SetCapacity(capacity int) {}
|
||||
func (noCache) Capacity() int { return 0 }
|
||||
func (noCache) Used() int { return 0 }
|
||||
func (noCache) Size() int { return 0 }
|
||||
func (noCache) NumObjects() int { return 0 }
|
||||
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
|
||||
func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {}
|
||||
func (noCache) ZapNamespace(id uint64) {}
|
||||
func (noCache) Purge(fin cache.PurgeFin) {}
|
||||
func (noCache) Zap() {}
|
||||
type CacherFunc struct {
|
||||
NewFunc func(capacity int) cache.Cacher
|
||||
}
|
||||
|
||||
var NoCache cache.Cache = noCache{}
|
||||
func (f *CacherFunc) New(capacity int) cache.Cacher {
|
||||
if f.NewFunc != nil {
|
||||
return f.NewFunc(capacity)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compression is the per-block compression algorithm to use.
|
||||
func noCacher(int) cache.Cacher { return nil }
|
||||
|
||||
var (
|
||||
// LRUCacher is the LRU-cache algorithm.
|
||||
LRUCacher = &CacherFunc{cache.NewLRU}
|
||||
|
||||
// NoCacher is the value to disable caching algorithm.
|
||||
NoCacher = &CacherFunc{}
|
||||
)
|
||||
|
||||
// Compression is the 'sorted table' block compression algorithm to use.
|
||||
type Compression uint
|
||||
|
||||
func (c Compression) String() string {
|
||||
@@ -133,16 +146,17 @@ type Options struct {
|
||||
// The default value is nil
|
||||
AltFilters []filter.Filter
|
||||
|
||||
// BlockCache provides per-block caching for LevelDB. Specify NoCache to
|
||||
// disable block caching.
|
||||
// BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
|
||||
// Specify NoCacher to disable caching algorithm.
|
||||
//
|
||||
// By default LevelDB will create LRU-cache with capacity of BlockCacheSize.
|
||||
BlockCache cache.Cache
|
||||
// The default value is LRUCacher.
|
||||
BlockCacher Cacher
|
||||
|
||||
// BlockCacheSize defines the capacity of the default 'block cache'.
|
||||
// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
|
||||
// Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher.
|
||||
//
|
||||
// The default value is 8MiB.
|
||||
BlockCacheSize int
|
||||
BlockCacheCapacity int
|
||||
|
||||
// BlockRestartInterval is the number of keys between restart points for
|
||||
// delta encoding of keys.
|
||||
@@ -156,13 +170,6 @@ type Options struct {
|
||||
// The default value is 4KiB.
|
||||
BlockSize int
|
||||
|
||||
// CachedOpenFiles defines number of open files to kept around when not
|
||||
// in-use, the counting includes still in-use files.
|
||||
// Set this to negative value to disable caching.
|
||||
//
|
||||
// The default value is 500.
|
||||
CachedOpenFiles int
|
||||
|
||||
// CompactionExpandLimitFactor limits compaction size after expanded.
|
||||
// This will be multiplied by table size limit at compaction target level.
|
||||
//
|
||||
@@ -237,11 +244,17 @@ type Options struct {
|
||||
// The default value uses the same ordering as bytes.Compare.
|
||||
Comparer comparer.Comparer
|
||||
|
||||
// Compression defines the per-block compression to use.
|
||||
// Compression defines the 'sorted table' block compression to use.
|
||||
//
|
||||
// The default value (DefaultCompression) uses snappy compression.
|
||||
Compression Compression
|
||||
|
||||
// DisableBlockCache allows disable use of cache.Cache functionality on
|
||||
// 'sorted table' block.
|
||||
//
|
||||
// The default value is false.
|
||||
DisableBlockCache bool
|
||||
|
||||
// DisableCompactionBackoff allows disable compaction retry backoff.
|
||||
//
|
||||
// The default value is false.
|
||||
@@ -288,6 +301,18 @@ type Options struct {
|
||||
// The default is 7.
|
||||
NumLevel int
|
||||
|
||||
// OpenFilesCacher provides cache algorithm for open files caching.
|
||||
// Specify NoCacher to disable caching algorithm.
|
||||
//
|
||||
// The default value is LRUCacher.
|
||||
OpenFilesCacher Cacher
|
||||
|
||||
// OpenFilesCacheCapacity defines the capacity of the open files caching.
|
||||
// Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher.
|
||||
//
|
||||
// The default value is 500.
|
||||
OpenFilesCacheCapacity int
|
||||
|
||||
// Strict defines the DB strict level.
|
||||
Strict Strict
|
||||
|
||||
@@ -320,18 +345,22 @@ func (o *Options) GetAltFilters() []filter.Filter {
|
||||
return o.AltFilters
|
||||
}
|
||||
|
||||
func (o *Options) GetBlockCache() cache.Cache {
|
||||
if o == nil {
|
||||
func (o *Options) GetBlockCacher() Cacher {
|
||||
if o == nil || o.BlockCacher == nil {
|
||||
return DefaultBlockCacher
|
||||
} else if o.BlockCacher == NoCacher {
|
||||
return nil
|
||||
}
|
||||
return o.BlockCache
|
||||
return o.BlockCacher
|
||||
}
|
||||
|
||||
func (o *Options) GetBlockCacheSize() int {
|
||||
if o == nil || o.BlockCacheSize <= 0 {
|
||||
return DefaultBlockCacheSize
|
||||
func (o *Options) GetBlockCacheCapacity() int {
|
||||
if o == nil || o.BlockCacheCapacity <= 0 {
|
||||
return DefaultBlockCacheCapacity
|
||||
} else if o.BlockCacheCapacity == -1 {
|
||||
return 0
|
||||
}
|
||||
return o.BlockCacheSize
|
||||
return o.BlockCacheCapacity
|
||||
}
|
||||
|
||||
func (o *Options) GetBlockRestartInterval() int {
|
||||
@@ -348,15 +377,6 @@ func (o *Options) GetBlockSize() int {
|
||||
return o.BlockSize
|
||||
}
|
||||
|
||||
func (o *Options) GetCachedOpenFiles() int {
|
||||
if o == nil || o.CachedOpenFiles == 0 {
|
||||
return DefaultCachedOpenFiles
|
||||
} else if o.CachedOpenFiles < 0 {
|
||||
return 0
|
||||
}
|
||||
return o.CachedOpenFiles
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionExpandLimit(level int) int {
|
||||
factor := DefaultCompactionExpandLimitFactor
|
||||
if o != nil && o.CompactionExpandLimitFactor > 0 {
|
||||
@@ -494,6 +514,25 @@ func (o *Options) GetNumLevel() int {
|
||||
return o.NumLevel
|
||||
}
|
||||
|
||||
func (o *Options) GetOpenFilesCacher() Cacher {
|
||||
if o == nil || o.OpenFilesCacher == nil {
|
||||
return DefaultOpenFilesCacher
|
||||
}
|
||||
if o.OpenFilesCacher == NoCacher {
|
||||
return nil
|
||||
}
|
||||
return o.OpenFilesCacher
|
||||
}
|
||||
|
||||
func (o *Options) GetOpenFilesCacheCapacity() int {
|
||||
if o == nil || o.OpenFilesCacheCapacity <= 0 {
|
||||
return DefaultOpenFilesCacheCapacity
|
||||
} else if o.OpenFilesCacheCapacity == -1 {
|
||||
return 0
|
||||
}
|
||||
return o.OpenFilesCacheCapacity
|
||||
}
|
||||
|
||||
func (o *Options) GetStrict(strict Strict) bool {
|
||||
if o == nil || o.Strict == 0 {
|
||||
return DefaultStrict&strict != 0
|
||||
|
||||
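The options hunks above rename the caching knobs: BlockCache, BlockCacheSize and CachedOpenFiles give way to BlockCacher, BlockCacheCapacity and OpenFilesCacheCapacity, with NoCacher and DisableBlockCache replacing opt.NoCache. Below is a small sketch of opening a database with the renamed fields, using only identifiers that appear in this diff; the path is a placeholder.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-db", &opt.Options{
		BlockCacheCapacity:     8 * opt.MiB, // 'sorted table' block cache, roughly in bytes
		OpenFilesCacheCapacity: 100,         // number of file handles kept open
		// BlockCacher: opt.NoCacher, // or DisableBlockCache: true, to turn block caching off
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
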
8 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go generated vendored
@@ -7,7 +7,6 @@
package leveldb

import (
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
)
@@ -32,13 +31,6 @@ func (s *session) setOptions(o *opt.Options) {
no.AltFilters[i] = &iFilter{filter}
}
}
// Block cache.
switch o.GetBlockCache() {
case nil:
no.BlockCache = cache.NewLRUCache(o.GetBlockCacheSize())
case opt.NoCache:
no.BlockCache = nil
}
// Comparer.
s.icmp = &iComparer{o.GetComparer()}
no.Comparer = s.icmp

5 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go generated vendored
@@ -73,7 +73,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
stCompPtrs: make([]iKey, o.GetNumLevel()),
}
s.setOptions(o)
s.tops = newTableOps(s, s.o.GetCachedOpenFiles())
s.tops = newTableOps(s)
s.setVersion(newVersion(s))
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
return
@@ -82,9 +82,6 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
// Close session.
func (s *session) close() {
s.tops.close()
if bc := s.o.GetBlockCache(); bc != nil {
bc.Purge(nil)
}
if s.manifest != nil {
s.manifest.Close()
}

2 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go generated vendored
@@ -221,7 +221,7 @@ func (fs *fileStorage) GetManifest() (f File, err error) {
fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
continue
}
if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil {
if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
continue
}

70 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go generated vendored
@@ -286,10 +286,10 @@ func (x *tFilesSortByNum) Less(i, j int) bool {
|
||||
|
||||
// Table operations.
|
||||
type tOps struct {
|
||||
s *session
|
||||
cache cache.Cache
|
||||
cacheNS cache.Namespace
|
||||
bpool *util.BufferPool
|
||||
s *session
|
||||
cache *cache.Cache
|
||||
bcache *cache.Cache
|
||||
bpool *util.BufferPool
|
||||
}
|
||||
|
||||
// Creates an empty table and returns table writer.
|
||||
@@ -338,26 +338,28 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
|
||||
|
||||
// Opens table. It returns a cache handle, which should
|
||||
// be released after use.
|
||||
func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
|
||||
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
|
||||
num := f.file.Num()
|
||||
ch = t.cacheNS.Get(num, func() (charge int, value interface{}) {
|
||||
ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
|
||||
var r storage.Reader
|
||||
r, err = f.file.Open()
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var bcacheNS cache.Namespace
|
||||
if bc := t.s.o.GetBlockCache(); bc != nil {
|
||||
bcacheNS = bc.GetNamespace(num)
|
||||
var bcache *cache.CacheGetter
|
||||
if t.bcache != nil {
|
||||
bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
|
||||
}
|
||||
|
||||
var tr *table.Reader
|
||||
tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcacheNS, t.bpool, t.s.o.Options)
|
||||
tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
|
||||
if err != nil {
|
||||
r.Close()
|
||||
return 0, nil
|
||||
}
|
||||
return 1, tr
|
||||
|
||||
})
|
||||
if ch == nil && err == nil {
|
||||
err = ErrClosed
|
||||
@@ -412,16 +414,14 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
|
||||
// no one use the the table.
|
||||
func (t *tOps) remove(f *tFile) {
|
||||
num := f.file.Num()
|
||||
t.cacheNS.Delete(num, func(exist, pending bool) {
|
||||
if !pending {
|
||||
if err := f.file.Remove(); err != nil {
|
||||
t.s.logf("table@remove removing @%d %q", num, err)
|
||||
} else {
|
||||
t.s.logf("table@remove removed @%d", num)
|
||||
}
|
||||
if bc := t.s.o.GetBlockCache(); bc != nil {
|
||||
bc.ZapNamespace(num)
|
||||
}
|
||||
t.cache.Delete(0, num, func() {
|
||||
if err := f.file.Remove(); err != nil {
|
||||
t.s.logf("table@remove removing @%d %q", num, err)
|
||||
} else {
|
||||
t.s.logf("table@remove removed @%d", num)
|
||||
}
|
||||
if t.bcache != nil {
|
||||
t.bcache.EvictNS(num)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -429,18 +429,34 @@ func (t *tOps) remove(f *tFile) {
|
||||
// Closes the table ops instance. It will close all tables,
|
||||
// regadless still used or not.
|
||||
func (t *tOps) close() {
|
||||
t.cache.Zap()
|
||||
t.bpool.Close()
|
||||
t.cache.Close()
|
||||
if t.bcache != nil {
|
||||
t.bcache.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// Creates new initialized table ops instance.
|
||||
func newTableOps(s *session, cacheCap int) *tOps {
|
||||
c := cache.NewLRUCache(cacheCap)
|
||||
func newTableOps(s *session) *tOps {
|
||||
var (
|
||||
cacher cache.Cacher
|
||||
bcache *cache.Cache
|
||||
)
|
||||
if s.o.GetOpenFilesCacheCapacity() > 0 {
|
||||
cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
|
||||
}
|
||||
if !s.o.DisableBlockCache {
|
||||
var bcacher cache.Cacher
|
||||
if s.o.GetBlockCacheCapacity() > 0 {
|
||||
bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
|
||||
}
|
||||
bcache = cache.NewCache(bcacher)
|
||||
}
|
||||
return &tOps{
|
||||
s: s,
|
||||
cache: c,
|
||||
cacheNS: c.GetNamespace(0),
|
||||
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
|
||||
s: s,
|
||||
cache: cache.NewCache(cacher),
|
||||
bcache: bcache,
|
||||
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
60 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go generated vendored
@@ -509,7 +509,7 @@ type Reader struct {
|
||||
mu sync.RWMutex
|
||||
fi *storage.FileInfo
|
||||
reader io.ReaderAt
|
||||
cache cache.Namespace
|
||||
cache *cache.CacheGetter
|
||||
err error
|
||||
bpool *util.BufferPool
|
||||
// Options
|
||||
@@ -613,18 +613,22 @@ func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error)
|
||||
|
||||
func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
|
||||
if r.cache != nil {
|
||||
var err error
|
||||
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
|
||||
if !fillCache {
|
||||
return 0, nil
|
||||
}
|
||||
var b *block
|
||||
b, err = r.readBlock(bh, verifyChecksum)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
var (
|
||||
err error
|
||||
ch *cache.Handle
|
||||
)
|
||||
if fillCache {
|
||||
ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
|
||||
var b *block
|
||||
b, err = r.readBlock(bh, verifyChecksum)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
} else {
|
||||
ch = r.cache.Get(bh.offset, nil)
|
||||
}
|
||||
if ch != nil {
|
||||
b, ok := ch.Value().(*block)
|
||||
if !ok {
|
||||
@@ -667,18 +671,22 @@ func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
|
||||
|
||||
func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
|
||||
if r.cache != nil {
|
||||
var err error
|
||||
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
|
||||
if !fillCache {
|
||||
return 0, nil
|
||||
}
|
||||
var b *filterBlock
|
||||
b, err = r.readFilterBlock(bh)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
var (
|
||||
err error
|
||||
ch *cache.Handle
|
||||
)
|
||||
if fillCache {
|
||||
ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
|
||||
var b *filterBlock
|
||||
b, err = r.readFilterBlock(bh)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
} else {
|
||||
ch = r.cache.Get(bh.offset, nil)
|
||||
}
|
||||
if ch != nil {
|
||||
b, ok := ch.Value().(*filterBlock)
|
||||
if !ok {
|
||||
@@ -980,7 +988,7 @@ func (r *Reader) Release() {
|
||||
// The fi, cache and bpool is optional and can be nil.
|
||||
//
|
||||
// The returned table reader instance is goroutine-safe.
|
||||
func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
|
||||
func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
|
||||
if f == nil {
|
||||
return nil, errors.New("leveldb/table: nil file")
|
||||
}
|
||||
|
||||
10 Godeps/_workspace/src/golang.org/x/text/transform/transform_test.go generated vendored
@@ -16,7 +16,7 @@ import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type lowerCaseASCII struct{ transform.NopResetter }
|
||||
type lowerCaseASCII struct{ NopResetter }
|
||||
|
||||
func (lowerCaseASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := len(src)
|
||||
@@ -34,7 +34,7 @@ func (lowerCaseASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, er
|
||||
|
||||
var errYouMentionedX = errors.New("you mentioned X")
|
||||
|
||||
type dontMentionX struct{ transform.NopResetter }
|
||||
type dontMentionX struct{ NopResetter }
|
||||
|
||||
func (dontMentionX) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := len(src)
|
||||
@@ -52,7 +52,7 @@ func (dontMentionX) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
|
||||
|
||||
// doublerAtEOF is a strange Transformer that transforms "this" to "tthhiiss",
|
||||
// but only if atEOF is true.
|
||||
type doublerAtEOF struct{ transform.NopResetter }
|
||||
type doublerAtEOF struct{ NopResetter }
|
||||
|
||||
func (doublerAtEOF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
if !atEOF {
|
||||
@@ -71,7 +71,7 @@ func (doublerAtEOF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
|
||||
// rleDecode and rleEncode implement a toy run-length encoding: "aabbbbbbbbbb"
|
||||
// is encoded as "2a10b". The decoding is assumed to not contain any numbers.
|
||||
|
||||
type rleDecode struct{ transform.NopResetter }
|
||||
type rleDecode struct{ NopResetter }
|
||||
|
||||
func (rleDecode) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
loop:
|
||||
@@ -104,7 +104,7 @@ loop:
|
||||
}
|
||||
|
||||
type rleEncode struct {
|
||||
transform.NopResetter
|
||||
NopResetter
|
||||
|
||||
// allowStutter means that "xxxxxxxx" can be encoded as "5x3x"
|
||||
// instead of always as "8x".
|
||||
|
||||
1 NICKS
@@ -1,6 +1,7 @@
# This file maps email addresses used in commits to nicks used the changelog.

AudriusButkevicius <audrius.butkevicius@gmail.com>
Cathryne <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
KayoticSully <kayoticsully@gmail.com>
Nutomic <me@nutomic.com>
Vilbrekin <vilbrekin@gmail.com>

18 README.md
@@ -11,7 +11,7 @@ This is the `syncthing` project. The following are the project goals:
collaborating devices. The protocol should be well defined, unambiguous,
easily understood, free to use, efficient, secure and language neutral.
This is the [Block Exchange
Protocol](https://github.com/syncthing/syncthing/blob/master/protocol/PROTOCOL.md).
Protocol](https://github.com/syncthing/protocol/blob/master/BEPv1.md).

2. Provide the reference implementation to demonstrate the usability of
said protocol. This is the `syncthing` utility. It is the hope that
@@ -25,7 +25,8 @@ for incompatible changes.
Getting Started
---------------

Take a look at the [getting started guide](http://discourse.syncthing.net/t/46).
Take a look at the [getting started
guide](https://github.com/syncthing/syncthing/wiki/Getting-Started).

There are a few examples for keeping syncthing running in the background
on your system in [the etc directory](https://github.com/syncthing/syncthing/blob/master/etc).
@@ -37,7 +38,7 @@ Building
--------

Building Syncthing from source is easy, and there's a
[guide](http://discourse.syncthing.net/t/44)
[guide](https://github.com/syncthing/syncthing/wiki/Building).
that describes it for both Unix and Windows.

Signed Releases
@@ -52,15 +53,8 @@ Documentation
=============

The [syncthing
documentation](http://discourse.syncthing.net/category/documentation) is
on the discourse site.

License
=======

All documentation and protocol specifications are licensed
under the [Creative Commons Attribution 4.0 International
License](http://creativecommons.org/licenses/by/4.0/).
documentation](https://github.com/syncthing/syncthing/wiki/) is on the
Github wiki.

All code is licensed under the
[GPL](https://github.com/syncthing/syncthing/blob/master/LICENSE), v3 or

43 build.go
@@ -22,6 +22,7 @@ import (
"archive/zip"
"bytes"
"compress/gzip"
"crypto/md5"
"flag"
"fmt"
"io"
@@ -190,7 +191,12 @@ func install(pkg string, tags []string) {
}

func build(pkg string, tags []string) {
rmr("syncthing", "syncthing.exe")
binary := "syncthing"
if goos == "windows" {
binary += ".exe"
}

rmr(binary, binary+".md5")
args := []string{"build", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
@@ -201,6 +207,13 @@ func build(pkg string, tags []string) {
args = append(args, pkg)
setBuildEnv()
runPrint("go", args...)

// Create an md5 checksum of the binary, to be included in the archive for
// automatic upgrades.
err := md5File(binary)
if err != nil {
log.Fatal(err)
}
}

func buildTar() {
@@ -217,6 +230,7 @@ func buildTar() {
{"LICENSE", name + "/LICENSE.txt"},
{"AUTHORS", name + "/AUTHORS.txt"},
{"syncthing", name + "/syncthing"},
{"syncthing.md5", name + "/syncthing.md5"},
}
for _, file := range listFiles("etc") {
files = append(files, archiveFile{file, name + "/" + file})
@@ -239,6 +253,7 @@ func buildZip() {
{"LICENSE", name + "/LICENSE.txt"},
{"AUTHORS", name + "/AUTHORS.txt"},
{"syncthing.exe", name + "/syncthing.exe"},
{"syncthing.exe.md5", name + "/syncthing.exe.md5"},
}
zipFile(filename, files)
log.Println(filename)
@@ -554,3 +569,29 @@ func zipFile(out string, files []archiveFile) {
log.Fatal(err)
}
}

func md5File(file string) error {
fd, err := os.Open(file)
if err != nil {
return err
}
defer fd.Close()

h := md5.New()
_, err = io.Copy(h, fd)
if err != nil {
return err
}

out, err := os.Create(file + ".md5")
if err != nil {
return err
}

_, err = fmt.Fprintf(out, "%x\n", h.Sum(nil))
if err != nil {
return err
}

return out.Close()
}

14 build.sh
@@ -105,7 +105,7 @@ case "${1:-default}" in
;;

docker-init)
docker build -q -t syncthing/build:$DOCKERIMGV docker >/dev/null
docker build -q -t syncthing/build:$DOCKERIMGV docker
;;

docker-all)
@@ -122,17 +122,15 @@ case "${1:-default}" in

docker-test)
docker run --rm -h syncthing-builder -u $(id -u) -t \
-v $(pwd):/tmp/syncthing \
-v $(pwd):/go/src/github.com/syncthing/syncthing \
-w /go/src/github.com/syncthing/syncthing \
syncthing/build:$DOCKERIMGV \
sh -euxc 'mkdir -p /go/src/github.com/syncthing \
&& cd /go/src/github.com/syncthing \
&& cp -r /tmp/syncthing syncthing \
&& cd syncthing \
&& ./build.sh clean \
sh -euxc './build.sh clean \
&& go run build.go -race \
&& export GOPATH=$(pwd)/Godeps/_workspace:$GOPATH \
&& cd test \
&& go test -tags integration -v -timeout 60m -short'
&& go test -tags integration -v -timeout 60m -short \
&& git clean -fxd .'
;;

*)

@@ -15,7 +15,8 @@ no-docs-typos() {
grep -v f0621207e3953711f9ab86d99724f1d0faac45b1 |\
grep -v f1120d7aa936c0658429edef0037792520b46334 |\
grep -v a9339d0627fff439879d157c75077f02c9fac61b |\
grep -v 254c63763a3ad42fd82259f1767db526cff94a14
grep -v 254c63763a3ad42fd82259f1767db526cff94a14 |\
grep -v 4b76ec40c07078beaa2c5e250ed7d9bd6276a718
}

print-missing-authors() {

177 cmd/stcompdirs/main.go Normal file
@@ -0,0 +1,177 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/symlinks"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
log.Println(compareDirectories(flag.Args()...))
|
||||
}
|
||||
|
||||
// Compare a number of directories. Returns nil if the contents are identical,
|
||||
// otherwise an error describing the first found difference.
|
||||
func compareDirectories(dirs ...string) error {
|
||||
chans := make([]chan fileInfo, len(dirs))
|
||||
for i := range chans {
|
||||
chans[i] = make(chan fileInfo)
|
||||
}
|
||||
errcs := make([]chan error, len(dirs))
|
||||
abort := make(chan struct{})
|
||||
|
||||
for i := range dirs {
|
||||
errcs[i] = startWalker(dirs[i], chans[i], abort)
|
||||
}
|
||||
|
||||
res := make([]fileInfo, len(dirs))
|
||||
for {
|
||||
numDone := 0
|
||||
for i := range chans {
|
||||
fi, ok := <-chans[i]
|
||||
if !ok {
|
||||
err, hasError := <-errcs[i]
|
||||
if hasError {
|
||||
close(abort)
|
||||
return err
|
||||
}
|
||||
numDone++
|
||||
}
|
||||
res[i] = fi
|
||||
}
|
||||
|
||||
for i := 1; i < len(res); i++ {
|
||||
if res[i] != res[0] {
|
||||
close(abort)
|
||||
if res[i].name < res[0].name {
|
||||
return fmt.Errorf("%s missing %v (present in %s)", dirs[0], res[i], dirs[i])
|
||||
} else if res[i].name > res[0].name {
|
||||
return fmt.Errorf("%s missing %v (present in %s)", dirs[i], res[0], dirs[0])
|
||||
}
|
||||
return fmt.Errorf("Mismatch; %v (%s) != %v (%s)", res[i], dirs[i], res[0], dirs[0])
|
||||
}
|
||||
}
|
||||
|
||||
if numDone == len(dirs) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileInfo struct {
|
||||
name string
|
||||
mode os.FileMode
|
||||
mod int64
|
||||
hash [16]byte
|
||||
}
|
||||
|
||||
func (f fileInfo) String() string {
|
||||
return fmt.Sprintf("%s %04o %d %x", f.name, f.mode, f.mod, f.hash)
|
||||
}
|
||||
|
||||
func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan error {
|
||||
walker := func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rn, _ := filepath.Rel(dir, path)
|
||||
if rn == "." || rn == ".stfolder" {
|
||||
return nil
|
||||
}
|
||||
if rn == ".stversions" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
var f fileInfo
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: os.ModeSymlink,
|
||||
}
|
||||
|
||||
tgt, _, err := symlinks.Read(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := md5.New()
|
||||
h.Write([]byte(tgt))
|
||||
hash := h.Sum(nil)
|
||||
|
||||
copy(f.hash[:], hash)
|
||||
} else if info.IsDir() {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: info.Mode(),
|
||||
// hash and modtime zero for directories
|
||||
}
|
||||
} else {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: info.Mode(),
|
||||
mod: info.ModTime().Unix(),
|
||||
}
|
||||
sum, err := md5file(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.hash = sum
|
||||
}
|
||||
|
||||
select {
|
||||
case res <- f:
|
||||
return nil
|
||||
case <-abort:
|
||||
return errors.New("abort")
|
||||
}
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
go func() {
|
||||
err := filepath.Walk(dir, walker)
|
||||
close(res)
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
close(errc)
|
||||
}()
|
||||
return errc
|
||||
}
|
||||
|
||||
func md5file(fname string) (hash [16]byte, err error) {
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
h := md5.New()
|
||||
io.Copy(h, f)
|
||||
hb := h.Sum(nil)
|
||||
copy(hash[:], hb)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -149,6 +149,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
|
||||
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
|
||||
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
|
||||
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
|
||||
postRestMux.HandleFunc("/rest/bump", withModel(m, restPostBump))
|
||||
|
||||
// A handler that splits requests between the two above and disables
|
||||
// caching
|
||||
@@ -314,19 +315,12 @@ func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var folder = qs.Get("folder")
|
||||
|
||||
files := m.NeedFolderFilesLimited(folder, 100) // max 100 files
|
||||
progress, queued, rest := m.NeedFolderFiles(folder, 100)
|
||||
// Convert the struct to a more loose structure, and inject the size.
|
||||
output := make([]map[string]interface{}, 0, len(files))
|
||||
for _, file := range files {
|
||||
output = append(output, map[string]interface{}{
|
||||
"Name": file.Name,
|
||||
"Flags": file.Flags,
|
||||
"Modified": file.Modified,
|
||||
"Version": file.Version,
|
||||
"LocalVersion": file.LocalVersion,
|
||||
"NumBlocks": file.NumBlocks,
|
||||
"Size": protocol.BlocksToSize(file.NumBlocks),
|
||||
})
|
||||
output := map[string][]map[string]interface{}{
|
||||
"progress": toNeedSlice(progress),
|
||||
"queued": toNeedSlice(queued),
|
||||
"rest": toNeedSlice(rest),
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
@@ -650,6 +644,14 @@ func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func restPostBump(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
file := qs.Get("file")
|
||||
m.BringToFront(folder, file)
|
||||
restGetNeed(m, w, r)
|
||||
}
|
||||
|
||||
func getQR(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var text = qs.Get("text")
|
||||
@@ -775,3 +777,19 @@ func mimeTypeForFile(file string) string {
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
}
|
||||
|
||||
func toNeedSlice(files []protocol.FileInfoTruncated) []map[string]interface{} {
|
||||
output := make([]map[string]interface{}, len(files))
|
||||
for i, file := range files {
|
||||
output[i] = map[string]interface{}{
|
||||
"Name": file.Name,
|
||||
"Flags": file.Flags,
|
||||
"Modified": file.Modified,
|
||||
"Version": file.Version,
|
||||
"LocalVersion": file.LocalVersion,
|
||||
"NumBlocks": file.NumBlocks,
|
||||
"Size": protocol.BlocksToSize(file.NumBlocks),
|
||||
}
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
@@ -182,6 +182,7 @@ var (
	showVersion    bool
	doUpgrade      bool
	doUpgradeCheck bool
	upgradeTo      string
	noBrowser      bool
	noConsole      bool
	generateDir    string
@@ -227,6 +228,7 @@ func main() {
	flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
	flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
	flag.BoolVar(&showVersion, "version", false, "Show version")
	flag.StringVar(&upgradeTo, "upgrade-to", upgradeTo, "Force upgrade directly from specified URL")

	flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, defConfDir))
	flag.Parse()

@@ -315,6 +317,15 @@ func main() {
	// Ensure that our home directory exists.
	ensureDir(confDir, 0700)

	if upgradeTo != "" {
		err := upgrade.ToURL(upgradeTo)
		if err != nil {
			l.Fatalln("Upgrade:", err) // exits 1
		}
		l.Okln("Upgraded from", upgradeTo)
		return
	}

	if doUpgrade || doUpgradeCheck {
		rel, err := upgrade.LatestRelease(IsBeta)
		if err != nil {

@@ -330,7 +341,7 @@ func main() {

	if doUpgrade {
		// Use leveldb database locks to protect against concurrent upgrades
		_, err = leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
		_, err = leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
		if err != nil {
			l.Fatalln("Cannot upgrade, database seems to be locked. Is another copy of Syncthing already running?")
		}

@@ -478,7 +489,7 @@ func syncthingMain() {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}

	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

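Both call sites switch from the old CachedOpenFiles option name to OpenFilesCacheCapacity, the name expected by newer goleveldb versions. A minimal sketch of opening the index database with that option, and of how a failed open doubles as an "already running" check, is below; the path is a placeholder.

package main

import (
	"fmt"
	"path/filepath"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func openIndex(confDir string) (*leveldb.DB, error) {
	// OpenFilesCacheCapacity is the renamed equivalent of the old
	// CachedOpenFiles option.
	return leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{
		OpenFilesCacheCapacity: 100,
	})
}

func main() {
	db, err := openIndex("/tmp/syncthing-example") // placeholder config dir
	if err != nil {
		// A second process opening the same directory fails while the
		// first still holds the database lock.
		fmt.Println("database seems to be locked:", err)
		return
	}
	defer db.Close()
	fmt.Println("index database opened")
}
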
@@ -784,7 +795,7 @@ func setupExternalPort(igd *upnp.IGD, port int) int {

	for i := 0; i < 10; i++ {
		r := 1024 + predictableRandom.Intn(65535-1024)
		err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", cfg.Options().UPnPLease*60)
		err := igd.AddPortMapping(upnp.TCP, r, port, fmt.Sprintf("syncthing-%d", r), cfg.Options().UPnPLease*60)
		if err == nil {
			return r
		}

@@ -964,11 +975,16 @@ next:
			}
		}

		events.Default.Log(events.DeviceRejected, map[string]string{
			"device":  remoteID.String(),
			"address": conn.RemoteAddr().String(),
		})
		l.Infof("Connection from %s with unknown device ID %s; ignoring", conn.RemoteAddr(), remoteID)
		if !cfg.IgnoredDevice(remoteID) {
			events.Default.Log(events.DeviceRejected, map[string]string{
				"device":  remoteID.String(),
				"address": conn.RemoteAddr().String(),
			})
			l.Infof("Connection from %s with unknown device ID %s", conn.RemoteAddr(), remoteID)
		} else {
			l.Infof("Connection from %s with ignored device ID %s", conn.RemoteAddr(), remoteID)
		}

		conn.Close()
	}
}

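The connection loop now consults cfg.IgnoredDevice before emitting a DeviceRejected event, so explicitly ignored devices are still logged but no longer produce the event. The sketch below shows the kind of lookup such a method performs; the Configuration and DeviceID types and field names are simplified stand-ins, not the real config package.

package main

import "fmt"

type DeviceID string

type Configuration struct {
	IgnoredDevices []DeviceID
}

// IgnoredDevice reports whether the given ID is on the ignore list, so the
// caller can skip emitting a DeviceRejected event for it.
func (c *Configuration) IgnoredDevice(id DeviceID) bool {
	for _, ignored := range c.IgnoredDevices {
		if ignored == id {
			return true
		}
	}
	return false
}

func main() {
	cfg := &Configuration{IgnoredDevices: []DeviceID{"ABC123"}}
	fmt.Println(cfg.IgnoredDevice("ABC123")) // true: log only, no event
	fmt.Println(cfg.IgnoredDevice("XYZ789")) // false: emit DeviceRejected
}
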
@@ -1263,7 +1279,8 @@ func autoUpgrade() {
			continue
		}

		if upgrade.CompareVersions(rel.Tag, Version) <= 0 {
		if upgrade.CompareVersions(rel.Tag, Version) != upgrade.Newer {
			// Skip equal, older or majorly newer (incompatible) versions
			continue
		}

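The old check treated any strictly greater comparison result as upgradeable; the new one requires the result to be exactly upgrade.Newer, which per the comment also rules out "majorly newer", incompatible releases. A small sketch of that distinction follows; apart from Newer, the constant names and numeric values are assumptions for illustration, not the upgrade package's real definitions.

package main

import "fmt"

const (
	MajorOlder = -2 // assumed: incompatible, older major version
	Older      = -1
	Equal      = 0
	Newer      = 1
	MajorNewer = 2 // assumed: incompatible, newer major version
)

// shouldAutoUpgrade sketches the decision: only a plainly Newer release is a
// valid automatic upgrade target; a simple "> 0" test would also accept
// incompatible MajorNewer releases.
func shouldAutoUpgrade(cmp int) bool {
	return cmp == Newer
}

func main() {
	fmt.Println(shouldAutoUpgrade(Newer))      // true
	fmt.Println(shouldAutoUpgrade(Equal))      // false
	fmt.Println(shouldAutoUpgrade(MajorNewer)) // false: incompatible
}
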
@@ -15,6 +15,8 @@
"Comment, when used at the start of a line": "Comment, when used at the start of a line",
"Compression is recommended in most setups.": "Compression is recommended in most setups.",
"Connection Error": "Connection Error",
"Copied from elsewhere": "Copied from elsewhere",
"Copied from original": "Copied from original",
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg and the following Contributors:",
"Delete": "Delete",
"Device ID": "Device ID",
@@ -23,6 +25,8 @@
"Disconnected": "Disconnected",
"Documentation": "Documentation",
"Download Rate": "Download Rate",
"Downloaded": "Downloaded",
"Downloading": "Downloading",
"Edit": "Edit",
"Edit Device": "Edit Device",
"Edit Folder": "Edit Folder",
@@ -38,6 +42,7 @@
"Folder ID": "Folder ID",
"Folder Master": "Folder Master",
"Folder Path": "Folder Path",
"Folders": "Folders",
"GUI Authentication Password": "GUI Authentication Password",
"GUI Authentication User": "GUI Authentication User",
"GUI Listen Addresses": "GUI Listen Addresses",
@@ -52,6 +57,7 @@
"Introducer": "Introducer",
"Inversion of the given condition (i.e. do not exclude)": "Inversion of the given condition (i.e. do not exclude)",
"Keep Versions": "Keep Versions",
"Last File Synced": "Last File Synced",
"Last seen": "Last seen",
"Latest Release": "Latest Release",
"Local Discovery": "Local Discovery",
@@ -80,10 +86,13 @@
"Restart": "Restart",
"Restart Needed": "Restart Needed",
"Restarting": "Restarting",
"Reused": "Reused",
"Save": "Save",
"Scanning": "Scanning",
"Select the devices to share this folder with.": "Select the devices to share this folder with.",
"Select the folders to share with this device.": "Select the folders to share with this device.",
"Settings": "Settings",
"Share Folders With Device": "Share Folders With Device",
"Share With Devices": "Share With Devices",
"Shared With": "Shared With",
"Short identifier for the folder. Must be the same on all cluster devices.": "Short identifier for the folder. Must be the same on all cluster devices.",
@@ -124,6 +133,8 @@
"The rescan interval must be a non-negative number of seconds.": "The rescan interval must be a non-negative number of seconds.",
"The rescan interval must be at least 5 seconds.": "The rescan interval must be at least 5 seconds.",
"Unknown": "Unknown",
"Unshared": "Unshared",
"Unused": "Unused",
"Up to Date": "Up to Date",
"Upgrade To {%version%}": "Upgrade To {{version}}",
"Upgrading": "Upgrading",

@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Коментар, използван в началото на реда",
|
||||
"Compression is recommended in most setups.": "Компресията е препоръчителна в повечето конфигурации.",
|
||||
"Connection Error": "Грешка при Свързването",
|
||||
"Copied from elsewhere": "Копиране от някъде другаде",
|
||||
"Copied from original": "Копиран от оригинала",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Правата запазени © 2014 Jakob Borg и следните Сътрудници:",
|
||||
"Delete": "Изтрий",
|
||||
"Device ID": "Идентификатор на устройство",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Прекрати Връзката",
|
||||
"Documentation": "Документация",
|
||||
"Download Rate": "Скорост на Теглене",
|
||||
"Downloaded": "Изтеглен",
|
||||
"Downloading": "Изтегляне",
|
||||
"Edit": "Промени",
|
||||
"Edit Device": "Промени устройство",
|
||||
"Edit Folder": "Промени папка",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Идентификатор на папка",
|
||||
"Folder Master": "Главна папка",
|
||||
"Folder Path": "Път до папката",
|
||||
"Folders": "Папки",
|
||||
"GUI Authentication Password": "Парола за Потребителския Интерфейс",
|
||||
"GUI Authentication User": "Потребител за Потребителския Интерфейс",
|
||||
"GUI Listen Addresses": "Адрес за Свързване с Потребителския Интерфейс",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Introducer",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Обратното на даденото условие (пр. не изключвай)",
|
||||
"Keep Versions": "Пази Версии",
|
||||
"Last File Synced": "Последния синхронизиран файл",
|
||||
"Last seen": "Последно видян",
|
||||
"Latest Release": "Най-новата Версия",
|
||||
"Local Discovery": "Локално Откриване",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Рестартирай",
|
||||
"Restart Needed": "Изискава се Рестартиране",
|
||||
"Restarting": "Рестартиране",
|
||||
"Reused": "Повторно използван",
|
||||
"Save": "Запази",
|
||||
"Scanning": "Сканиране",
|
||||
"Select the devices to share this folder with.": "Избери устройствата, с които да споделиш тази папка.",
|
||||
"Select the folders to share with this device.": "Изберете папките за споделяне с това устройство.",
|
||||
"Settings": "Настройки",
|
||||
"Share Folders With Device": "Сподели папки с това устройство",
|
||||
"Share With Devices": "Сподели с устройства",
|
||||
"Shared With": "Споделена със",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Кратък идентификатор на папката. Трябва да бъде същият на всички компютри.",
|
||||
@@ -121,9 +130,11 @@
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Максималното време да се пазят весрсии (в дни, сложи 0, за да пазиш версии завинаги).",
|
||||
"The number of old versions to keep, per file.": "Броят стари версии, които да бъдат пазени за всеки файл.",
|
||||
"The number of versions must be a number and cannot be blank.": "Броят версии трябва да бъде число и не може да бъде празно.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "The rescan interval must be a non-negative number of seconds.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "Интервала на сканиране трябва да бъде не отрицателно число в секунди.",
|
||||
"The rescan interval must be at least 5 seconds.": "Интервала за повторно сканиране трябва да бъде поне 5 секунди.",
|
||||
"Unknown": "Неясен",
|
||||
"Unshared": "Споделянето прекратено",
|
||||
"Unused": "Неизползван",
|
||||
"Up to Date": "Актуален",
|
||||
"Upgrade To {%version%}": "Обновен До {{version}}",
|
||||
"Upgrading": "Обновяване",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Komentář, pokud použito na začátku řádku",
|
||||
"Compression is recommended in most setups.": "Komprese je doporučena pro většinu nastavení.",
|
||||
"Connection Error": "Chyba připojení",
|
||||
"Copied from elsewhere": "Zkopírováno odjinud",
|
||||
"Copied from original": "Zkopírováno z originálu",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg a následující přispěvatelé:",
|
||||
"Delete": "Smazat",
|
||||
"Device ID": "ID přístroje",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Odpojeno",
|
||||
"Documentation": "Dokumentace",
|
||||
"Download Rate": "Rychlost stahování",
|
||||
"Downloaded": "Staženo",
|
||||
"Downloading": "Stahuji",
|
||||
"Edit": "Upravit",
|
||||
"Edit Device": "Upravit přístroj",
|
||||
"Edit Folder": "Upravit adresář",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "ID adresáře",
|
||||
"Folder Master": "Master adresář",
|
||||
"Folder Path": "Cesta k adresáři",
|
||||
"Folders": "Adresáře",
|
||||
"GUI Authentication Password": "Přihlašovací heslo pro GUI",
|
||||
"GUI Authentication User": "Přihlašovací jméno pro GUI",
|
||||
"GUI Listen Addresses": "Adresa naslouchání GUI",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Zavaděč",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Prohození zadané podmínky (např. nevynechat)",
|
||||
"Keep Versions": "Ponechat verze",
|
||||
"Last File Synced": "Poslední soubor synchronizován",
|
||||
"Last seen": "Naposledy spatřen",
|
||||
"Latest Release": "Poslední vydání",
|
||||
"Local Discovery": "Místní oznamování",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Restart",
|
||||
"Restart Needed": "Je nutný restart",
|
||||
"Restarting": "Restartuji",
|
||||
"Reused": "Opakovaně použité",
|
||||
"Save": "Uložit",
|
||||
"Scanning": "Skenování",
|
||||
"Select the devices to share this folder with.": "Vybrat přístroje se kterými sdílet tento adresář.",
|
||||
"Select the folders to share with this device.": "Vybrat adresáře sdílené tomuto přístroji.",
|
||||
"Settings": "Nastavení",
|
||||
"Share Folders With Device": "Sdílet adresáře tomuto přístroji",
|
||||
"Share With Devices": "Sdílet s přístroji",
|
||||
"Shared With": "Sdíleno s",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Krátký identifikátor tohoto adresáře. Musí být stejný na všech přístrojích v clusteru.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "Interval pro opakování skenování musí být pozitivní číslo.",
|
||||
"The rescan interval must be at least 5 seconds.": "Interval opakování skenování musí být delší než 5 sekund.",
|
||||
"Unknown": "Neznámý",
|
||||
"Unshared": "Nesdílené",
|
||||
"Unused": "Nepoužité",
|
||||
"Up to Date": "Aktuální",
|
||||
"Upgrade To {%version%}": "Aktualizovat na {{version}}",
|
||||
"Upgrading": "Aktualizuji",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Kommentar, wenn am Anfang der Zeile benutzt.",
|
||||
"Compression is recommended in most setups.": "Datenkomprimierung ist für die meisten Anwendungen empfohlen",
|
||||
"Connection Error": "Verbindungsfehler",
|
||||
"Copied from elsewhere": "Von woanders kopiert",
|
||||
"Copied from original": "Vom Originial kopiert",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg und folgende Unterstützer:",
|
||||
"Delete": "Löschen",
|
||||
"Device ID": "Geräte ID",
|
||||
@@ -22,7 +24,9 @@
|
||||
"Device Name": "Gerätename",
|
||||
"Disconnected": "Getrennt",
|
||||
"Documentation": "Dokumentation",
|
||||
"Download Rate": "Downloadgeschwindigkeit",
|
||||
"Download Rate": "Download",
|
||||
"Downloaded": "Heruntergeladen",
|
||||
"Downloading": "Lädt herunter",
|
||||
"Edit": "Bearbeiten",
|
||||
"Edit Device": "Gerät bearbeiten",
|
||||
"Edit Folder": "Verzeichnis bearbeiten",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Verzeichnis ID",
|
||||
"Folder Master": "Keine Veränderungen zulassen",
|
||||
"Folder Path": "Verzeichnispfad",
|
||||
"Folders": "Verzeichnisse",
|
||||
"GUI Authentication Password": "Passwort für Zugang zur Benutzeroberfläche",
|
||||
"GUI Authentication User": "Nutzername für Zugang zur Benutzeroberfläche",
|
||||
"GUI Listen Addresses": "Adresse(n) für die Benutzeroberfläche",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Verteilergerät",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Umkehrung der angegebenen Bedingung (z.B. schließe nicht aus)",
|
||||
"Keep Versions": "Versionen erhalten",
|
||||
"Last File Synced": "Letzte Änderung",
|
||||
"Last seen": "Zuletzt online",
|
||||
"Latest Release": "Letzte Veröffentlichung",
|
||||
"Local Discovery": "Lokale Auffindung",
|
||||
@@ -74,16 +80,19 @@
|
||||
"Preview": "Vorschau",
|
||||
"Preview Usage Report": "Vorschau des Nutzungsberichts",
|
||||
"Quick guide to supported patterns": "Schnellanleitung zu den unterstützten Suchstrukturen",
|
||||
"RAM Utilization": "Verwendeter Arbeitsspeicher",
|
||||
"RAM Utilization": "RAM Auslastung",
|
||||
"Rescan": "Überprüfen",
|
||||
"Rescan Interval": "Suchintervall",
|
||||
"Restart": "Neustart",
|
||||
"Restart Needed": "Neustart notwendig",
|
||||
"Restarting": "Wird neu gestartet",
|
||||
"Reused": "Erneut benutzt",
|
||||
"Save": "Speichern",
|
||||
"Scanning": "Suche",
|
||||
"Select the devices to share this folder with.": "Wähle die Geräte aus, mit denen Du dieses Verzeichnis teilen willst.",
|
||||
"Select the folders to share with this device.": "Wähle die Verzeichnisse aus, die du mit diesem Gerät teilen möchtest",
|
||||
"Settings": "Einstellungen",
|
||||
"Share Folders With Device": "Teile Order mit diesem Gerät",
|
||||
"Share With Devices": "Teile mit diesen Geräten",
|
||||
"Shared With": "Geteilt mit",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Kurze ID für das Verzeichnis. Muss auf allen Verbunds-Geräten gleich sein.",
|
||||
@@ -124,10 +133,12 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "Das Suchintervall muss eine nicht negative Anzahl von Sekunden sein.",
|
||||
"The rescan interval must be at least 5 seconds.": "Das Suchintervall muss mindestens 5 Sekunden betragen.",
|
||||
"Unknown": "Unbekannt",
|
||||
"Unshared": "Ungeteilt",
|
||||
"Unused": "Ungenutzt",
|
||||
"Up to Date": "Aktuell",
|
||||
"Upgrade To {%version%}": "Update auf {{version}}",
|
||||
"Upgrading": "Wird aktualisiert",
|
||||
"Upload Rate": "Uploadgeschwindigkeit",
|
||||
"Upload Rate": "Upload",
|
||||
"Use Compression": "Benutze Komprimierung",
|
||||
"Use HTTPS for GUI": "HTTPS für Benutzeroberfläche benutzen",
|
||||
"Version": "Version",
|
||||
|
||||
@@ -1,8 +1,10 @@
{
"API Key": "API Key",
"About": "About",
"Add": "Add",
"Add Device": "Add Device",
"Add Folder": "Add Folder",
"Add new folder?": "Add new folder?",
"Address": "Address",
"Addresses": "Addresses",
"Allow Anonymous Usage Reporting?": "Allow Anonymous Usage Reporting?",
@@ -22,6 +24,7 @@
"Device ID": "Device ID",
"Device Identification": "Device Identification",
"Device Name": "Device Name",
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Device {{device}} ({{address}}) wants to connect. Add new device?",
"Disconnected": "Disconnected",
"Documentation": "Documentation",
"Download Rate": "Download Rate",
@@ -51,6 +54,7 @@
"Global Discovery Server": "Global Discovery Server",
"Global State": "Global State",
"Idle": "Idle",
"Ignore": "Ignore",
"Ignore Patterns": "Ignore Patterns",
"Ignore Permissions": "Ignore Permissions",
"Incoming Rate Limit (KiB/s)": "Incoming Rate Limit (KiB/s)",
@@ -59,13 +63,15 @@
"Keep Versions": "Keep Versions",
"Last File Synced": "Last File Synced",
"Last seen": "Last seen",
"Later": "Later",
"Latest Release": "Latest Release",
"Legend:": "Legend:",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
"Maximum Age": "Maximum Age",
"Multi level wildcard (matches multiple directory levels)": "Multi level wildcard (matches multiple directory levels)",
"Never": "Never",
"New Device": "New Device",
"New Folder": "New Folder",
"No": "No",
"No File Versioning": "No File Versioning",
"Notice": "Notice",
@@ -93,8 +99,11 @@
"Select the devices to share this folder with.": "Select the devices to share this folder with.",
"Select the folders to share with this device.": "Select the folders to share with this device.",
"Settings": "Settings",
"Share": "Share",
"Share Folder": "Share Folder",
"Share Folders With Device": "Share Folders With Device",
"Share With Devices": "Share With Devices",
"Share this folder?": "Share this folder?",
"Shared With": "Shared With",
"Short identifier for the folder. Must be the same on all cluster devices.": "Short identifier for the folder. Must be the same on all cluster devices.",
"Show ID": "Show ID",
@@ -150,5 +159,6 @@
"Yes": "Yes",
"You must keep at least one version.": "You must keep at least one version.",
"full documentation": "full documentation",
"items": "items"
"items": "items",
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} wants to share folder \"{{folder}}\"."
}

@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Comentario, cuando es utilizado al inicio de una línea.",
|
||||
"Compression is recommended in most setups.": "La compresión de datos es recomendada para la mayoría de las configuraciones.",
|
||||
"Connection Error": "Error de conexión",
|
||||
"Copied from elsewhere": "Copied from elsewhere",
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Derechos de autor © 2014 Jakob Borg y los siguientes colaboradores:",
|
||||
"Delete": "Suprimir",
|
||||
"Device ID": "ID del dispositivo",
|
||||
@@ -23,12 +25,14 @@
|
||||
"Disconnected": "Desconectado",
|
||||
"Documentation": "Documentación",
|
||||
"Download Rate": "Tasa de descarga",
|
||||
"Downloaded": "Descargado",
|
||||
"Downloading": "Descargando",
|
||||
"Edit": "Editar",
|
||||
"Edit Device": "Editar dispositivo",
|
||||
"Edit Folder": "Editar repositorio",
|
||||
"Editing": "Editando",
|
||||
"Enable UPnP": "Permitir UPnP",
|
||||
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Ingrese las direcciones \"ip:puerto\" separadas por coma o \"dynamic\" para descubrir automáticamente las direcciones.",
|
||||
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Ingrese las direcciones \"ip:puerto\" separadas por coma, o \"dynamic\" para descubrir automáticamente las direcciones.",
|
||||
"Enter ignore patterns, one per line.": "Añadir patrones de exclusión, uno por línea.",
|
||||
"Error": "Error",
|
||||
"File Versioning": "Control de versiones",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "ID del repositorio",
|
||||
"Folder Master": "Repositorio maestro",
|
||||
"Folder Path": "Ruta del repositorio",
|
||||
"Folders": "Repositorios",
|
||||
"GUI Authentication Password": "Contraseña de autenticación de la GUI",
|
||||
"GUI Authentication User": "Usuario de la GUI",
|
||||
"GUI Listen Addresses": "Direcciones de escucha para la GUI.",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Introductor",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversión de la condición dada (es decir, no excluir)",
|
||||
"Keep Versions": "Conservar versiones",
|
||||
"Last File Synced": "Último archivo sincronizado.",
|
||||
"Last seen": "Visto por ultima vez",
|
||||
"Latest Release": "Última versión",
|
||||
"Local Discovery": "Búsqueda en red local",
|
||||
@@ -80,16 +86,19 @@
|
||||
"Restart": "Reiniciar",
|
||||
"Restart Needed": "Es necesario reiniciar",
|
||||
"Restarting": "Reiniciando",
|
||||
"Reused": "Reused",
|
||||
"Save": "Guardar",
|
||||
"Scanning": "Actualización",
|
||||
"Select the devices to share this folder with.": "Seleccione los dispositivos con los cuales compartir este repositorio.",
|
||||
"Select the folders to share with this device.": "Seleccione los repositorios para compartir con este dispositivo.",
|
||||
"Settings": "Configuración",
|
||||
"Share Folders With Device": "Compartir repositorios con dispositivo",
|
||||
"Share With Devices": "Compartir con los dispositivos",
|
||||
"Shared With": "Compartido con",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Identificador corto para el repositorio. Debe ser el mismo en todos los dispositivos del grupo.",
|
||||
"Show ID": "Mostrar ID",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Mostrado en vez del ID del dispositivo en el estado del grupo. Si dejado en blanco, será usado el nombre sugerido por el dispositivo.",
|
||||
"Shutdown": "Apagar",
|
||||
"Simple File Versioning": "Versiones simple de archivos",
|
||||
"Single level wildcard (matches within a directory only)": "Single level wildcard (matches within a directory only)",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "El intervalo de reescaneo debe ser un número no negativo de segundos.",
|
||||
"The rescan interval must be at least 5 seconds.": "El intervalo de reescaneo debe ser al menos de 5 segundos.",
|
||||
"Unknown": "Desconocido",
|
||||
"Unshared": "No compartido",
|
||||
"Unused": "Unused",
|
||||
"Up to Date": "Actualizado",
|
||||
"Upgrade To {%version%}": "Actualizar a {{version}}",
|
||||
"Upgrading": "Actualizando",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Commentaire, lorsque utilisé en début de ligne",
|
||||
"Compression is recommended in most setups.": "La compression est recommandée pour la plupart des configurations.",
|
||||
"Connection Error": "Erreur de connexion",
|
||||
"Copied from elsewhere": "Copié d'ailleurs",
|
||||
"Copied from original": "Copié de l'original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg et les contributeurs suivants :",
|
||||
"Delete": "Supprimer",
|
||||
"Device ID": "ID du périphérique",
|
||||
@@ -23,13 +25,15 @@
|
||||
"Disconnected": "Déconnecté",
|
||||
"Documentation": "Documentation",
|
||||
"Download Rate": "Débit de réception",
|
||||
"Downloaded": "Téléchargé",
|
||||
"Downloading": "En cours de téléchargement",
|
||||
"Edit": "Éditer",
|
||||
"Edit Device": "Éditer le périphérique",
|
||||
"Edit Folder": "Éditer le répertoire",
|
||||
"Editing": "Édition",
|
||||
"Enable UPnP": "Activer l'UPnP",
|
||||
"Enter comma separated \"ip:port\" addresses or \"dynamic\" to perform automatic discovery of the address.": "Entrer les adresses \"ip:port\" séparées par une virgule ou \"dynamic\" afin d'activer la recherche automatique de l'adresse.",
|
||||
"Enter ignore patterns, one per line.": "Entrer les modèles à ignorer, un par ligne.",
|
||||
"Enter ignore patterns, one per line.": "Entrer les masques de filtrage, un par ligne.",
|
||||
"Error": "Erreur",
|
||||
"File Versioning": "Versions de fichier",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT filesystems.": "Les permissions de fichier sont ignorées lors de la recherche de changements. À utiliser sur les systèmes de fichiers de type FAT.",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "ID du répertoire",
|
||||
"Folder Master": "Répertoire maître",
|
||||
"Folder Path": "Chemin du répertoire",
|
||||
"Folders": "Dossiers",
|
||||
"GUI Authentication Password": "Mot de passe d'authentification GUI",
|
||||
"GUI Authentication User": "Utilistateur autorisé GUI",
|
||||
"GUI Listen Addresses": "Adresse du GUI",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Initiateur",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inverser la condition donnée (i.e. ne pas exclure)",
|
||||
"Keep Versions": "Conserver les versions",
|
||||
"Last File Synced": "Dernier fichier synchronisé",
|
||||
"Last seen": "Dernière apparition",
|
||||
"Latest Release": "Dernière version",
|
||||
"Local Discovery": "Recherche locale",
|
||||
@@ -73,25 +79,28 @@
|
||||
"Please wait": "Merci de patienter",
|
||||
"Preview": "Aperçu",
|
||||
"Preview Usage Report": "Aperçu du rapport de statistiques d'utilisation",
|
||||
"Quick guide to supported patterns": "Guide rapide des modèles supportés",
|
||||
"Quick guide to supported patterns": "Guide rapide des masques supportés",
|
||||
"RAM Utilization": "Utilisation de la RAM",
|
||||
"Rescan": "Rescanner",
|
||||
"Rescan Interval": "Intervalle de scan",
|
||||
"Restart": "Redémarrer",
|
||||
"Restart Needed": "Redémarrage nécessaire",
|
||||
"Restarting": "Redémarrage",
|
||||
"Reused": "Réutilisé",
|
||||
"Save": "Sauver",
|
||||
"Scanning": "En cours de scan",
|
||||
"Select the devices to share this folder with.": "Sélectionner les appareils avec qui partager ce répertoire.",
|
||||
"Select the devices to share this folder with.": "Sélectionner les machines avec qui partager ce répertoire.",
|
||||
"Select the folders to share with this device.": "Sélectionner les dossiers à partager avec cette machine.",
|
||||
"Settings": "Configuration",
|
||||
"Share With Devices": "Partager avec les périphériques",
|
||||
"Share Folders With Device": "Partage du dossier avec les machines",
|
||||
"Share With Devices": "Partager avec les machines",
|
||||
"Shared With": "Partagé avec",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Court identifiant du répertoire. Il doit être le même sur l'ensemble des appareils du groupe.",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Court identifiant du répertoire. Il doit être le même sur l'ensemble des machines du groupe.",
|
||||
"Show ID": "Montrer l'ID",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Affiché à la place de l'ID de l'appareil dans le statut du groupe. Sera proposé aux autres nœuds comme un nom par défaut optionnel.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Affiché à la place de l'ID de l'appareil dans le statut du groupe. Si laissé vide, il sera changé par le nom proposé par l'appareil.",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Affiché à la place de l'ID de la machine dans le groupe. Sera proposé aux autres machines comme nom optionnel par défaut.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Affiché à la place de l'ID de la machine dans le groupe. Si laissé vide, il sera mis à jour par le nom proposé par la machine distante.",
|
||||
"Shutdown": "Éteindre",
|
||||
"Simple File Versioning": "Versions simples de fichier",
|
||||
"Simple File Versioning": "Suivi simple des versions de fichier",
|
||||
"Single level wildcard (matches within a directory only)": "Astérisque à un seul niveau (correspond uniquement à l’intérieur du répertoire)",
|
||||
"Source Code": "Code source",
|
||||
"Staggered File Versioning": "Versions échelonnées de fichier",
|
||||
@@ -120,10 +129,12 @@
|
||||
"The maximum age must be a number and cannot be blank.": "L'ancienneté maximum doit être un nombre et ne peut être vide.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Le temps maximum de conservation d'une version (en jours, mettre à 0 pour conserver les versions pour toujours)",
|
||||
"The number of old versions to keep, per file.": "Le nombre d'anciennes versions à garder, par fichier.",
|
||||
"The number of versions must be a number and cannot be blank.": "Le nombre de version doit être un nombre et ne peut pas être vide.",
|
||||
"The number of versions must be a number and cannot be blank.": "Le nombre de versions doit être numérique, et ne peut pas être vide.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "L'intervalle de scan ne doit pas être un nombre négatif de secondes.",
|
||||
"The rescan interval must be at least 5 seconds.": "L'intervalle de scan doit être d'au minimum 5 secondes.",
|
||||
"Unknown": "Inconnu",
|
||||
"Unshared": "Non partagé",
|
||||
"Unused": "Non utilisé",
|
||||
"Up to Date": "Synchronisation à jour",
|
||||
"Upgrade To {%version%}": "Mettre à jour vers {{version}}",
|
||||
"Upgrading": "Mise à jour de Syncthing",
|
||||
@@ -132,7 +143,7 @@
|
||||
"Use HTTPS for GUI": "Utiliser l'HTTPS pour le GUI",
|
||||
"Version": "Version",
|
||||
"Versions Path": "Emplacement des versions",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Les versions sont supprimées automatiquement si celles-ci sont plus anciennes que l'ancienneté maximum ou que leur nombre est supérieur au nombre autorisé dans une intervale.",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Les versions seront supprimées automatiquement, si elles dépassent la durée maximum de conservation, ou si leur nombre est supérieur à la valeur autorisée dans l'intervalle.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Lorsqu'un appareil est ajouté, gardez à l'esprit que cet appareil doit aussi être ajouté de l'autre coté.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Lorsqu'un nouveau répertoire est ajouté, gardez à l'esprit que son ID est utilisé pour lier les répertoires à travers les appareils. Les ID sont sensibles à la casse et doivent être identiques à travers tous les nœuds.",
|
||||
"Yes": "Oui",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Megjegyzés, a sor elején használva",
|
||||
"Compression is recommended in most setups.": "A tömörítés a a legtöbb esetben ajánlott",
|
||||
"Connection Error": "Kapcsolódási hiba",
|
||||
"Copied from elsewhere": "Másolva máshonnan",
|
||||
"Copied from original": "Másolva az eredetiről",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg és az alábbi Közreműködők",
|
||||
"Delete": "Törlés",
|
||||
"Device ID": "Eszköz azonosító",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Kapcsolat bontva",
|
||||
"Documentation": "Dokumentáció",
|
||||
"Download Rate": "Letöltési sebesség",
|
||||
"Downloaded": "Letöltve",
|
||||
"Downloading": "Letöltés",
|
||||
"Edit": "Szerkesztés",
|
||||
"Edit Device": "Eszköz szerkesztése",
|
||||
"Edit Folder": "Mappa szerkesztése",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Mappa azonosító",
|
||||
"Folder Master": "Központi mappa",
|
||||
"Folder Path": "Mappa elérési útja",
|
||||
"Folders": "Mappák",
|
||||
"GUI Authentication Password": "Grafikus felület jelszava",
|
||||
"GUI Authentication User": "Grafikus felület felhasználó neve ",
|
||||
"GUI Listen Addresses": "Grafikus felület címe",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Bevezető",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "A feltétel ellentéte (pl. ki nem hagyás)",
|
||||
"Keep Versions": "Megtartott verziók",
|
||||
"Last File Synced": "Utolsó szinkronizált fájl",
|
||||
"Last seen": "Utoljára látva",
|
||||
"Latest Release": "Utolsó kiadás",
|
||||
"Local Discovery": "Helyi felfedezés",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Újraindítás",
|
||||
"Restart Needed": "Újraindítás szükséges",
|
||||
"Restarting": "Újraindulás",
|
||||
"Reused": "Újrafelhasználva",
|
||||
"Save": "Mentés",
|
||||
"Scanning": "Átnézés",
|
||||
"Select the devices to share this folder with.": "Válaszd ki az eszközöket amelyekkel meg szeretnéd osztani a mappát",
|
||||
"Select the folders to share with this device.": "Válaszd ki a mappákat amiket meg szeretnél osztani ezzel az eszközzel",
|
||||
"Settings": "Beállítások",
|
||||
"Share Folders With Device": "Mappák megosztása az eszközzel",
|
||||
"Share With Devices": "Megosztás más eszközzel",
|
||||
"Shared With": "Megosztva ezekkel:",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Rövid azonosító. Minden megosztott eszközön azonosnak kell lennie.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "Az átnézési intervallum nullánál nagyobb másodperc érték kell legyen",
|
||||
"The rescan interval must be at least 5 seconds.": "Az átnézési intervallumnak legalább 5 másodpercnek kell lennie.",
|
||||
"Unknown": "Ismeretlen",
|
||||
"Unshared": "Nincs megosztva",
|
||||
"Unused": "Nincs használatban",
|
||||
"Up to Date": "Friss",
|
||||
"Upgrade To {%version%}": "Frissítés a {{version}} verzióra",
|
||||
"Upgrading": "Frissítés",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Per commentare, va inserito all'inizio di una riga",
|
||||
"Compression is recommended in most setups.": "La compressione è raccomandata nella maggior parte delle configurazioni.",
|
||||
"Connection Error": "Errore di Connessione",
|
||||
"Copied from elsewhere": "Copied from elsewhere",
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg e i seguenti Collaboratori:",
|
||||
"Delete": "Elimina",
|
||||
"Device ID": "ID Dispositivo",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Disconnesso",
|
||||
"Documentation": "Documentazione",
|
||||
"Download Rate": "Velocità Download",
|
||||
"Downloaded": "Downloaded",
|
||||
"Downloading": "Downloading",
|
||||
"Edit": "Modifica",
|
||||
"Edit Device": "Modifica Dispositivo",
|
||||
"Edit Folder": "Modifica Cartella",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "ID Cartella",
|
||||
"Folder Master": "Cartella Principale",
|
||||
"Folder Path": "Percorso Cartella",
|
||||
"Folders": "Folders",
|
||||
"GUI Authentication Password": "Password di Autenticazione dell'Utente",
|
||||
"GUI Authentication User": "Utente dell'Interfaccia Grafica",
|
||||
"GUI Listen Addresses": "Indirizzi dell'Interfaccia Grafica",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Introduttore",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversione della condizione indicata (ad es. non escludere)",
|
||||
"Keep Versions": "Versioni Mantenute",
|
||||
"Last File Synced": "Last File Synced",
|
||||
"Last seen": "Ultima connessione",
|
||||
"Latest Release": "Ultima Versione",
|
||||
"Local Discovery": "Individuazione Locale",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Riavvia",
|
||||
"Restart Needed": "Riavvio Necessario",
|
||||
"Restarting": "Riavvio",
|
||||
"Reused": "Reused",
|
||||
"Save": "Salva",
|
||||
"Scanning": "Scansione in corso",
|
||||
"Select the devices to share this folder with.": "Seleziona i dispositivi con i quali condividere questa cartella.",
|
||||
"Select the folders to share with this device.": "Select the folders to share with this device.",
|
||||
"Settings": "Impostazioni",
|
||||
"Share Folders With Device": "Share Folders With Device",
|
||||
"Share With Devices": "Condividi con i Dispositivi",
|
||||
"Shared With": "Condiviso Con",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Breve identificatore della cartella. Deve essere lo stesso su tutti i dispositivi del cluster.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "The rescan interval must be a non-negative number of seconds.",
|
||||
"The rescan interval must be at least 5 seconds.": "L'intervallo di scansione non può essere inferiore a 5 secondi.",
|
||||
"Unknown": "Sconosciuto",
|
||||
"Unshared": "Unshared",
|
||||
"Unused": "Unused",
|
||||
"Up to Date": "Sincronizzato",
|
||||
"Upgrade To {%version%}": "Aggiorna alla {{version}}",
|
||||
"Upgrading": "Aggiornamento",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Komentaras naudojamas naujoje eilutėje",
|
||||
"Compression is recommended in most setups.": "Daugumoje atvejų spaudimas rekomenduojamas.",
|
||||
"Connection Error": "Susijungimo klaida",
|
||||
"Copied from elsewhere": "Nukopijuota iš betkur",
|
||||
"Copied from original": "Nukopijuota iš originalo",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Visos teisės saugomos © 2014 Jakob Borg ir šių bendraautorių:",
|
||||
"Delete": "Trinti",
|
||||
"Device ID": "Įrenginio ID",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Atsijungęs",
|
||||
"Documentation": "Aprašymas",
|
||||
"Download Rate": "Parsisiuntimo greitis",
|
||||
"Downloaded": "Parsisiųstas",
|
||||
"Downloading": "Siunčiama",
|
||||
"Edit": "Redaguoti",
|
||||
"Edit Device": "Keisti įrenginį",
|
||||
"Edit Folder": "Keisti aplanką",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Aplanko ID",
|
||||
"Folder Master": "Aplanko vadovas",
|
||||
"Folder Path": "Kelias iki apkanko",
|
||||
"Folders": "Aplankai",
|
||||
"GUI Authentication Password": "Valdymo skydelio slaptažodis",
|
||||
"GUI Authentication User": "Valdymo skydelio vartotojo vardas",
|
||||
"GUI Listen Addresses": "Valdymo skydelio adresas",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Supažindintojas",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Apversti sąlygas (pvz.: nenustoti naudoti)",
|
||||
"Keep Versions": "Saugojamų versijų kiekis",
|
||||
"Last File Synced": "Paskutinis failas sinchronizuotas",
|
||||
"Last seen": "Paskutinį kartą matytas",
|
||||
"Latest Release": "Paskutinė versija",
|
||||
"Local Discovery": "Vietinis matomumas",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Perleisti",
|
||||
"Restart Needed": "Reikalingas perleidimas",
|
||||
"Restarting": "Persileidžia",
|
||||
"Reused": "Pakartotinas",
|
||||
"Save": "Išsaugoti",
|
||||
"Scanning": "Skenuojama",
|
||||
"Select the devices to share this folder with.": "Pasirinkite įrenginius, su kuriais dalinsitės šį aplanką.",
|
||||
"Select the folders to share with this device.": "Pasirinkite aplankus kuriais norite dalintis su šiuo įrenginiu.",
|
||||
"Settings": "Nustatymai",
|
||||
"Share Folders With Device": "Dalintis aplankalais su šiuo įrenginiu",
|
||||
"Share With Devices": "Dalintis su įrenginiais",
|
||||
"Shared With": "Dalinamasi su",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Trumpas aplanko identifikatorius. Privalo būti toks pat visuose įrenginiuose.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "Nuskaitymo dažnis negali būti neigiamas skaičius.",
|
||||
"The rescan interval must be at least 5 seconds.": "Nuskaityti galima nedažniau nei kas 5 sekundes.",
|
||||
"Unknown": "Nežinoma",
|
||||
"Unshared": "Nesidalinama",
|
||||
"Unused": "Nenaudojamas",
|
||||
"Up to Date": "Atnaujinta",
|
||||
"Upgrade To {%version%}": "Atnaujinti į {{version}}",
|
||||
"Upgrading": "Atnaujinama",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Kommentar, når det blir brukt i starten av en linje.",
|
||||
"Compression is recommended in most setups.": "Komprimering er anbefalt i de fleste tilfeller.",
|
||||
"Connection Error": "Tilkoblingsfeil",
|
||||
"Copied from elsewhere": "Copied from elsewhere",
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg og følgende Bidragsytere:",
|
||||
"Delete": "Slett",
|
||||
"Device ID": "Enhet ID",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Frakoblet",
|
||||
"Documentation": "Dokumentasjon",
|
||||
"Download Rate": "Nedlastingsrate",
|
||||
"Downloaded": "Downloaded",
|
||||
"Downloading": "Downloading",
|
||||
"Edit": "Rediger",
|
||||
"Edit Device": "Rediger Enhet",
|
||||
"Edit Folder": "Rediger Mappe",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Mappe ID",
|
||||
"Folder Master": "Styrende Mappe",
|
||||
"Folder Path": "Mappeplassering",
|
||||
"Folders": "Folders",
|
||||
"GUI Authentication Password": "GUI Passord",
|
||||
"GUI Authentication User": "GUI Bruker",
|
||||
"GUI Listen Addresses": "GUI Lytteadresse",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Introduktør",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Invers av den gitte tilstanden (t.d. ikke ekskluder)",
|
||||
"Keep Versions": "Behold Versjoner",
|
||||
"Last File Synced": "Last File Synced",
|
||||
"Last seen": "Sist sett",
|
||||
"Latest Release": "Nyeste Versjon",
|
||||
"Local Discovery": "Lokal Søking",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Omstart",
|
||||
"Restart Needed": "Omstart Kreves",
|
||||
"Restarting": "Starter På Ny",
|
||||
"Reused": "Reused",
|
||||
"Save": "Lagre",
|
||||
"Scanning": "Skanner",
|
||||
"Select the devices to share this folder with.": "Velg enhetene du vil dele denne mappen med.",
|
||||
"Select the folders to share with this device.": "Select the folders to share with this device.",
|
||||
"Settings": "Innstillinger",
|
||||
"Share Folders With Device": "Share Folders With Device",
|
||||
"Share With Devices": "Del Med Enheter",
|
||||
"Shared With": "Del Med",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Kort kjennemerke på mappen. Må være det samme på alle enheter i en gruppe.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "Antall sekund i skanneintervallet kan ikke være negativt.",
|
||||
"The rescan interval must be at least 5 seconds.": "Skanneintervallet må være minst 5 sekund.",
|
||||
"Unknown": "Ukjent",
|
||||
"Unshared": "Unshared",
|
||||
"Unused": "Unused",
|
||||
"Up to Date": "Oppdatert",
|
||||
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
|
||||
"Upgrading": "Oppgraderer",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Commentaar, indien gebruikt aan het begin van de lijn",
|
||||
"Compression is recommended in most setups.": "Gegevenscompressie is aan te raden in de meeste situaties.",
|
||||
"Connection Error": "Verbindingsfout",
|
||||
"Copied from elsewhere": "Copied from elsewhere",
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg en de onderstaande bijdragers:",
|
||||
"Delete": "Verwijderen",
|
||||
"Device ID": "Toestel ID",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Niet Verbonden",
|
||||
"Documentation": "Documentatie",
|
||||
"Download Rate": "Downloadsnelheid",
|
||||
"Downloaded": "Downloaded",
|
||||
"Downloading": "Downloading",
|
||||
"Edit": "Bewerk",
|
||||
"Edit Device": "Toestel aanpassen",
|
||||
"Edit Folder": "Folder aanpassen",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Folder ID",
|
||||
"Folder Master": "Hoofdfolder",
|
||||
"Folder Path": "Locatie folder",
|
||||
"Folders": "Folders",
|
||||
"GUI Authentication Password": "GUI Authentificatie Wachtwoord",
|
||||
"GUI Authentication User": "GUI Authentificatie Gebruikersnaam",
|
||||
"GUI Listen Addresses": "GUI Inkomend adres",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Introductietoestel",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversie van de gegeven voorwaarde (bv. niet uitsluiten)",
|
||||
"Keep Versions": "Versies behouden",
|
||||
"Last File Synced": "Last File Synced",
|
||||
"Last seen": "Laatst gezien op",
|
||||
"Latest Release": "Laatste uitgave",
|
||||
"Local Discovery": "Lokaal zoeken",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Herstart",
|
||||
"Restart Needed": "Herstart nodig",
|
||||
"Restarting": "Herstarten",
|
||||
"Reused": "Reused",
|
||||
"Save": "Bewaar",
|
||||
"Scanning": "Aan het zoeken",
|
||||
"Select the devices to share this folder with.": "Selecteer de toestellen om deze folder mee te delen.",
|
||||
"Select the folders to share with this device.": "Select the folders to share with this device.",
|
||||
"Settings": "Instellingen",
|
||||
"Share Folders With Device": "Share Folders With Device",
|
||||
"Share With Devices": "Delen met toestellen",
|
||||
"Shared With": "Gedeeld met",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Korte aanduiding voor deze folder. Moet dezelfde zijn op alle toestellen in de cluster.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "De scanfrequentie moet een positief getal in seconden zijn.",
|
||||
"The rescan interval must be at least 5 seconds.": "De scanfrequentie moet minimaal 5 seconden zijn.",
|
||||
"Unknown": "Onbekend",
|
||||
"Unshared": "Unshared",
|
||||
"Unused": "Unused",
|
||||
"Up to Date": "Gesynchroniseerd",
|
||||
"Upgrade To {%version%}": "Upgrade naar {{version}}",
|
||||
"Upgrading": "Bezig met upgrade",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Kommentar, når det vert brukt i starten av ei linje.",
|
||||
"Compression is recommended in most setups.": "Komprimering er tilrådd i dei fleste høve.",
|
||||
"Connection Error": "Tilkoplingsfeil",
|
||||
"Copied from elsewhere": "Copied from elsewhere",
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg og følgjande Bidragsytarar:",
|
||||
"Delete": "Slett",
|
||||
"Device ID": "Eining ID",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Fråkopla",
|
||||
"Documentation": "Dokumentasjon",
|
||||
"Download Rate": "Nedlastingsrate",
|
||||
"Downloaded": "Downloaded",
|
||||
"Downloading": "Downloading",
|
||||
"Edit": "Rediger",
|
||||
"Edit Device": "Rediger Eining",
|
||||
"Edit Folder": "Rediger Mappe",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Mappe ID",
|
||||
"Folder Master": "Styrande Mappe",
|
||||
"Folder Path": "Mappeplassering",
|
||||
"Folders": "Folders",
|
||||
"GUI Authentication Password": "GUI Passord",
|
||||
"GUI Authentication User": "GUI Brukar",
|
||||
"GUI Listen Addresses": "GUI Lytteadresse",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Introduktør",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Invers av den gitte tilstanden (t.d. ikkje ekskluder)",
|
||||
"Keep Versions": "Behald Versjonar",
|
||||
"Last File Synced": "Last File Synced",
|
||||
"Last seen": "Sist sett",
|
||||
"Latest Release": "Nyaste Versjon",
|
||||
"Local Discovery": "Lokal Søking",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Omstart",
|
||||
"Restart Needed": "Omstart Trengs",
|
||||
"Restarting": "Startar På Ny",
|
||||
"Reused": "Reused",
|
||||
"Save": "Lagre",
|
||||
"Scanning": "Skannar",
|
||||
"Select the devices to share this folder with.": "Vel einingane du vil dela denne mappa med.",
|
||||
"Select the folders to share with this device.": "Select the folders to share with this device.",
|
||||
"Settings": "Innstillingar",
|
||||
"Share Folders With Device": "Share Folders With Device",
|
||||
"Share With Devices": "Del Med Einingar",
|
||||
"Shared With": "Delt Med",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Kort kjennemerke på mappa. Må vera det same på alle einingar i ei gruppe.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "Talet på sekund i skanneintervallet kan ikkje vera negativt.",
|
||||
"The rescan interval must be at least 5 seconds.": "Skanneintervallet må vera minst 5 sekund.",
|
||||
"Unknown": "Ukjent",
|
||||
"Unshared": "Unshared",
|
||||
"Unused": "Unused",
|
||||
"Up to Date": "Oppdatert",
|
||||
"Upgrade To {%version%}": "Oppgrader Til {{version}}",
|
||||
"Upgrading": "Oppgraderer",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Komentarz, jeżeli użyty na początku linii",
|
||||
"Compression is recommended in most setups.": "Kompresja jest zalecana w większości przypadków",
|
||||
"Connection Error": "Błąd połączenia",
|
||||
"Copied from elsewhere": "Copied from elsewhere",
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg i następujący współautorzy:",
|
||||
"Delete": "Usuń",
|
||||
"Device ID": "ID urządzenia",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Rozłączony",
|
||||
"Documentation": "Dokumentacja",
|
||||
"Download Rate": "Prędkość pobierania",
|
||||
"Downloaded": "Downloaded",
|
||||
"Downloading": "Downloading",
|
||||
"Edit": "Edytuj",
|
||||
"Edit Device": "Edytuj urządzenie",
|
||||
"Edit Folder": "Edytuj folder",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "ID folderu",
|
||||
"Folder Master": "Główny folder",
|
||||
"Folder Path": "Ścieżka folderu",
|
||||
"Folders": "Folders",
|
||||
"GUI Authentication Password": "Hasło",
|
||||
"GUI Authentication User": "Użytkownik",
|
||||
"GUI Listen Addresses": "Adres nasłuchiwania",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Wprowadzający",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Odwrócenie podanego wzorca (np. nie wykluczaj)",
|
||||
"Keep Versions": "Zachowuj wersje",
|
||||
"Last File Synced": "Last File Synced",
|
||||
"Last seen": "Ostatnio widziany",
|
||||
"Latest Release": "Najnowsza wersja",
|
||||
"Local Discovery": "Lokalne odnajdywanie",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Uruchom ponownie",
|
||||
"Restart Needed": "Wymagane ponowne uruchomienie",
|
||||
"Restarting": "Uruchamianie ponowne",
|
||||
"Reused": "Reused",
|
||||
"Save": "Zapisz",
|
||||
"Scanning": "Skanowanie",
|
||||
"Select the devices to share this folder with.": "Wybierz urządzenie, któremu udostępnić folder.",
|
||||
"Select the folders to share with this device.": "Select the folders to share with this device.",
|
||||
"Settings": "Ustawienia",
|
||||
"Share Folders With Device": "Share Folders With Device",
|
||||
"Share With Devices": "Udostępnij dla urządzenia",
|
||||
"Shared With": "Współdzielony z",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Krótki identyfikator folderu. Musi być taki sam na wszystkich urządzeniach.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "The rescan interval must be a non-negative number of seconds.",
|
||||
"The rescan interval must be at least 5 seconds.": "Interwał skanowania musi wynosić co najmniej 5 sekund.",
|
||||
"Unknown": "Nieznany",
|
||||
"Unshared": "Unshared",
|
||||
"Unused": "Unused",
|
||||
"Up to Date": "Aktualny",
|
||||
"Upgrade To {%version%}": "Aktualizuj do {{version}}",
|
||||
"Upgrading": "Aktualizowanie",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Comentário, quando usado no início de uma linha",
|
||||
"Compression is recommended in most setups.": "A compressão é recomendada na maior parte dos casos.",
|
||||
"Connection Error": "Erro de ligação",
|
||||
"Copied from elsewhere": "Copiado doutro sítio",
|
||||
"Copied from original": "Copiado do original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Direitos reservados © 2014 Jakob Borg e os seguintes contribuidores:",
|
||||
"Delete": "Eliminar",
|
||||
"Device ID": "ID do dispositivo",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Desconectado",
|
||||
"Documentation": "Documentação",
|
||||
"Download Rate": "Velocidade de recepção",
|
||||
"Downloaded": "Recebido",
|
||||
"Downloading": "Recebendo",
|
||||
"Edit": "Editar",
|
||||
"Edit Device": "Editar dispositivo",
|
||||
"Edit Folder": "Editar pasta",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "ID da pasta",
|
||||
"Folder Master": "Pasta mestre",
|
||||
"Folder Path": "Caminho da pasta",
|
||||
"Folders": "Pastas",
|
||||
"GUI Authentication Password": "Senha da autenticação na interface gráfica",
|
||||
"GUI Authentication User": "Utilizador da autenticação na interface gráfica",
|
||||
"GUI Listen Addresses": "Endereço de escuta da interface gráfica",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Apresentador",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inversão de uma dada condição (ou seja, não excluir)",
|
||||
"Keep Versions": "Manter versões",
|
||||
"Last File Synced": "Último ficheiro sincronizado",
|
||||
"Last seen": "Última vez que foi verificado",
|
||||
"Latest Release": "Última versão",
|
||||
"Local Discovery": "Busca local",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Reiniciar",
|
||||
"Restart Needed": "É preciso reiniciar",
|
||||
"Restarting": "Reiniciando",
|
||||
"Reused": "Reutilizado",
|
||||
"Save": "Gravar",
|
||||
"Scanning": "Verificando",
|
||||
"Select the devices to share this folder with.": "Seleccione os dispositivos com os quais vai partilhar esta pasta.",
|
||||
"Select the folders to share with this device.": "Seleccione as pastas a partilhar com este dispositivo.",
|
||||
"Settings": "Configurações",
|
||||
"Share Folders With Device": "Partilhar pastas com dispositivo",
|
||||
"Share With Devices": "Partilhar com os dispositivos",
|
||||
"Shared With": "Partilhado com",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Identificador curto para a pasta. Tem que ser igual em todos os dispositivos do grupo.",
|
||||
@@ -124,7 +133,9 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "O intervalo entre verificações tem que ser um valor não negativo de segundos.",
|
||||
"The rescan interval must be at least 5 seconds.": "O intervalo entre verificações tem que ser pelo menos de 5 segundos.",
|
||||
"Unknown": "Desconhecido",
|
||||
"Up to Date": "Actualizado",
|
||||
"Unshared": "Não partilhada",
|
||||
"Unused": "Não utilizada",
|
||||
"Up to Date": "Actualizada",
|
||||
"Upgrade To {%version%}": "Actualizar para {{version}}",
|
||||
"Upgrading": "Actualizando",
|
||||
"Upload Rate": "Velocidade de envio",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "Комментарий, если используется в начале строки",
|
||||
"Compression is recommended in most setups.": "Сжатие рекомендуется в большинстве случаев.",
|
||||
"Connection Error": "Ошибка подключения",
|
||||
"Copied from elsewhere": "Скопировано из другого места",
|
||||
"Copied from original": "Скопировано с оригинала",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Все права защищены © 2014 Jakob Borg и следующие участники:",
|
||||
"Delete": "Удалить",
|
||||
"Device ID": "ID устройства",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Нет соединения",
|
||||
"Documentation": "Документация",
|
||||
"Download Rate": "Скорость загрузки",
|
||||
"Downloaded": "Загружено",
|
||||
"Downloading": "Загрузка",
|
||||
"Edit": "Изменить",
|
||||
"Edit Device": "Изменить устройство",
|
||||
"Edit Folder": "Изменение папки",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "ID папки",
|
||||
"Folder Master": "Папка-оригинал",
|
||||
"Folder Path": "Путь к папке",
|
||||
"Folders": "Папки",
|
||||
"GUI Authentication Password": "Пароль для доступа к панели управления",
|
||||
"GUI Authentication User": "Имя пользователя для доступа к панели управления",
|
||||
"GUI Listen Addresses": "Адрес панели управления",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Рекомендатель",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Инвертировать текущее условие (например, исключить)",
|
||||
"Keep Versions": "Количество хранимых версий",
|
||||
"Last File Synced": "Последний синхронизированный файл",
|
||||
"Last seen": "Был доступен",
|
||||
"Latest Release": "Последняя версия",
|
||||
"Local Discovery": "Локальное обнаружение",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Перезапуск",
|
||||
"Restart Needed": "Требуется перезапуск",
|
||||
"Restarting": "Перезапуск",
|
||||
"Reused": "Повторно использовано",
|
||||
"Save": "Сохранить",
|
||||
"Scanning": "Сканирование",
|
||||
"Select the devices to share this folder with.": "Выберите устройства, для которых будет доступна эта папка.",
|
||||
"Select the folders to share with this device.": "Выберите папку для предоставления доступа данному устройству",
|
||||
"Settings": "Настройки",
|
||||
"Share Folders With Device": "Предоставить доступ устройству к папкам",
|
||||
"Share With Devices": "Предоставить доступ устройствам",
|
||||
"Shared With": "Доступ предоставлен",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Короткий идентификатор папки. Должен быть одинаковым на всех устройствах кластера.",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "Интервал пересканирования должен быть неотрицательным количеством секунд.",
|
||||
"The rescan interval must be at least 5 seconds.": "Интервал пересканирования должен быть хотя бы 5 секунд.",
|
||||
"Unknown": "Неизвестно",
|
||||
"Unshared": "Необщедоступно",
|
||||
"Unused": "Не используется",
|
||||
"Up to Date": "Обновлено",
|
||||
"Upgrade To {%version%}": "Обновить до {{version}}",
|
||||
"Upgrading": "Обновление",
|
||||
|
||||
@@ -8,13 +8,15 @@
|
||||
"Allow Anonymous Usage Reporting?": "Tillåt anonym användarstatistik?",
|
||||
"Anonymous Usage Reporting": "Anonym användarstatistik",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Any devices configured on an introducer device will be added to this device as well.",
|
||||
"Automatic upgrades": "Automatic upgrades",
|
||||
"Automatic upgrades": "Automatisk uppgradering",
|
||||
"Bugs": "Buggar",
|
||||
"CPU Utilization": "CPU-användning",
|
||||
"Close": "Stäng",
|
||||
"Comment, when used at the start of a line": "Kommentar, vid början av en rad.",
|
||||
"Compression is recommended in most setups.": "Komprimering är rekommenderat för de flesta.",
|
||||
"Connection Error": "Anslutningsproblem",
|
||||
"Copied from elsewhere": "Kopierat utifrån",
|
||||
"Copied from original": "Oförändrat",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "Copyright © 2014 Jakob Borg och de följande medarbetarna:",
|
||||
"Delete": "Radera",
|
||||
"Device ID": "Enhets-ID",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "Ej ansluten",
|
||||
"Documentation": "Dokumentation",
|
||||
"Download Rate": "Nedladdningshastighet",
|
||||
"Downloaded": "Nerladdat",
|
||||
"Downloading": "Laddar ner",
|
||||
"Edit": "Redigera",
|
||||
"Edit Device": "Redigera enhet",
|
||||
"Edit Folder": "Redigera katalog",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "Katalog-ID",
|
||||
"Folder Master": "Huvudlagring",
|
||||
"Folder Path": "Sökväg",
|
||||
"Folders": "Kataloger",
|
||||
"GUI Authentication Password": "GUI-lösenord",
|
||||
"GUI Authentication User": "GUI-användare",
|
||||
"GUI Listen Addresses": "GUI-adress",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "Introducer",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Vänder på villkoret, d.v.s. exkluderar inte.",
|
||||
"Keep Versions": "Behåll versioner",
|
||||
"Last File Synced": "Senast uppdaterad fil",
|
||||
"Last seen": "Senast online",
|
||||
"Latest Release": "Senaste version",
|
||||
"Local Discovery": "Lokal uppslagning",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "Starta om",
|
||||
"Restart Needed": "Omstart behövs",
|
||||
"Restarting": "Startar om",
|
||||
"Reused": "Återanvänt",
|
||||
"Save": "Spara",
|
||||
"Scanning": "Uppdaterar",
|
||||
"Select the devices to share this folder with.": "Ange enheterna att dela den här katalogen med.",
|
||||
"Select the folders to share with this device.": "Välja kataloger att dela med den här enheten",
|
||||
"Settings": "Inställningar",
|
||||
"Share Folders With Device": "Dela kataloger med enhet",
|
||||
"Share With Devices": "Dela med enheter",
|
||||
"Shared With": "Delat med",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Kort identifieringssträng för katalogen. Måste vara samma på alla enheter i klustern.",
|
||||
@@ -121,9 +130,11 @@
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Den längsta tid att behålla en version (i dagar, sätt till 0 för att behålla versioner för evigt).",
|
||||
"The number of old versions to keep, per file.": "Antalet gamla versioner som ska behållas, per fil.",
|
||||
"The number of versions must be a number and cannot be blank.": "Antalet versioner måste vara ett nummer och kan inte lämnas blankt.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "The rescan interval must be a non-negative number of seconds.",
|
||||
"The rescan interval must be a non-negative number of seconds.": "Förnyelseintervallet måste vara ett positivt antal sekunder",
|
||||
"The rescan interval must be at least 5 seconds.": "Uppdateringsintervallet måste vara minst 5 sekunder.",
|
||||
"Unknown": "Okänt",
|
||||
"Unshared": "Odelat",
|
||||
"Unused": "Oanvänd",
|
||||
"Up to Date": "Helt uppdaterad",
|
||||
"Upgrade To {%version%}": "Uppgradera till {{version}}",
|
||||
"Upgrading": "Uppgraderar",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "注释,在行首使用",
|
||||
"Compression is recommended in most setups.": "在大多数场合,建议开启压缩",
|
||||
"Connection Error": "连接出错",
|
||||
"Copied from elsewhere": "从其他地点复制",
|
||||
"Copied from original": "从源复制",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "版权© 2014 Jakob Borg 及以下贡献者:",
|
||||
"Delete": "删除",
|
||||
"Device ID": "设备ID",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "连接已断开",
|
||||
"Documentation": "文档",
|
||||
"Download Rate": "下载速度",
|
||||
"Downloaded": "已下载",
|
||||
"Downloading": "下载中",
|
||||
"Edit": "选项",
|
||||
"Edit Device": "修改设备选项",
|
||||
"Edit Folder": "修改文件夹选项",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "文件夹ID",
|
||||
"Folder Master": "母文件夹",
|
||||
"Folder Path": "文件夹路径",
|
||||
"Folders": "文件夹",
|
||||
"GUI Authentication Password": "登陆web管理页面的密码",
|
||||
"GUI Authentication User": "登陆web管理页面的用户名",
|
||||
"GUI Listen Addresses": "web管理页面监听地址",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "介绍人节点",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "对本条件取反(例如:不要排除某项)",
|
||||
"Keep Versions": "保留历史版本数量",
|
||||
"Last File Synced": "最近同步的文件",
|
||||
"Last seen": "最后可见",
|
||||
"Latest Release": "最新版本",
|
||||
"Local Discovery": "在局域网上寻找节点",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "重启syncthing",
|
||||
"Restart Needed": "需要重启Syncthing",
|
||||
"Restarting": "重启中",
|
||||
"Reused": "复用",
|
||||
"Save": "保存",
|
||||
"Scanning": "扫描中",
|
||||
"Select the devices to share this folder with.": "选择将本文件夹共享给哪些设备",
|
||||
"Select the folders to share with this device.": "选择与该设备共享的文件夹。",
|
||||
"Settings": "设置",
|
||||
"Share Folders With Device": "将指定文件夹共享给设备",
|
||||
"Share With Devices": "共享给",
|
||||
"Shared With": "共享给",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "文件夹的别名。必须在所有设备上保持一致。",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "扫描间隔单位为秒,且不能为负数。",
|
||||
"The rescan interval must be at least 5 seconds.": "扫描间隔必须至少为5秒。",
|
||||
"Unknown": "未知",
|
||||
"Unshared": "未共享",
|
||||
"Unused": "已共享",
|
||||
"Up to Date": "同步完成",
|
||||
"Upgrade To {%version%}": "升级至版本{{version}}",
|
||||
"Upgrading": "升级中",
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
"Comment, when used at the start of a line": "註解,當輸入在一行的開頭時",
|
||||
"Compression is recommended in most setups.": "建議在大多數的設置中使用壓縮。",
|
||||
"Connection Error": "連線錯誤",
|
||||
"Copied from elsewhere": "Copied from elsewhere",
|
||||
"Copied from original": "Copied from original",
|
||||
"Copyright © 2014 Jakob Borg and the following Contributors:": "版權所有 © 2014 Jakob Borg 及以下貢獻者:",
|
||||
"Delete": "刪除",
|
||||
"Device ID": "裝置識別碼",
|
||||
@@ -23,6 +25,8 @@
|
||||
"Disconnected": "斷線",
|
||||
"Documentation": "說明文件",
|
||||
"Download Rate": "下載速率",
|
||||
"Downloaded": "已下載",
|
||||
"Downloading": "正在下載",
|
||||
"Edit": "編輯",
|
||||
"Edit Device": "編輯裝置",
|
||||
"Edit Folder": "編輯資料夾",
|
||||
@@ -38,6 +42,7 @@
|
||||
"Folder ID": "資料夾識別碼",
|
||||
"Folder Master": "主資料夾",
|
||||
"Folder Path": "資料夾路徑",
|
||||
"Folders": "資料夾",
|
||||
"GUI Authentication Password": "GUI 認證密碼",
|
||||
"GUI Authentication User": "GUI 認證使用者名稱",
|
||||
"GUI Listen Addresses": "GUI 監聽位址",
|
||||
@@ -52,6 +57,7 @@
|
||||
"Introducer": "引入者",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "反轉給定條件 (即:不要排除)",
|
||||
"Keep Versions": "保留歷史版本數",
|
||||
"Last File Synced": "最後同步檔案",
|
||||
"Last seen": "最後發現時間",
|
||||
"Latest Release": "最新發佈",
|
||||
"Local Discovery": "本地探索",
|
||||
@@ -80,10 +86,13 @@
|
||||
"Restart": "重新啟動",
|
||||
"Restart Needed": "需要重新啟動",
|
||||
"Restarting": "正在重新啟動",
|
||||
"Reused": "Reused",
|
||||
"Save": "儲存",
|
||||
"Scanning": "正在掃描",
|
||||
"Select the devices to share this folder with.": "選擇要共享這個資料夾的裝置。",
|
||||
"Select the folders to share with this device.": "選擇要共享這個資料夾的裝置。",
|
||||
"Settings": "設定",
|
||||
"Share Folders With Device": "與裝置共享資料夾",
|
||||
"Share With Devices": "與這些裝置共享",
|
||||
"Shared With": "與誰共享",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "資料夾的簡短識別字。必須在叢集內所有的裝置上皆相同。",
|
||||
@@ -124,6 +133,8 @@
|
||||
"The rescan interval must be a non-negative number of seconds.": "重新掃描間隔必須為一個非負數的秒數。",
|
||||
"The rescan interval must be at least 5 seconds.": "重新掃描間隔至少須為 5 秒。",
|
||||
"Unknown": "未知",
|
||||
"Unshared": "未共享",
|
||||
"Unused": "未使用",
|
||||
"Up to Date": "最新",
|
||||
"Upgrade To {%version%}": "升級至 {{version}}",
|
||||
"Upgrading": "正在升級",
|
||||
|
||||
107
gui/index.html
107
gui/index.html
@@ -308,6 +308,73 @@
|
||||
</div>
|
||||
</div> <!-- /row -->
|
||||
|
||||
<!-- Device Rejections -->
|
||||
|
||||
<div ng-repeat="(device, event) in deviceRejections" class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="panel panel-warning">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<identicon data-value="device"></identicon> <span translate>New Device</span>
|
||||
</h3>
|
||||
</div>
|
||||
<div class="panel-body">
|
||||
<p>
|
||||
<small>{{ event.time | date:"H:mm:ss" }}:</small>
|
||||
<span translate translate-value-device="{{ device }}" translate-value-address="{{ event.data.address }}">
|
||||
Device {%device%} ({%address%}) wants to connect. Add new device?
|
||||
</span>
|
||||
</p>
|
||||
</div>
|
||||
<div class="panel-footer clearfix">
|
||||
<div class="pull-right">
|
||||
<button class="btn btn-sm btn-success" ng-click="addNewDeviceID(device)"><span class="glyphicon glyphicon-ok"></span> <span translate>Add</span></button>
|
||||
<button class="btn btn-sm btn-danger" ng-click="ignoreRejectedDevice(device)"><span class="glyphicon glyphicon-remove"></span> <span translate>Ignore</span></button>
|
||||
<button class="btn btn-sm btn-default" ng-click="dismissDeviceRejection(device)"><span class="glyphicon glyphicon-time"></span> <span translate>Later</span></button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Folder Rejections -->
|
||||
|
||||
<div ng-repeat="(key, event) in folderRejections" class="row reject">
|
||||
<div class="col-md-12">
|
||||
<div class="panel panel-warning">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<span translate ng-if="!folders[event.data.folder]">New Folder</span>
|
||||
<span translate ng-if="folders[event.data.folder]">Share Folder</span>
|
||||
</h3>
|
||||
</div>
|
||||
<div class="panel-body">
|
||||
<p>
|
||||
<small>{{ event.time | date:"H:mm:ss" }}:</small>
|
||||
<span translate translate-value-device="{{ deviceName(findDevice(event.data.device)) }}" translate-value-folder="{{ event.data.folder }}">
|
||||
{%device%} wants to share folder "{%folder%}".
|
||||
</span>
|
||||
<span translate ng-if="folders[event.data.folder]">Share this folder?</span>
|
||||
<span translate ng-if="!folders[event.data.folder]">Add new folder?</span>
|
||||
</p>
|
||||
</div>
|
||||
<div class="panel-footer clearfix">
|
||||
<div class="pull-right">
|
||||
<button class="btn btn-sm btn-success" ng-click="addFolderAndShare(event.data.folder, event.data.device)" ng-if="!folders[event.data.folder]">
|
||||
<span class="glyphicon glyphicon-ok"></span> <span translate>Add</span>
|
||||
</button>
|
||||
<button class="btn btn-sm btn-success" ng-click="shareFolderWithDevice(event.data.folder, event.data.device)" ng-if="folders[event.data.folder]">
|
||||
<span class="glyphicon glyphicon-ok"></span> <span translate>Share</span>
|
||||
</button>
|
||||
<button class="btn btn-sm btn-default" ng-click="dismissFolderRejection(event.data.folder, event.data.device)">
|
||||
<span class="glyphicon glyphicon-time"></span> <span translate>Later</span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Errors -->
|
||||
|
||||
<div ng-if="errorList().length > 0" class="row">
|
||||
@@ -801,21 +868,37 @@
|
||||
<hr/>
|
||||
|
||||
<table class="table table-striped table-condensed">
|
||||
<tr ng-repeat="f in needed" ng-init="a = needAction(f)">
|
||||
<tr ng-repeat="f in needed.progress" ng-init="a = needAction(f)">
|
||||
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
|
||||
<td title="{{f.Name}}">{{f.Name | basename}}</td>
|
||||
<td>
|
||||
<span ng-if="a == 'sync' && progress[neededFolder] && progress[neededFolder][f.Name]">
|
||||
<div class="progress">
|
||||
<div class="progress-bar progress-bar-success" style="width: {{progress[neededFolder][f.Name].Reused}}%"></div>
|
||||
<div class="progress-bar" style="width: {{progress[neededFolder][f.Name].CopiedFromOrigin}}%"></div>
|
||||
<div class="progress-bar progress-bar-info" style="width: {{progress[neededFolder][f.Name].CopiedFromElsewhere}}%"></div>
|
||||
<div class="progress-bar progress-bar-warning" style="width: {{progress[neededFolder][f.Name].Pulled}}%"></div>
|
||||
<div class="progress-bar progress-bar-danger progress-bar-striped active" style="width: {{progress[neededFolder][f.Name].Pulling}}%"></div>
|
||||
<span class="show frontal">{{progress[neededFolder][f.Name].BytesDone | binary}}B / {{progress[neededFolder][f.Name].BytesTotal | binary}}B</span>
|
||||
</div>
|
||||
</span>
|
||||
<td ng-if="a == 'sync' && progress[neededFolder] && progress[neededFolder][f.Name]">
|
||||
<div class="progress">
|
||||
<div class="progress-bar progress-bar-success" style="width: {{progress[neededFolder][f.Name].Reused}}%"></div>
|
||||
<div class="progress-bar" style="width: {{progress[neededFolder][f.Name].CopiedFromOrigin}}%"></div>
|
||||
<div class="progress-bar progress-bar-info" style="width: {{progress[neededFolder][f.Name].CopiedFromElsewhere}}%"></div>
|
||||
<div class="progress-bar progress-bar-warning" style="width: {{progress[neededFolder][f.Name].Pulled}}%"></div>
|
||||
<div class="progress-bar progress-bar-danger progress-bar-striped active" style="width: {{progress[neededFolder][f.Name].Pulling}}%"></div>
|
||||
<span class="show frontal">
|
||||
{{progress[neededFolder][f.Name].BytesDone | binary}}B / {{progress[neededFolder][f.Name].BytesTotal | binary}}B
|
||||
</span>
|
||||
</div>
|
||||
</td>
|
||||
<td class="text-right small-data" ng-if="a != 'sync' || !progress[neededFolder] || !progress[neededFolder][f.Name]">
|
||||
<span ng-if="f.Size > 0">{{f.Size | binary}}B</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr ng-repeat="f in needed.queued" ng-init="a = needAction(f)">
|
||||
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
|
||||
<td title="{{f.Name}}">{{f.Name | basename}}</td>
|
||||
<td class="text-right small-data">
|
||||
<span ng-if="$index != 0" class="glyphicon glyphicon-chevron-up" ng-click="bumpFile(neededFolder, f.Name)"></span>
|
||||
<span ng-if="f.Size > 0">{{f.Size | binary}}B</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr ng-repeat="f in needed.rest" ng-init="a = needAction(f)">
|
||||
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
|
||||
<td title="{{f.Name}}">{{f.Name | basename}}</td>
|
||||
<td class="text-right small-data"><span ng-if="f.Size > 0">{{f.Size | binary}}B</span></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
@@ -104,15 +104,12 @@ function decimals(val, num) {
|
||||
return decs;
|
||||
}
|
||||
|
||||
function randomString(len, bits) {
|
||||
bits = bits || 36;
|
||||
var outStr = "",
|
||||
newStr;
|
||||
while (outStr.length < len) {
|
||||
newStr = Math.random().toString(bits).slice(2);
|
||||
outStr += newStr.slice(0, Math.min(newStr.length, (len - outStr.length)));
|
||||
function randomString(len) {
|
||||
var i, result = '', chars = '01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-';
|
||||
for (i = 0; i < len; i++) {
|
||||
result += chars[Math.round(Math.random() * (chars.length - 1))];
|
||||
}
|
||||
return outStr.toLowerCase();
|
||||
return result;
|
||||
}
|
||||
|
||||
function isEmptyObject(obj) {
|
||||
|
||||
@@ -47,6 +47,8 @@ angular.module('syncthing.core')
|
||||
$scope.model = {};
|
||||
$scope.myID = '';
|
||||
$scope.devices = [];
|
||||
$scope.deviceRejections = {};
|
||||
$scope.folderRejections = {};
|
||||
$scope.protocolChanged = false;
|
||||
$scope.reportData = {};
|
||||
$scope.reportPreview = false;
|
||||
@@ -168,6 +170,14 @@ angular.module('syncthing.core')
|
||||
}
|
||||
});
|
||||
|
||||
$scope.$on('DeviceRejected', function (event, arg) {
|
||||
$scope.deviceRejections[arg.data.device] = arg;
|
||||
});
|
||||
|
||||
$scope.$on('FolderRejected', function (event, arg) {
|
||||
$scope.folderRejections[arg.data.folder + "-" + arg.data.device] = arg;
|
||||
});
|
||||
|
||||
$scope.$on('ConfigSaved', function (event, arg) {
|
||||
updateLocalConfig(arg.data);
|
||||
|
||||
@@ -698,6 +708,11 @@ angular.module('syncthing.core')
|
||||
return n.DeviceID !== $scope.currentDevice.DeviceID;
|
||||
});
|
||||
$scope.config.Devices = $scope.devices;
|
||||
// In case we later added the device manually, remove the ignoral
|
||||
// record.
|
||||
$scope.config.IgnoredDevices = $scope.config.IgnoredDevices.filter(function (id) {
|
||||
return id !== $scope.currentDevice.DeviceID;
|
||||
});
|
||||
|
||||
for (var id in $scope.folders) {
|
||||
$scope.folders[id].Devices = $scope.folders[id].Devices.filter(function (n) {
|
||||
@@ -709,10 +724,24 @@ angular.module('syncthing.core')
|
||||
};
|
||||
|
||||
$scope.saveDevice = function () {
|
||||
var deviceCfg, done, i;
|
||||
|
||||
$('#editDevice').modal('hide');
|
||||
deviceCfg = $scope.currentDevice;
|
||||
$scope.saveDeviceConfig($scope.currentDevice);
|
||||
};
|
||||
|
||||
$scope.addNewDeviceID = function (device) {
|
||||
var deviceCfg = {
|
||||
DeviceID: device,
|
||||
AddressesStr: 'dynamic',
|
||||
Compression: true,
|
||||
Introducer: false,
|
||||
selectedFolders: {}
|
||||
};
|
||||
$scope.saveDeviceConfig(deviceCfg);
|
||||
$scope.dismissDeviceRejection(device);
|
||||
};
|
||||
|
||||
$scope.saveDeviceConfig = function (deviceCfg) {
|
||||
var done, i;
|
||||
deviceCfg.Addresses = deviceCfg.AddressesStr.split(',').map(function (x) {
|
||||
return x.trim();
|
||||
});
|
||||
@@ -732,6 +761,11 @@ angular.module('syncthing.core')
|
||||
|
||||
$scope.devices.sort(deviceCompare);
|
||||
$scope.config.Devices = $scope.devices;
|
||||
// In case we are adding the device manually, remove the ignoral
|
||||
// record.
|
||||
$scope.config.IgnoredDevices = $scope.config.IgnoredDevices.filter(function (id) {
|
||||
return id !== deviceCfg.DeviceID;
|
||||
});
|
||||
|
||||
if (!$scope.editingSelf) {
|
||||
for (var id in deviceCfg.selectedFolders) {
|
||||
@@ -749,7 +783,6 @@ angular.module('syncthing.core')
|
||||
DeviceID: deviceCfg.DeviceID
|
||||
});
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
$scope.folders[id].Devices = $scope.folders[id].Devices.filter(function (n) {
|
||||
return n.DeviceID != deviceCfg.DeviceID;
|
||||
@@ -761,6 +794,16 @@ angular.module('syncthing.core')
|
||||
$scope.saveConfig();
|
||||
};
|
||||
|
||||
$scope.dismissDeviceRejection = function (device) {
|
||||
delete $scope.deviceRejections[device];
|
||||
};
|
||||
|
||||
$scope.ignoreRejectedDevice = function (device) {
|
||||
$scope.config.IgnoredDevices.push(device);
|
||||
$scope.saveConfig();
|
||||
$scope.dismissDeviceRejection(device);
|
||||
};
|
||||
|
||||
$scope.otherDevices = function () {
|
||||
return $scope.devices.filter(function (n) {
|
||||
return n.DeviceID !== $scope.myID;
|
||||
@@ -817,8 +860,8 @@ angular.module('syncthing.core')
|
||||
});
|
||||
});
|
||||
|
||||
$scope.editFolder = function (deviceCfg) {
|
||||
$scope.currentFolder = angular.copy(deviceCfg);
|
||||
$scope.editFolder = function (folderCfg) {
|
||||
$scope.currentFolder = angular.copy(folderCfg);
|
||||
$scope.currentFolder.selectedDevices = {};
|
||||
$scope.currentFolder.Devices.forEach(function (n) {
|
||||
$scope.currentFolder.selectedDevices[n.DeviceID] = true;
|
||||
@@ -867,6 +910,34 @@ angular.module('syncthing.core')
|
||||
$('#editFolder').modal();
|
||||
};
|
||||
|
||||
$scope.addFolderAndShare = function (folder, device) {
|
||||
$scope.dismissFolderRejection(folder, device);
|
||||
$scope.currentFolder = {
|
||||
ID: folder,
|
||||
selectedDevices: {}
|
||||
};
|
||||
$scope.currentFolder.selectedDevices[device] = true;
|
||||
|
||||
$scope.currentFolder.RescanIntervalS = 60;
|
||||
$scope.currentFolder.FileVersioningSelector = "none";
|
||||
$scope.currentFolder.simpleKeep = 5;
|
||||
$scope.currentFolder.staggeredMaxAge = 365;
|
||||
$scope.currentFolder.staggeredCleanInterval = 3600;
|
||||
$scope.currentFolder.staggeredVersionsPath = "";
|
||||
$scope.editingExisting = false;
|
||||
$scope.folderEditor.$setPristine();
|
||||
$('#editFolder').modal();
|
||||
};
|
||||
|
||||
$scope.shareFolderWithDevice = function (folder, device) {
|
||||
$scope.folders[folder].Devices.push({
|
||||
DeviceID: device
|
||||
});
|
||||
$scope.config.Folders = folderList($scope.folders);
|
||||
$scope.saveConfig();
|
||||
$scope.dismissFolderRejection(folder, device);
|
||||
};
|
||||
|
||||
$scope.saveFolder = function () {
|
||||
var folderCfg, done, i;
|
||||
|
||||
@@ -916,6 +987,10 @@ angular.module('syncthing.core')
|
||||
$scope.saveConfig();
|
||||
};
|
||||
|
||||
$scope.dismissFolderRejection = function (folder, device) {
|
||||
delete $scope.folderRejections[folder + "-" + device];
|
||||
};
|
||||
|
||||
$scope.sharesFolder = function (folderCfg) {
|
||||
var names = [];
|
||||
folderCfg.Devices.forEach(function (device) {
|
||||
@@ -994,7 +1069,7 @@ angular.module('syncthing.core')
|
||||
};
|
||||
|
||||
$scope.setAPIKey = function (cfg) {
|
||||
cfg.APIKey = randomString(30, 32);
|
||||
cfg.APIKey = randomString(32);
|
||||
};
|
||||
|
||||
$scope.showURPreview = function () {
|
||||
@@ -1056,6 +1131,15 @@ angular.module('syncthing.core')
|
||||
$http.post(urlbase + "/scan?folder=" + encodeURIComponent(folder));
|
||||
};
|
||||
|
||||
$scope.bumpFile = function (folder, file) {
|
||||
$http.post(urlbase + "/bump?folder=" + encodeURIComponent(folder) + "&file=" + encodeURIComponent(file)).success(function (data) {
|
||||
if ($scope.neededFolder == folder) {
|
||||
console.log("bumpFile", folder, data);
|
||||
$scope.needed = data;
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// pseudo main. called on all definitions assigned
|
||||
initController();
|
||||
});
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -20,6 +20,7 @@ import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -37,12 +38,13 @@ var l = logger.DefaultLogger
|
||||
const CurrentVersion = 7
|
||||
|
||||
type Configuration struct {
|
||||
Version int `xml:"version,attr"`
|
||||
Folders []FolderConfiguration `xml:"folder"`
|
||||
Devices []DeviceConfiguration `xml:"device"`
|
||||
GUI GUIConfiguration `xml:"gui"`
|
||||
Options OptionsConfiguration `xml:"options"`
|
||||
XMLName xml.Name `xml:"configuration" json:"-"`
|
||||
Version int `xml:"version,attr"`
|
||||
Folders []FolderConfiguration `xml:"folder"`
|
||||
Devices []DeviceConfiguration `xml:"device"`
|
||||
GUI GUIConfiguration `xml:"gui"`
|
||||
Options OptionsConfiguration `xml:"options"`
|
||||
IgnoredDevices []protocol.DeviceID `xml:"ignoredDevice"`
|
||||
XMLName xml.Name `xml:"configuration" json:"-"`
|
||||
|
||||
OriginalVersion int `xml:"-" json:"-"` // The version we read from disk, before any conversion
|
||||
Deprecated_Repositories []FolderConfiguration `xml:"repository" json:"-"`
|
||||
@@ -240,10 +242,13 @@ func (cfg *Configuration) WriteXML(w io.Writer) error {
|
||||
func (cfg *Configuration) prepare(myID protocol.DeviceID) {
|
||||
fillNilSlices(&cfg.Options)
|
||||
|
||||
// Initialize an empty slice for folders if the config has none
|
||||
// Initialize empty slices
|
||||
if cfg.Folders == nil {
|
||||
cfg.Folders = []FolderConfiguration{}
|
||||
}
|
||||
if cfg.IgnoredDevices == nil {
|
||||
cfg.IgnoredDevices = []protocol.DeviceID{}
|
||||
}
|
||||
|
||||
// Check for missing, bad or duplicate folder ID:s
|
||||
var seenFolders = map[string]*FolderConfiguration{}
|
||||
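
The hunks above add an IgnoredDevices list to Configuration, serialized as repeated ignoredDevice XML elements, and prepare() now makes sure the slice is never nil. A minimal sketch of the resulting on-disk shape, using a simplified stand-in struct (plain strings instead of protocol.DeviceID, an assumption made here for brevity):

package main

import (
	"encoding/xml"
	"fmt"
)

// miniConfig is a simplified stand-in for config.Configuration; the real type
// uses protocol.DeviceID values rather than plain strings.
type miniConfig struct {
	XMLName        xml.Name `xml:"configuration"`
	Version        int      `xml:"version,attr"`
	IgnoredDevices []string `xml:"ignoredDevice"`
}

func main() {
	cfg := miniConfig{Version: 7, IgnoredDevices: []string{"EXAMPLE-DEVICE-ID"}}
	out, _ := xml.MarshalIndent(cfg, "", "    ")
	fmt.Println(string(out))
	// Prints:
	// <configuration version="7">
	//     <ignoredDevice>EXAMPLE-DEVICE-ID</ignoredDevice>
	// </configuration>
}
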
@@ -369,6 +374,10 @@ func (cfg *Configuration) prepare(myID protocol.DeviceID) {
|
||||
|
||||
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
|
||||
cfg.Options.GlobalAnnServers = uniqueStrings(cfg.Options.GlobalAnnServers)
|
||||
|
||||
if cfg.GUI.APIKey == "" {
|
||||
cfg.GUI.APIKey = randomString(32)
|
||||
}
|
||||
}
|
||||
|
||||
// ChangeRequiresRestart returns true if updating the configuration requires a
|
||||
@@ -674,3 +683,16 @@ func (l FolderDeviceConfigurationList) Swap(a, b int) {
|
||||
func (l FolderDeviceConfigurationList) Len() int {
|
||||
return len(l)
|
||||
}
|
||||
|
||||
// randomCharset contains the characters that can make up a randomString().
|
||||
const randomCharset = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
|
||||
|
||||
// randomString returns a string of random characters (taken from
|
||||
// randomCharset) of the specified length.
|
||||
func randomString(l int) string {
|
||||
bs := make([]byte, l)
|
||||
for i := range bs {
|
||||
bs[i] = randomCharset[rand.Intn(len(randomCharset))]
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
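
A usage note on the randomString helper above: math/rand is deterministic unless seeded, so a caller that wants different keys on every run needs to seed the generator first. A standalone sketch under that assumption (the main wrapper and the explicit Seed call are illustrative, not part of the diff):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const randomCharset = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"

// randomString mirrors the helper added in config.go above.
func randomString(l int) string {
	bs := make([]byte, l)
	for i := range bs {
		bs[i] = randomCharset[rand.Intn(len(randomCharset))]
	}
	return string(bs)
}

func main() {
	rand.Seed(time.Now().UnixNano()) // avoid the fixed default seed
	fmt.Println(randomString(32))    // e.g. a candidate API key
}

The charset has 64 positions (the digit 0 appears twice), so each character contributes just under six bits; a 32-character key is therefore on the order of 190 bits of randomness, though math/rand is not a cryptographically secure source.
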
@@ -245,6 +245,19 @@ func (w *Wrapper) InvalidateFolder(id string, err string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Returns whether or not connection attempts from the given device should be
|
||||
// silently ignored.
|
||||
func (w *Wrapper) IgnoredDevice(id protocol.DeviceID) bool {
|
||||
w.mut.Lock()
|
||||
defer w.mut.Unlock()
|
||||
for _, device := range w.cfg.IgnoredDevices {
|
||||
if device == id {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Save writes the configuration to disk, and generates a ConfigSaved event.
|
||||
func (w *Wrapper) Save() error {
|
||||
fd, err := ioutil.TempFile(filepath.Dir(w.path), "cfg")
|
||||
|
||||
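
To show where the new IgnoredDevice helper fits in, here is a hedged sketch of a connection gate; the function name and its caller are assumptions, not code from these commits:

package example

import (
	"github.com/syncthing/syncthing/internal/config"
	"github.com/syncthing/syncthing/internal/protocol"
)

// shouldAccept is a hypothetical check run before handing an incoming
// connection to the model; devices on the ignore list are dropped silently.
func shouldAccept(cfg *config.Wrapper, remoteID protocol.DeviceID) bool {
	return !cfg.IgnoredDevice(remoteID)
}
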
@@ -144,7 +144,7 @@ func TestConcurrentSetClear(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
os.RemoveAll("testdata/concurrent-set-clear.db")
|
||||
db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{CachedOpenFiles: 10})
|
||||
db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -200,7 +200,7 @@ func TestConcurrentSetOnly(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
os.RemoveAll("testdata/concurrent-set-only.db")
|
||||
db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{CachedOpenFiles: 10})
|
||||
db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{OpenFilesCacheCapacity: 10})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -15,26 +15,11 @@
|
||||
|
||||
package ignore
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
caches = make(map[string]*cache)
|
||||
cacheMut sync.Mutex
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Periodically go through the cache and remove cache entries that have
|
||||
// not been touched in the last two hours.
|
||||
go cleanIgnoreCaches(2 * time.Hour)
|
||||
}
|
||||
import "time"
|
||||
|
||||
type cache struct {
|
||||
patterns []Pattern
|
||||
entries map[string]cacheEntry
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
type cacheEntry struct {
|
||||
@@ -50,46 +35,27 @@ func newCache(patterns []Pattern) *cache {
|
||||
}
|
||||
|
||||
func (c *cache) clean(d time.Duration) {
|
||||
c.mut.Lock()
|
||||
for k, v := range c.entries {
|
||||
if time.Since(v.access) > d {
|
||||
delete(c.entries, k)
|
||||
}
|
||||
}
|
||||
c.mut.Unlock()
|
||||
}
|
||||
|
||||
func (c *cache) get(key string) (result, ok bool) {
|
||||
c.mut.Lock()
|
||||
res, ok := c.entries[key]
|
||||
if ok {
|
||||
res.access = time.Now()
|
||||
c.entries[key] = res
|
||||
}
|
||||
c.mut.Unlock()
|
||||
return res.value, ok
|
||||
}
|
||||
|
||||
func (c *cache) set(key string, val bool) {
|
||||
c.mut.Lock()
|
||||
c.entries[key] = cacheEntry{val, time.Now()}
|
||||
c.mut.Unlock()
|
||||
}
|
||||
|
||||
func (c *cache) len() int {
|
||||
c.mut.Lock()
|
||||
l := len(c.entries)
|
||||
c.mut.Unlock()
|
||||
return l
|
||||
}
|
||||
|
||||
func cleanIgnoreCaches(dur time.Duration) {
|
||||
for {
|
||||
time.Sleep(dur)
|
||||
cacheMut.Lock()
|
||||
for _, v := range caches {
|
||||
v.clean(dur)
|
||||
}
|
||||
cacheMut.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,8 @@ package ignore
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@@ -24,6 +26,7 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/fnmatch"
|
||||
)
|
||||
@@ -33,51 +36,76 @@ type Pattern struct {
|
||||
include bool
|
||||
}
|
||||
|
||||
func (p Pattern) String() string {
|
||||
if p.include {
|
||||
return p.match.String()
|
||||
} else {
|
||||
return "(?exclude)" + p.match.String()
|
||||
}
|
||||
}
|
||||
|
||||
type Matcher struct {
|
||||
patterns []Pattern
|
||||
matches *cache
|
||||
mut sync.Mutex
|
||||
patterns []Pattern
|
||||
withCache bool
|
||||
matches *cache
|
||||
curHash string
|
||||
stop chan struct{}
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
func Load(file string, cache bool) (*Matcher, error) {
|
||||
seen := make(map[string]bool)
|
||||
matcher, err := loadIgnoreFile(file, seen)
|
||||
if !cache || err != nil {
|
||||
return matcher, err
|
||||
func New(withCache bool) *Matcher {
|
||||
m := &Matcher{
|
||||
withCache: withCache,
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
|
||||
cacheMut.Lock()
|
||||
defer cacheMut.Unlock()
|
||||
|
||||
// Get the current cache object for the given file
|
||||
cached, ok := caches[file]
|
||||
if !ok || !patternsEqual(cached.patterns, matcher.patterns) {
|
||||
// Nothing in cache or a cache mismatch, create a new cache which will
|
||||
// store matches for the given set of patterns.
|
||||
// Initialize oldMatches to indicate that we are interested in
|
||||
// caching.
|
||||
cached = newCache(matcher.patterns)
|
||||
matcher.matches = cached
|
||||
caches[file] = cached
|
||||
return matcher, nil
|
||||
if withCache {
|
||||
go m.clean(2 * time.Hour)
|
||||
}
|
||||
|
||||
// Patterns haven't changed, so we can reuse the old matches, create a new
|
||||
// matches map and update the pointer. (This prevents matches map from
|
||||
// growing indefinately, as we only cache whatever we've matched in the last
|
||||
// iteration, rather than through runtime history)
|
||||
matcher.matches = cached
|
||||
return matcher, nil
|
||||
return m
|
||||
}
|
||||
|
||||
func Parse(r io.Reader, file string) (*Matcher, error) {
|
||||
seen := map[string]bool{
|
||||
file: true,
|
||||
func (m *Matcher) Load(file string) error {
|
||||
// No locking, Parse() does the locking
|
||||
|
||||
fd, err := os.Open(file)
|
||||
if err != nil {
|
||||
// We do a parse with empty patterns to clear out the hash, cache etc.
|
||||
m.Parse(&bytes.Buffer{}, file)
|
||||
return err
|
||||
}
|
||||
return parseIgnoreFile(r, file, seen)
|
||||
defer fd.Close()
|
||||
|
||||
return m.Parse(fd, file)
|
||||
}
|
||||
|
||||
func (m *Matcher) Parse(r io.Reader, file string) error {
|
||||
m.mut.Lock()
|
||||
defer m.mut.Unlock()
|
||||
|
||||
seen := map[string]bool{file: true}
|
||||
patterns, err := parseIgnoreFile(r, file, seen)
|
||||
// Error is saved and returned at the end. We process the patterns
|
||||
// (possibly blank) anyway.
|
||||
|
||||
newHash := hashPatterns(patterns)
|
||||
if newHash == m.curHash {
|
||||
// We've already loaded exactly these patterns.
|
||||
return err
|
||||
}
|
||||
|
||||
m.curHash = newHash
|
||||
m.patterns = patterns
|
||||
if m.withCache {
|
||||
m.matches = newCache(patterns)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *Matcher) Match(file string) (result bool) {
|
||||
m.mut.Lock()
|
||||
defer m.mut.Unlock()
|
||||
|
||||
if len(m.patterns) == 0 {
|
||||
return false
|
||||
}
|
||||
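
A minimal usage sketch of the reworked ignore API above (New, Load, Match); the folder path argument is illustrative:

package example

import (
	"path/filepath"

	"github.com/syncthing/syncthing/internal/ignore"
)

// loadIgnores builds a caching Matcher for a folder. A missing .stignore just
// means the matcher ends up with no patterns, so the Load error is ignored,
// as the model code further down does as well.
func loadIgnores(folderPath string) *ignore.Matcher {
	m := ignore.New(true) // true enables the match cache and its cleaner goroutine
	_ = m.Load(filepath.Join(folderPath, ".stignore"))
	return m
}

Reloading the same patterns later is cheap: Parse compares the new pattern hash against curHash and returns early when nothing changed, keeping the existing cache.
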
@@ -108,18 +136,53 @@ func (m *Matcher) Match(file string) (result bool) {
|
||||
|
||||
// Patterns return a list of the loaded regexp patterns, as strings
|
||||
func (m *Matcher) Patterns() []string {
|
||||
m.mut.Lock()
|
||||
defer m.mut.Unlock()
|
||||
|
||||
patterns := make([]string, len(m.patterns))
|
||||
for i, pat := range m.patterns {
|
||||
if pat.include {
|
||||
patterns[i] = pat.match.String()
|
||||
} else {
|
||||
patterns[i] = "(?exclude)" + pat.match.String()
|
||||
}
|
||||
patterns[i] = pat.String()
|
||||
}
|
||||
return patterns
|
||||
}
|
||||
|
||||
func loadIgnoreFile(file string, seen map[string]bool) (*Matcher, error) {
|
||||
func (m *Matcher) Hash() string {
|
||||
m.mut.Lock()
|
||||
defer m.mut.Unlock()
|
||||
return m.curHash
|
||||
}
|
||||
|
||||
func (m *Matcher) Stop() {
|
||||
close(m.stop)
|
||||
}
|
||||
|
||||
func (m *Matcher) clean(d time.Duration) {
|
||||
t := time.NewTimer(d / 2)
|
||||
for {
|
||||
select {
|
||||
case <-m.stop:
|
||||
return
|
||||
case <-t.C:
|
||||
m.mut.Lock()
|
||||
if m.matches != nil {
|
||||
m.matches.clean(d)
|
||||
}
|
||||
t.Reset(d / 2)
|
||||
m.mut.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func hashPatterns(patterns []Pattern) string {
|
||||
h := md5.New()
|
||||
for _, pat := range patterns {
|
||||
h.Write([]byte(pat.String()))
|
||||
h.Write([]byte("\n"))
|
||||
}
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
||||
func loadIgnoreFile(file string, seen map[string]bool) ([]Pattern, error) {
|
||||
if seen[file] {
|
||||
return nil, fmt.Errorf("Multiple include of ignore file %q", file)
|
||||
}
|
||||
@@ -134,8 +197,8 @@ func loadIgnoreFile(file string, seen map[string]bool) (*Matcher, error) {
|
||||
return parseIgnoreFile(fd, file, seen)
|
||||
}
|
||||
|
||||
func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) (*Matcher, error) {
|
||||
var exps Matcher
|
||||
func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) ([]Pattern, error) {
|
||||
var patterns []Pattern
|
||||
|
||||
addPattern := func(line string) error {
|
||||
include := true
|
||||
@@ -150,27 +213,27 @@ func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) (*M
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid pattern %q in ignore file", line)
|
||||
}
|
||||
exps.patterns = append(exps.patterns, Pattern{exp, include})
|
||||
patterns = append(patterns, Pattern{exp, include})
|
||||
} else if strings.HasPrefix(line, "**/") {
|
||||
// Add the pattern as is, and without **/ so it matches in current dir
|
||||
exp, err := fnmatch.Convert(line, fnmatch.FNM_PATHNAME)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid pattern %q in ignore file", line)
|
||||
}
|
||||
exps.patterns = append(exps.patterns, Pattern{exp, include})
|
||||
patterns = append(patterns, Pattern{exp, include})
|
||||
|
||||
exp, err = fnmatch.Convert(line[3:], fnmatch.FNM_PATHNAME)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid pattern %q in ignore file", line)
|
||||
}
|
||||
exps.patterns = append(exps.patterns, Pattern{exp, include})
|
||||
patterns = append(patterns, Pattern{exp, include})
|
||||
} else if strings.HasPrefix(line, "#include ") {
|
||||
includeFile := filepath.Join(filepath.Dir(currentFile), line[len("#include "):])
|
||||
includes, err := loadIgnoreFile(includeFile, seen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
exps.patterns = append(exps.patterns, includes.patterns...)
|
||||
patterns = append(patterns, includes...)
|
||||
} else {
|
||||
// Path name or pattern, add it so it matches files both in
|
||||
// current directory and subdirs.
|
||||
@@ -178,13 +241,13 @@ func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) (*M
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid pattern %q in ignore file", line)
|
||||
}
|
||||
exps.patterns = append(exps.patterns, Pattern{exp, include})
|
||||
patterns = append(patterns, Pattern{exp, include})
|
||||
|
||||
exp, err = fnmatch.Convert("**/"+line, fnmatch.FNM_PATHNAME)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid pattern %q in ignore file", line)
|
||||
}
|
||||
exps.patterns = append(exps.patterns, Pattern{exp, include})
|
||||
patterns = append(patterns, Pattern{exp, include})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -218,17 +281,5 @@ func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) (*M
|
||||
}
|
||||
}
|
||||
|
||||
return &exps, nil
|
||||
}
|
||||
|
||||
func patternsEqual(a, b []Pattern) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i].include != b[i].include || a[i].match.String() != b[i].match.String() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
return patterns, nil
|
||||
}
|
||||
|
||||
@@ -25,7 +25,8 @@ import (
|
||||
)
|
||||
|
||||
func TestIgnore(t *testing.T) {
|
||||
pats, err := Load("testdata/.stignore", true)
|
||||
pats := New(true)
|
||||
err := pats.Load("testdata/.stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -74,7 +75,8 @@ func TestExcludes(t *testing.T) {
|
||||
i*2
|
||||
!ign2
|
||||
`
|
||||
pats, err := Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
pats := New(true)
|
||||
err := pats.Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -114,15 +116,19 @@ func TestBadPatterns(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, pat := range badPatterns {
|
||||
parsed, err := Parse(bytes.NewBufferString(pat), ".stignore")
|
||||
err := New(true).Parse(bytes.NewBufferString(pat), ".stignore")
|
||||
if err == nil {
|
||||
t.Errorf("No error for pattern %q: %v", pat, parsed)
|
||||
t.Errorf("No error for pattern %q", pat)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCaseSensitivity(t *testing.T) {
|
||||
ign, _ := Parse(bytes.NewBufferString("test"), ".stignore")
|
||||
ign := New(true)
|
||||
err := ign.Parse(bytes.NewBufferString("test"), ".stignore")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
match := []string{"test"}
|
||||
dontMatch := []string{"foo"}
|
||||
@@ -170,7 +176,8 @@ func TestCaching(t *testing.T) {
|
||||
|
||||
fd2.WriteString("/y/\n")
|
||||
|
||||
pats, err := Load(fd1.Name(), true)
|
||||
pats := New(true)
|
||||
err = pats.Load(fd1.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -193,9 +200,9 @@ func TestCaching(t *testing.T) {
|
||||
t.Fatal("Expected 4 cached results")
|
||||
}
|
||||
|
||||
// Reload file, expect old outcomes to be provided
|
||||
// Reload file, expect old outcomes to be preserved
|
||||
|
||||
pats, err = Load(fd1.Name(), true)
|
||||
err = pats.Load(fd1.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -207,7 +214,7 @@ func TestCaching(t *testing.T) {
|
||||
|
||||
fd2.WriteString("/z/\n")
|
||||
|
||||
pats, err = Load(fd1.Name(), true)
|
||||
err = pats.Load(fd1.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -222,9 +229,9 @@ func TestCaching(t *testing.T) {
|
||||
pats.Match(letter)
|
||||
}
|
||||
|
||||
// Verify that outcomes provided on next laod
|
||||
// Verify that outcomes preserved on next laod
|
||||
|
||||
pats, err = Load(fd1.Name(), true)
|
||||
err = pats.Load(fd1.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -236,7 +243,7 @@ func TestCaching(t *testing.T) {
|
||||
|
||||
fd1.WriteString("/a/\n")
|
||||
|
||||
pats, err = Load(fd1.Name(), true)
|
||||
err = pats.Load(fd1.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -252,7 +259,7 @@ func TestCaching(t *testing.T) {
|
||||
|
||||
// Verify that outcomes provided on next laod
|
||||
|
||||
pats, err = Load(fd1.Name(), true)
|
||||
err = pats.Load(fd1.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -273,7 +280,11 @@ func TestCommentsAndBlankLines(t *testing.T) {
|
||||
|
||||
|
||||
`
|
||||
pats, _ := Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
pats := New(true)
|
||||
err := pats.Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(pats.patterns) > 0 {
|
||||
t.Errorf("Expected no patterns")
|
||||
}
|
||||
@@ -297,7 +308,11 @@ flamingo
|
||||
*.crow
|
||||
*.crow
|
||||
`
|
||||
pats, _ := Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
pats := New(false)
|
||||
err := pats.Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
@@ -335,7 +350,8 @@ flamingo
|
||||
}
|
||||
|
||||
// Load the patterns
|
||||
pats, err := Load(fd.Name(), true)
|
||||
pats := New(true)
|
||||
err = pats.Load(fd.Name())
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -344,7 +360,7 @@ flamingo
|
||||
|
||||
// This load should now load the cached outcomes as the set of patterns
|
||||
// has not changed.
|
||||
pats, err = Load(fd.Name(), true)
|
||||
err = pats.Load(fd.Name())
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@@ -353,3 +369,152 @@ flamingo
|
||||
result = pats.Match("filename")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheReload(t *testing.T) {
|
||||
fd, err := ioutil.TempFile("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer fd.Close()
|
||||
defer os.Remove(fd.Name())
|
||||
|
||||
// Ignore file matches f1 and f2
|
||||
|
||||
_, err = fd.WriteString("f1\nf2\n")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pats := New(true)
|
||||
err = pats.Load(fd.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify that both are ignored
|
||||
|
||||
if !pats.Match("f1") {
|
||||
t.Error("Unexpected non-match for f1")
|
||||
}
|
||||
if !pats.Match("f2") {
|
||||
t.Error("Unexpected non-match for f2")
|
||||
}
|
||||
if pats.Match("f3") {
|
||||
t.Error("Unexpected match for f3")
|
||||
}
|
||||
|
||||
// Rewrite file to match f1 and f3
|
||||
|
||||
err = fd.Truncate(0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = fd.Seek(0, os.SEEK_SET)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = fd.WriteString("f1\nf3\n")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = pats.Load(fd.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify that the new patterns are in effect
|
||||
|
||||
if !pats.Match("f1") {
|
||||
t.Error("Unexpected non-match for f1")
|
||||
}
|
||||
if pats.Match("f2") {
|
||||
t.Error("Unexpected match for f2")
|
||||
}
|
||||
if !pats.Match("f3") {
|
||||
t.Error("Unexpected non-match for f3")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHash(t *testing.T) {
|
||||
p1 := New(true)
|
||||
err := p1.Load("testdata/.stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Same list of patterns as testdata/.stignore, after expansion
|
||||
stignore := `
|
||||
dir2/dfile
|
||||
dir3
|
||||
bfile
|
||||
dir1/cfile
|
||||
**/efile
|
||||
/ffile
|
||||
lost+found
|
||||
`
|
||||
p2 := New(true)
|
||||
err = p2.Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Not same list of patterns
|
||||
stignore = `
|
||||
dir2/dfile
|
||||
dir3
|
||||
bfile
|
||||
dir1/cfile
|
||||
/ffile
|
||||
lost+found
|
||||
`
|
||||
p3 := New(true)
|
||||
err = p3.Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if p1.Hash() == "" {
|
||||
t.Error("p1 hash blank")
|
||||
}
|
||||
if p2.Hash() == "" {
|
||||
t.Error("p2 hash blank")
|
||||
}
|
||||
if p3.Hash() == "" {
|
||||
t.Error("p3 hash blank")
|
||||
}
|
||||
if p1.Hash() != p2.Hash() {
|
||||
t.Error("p1-p2 hashes differ")
|
||||
}
|
||||
if p1.Hash() == p3.Hash() {
|
||||
t.Error("p1-p3 hashes same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashOfEmpty(t *testing.T) {
|
||||
p1 := New(true)
|
||||
err := p1.Load("testdata/.stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
firstHash := p1.Hash()
|
||||
|
||||
// Reloading with a non-existent file should empty the patterns and
|
||||
// recalculate the hash. d41d8cd98f00b204e9800998ecf8427e is the md5 of
|
||||
// nothing.
|
||||
|
||||
p1.Load("file/does/not/exist")
|
||||
secondHash := p1.Hash()
|
||||
|
||||
if firstHash == secondHash {
|
||||
t.Error("hash did not change")
|
||||
}
|
||||
if secondHash != "d41d8cd98f00b204e9800998ecf8427e" {
|
||||
t.Error("second hash is not hash of empty string")
|
||||
}
|
||||
if len(p1.patterns) != 0 {
|
||||
t.Error("there are more than zero patterns")
|
||||
}
|
||||
}
|
||||
|
||||
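
The constant asserted in TestHashOfEmpty can be sanity-checked in isolation; a quick standalone sketch:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// The MD5 digest of zero bytes, matching the constant used in the test.
	fmt.Printf("%x\n", md5.Sum(nil)) // d41d8cd98f00b204e9800998ecf8427e
}
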
@@ -79,6 +79,8 @@ const (
|
||||
type service interface {
|
||||
Serve()
|
||||
Stop()
|
||||
Jobs() ([]string, []string) // In progress, Queued
|
||||
BringToFront(string)
|
||||
}
|
||||
|
||||
type Model struct {
|
||||
@@ -189,6 +191,7 @@ func (m *Model) StartFolderRW(folder string) {
|
||||
copiers: cfg.Copiers,
|
||||
pullers: cfg.Pullers,
|
||||
finishers: cfg.Finishers,
|
||||
queue: newJobQueue(),
|
||||
}
|
||||
m.folderRunners[folder] = p
|
||||
m.fmut.Unlock()
|
||||
@@ -416,22 +419,50 @@ func (m *Model) NeedSize(folder string) (files int, bytes int64) {
|
||||
return
|
||||
}
|
||||
|
||||
// NeedFiles returns the list of currently needed files, stopping at maxFiles
|
||||
// files. Limit <= 0 is ignored.
|
||||
func (m *Model) NeedFolderFilesLimited(folder string, maxFiles int) []protocol.FileInfoTruncated {
|
||||
// NeedFiles returns the list of currently needed files in progress, queued,
|
||||
// and to be queued on next puller iteration. Also takes a soft cap which is
|
||||
// only respected when adding files from the model rather than the runner queue.
|
||||
func (m *Model) NeedFolderFiles(folder string, max int) ([]protocol.FileInfoTruncated, []protocol.FileInfoTruncated, []protocol.FileInfoTruncated) {
|
||||
defer m.leveldbPanicWorkaround()
|
||||
|
||||
m.fmut.RLock()
|
||||
defer m.fmut.RUnlock()
|
||||
if rf, ok := m.folderFiles[folder]; ok {
|
||||
fs := make([]protocol.FileInfoTruncated, 0, maxFiles)
|
||||
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
|
||||
fs = append(fs, f.(protocol.FileInfoTruncated))
|
||||
return maxFiles <= 0 || len(fs) < maxFiles
|
||||
})
|
||||
return fs
|
||||
var progress, queued, rest []protocol.FileInfoTruncated
|
||||
var seen map[string]bool
|
||||
|
||||
runner, ok := m.folderRunners[folder]
|
||||
if ok {
|
||||
progressNames, queuedNames := runner.Jobs()
|
||||
|
||||
progress = make([]protocol.FileInfoTruncated, len(progressNames))
|
||||
queued = make([]protocol.FileInfoTruncated, len(queuedNames))
|
||||
seen = make(map[string]bool, len(progressNames)+len(queuedNames))
|
||||
|
||||
for i, name := range progressNames {
|
||||
progress[i] = rf.GetGlobal(name).ToTruncated() /// XXX: Should implement GetGlobalTruncated directly
|
||||
seen[name] = true
|
||||
}
|
||||
|
||||
for i, name := range queuedNames {
|
||||
queued[i] = rf.GetGlobal(name).ToTruncated() /// XXX: Should implement GetGlobalTruncated directly
|
||||
seen[name] = true
|
||||
}
|
||||
}
|
||||
left := max - len(progress) - len(queued)
|
||||
if max < 1 || left > 0 {
|
||||
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
|
||||
left--
|
||||
ft := f.(protocol.FileInfoTruncated)
|
||||
if !seen[ft.Name] {
|
||||
rest = append(rest, ft)
|
||||
}
|
||||
return max < 1 || left > 0
|
||||
})
|
||||
}
|
||||
return progress, queued, rest
|
||||
}
|
||||
return nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// Index is called when a new device is connected and we receive their full index.
|
||||
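
A hedged sketch of a caller of the new three-way NeedFolderFiles split; the helper, the folder name and the cap of 100 are illustrative only and not part of this diff:

package example

import (
	"fmt"

	"github.com/syncthing/syncthing/internal/model"
)

// printNeeded summarizes what a folder still needs: files being pulled right
// now, files queued by the puller, and the remainder taken from the database.
func printNeeded(m *model.Model, folder string) {
	progress, queued, rest := m.NeedFolderFiles(folder, 100)
	fmt.Printf("%s: %d in progress, %d queued, %d pending\n",
		folder, len(progress), len(queued), len(rest))
}
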
@@ -446,13 +477,12 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
|
||||
"folder": folder,
|
||||
"device": deviceID.String(),
|
||||
})
|
||||
l.Warnf("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
|
||||
l.Infof("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
|
||||
return
|
||||
}
|
||||
|
||||
m.fmut.RLock()
|
||||
files, ok := m.folderFiles[folder]
|
||||
ignores, _ := m.folderIgnores[folder]
|
||||
m.fmut.RUnlock()
|
||||
|
||||
if !ok {
|
||||
@@ -461,9 +491,9 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
|
||||
|
||||
for i := 0; i < len(fs); {
|
||||
lamport.Default.Tick(fs[i].Version)
|
||||
if (ignores != nil && ignores.Match(fs[i].Name)) || symlinkInvalid(fs[i].IsSymlink()) {
|
||||
if symlinkInvalid(fs[i].IsSymlink()) {
|
||||
if debug {
|
||||
l.Debugln("dropping update for ignored/unsupported symlink", fs[i])
|
||||
l.Debugln("dropping update for unsupported symlink", fs[i])
|
||||
}
|
||||
fs[i] = fs[len(fs)-1]
|
||||
fs = fs[:len(fs)-1]
|
||||
@@ -496,7 +526,6 @@ func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []prot
|
||||
|
||||
m.fmut.RLock()
|
||||
files, ok := m.folderFiles[folder]
|
||||
ignores, _ := m.folderIgnores[folder]
|
||||
m.fmut.RUnlock()
|
||||
|
||||
if !ok {
|
||||
@@ -505,9 +534,9 @@ func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []prot
|
||||
|
||||
for i := 0; i < len(fs); {
|
||||
lamport.Default.Tick(fs[i].Version)
|
||||
if (ignores != nil && ignores.Match(fs[i].Name)) || symlinkInvalid(fs[i].IsSymlink()) {
|
||||
if symlinkInvalid(fs[i].IsSymlink()) {
|
||||
if debug {
|
||||
l.Debugln("dropping update for ignored/unsupported symlink", fs[i])
|
||||
l.Debugln("dropping update for unsupported symlink", fs[i])
|
||||
}
|
||||
fs[i] = fs[len(fs)-1]
|
||||
fs = fs[:len(fs)-1]
|
||||
@@ -1040,7 +1069,8 @@ func (m *Model) AddFolder(cfg config.FolderConfiguration) {
|
||||
m.deviceFolders[device.DeviceID] = append(m.deviceFolders[device.DeviceID], cfg.ID)
|
||||
}
|
||||
|
||||
ignores, _ := ignore.Load(filepath.Join(cfg.Path, ".stignore"), m.cfg.Options().CacheIgnoredFiles)
|
||||
ignores := ignore.New(m.cfg.Options().CacheIgnoredFiles)
|
||||
_ = ignores.Load(filepath.Join(cfg.Path, ".stignore")) // Ignore error, there might not be an .stignore
|
||||
m.folderIgnores[cfg.ID] = ignores
|
||||
|
||||
m.addedFolder = true
|
||||
@@ -1081,25 +1111,25 @@ func (m *Model) ScanFolderSub(folder, sub string) error {
|
||||
|
||||
m.fmut.Lock()
|
||||
fs, ok := m.folderFiles[folder]
|
||||
dir := m.folderCfgs[folder].Path
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
ignores := m.folderIgnores[folder]
|
||||
m.fmut.Unlock()
|
||||
|
||||
ignores, _ := ignore.Load(filepath.Join(dir, ".stignore"), m.cfg.Options().CacheIgnoredFiles)
|
||||
m.folderIgnores[folder] = ignores
|
||||
if !ok {
|
||||
return errors.New("no such folder")
|
||||
}
|
||||
|
||||
_ = ignores.Load(filepath.Join(folderCfg.Path, ".stignore")) // Ignore error, there might not be an .stignore
|
||||
|
||||
w := &scanner.Walker{
|
||||
Dir: dir,
|
||||
Dir: folderCfg.Path,
|
||||
Sub: sub,
|
||||
Matcher: ignores,
|
||||
BlockSize: protocol.BlockSize,
|
||||
TempNamer: defTempNamer,
|
||||
TempLifetime: time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
|
||||
CurrentFiler: cFiler{m, folder},
|
||||
IgnorePerms: m.folderCfgs[folder].IgnorePerms,
|
||||
}
|
||||
m.fmut.Unlock()
|
||||
|
||||
if !ok {
|
||||
return errors.New("no such folder")
|
||||
IgnorePerms: folderCfg.IgnorePerms,
|
||||
}
|
||||
|
||||
m.setState(folder, FolderScanning)
|
||||
@@ -1170,7 +1200,7 @@ func (m *Model) ScanFolderSub(folder, sub string) error {
|
||||
"size": f.Size(),
|
||||
})
|
||||
batch = append(batch, nf)
|
||||
} else if _, err := os.Lstat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
|
||||
} else if _, err := os.Lstat(filepath.Join(folderCfg.Path, f.Name)); err != nil && os.IsNotExist(err) {
|
||||
// File has been deleted
|
||||
nf := protocol.FileInfo{
|
||||
Name: f.Name,
|
||||
@@ -1337,9 +1367,9 @@ func (m *Model) RemoteLocalVersion(folder string) uint64 {
|
||||
return ver
|
||||
}
|
||||
|
||||
func (m *Model) availability(folder string, file string) []protocol.DeviceID {
|
||||
func (m *Model) availability(folder, file string) []protocol.DeviceID {
|
||||
// Acquire this lock first, as the value returned from foldersFiles can
|
||||
// gen heavily modified on Close()
|
||||
// get heavily modified on Close()
|
||||
m.pmut.RLock()
|
||||
defer m.pmut.RUnlock()
|
||||
|
||||
@@ -1360,6 +1390,17 @@ func (m *Model) availability(folder string, file string) []protocol.DeviceID {
|
||||
return availableDevices
|
||||
}
|
||||
|
||||
// Bump the given files priority in the job queue
|
||||
func (m *Model) BringToFront(folder, file string) {
|
||||
m.pmut.RLock()
|
||||
defer m.pmut.RUnlock()
|
||||
|
||||
runner, ok := m.folderRunners[folder]
|
||||
if ok {
|
||||
runner.BringToFront(file)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Model) String() string {
|
||||
return fmt.Sprintf("model@%p", m)
|
||||
}
|
||||
|
||||
@@ -16,11 +16,10 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
@@ -30,6 +29,7 @@ import (
|
||||
|
||||
"github.com/syncthing/syncthing/internal/config"
|
||||
"github.com/syncthing/syncthing/internal/events"
|
||||
"github.com/syncthing/syncthing/internal/ignore"
|
||||
"github.com/syncthing/syncthing/internal/osutil"
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
"github.com/syncthing/syncthing/internal/scanner"
|
||||
@@ -77,6 +77,7 @@ type Puller struct {
|
||||
copiers int
|
||||
pullers int
|
||||
finishers int
|
||||
queue *jobQueue
|
||||
}
|
||||
|
||||
// Serve will run scans and pulls. It will return when Stop()ed or on a
|
||||
@@ -100,6 +101,7 @@ func (p *Puller) Serve() {
|
||||
}()
|
||||
|
||||
var prevVer uint64
|
||||
var prevIgnoreHash string
|
||||
|
||||
// We don't start pulling files until a scan has been completed.
|
||||
initialScanCompleted := false
|
||||
@@ -125,6 +127,20 @@ loop:
|
||||
continue
|
||||
}
|
||||
|
||||
p.model.fmut.RLock()
|
||||
curIgnores := p.model.folderIgnores[p.folder]
|
||||
p.model.fmut.RUnlock()
|
||||
|
||||
if newHash := curIgnores.Hash(); newHash != prevIgnoreHash {
|
||||
// The ignore patterns have changed. We need to re-evaluate if
|
||||
// there are files we need now that were ignored before.
|
||||
if debug {
|
||||
l.Debugln(p, "ignore patterns have changed, resetting prevVer")
|
||||
}
|
||||
prevVer = 0
|
||||
prevIgnoreHash = newHash
|
||||
}
|
||||
|
||||
// RemoteLocalVersion() is a fast call, doesn't touch the database.
|
||||
curVer := p.model.RemoteLocalVersion(p.folder)
|
||||
if curVer == prevVer {
|
||||
@@ -140,16 +156,10 @@ loop:
|
||||
}
|
||||
p.model.setState(p.folder, FolderSyncing)
|
||||
tries := 0
|
||||
checksum := false
|
||||
for {
|
||||
tries++
|
||||
// Last resort mode, to get around corrupt/invalid block maps.
|
||||
if tries == 10 {
|
||||
l.Infoln("Desperation mode ON")
|
||||
checksum = true
|
||||
}
|
||||
|
||||
changed := p.pullerIteration(checksum)
|
||||
changed := p.pullerIteration(curIgnores)
|
||||
if debug {
|
||||
l.Debugln(p, "changed", changed)
|
||||
}
|
||||
@@ -167,7 +177,7 @@ loop:
|
||||
// them, but at the same time we have the local
|
||||
// version that includes those files in curVer. So we
|
||||
// catch the case that localVersion might have
|
||||
// decresed here.
|
||||
// decreased here.
|
||||
l.Debugln(p, "adjusting curVer", lv)
|
||||
curVer = lv
|
||||
}
|
||||
@@ -208,10 +218,14 @@ loop:
|
||||
}
|
||||
p.model.setState(p.folder, FolderIdle)
|
||||
if p.scanIntv > 0 {
|
||||
// Sleep a random time between 3/4 and 5/4 of the configured interval.
|
||||
sleepNanos := (p.scanIntv.Nanoseconds()*3 + rand.Int63n(2*p.scanIntv.Nanoseconds())) / 4
|
||||
intv := time.Duration(sleepNanos) * time.Nanosecond
|
||||
|
||||
if debug {
|
||||
l.Debugln(p, "next rescan in", p.scanIntv)
|
||||
l.Debugln(p, "next rescan in", intv)
|
||||
}
|
||||
scanTimer.Reset(p.scanIntv)
|
||||
scanTimer.Reset(intv)
|
||||
}
|
||||
if !initialScanCompleted {
|
||||
l.Infoln("Completed initial scan (rw) of folder", p.folder)
|
||||
@@ -233,7 +247,7 @@ func (p *Puller) String() string {
|
||||
// returns the number of items that should have been synced (even those that
|
||||
// might have failed). One puller iteration handles all files currently
|
||||
// flagged as needed in the folder.
|
||||
func (p *Puller) pullerIteration(checksum bool) int {
|
||||
func (p *Puller) pullerIteration(ignores *ignore.Matcher) int {
|
||||
pullChan := make(chan pullBlockState)
|
||||
copyChan := make(chan copyBlocksState)
|
||||
finisherChan := make(chan *sharedPullerState)
|
||||
@@ -250,7 +264,7 @@ func (p *Puller) pullerIteration(checksum bool) int {
|
||||
copyWg.Add(1)
|
||||
go func() {
|
||||
// copierRoutine finishes when copyChan is closed
|
||||
p.copierRoutine(copyChan, pullChan, finisherChan, checksum)
|
||||
p.copierRoutine(copyChan, pullChan, finisherChan)
|
||||
copyWg.Done()
|
||||
}()
|
||||
}
|
||||
@@ -298,6 +312,11 @@ func (p *Puller) pullerIteration(checksum bool) int {
|
||||
|
||||
file := intf.(protocol.FileInfo)
|
||||
|
||||
if ignores.Match(file.Name) {
|
||||
// This is an ignored file. Skip it, continue iteration.
|
||||
return true
|
||||
}
|
||||
|
||||
events.Default.Log(events.ItemStarted, map[string]string{
|
||||
"folder": p.folder,
|
||||
"item": file.Name,
|
||||
@@ -316,15 +335,23 @@ func (p *Puller) pullerIteration(checksum bool) int {
|
||||
p.handleDir(file)
|
||||
default:
|
||||
// A new or changed file or symlink. This is the only case where we
|
||||
// do stuff in the background; the other three are done
|
||||
// synchronously.
|
||||
p.handleFile(file, copyChan, finisherChan)
|
||||
// do stuff concurrently in the background
|
||||
p.queue.Push(file.Name)
|
||||
}
|
||||
|
||||
changed++
|
||||
return true
|
||||
})
|
||||
|
||||
for {
|
||||
fileName, ok := p.queue.Pop()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
f := p.model.CurrentGlobalFile(p.folder, fileName)
|
||||
p.handleFile(f, copyChan, finisherChan)
|
||||
}
|
||||
|
||||
// Signal copy and puller routines that we are done with the in data for
|
||||
// this iteration. Wait for them to finish.
|
||||
close(copyChan)
|
||||
@@ -462,6 +489,7 @@ func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksSt
|
||||
if debug {
|
||||
l.Debugln(p, "taking shortcut on", file.Name)
|
||||
}
|
||||
p.queue.Done(file.Name)
|
||||
if file.IsSymlink() {
|
||||
p.shortcutSymlink(curFile, file)
|
||||
} else {
|
||||
@@ -577,7 +605,7 @@ func (p *Puller) shortcutSymlink(curFile, file protocol.FileInfo) {
|
||||
|
||||
// copierRoutine reads copierStates until the in channel closes and performs
|
||||
// the relevant copies when possible, or passes it to the puller routine.
|
||||
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState, checksum bool) {
|
||||
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
|
||||
buf := make([]byte, protocol.BlockSize)
|
||||
|
||||
nextFile:
|
||||
@@ -613,7 +641,6 @@ nextFile:
|
||||
}
|
||||
p.model.fmut.RUnlock()
|
||||
|
||||
hasher := sha256.New()
|
||||
for _, block := range state.blocks {
|
||||
buf = buf[:int(block.Size)]
|
||||
found := p.model.finder.Iterate(block.Hash, func(folder, file string, index uint32) bool {
|
||||
@@ -637,12 +664,9 @@ nextFile:
|
||||
return false
|
||||
}
|
||||
|
||||
// Only done on second to last puller attempt
|
||||
if checksum {
|
||||
hasher.Write(buf)
|
||||
hash := hasher.Sum(nil)
|
||||
hasher.Reset()
|
||||
if !bytes.Equal(hash, block.Hash) {
|
||||
hash, err := scanner.VerifyBuffer(buf, block)
|
||||
if err != nil {
|
||||
if hash != nil {
|
||||
if debug {
|
||||
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
|
||||
}
|
||||
@@ -650,8 +674,10 @@ nextFile:
|
||||
if err != nil {
|
||||
l.Warnln("finder fix:", err)
|
||||
}
|
||||
return false
|
||||
} else if debug {
|
||||
l.Debugln("Finder failed to verify buffer", err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
_, err = dstFd.WriteAt(buf, block.Offset)
|
||||
@@ -686,20 +712,9 @@ nextFile:
|
||||
}
|
||||
|
||||
func (p *Puller) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
|
||||
nextBlock:
|
||||
for state := range in {
|
||||
if state.failed() != nil {
|
||||
continue nextBlock
|
||||
}
|
||||
|
||||
// Select the least busy device to pull the block from. If we found no
|
||||
// feasible device at all, fail the block (and in the long run, the
|
||||
// file).
|
||||
potentialDevices := p.model.availability(p.folder, state.file.Name)
|
||||
selected := activity.leastBusy(potentialDevices)
|
||||
if selected == (protocol.DeviceID{}) {
|
||||
state.earlyClose("pull", errNoDevice)
|
||||
continue nextBlock
|
||||
continue
|
||||
}
|
||||
|
||||
// Get an fd to the temporary file. Technically we don't need it until
|
||||
@@ -707,45 +722,58 @@ nextBlock:
|
||||
// no point in issuing the request to the network.
|
||||
fd, err := state.tempFile()
|
||||
if err != nil {
|
||||
continue nextBlock
|
||||
continue
|
||||
}
|
||||
|
||||
// Fetch the block, while marking the selected device as in use so that
|
||||
// leastBusy can select another device when someone else asks.
|
||||
activity.using(selected)
|
||||
buf, err := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
|
||||
activity.done(selected)
|
||||
if err != nil {
|
||||
state.earlyClose("pull", err)
|
||||
continue nextBlock
|
||||
}
|
||||
var lastError error
|
||||
potentialDevices := p.model.availability(p.folder, state.file.Name)
|
||||
for {
|
||||
// Select the least busy device to pull the block from. If we found no
|
||||
// feasible device at all, fail the block (and in the long run, the
|
||||
// file).
|
||||
selected := activity.leastBusy(potentialDevices)
|
||||
if selected == (protocol.DeviceID{}) {
|
||||
if lastError != nil {
|
||||
state.earlyClose("pull", lastError)
|
||||
} else {
|
||||
state.earlyClose("pull", errNoDevice)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Save the block data we got from the cluster
|
||||
_, err = fd.WriteAt(buf, state.block.Offset)
|
||||
if err != nil {
|
||||
state.earlyClose("save", err)
|
||||
continue nextBlock
|
||||
}
|
||||
potentialDevices = removeDevice(potentialDevices, selected)
|
||||
|
||||
state.pullDone()
|
||||
out <- state.sharedPullerState
|
||||
// Fetch the block, while marking the selected device as in use so that
|
||||
// leastBusy can select another device when someone else asks.
|
||||
activity.using(selected)
|
||||
buf, lastError := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
|
||||
activity.done(selected)
|
||||
if lastError != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify that the received block matches the desired hash, if not
|
||||
// try pulling it from another device.
|
||||
_, lastError = scanner.VerifyBuffer(buf, state.block)
|
||||
if lastError != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Save the block data we got from the cluster
|
||||
_, err = fd.WriteAt(buf, state.block.Offset)
|
||||
if err != nil {
|
||||
state.earlyClose("save", err)
|
||||
} else {
|
||||
state.pullDone()
|
||||
out <- state.sharedPullerState
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Puller) performFinish(state *sharedPullerState) {
|
||||
// Verify the file against expected hashes
|
||||
fd, err := os.Open(state.tempName)
|
||||
if err != nil {
|
||||
l.Warnln("puller: final:", err)
|
||||
return
|
||||
}
|
||||
err = scanner.Verify(fd, protocol.BlockSize, state.file.Blocks)
|
||||
fd.Close()
|
||||
if err != nil {
|
||||
l.Infoln("puller:", state.file.Name, err, "(file changed during pull?)")
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
// Set the correct permission bits on the new file
|
||||
if !p.ignorePerms {
|
||||
err = os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777))
|
||||
@@ -829,6 +857,7 @@ func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
|
||||
continue
|
||||
}
|
||||
|
||||
p.queue.Done(state.file.Name)
|
||||
p.performFinish(state)
|
||||
p.model.receivedFile(p.folder, state.file.Name)
|
||||
if p.progressEmitter != nil {
|
||||
@@ -838,6 +867,15 @@ func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
|
||||
}
|
||||
}
|
||||
|
||||
// Moves the given filename to the front of the job queue
|
||||
func (p *Puller) BringToFront(filename string) {
|
||||
p.queue.BringToFront(filename)
|
||||
}
|
||||
|
||||
func (p *Puller) Jobs() ([]string, []string) {
|
||||
return p.queue.Jobs()
|
||||
}
|
||||
|
||||
func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
|
||||
for i := range cfg.Folders {
|
||||
folder := &cfg.Folders[i]
|
||||
@@ -847,3 +885,13 @@ func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeDevice(devices []protocol.DeviceID, device protocol.DeviceID) []protocol.DeviceID {
|
||||
for i := range devices {
|
||||
if devices[i] == device {
|
||||
devices[i] = devices[len(devices)-1]
|
||||
return devices[:len(devices)-1]
|
||||
}
|
||||
}
|
||||
return devices
|
||||
}
|
||||
|
||||
@@ -221,7 +221,7 @@ func TestCopierFinder(t *testing.T) {
|
||||
finisherChan := make(chan *sharedPullerState, 1)
|
||||
|
||||
// Run a single fetcher routine
|
||||
go p.copierRoutine(copyChan, pullChan, finisherChan, false)
|
||||
go p.copierRoutine(copyChan, pullChan, finisherChan)
|
||||
|
||||
p.handleFile(requiredFile, copyChan, finisherChan)
|
||||
|
||||
@@ -317,9 +317,8 @@ func TestCopierCleanup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// On the 10th iteration, we start hashing the content which we receive by
|
||||
// following blockfinder's instructions. Make sure that the copier routine
|
||||
// hashes the content when asked, and pulls if it fails to find the block.
|
||||
// Make sure that the copier routine hashes the content when asked, and pulls
|
||||
// if it fails to find the block.
|
||||
func TestLastResortPulling(t *testing.T) {
|
||||
fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"}
|
||||
cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}}
|
||||
@@ -361,8 +360,8 @@ func TestLastResortPulling(t *testing.T) {
|
||||
pullChan := make(chan pullBlockState, 1)
|
||||
finisherChan := make(chan *sharedPullerState, 1)
|
||||
|
||||
// Run a single copier routine with checksumming enabled
|
||||
go p.copierRoutine(copyChan, pullChan, finisherChan, true)
|
||||
// Run a single copier routine
|
||||
go p.copierRoutine(copyChan, pullChan, finisherChan)
|
||||
|
||||
p.handleFile(file, copyChan, finisherChan)
|
||||
|
||||
|
||||
internal/model/queue.go (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import "sync"
|
||||
|
||||
type jobQueue struct {
|
||||
progress []string
|
||||
queued []string
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
func newJobQueue() *jobQueue {
|
||||
return &jobQueue{}
|
||||
}
|
||||
|
||||
func (q *jobQueue) Push(file string) {
|
||||
q.mut.Lock()
|
||||
q.queued = append(q.queued, file)
|
||||
q.mut.Unlock()
|
||||
}
|
||||
|
||||
func (q *jobQueue) Pop() (string, bool) {
|
||||
q.mut.Lock()
|
||||
defer q.mut.Unlock()
|
||||
|
||||
if len(q.queued) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
var f string
|
||||
f = q.queued[0]
|
||||
q.queued = q.queued[1:]
|
||||
q.progress = append(q.progress, f)
|
||||
|
||||
return f, true
|
||||
}
|
||||
|
||||
func (q *jobQueue) BringToFront(filename string) {
|
||||
q.mut.Lock()
|
||||
defer q.mut.Unlock()
|
||||
|
||||
for i, cur := range q.queued {
|
||||
if cur == filename {
|
||||
if i > 0 {
|
||||
// Shift the elements before the selected element one step to
|
||||
// the right, overwriting the selected element
|
||||
copy(q.queued[1:i+1], q.queued[0:])
|
||||
// Put the selected element at the front
|
||||
q.queued[0] = cur
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (q *jobQueue) Done(file string) {
|
||||
q.mut.Lock()
|
||||
defer q.mut.Unlock()
|
||||
|
||||
for i := range q.progress {
|
||||
if q.progress[i] == file {
|
||||
copy(q.progress[i:], q.progress[i+1:])
|
||||
q.progress = q.progress[:len(q.progress)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (q *jobQueue) Jobs() ([]string, []string) {
|
||||
q.mut.Lock()
|
||||
defer q.mut.Unlock()
|
||||
|
||||
progress := make([]string, len(q.progress))
|
||||
copy(progress, q.progress)
|
||||
|
||||
queued := make([]string, len(q.queued))
|
||||
copy(queued, q.queued)
|
||||
|
||||
return progress, queued
|
||||
}
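A short usage sketch of the queue above, as the puller is expected to drive it (same package; the file names are made up):

func exampleJobQueue() {
	q := newJobQueue()
	q.Push("a.txt")
	q.Push("b.txt")
	q.Push("c.txt")

	// The user asks for c.txt first; move it to the head of the queue.
	q.BringToFront("c.txt")

	name, _ := q.Pop() // "c.txt"; it is now tracked in the progress list

	// ... handle the file ...

	q.Done(name) // drop it from the progress list again

	progress, queued := q.Jobs() // [] and ["a.txt", "b.txt"]
	_, _ = progress, queued
}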
|
||||
internal/model/queue_test.go (new file, 200 lines)
@@ -0,0 +1,200 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestJobQueue(t *testing.T) {
|
||||
// Some random actions
|
||||
q := newJobQueue()
|
||||
q.Push("f1")
|
||||
q.Push("f2")
|
||||
q.Push("f3")
|
||||
q.Push("f4")
|
||||
|
||||
progress, queued := q.Jobs()
|
||||
if len(progress) != 0 || len(queued) != 4 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
for i := 1; i < 5; i++ {
|
||||
n, ok := q.Pop()
|
||||
if !ok || n != fmt.Sprintf("f%d", i) {
|
||||
t.Fatal("Wrong element")
|
||||
}
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 1 || len(queued) != 3 {
|
||||
t.Log(progress)
|
||||
t.Log(queued)
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
q.Done(n)
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 0 || len(queued) != 3 {
|
||||
t.Fatal("Wrong length", len(progress), len(queued))
|
||||
}
|
||||
|
||||
q.Push(n)
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 0 || len(queued) != 4 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
q.Done("f5") // Does not exist
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 0 || len(queued) != 4 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
}
|
||||
|
||||
if len(q.progress) > 0 || len(q.queued) != 4 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
for i := 4; i > 0; i-- {
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 4-i || len(queued) != i {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
s := fmt.Sprintf("f%d", i)
|
||||
|
||||
q.BringToFront(s)
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 4-i || len(queued) != i {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
n, ok := q.Pop()
|
||||
if !ok || n != s {
|
||||
t.Fatal("Wrong element")
|
||||
}
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 5-i || len(queued) != i-1 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
q.Done("f5") // Does not exist
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 5-i || len(queued) != i-1 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
}
|
||||
|
||||
_, ok := q.Pop()
|
||||
if len(q.progress) != 4 || ok {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
q.Done("f1")
|
||||
q.Done("f2")
|
||||
q.Done("f3")
|
||||
q.Done("f4")
|
||||
q.Done("f5") // Does not exist
|
||||
|
||||
_, ok = q.Pop()
|
||||
if len(q.progress) != 0 || ok {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 0 || len(queued) != 0 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
q.BringToFront("")
|
||||
q.Done("f5") // Does not exist
|
||||
progress, queued = q.Jobs()
|
||||
if len(progress) != 0 || len(queued) != 0 {
|
||||
t.Fatal("Wrong length")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBringToFront(t *testing.T) {
|
||||
q := newJobQueue()
|
||||
q.Push("f1")
|
||||
q.Push("f2")
|
||||
q.Push("f3")
|
||||
q.Push("f4")
|
||||
|
||||
_, queued := q.Jobs()
|
||||
if !reflect.DeepEqual(queued, []string{"f1", "f2", "f3", "f4"}) {
|
||||
t.Errorf("Incorrect order %v at start", queued)
|
||||
}
|
||||
|
||||
q.BringToFront("f1") // corner case: does nothing
|
||||
|
||||
_, queued = q.Jobs()
|
||||
if !reflect.DeepEqual(queued, []string{"f1", "f2", "f3", "f4"}) {
|
||||
t.Errorf("Incorrect order %v", queued)
|
||||
}
|
||||
|
||||
q.BringToFront("f3")
|
||||
|
||||
_, queued = q.Jobs()
|
||||
if !reflect.DeepEqual(queued, []string{"f3", "f1", "f2", "f4"}) {
|
||||
t.Errorf("Incorrect order %v", queued)
|
||||
}
|
||||
|
||||
q.BringToFront("f2")
|
||||
|
||||
_, queued = q.Jobs()
|
||||
if !reflect.DeepEqual(queued, []string{"f2", "f3", "f1", "f4"}) {
|
||||
t.Errorf("Incorrect order %v", queued)
|
||||
}
|
||||
|
||||
q.BringToFront("f4") // corner case: last element
|
||||
|
||||
_, queued = q.Jobs()
|
||||
if !reflect.DeepEqual(queued, []string{"f4", "f2", "f3", "f1"}) {
|
||||
t.Errorf("Incorrect order %v", queued)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkJobQueueBump(b *testing.B) {
|
||||
files := genFiles(b.N)
|
||||
|
||||
q := newJobQueue()
|
||||
for _, f := range files {
|
||||
q.Push(f.Name)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
q.BringToFront(files[i].Name)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkJobQueuePushPopDone10k(b *testing.B) {
|
||||
files := genFiles(10000)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
q := newJobQueue()
|
||||
for _, f := range files {
|
||||
q.Push(f.Name)
|
||||
}
|
||||
for range files {
|
||||
n, _ := q.Pop()
|
||||
q.Done(n)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -17,6 +17,7 @@ package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -63,7 +64,9 @@ func (s *Scanner) Serve() {
|
||||
return
|
||||
}
|
||||
|
||||
timer.Reset(s.intv)
|
||||
// Sleep a random time between 3/4 and 5/4 of the configured interval.
|
||||
sleepNanos := (s.intv.Nanoseconds()*3 + rand.Int63n(2*s.intv.Nanoseconds())) / 4
|
||||
timer.Reset(time.Duration(sleepNanos) * time.Nanosecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -75,3 +78,9 @@ func (s *Scanner) Stop() {
|
||||
func (s *Scanner) String() string {
|
||||
return fmt.Sprintf("scanner/%s@%p", s.folder, s)
|
||||
}
|
||||
|
||||
func (s *Scanner) BringToFront(string) {}
|
||||
|
||||
func (s *Scanner) Jobs() ([]string, []string) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -110,6 +110,17 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
|
||||
flags := os.O_WRONLY
|
||||
if s.reused == 0 {
|
||||
flags |= os.O_CREATE | os.O_EXCL
|
||||
} else {
|
||||
// With sufficiently bad luck when exiting or crashing, we may have
|
||||
// had time to chmod the temp file to read only state but not yet
|
||||
// moved it to its final name. This leaves us with a read only temp
|
||||
// file that we're going to try to reuse. To handle that, we need to
|
||||
// make sure we have write permissions on the file before opening it.
|
||||
err := os.Chmod(s.tempName, 0644)
|
||||
if err != nil {
|
||||
s.earlyCloseLocked("dst create chmod", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
fd, err := os.OpenFile(s.tempName, flags, 0644)
|
||||
if err != nil {
|
||||
|
||||
@@ -69,6 +69,17 @@ func (f FileInfo) HasPermissionBits() bool {
|
||||
return f.Flags&FlagNoPermBits == 0
|
||||
}
|
||||
|
||||
func (f FileInfo) ToTruncated() FileInfoTruncated {
|
||||
return FileInfoTruncated{
|
||||
Name: f.Name,
|
||||
Flags: f.Flags,
|
||||
Modified: f.Modified,
|
||||
Version: f.Version,
|
||||
LocalVersion: f.LocalVersion,
|
||||
NumBlocks: uint32(len(f.Blocks)),
|
||||
}
|
||||
}
|
||||
|
||||
// Used for unmarshalling a FileInfo structure but skipping the actual block list
|
||||
type FileInfoTruncated struct {
|
||||
Name string // max:8192
|
||||
|
||||
@@ -130,6 +130,24 @@ func Verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func VerifyBuffer(buf []byte, block protocol.BlockInfo) ([]byte, error) {
|
||||
if len(buf) != int(block.Size) {
|
||||
return nil, fmt.Errorf("length mismatch %d != %d", len(buf), block.Size)
|
||||
}
|
||||
hf := sha256.New()
|
||||
_, err := hf.Write(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := hf.Sum(nil)
|
||||
|
||||
if !bytes.Equal(hash, block.Hash) {
|
||||
return hash, fmt.Errorf("hash mismatch %x != %x", hash, block.Hash)
|
||||
}
|
||||
|
||||
return hash, nil
|
||||
}
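A sketch of how a caller might use VerifyBuffer to validate a locally available block before trusting it, falling back to the network otherwise. readLocalCopy and pullFromNetwork are hypothetical helpers, not part of this change:

func blockData(block protocol.BlockInfo) ([]byte, error) {
	buf := readLocalCopy(block) // hypothetical local source of the data
	if _, err := VerifyBuffer(buf, block); err == nil {
		// Length and SHA-256 hash both match; safe to reuse.
		return buf, nil
	}
	// Local copy is missing or corrupt; fetch the block remotely instead.
	return pullFromNetwork(block) // hypothetical fallback
}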
|
||||
|
||||
// BlocksEqual returns whether two slices of blocks are exactly the same hash
|
||||
// and index pair wise.
|
||||
func BlocksEqual(src, tgt []protocol.BlockInfo) bool {
|
||||
|
||||
@@ -143,26 +143,27 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
|
||||
|
||||
// Index wise symlinks are always files, regardless of what the target
|
||||
// is, because symlinks carry their target path as their content.
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
if info.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
var rval error
|
||||
// If the target is a directory, do NOT descend down there.
|
||||
// This will cause files to get tracked, and removing the symlink
|
||||
// will as a result remove files in their real location.
|
||||
// But do not SkipDir if the target is not a directory, as it will
|
||||
// stop scanning the current directory.
|
||||
// If the target is a directory, do NOT descend down there. This
|
||||
// will cause files to get tracked, and removing the symlink will
|
||||
// as a result remove files in their real location. But do not
|
||||
// SkipDir if the target is not a directory, as it will stop
|
||||
// scanning the current directory.
|
||||
if info.IsDir() {
|
||||
rval = filepath.SkipDir
|
||||
}
|
||||
|
||||
// We always rehash symlinks as they have no modtime or
|
||||
// permissions.
|
||||
// We check if they point to the old target by checking that
|
||||
// their existing blocks match with the blocks in the index.
|
||||
// If we don't have a filer or don't support symlinks, skip.
|
||||
if w.CurrentFiler == nil || !symlinks.Supported {
|
||||
// If we don't support symlinks, skip.
|
||||
if !symlinks.Supported {
|
||||
return rval
|
||||
}
|
||||
|
||||
// We always rehash symlinks as they have no modtime or
|
||||
// permissions. We check if they point to the old target by
|
||||
// checking that their existing blocks match with the blocks in
|
||||
// the index.
|
||||
|
||||
target, flags, err := symlinks.Read(p)
|
||||
flags = flags & protocol.SymlinkTypeMask
|
||||
if err != nil {
|
||||
@@ -180,9 +181,17 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
|
||||
return rval
|
||||
}
|
||||
|
||||
cf := w.CurrentFiler.CurrentFile(rn)
|
||||
if !cf.IsDeleted() && cf.IsSymlink() && SymlinkTypeEqual(flags, cf.Flags) && BlocksEqual(cf.Blocks, blocks) {
|
||||
return rval
|
||||
if w.CurrentFiler != nil {
|
||||
// A symlink is "unchanged", if
|
||||
// - it wasn't deleted (because it isn't now)
|
||||
// - it was a symlink
|
||||
// - it wasn't invalid
|
||||
// - the symlink type (file/dir) was the same
|
||||
// - the block list (i.e. hash of target) was the same
|
||||
cf := w.CurrentFiler.CurrentFile(rn)
|
||||
if !cf.IsDeleted() && cf.IsSymlink() && !cf.IsInvalid() && SymlinkTypeEqual(flags, cf.Flags) && BlocksEqual(cf.Blocks, blocks) {
|
||||
return rval
|
||||
}
|
||||
}
|
||||
|
||||
f := protocol.FileInfo{
|
||||
@@ -204,9 +213,15 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
|
||||
|
||||
if info.Mode().IsDir() {
|
||||
if w.CurrentFiler != nil {
|
||||
// A directory is "unchanged", if it
|
||||
// - has the same permissions as previously, unless we are ignoring permissions
|
||||
// - was not marked deleted (since it apparently exists now)
|
||||
// - was a directory previously (not a file or something else)
|
||||
// - was not a symlink (since it's a directory now)
|
||||
// - was not invalid (since it looks valid now)
|
||||
cf := w.CurrentFiler.CurrentFile(rn)
|
||||
permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
|
||||
if !cf.IsDeleted() && cf.IsDirectory() && permUnchanged && !cf.IsSymlink() {
|
||||
if permUnchanged && !cf.IsDeleted() && cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -232,9 +247,18 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
|
||||
|
||||
if info.Mode().IsRegular() {
|
||||
if w.CurrentFiler != nil {
|
||||
// A file is "unchanged", if it
|
||||
// - has the same permissions as previously, unless we are ignoring permissions
|
||||
// - was not marked deleted (since it apparently exists now)
|
||||
// - had the same modification time as it has now
|
||||
// - was not a directory previously (since it's a file now)
|
||||
// - was not a symlink (since it's a file now)
|
||||
// - was not invalid (since it looks valid now)
|
||||
// - has the same size as previously
|
||||
cf := w.CurrentFiler.CurrentFile(rn)
|
||||
permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
|
||||
if !cf.IsDeleted() && cf.Modified == info.ModTime().Unix() && permUnchanged {
|
||||
if permUnchanged && !cf.IsDeleted() && cf.Modified == info.ModTime().Unix() && !cf.IsDirectory() &&
|
||||
!cf.IsSymlink() && !cf.IsInvalid() && cf.Size() == info.Size() {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -58,7 +58,8 @@ func init() {
|
||||
}
|
||||
|
||||
func TestWalkSub(t *testing.T) {
|
||||
ignores, err := ignore.Load("testdata/.stignore", false)
|
||||
ignores := ignore.New(false)
|
||||
err := ignores.Load("testdata/.stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -93,7 +94,8 @@ func TestWalkSub(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWalk(t *testing.T) {
|
||||
ignores, err := ignore.Load("testdata/.stignore", false)
|
||||
ignores := ignore.New(false)
|
||||
err := ignores.Load("testdata/.stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -67,8 +67,38 @@ func To(rel Release) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Returns 1 if a>b, -1 if a<b and 0 if they are equal
|
||||
func CompareVersions(a, b string) int {
|
||||
// A wrapper around actual implementations
|
||||
func ToURL(url string) error {
|
||||
select {
|
||||
case <-upgradeUnlocked:
|
||||
path, err := osext.Executable()
|
||||
if err != nil {
|
||||
upgradeUnlocked <- true
|
||||
return err
|
||||
}
|
||||
err = upgradeToURL(path, url)
|
||||
// If we've failed to upgrade, unlock so that another attempt could be made
|
||||
if err != nil {
|
||||
upgradeUnlocked <- true
|
||||
}
|
||||
return err
|
||||
default:
|
||||
return ErrUpgradeInProgress
|
||||
}
|
||||
}
|
||||
|
||||
type Relation int
|
||||
|
||||
const (
|
||||
MajorOlder Relation = -2 // Older by a major version (x in x.y.z or 0.x.y).
|
||||
Older = -1 // Older by a minor version (y or z in x.y.z, or y in 0.x.y)
|
||||
Equal = 0 // Versions are semantically equal
|
||||
Newer = 1 // Newer by a minor version (y or z in x.y.z, or y in 0.x.y)
|
||||
MajorNewer = 2 // Newer by a major version (x in x.y.z or 0.x.y).
|
||||
)
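A few concrete values for the relations above, taken from the test table further down; note the special case where the leading component is 0, so the second component is treated as the major version:

var exampleRelations = []Relation{
	CompareVersions("0.1.3", "0.1.2"),  // Newer: patch-level difference
	CompareVersions("0.10.0", "0.2.0"), // MajorNewer: with x == 0, y acts as the major version
	CompareVersions("1.0.9", "2.1.2"),  // MajorOlder: true major difference
	CompareVersions("v1.1.2", "1.1.2"), // Equal: a leading v or V is ignored
}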
|
||||
|
||||
// Returns a relation describing how a compares to b.
|
||||
func CompareVersions(a, b string) Relation {
|
||||
arel, apre := versionParts(a)
|
||||
brel, bpre := versionParts(b)
|
||||
|
||||
@@ -80,27 +110,39 @@ func CompareVersions(a, b string) int {
|
||||
// First compare major-minor-patch versions
|
||||
for i := 0; i < minlen; i++ {
|
||||
if arel[i] < brel[i] {
|
||||
return -1
|
||||
if i == 0 {
|
||||
return MajorOlder
|
||||
}
|
||||
if i == 1 && arel[0] == 0 {
|
||||
return MajorOlder
|
||||
}
|
||||
return Older
|
||||
}
|
||||
if arel[i] > brel[i] {
|
||||
return 1
|
||||
if i == 0 {
|
||||
return MajorNewer
|
||||
}
|
||||
if i == 1 && arel[0] == 0 {
|
||||
return MajorNewer
|
||||
}
|
||||
return Newer
|
||||
}
|
||||
}
|
||||
|
||||
// Longer version is newer, when the preceding parts are equal
|
||||
if len(arel) < len(brel) {
|
||||
return -1
|
||||
return Older
|
||||
}
|
||||
if len(arel) > len(brel) {
|
||||
return 1
|
||||
return Newer
|
||||
}
|
||||
|
||||
// Prerelease versions are older, if the versions are the same
|
||||
if len(apre) == 0 && len(bpre) > 0 {
|
||||
return 1
|
||||
return Newer
|
||||
}
|
||||
if len(apre) > 0 && len(bpre) == 0 {
|
||||
return -1
|
||||
return Older
|
||||
}
|
||||
|
||||
minlen = len(apre)
|
||||
@@ -115,24 +157,24 @@ func CompareVersions(a, b string) int {
|
||||
switch bv := bpre[i].(type) {
|
||||
case int:
|
||||
if av < bv {
|
||||
return -1
|
||||
return Older
|
||||
}
|
||||
if av > bv {
|
||||
return 1
|
||||
return Newer
|
||||
}
|
||||
case string:
|
||||
return -1
|
||||
return Older
|
||||
}
|
||||
case string:
|
||||
switch bv := bpre[i].(type) {
|
||||
case int:
|
||||
return 1
|
||||
return Newer
|
||||
case string:
|
||||
if av < bv {
|
||||
return -1
|
||||
return Older
|
||||
}
|
||||
if av > bv {
|
||||
return 1
|
||||
return Newer
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -140,14 +182,14 @@ func CompareVersions(a, b string) int {
|
||||
|
||||
// If all else is equal, longer prerelease string is newer
|
||||
if len(apre) < len(bpre) {
|
||||
return -1
|
||||
return Older
|
||||
}
|
||||
if len(apre) > len(bpre) {
|
||||
return 1
|
||||
return Newer
|
||||
}
|
||||
|
||||
// Looks like they're actually the same
|
||||
return 0
|
||||
return Equal
|
||||
}
|
||||
|
||||
// Split a version into parts.
|
||||
|
||||
@@ -13,13 +13,16 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build !windows,!noupgrade
|
||||
// +build !noupgrade
|
||||
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -28,43 +31,10 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Upgrade to the given release, saving the previous binary with a ".old" extension.
|
||||
func upgradeTo(path string, rel Release) error {
|
||||
expectedRelease := releaseName(rel.Tag)
|
||||
if debug {
|
||||
l.Debugf("expected release asset %q", expectedRelease)
|
||||
}
|
||||
for _, asset := range rel.Assets {
|
||||
if debug {
|
||||
l.Debugln("considering release", asset)
|
||||
}
|
||||
if strings.HasPrefix(asset.Name, expectedRelease) {
|
||||
if strings.HasSuffix(asset.Name, ".tar.gz") {
|
||||
fname, err := readTarGZ(asset.URL, filepath.Dir(path))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
old := path + ".old"
|
||||
err = os.Rename(path, old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Rename(fname, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ErrVersionUnknown
|
||||
}
|
||||
|
||||
// Returns the latest release, including prereleases or not depending on the argument
|
||||
func LatestRelease(prerelease bool) (Release, error) {
|
||||
resp, err := http.Get("https://api.github.com/repos/syncthing/syncthing/releases?per_page=10")
|
||||
@@ -97,7 +67,47 @@ func LatestRelease(prerelease bool) (Release, error) {
|
||||
return Release{}, ErrVersionUnknown
|
||||
}
|
||||
|
||||
func readTarGZ(url string, dir string) (string, error) {
|
||||
// Upgrade to the given release, saving the previous binary with a ".old" extension.
|
||||
func upgradeTo(binary string, rel Release) error {
|
||||
expectedRelease := releaseName(rel.Tag)
|
||||
if debug {
|
||||
l.Debugf("expected release asset %q", expectedRelease)
|
||||
}
|
||||
for _, asset := range rel.Assets {
|
||||
assetName := path.Base(asset.Name)
|
||||
if debug {
|
||||
l.Debugln("considering release", assetName)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(assetName, expectedRelease) {
|
||||
return upgradeToURL(binary, asset.URL)
|
||||
}
|
||||
}
|
||||
|
||||
return ErrVersionUnknown
|
||||
}
|
||||
|
||||
// Upgrade to the binary at the given URL, saving the previous binary with a ".old" extension.
|
||||
func upgradeToURL(binary string, url string) error {
|
||||
fname, err := readRelease(filepath.Dir(binary), url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
old := binary + ".old"
|
||||
_ = os.Remove(old)
|
||||
err = os.Rename(binary, old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Rename(fname, binary)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readRelease(dir, url string) (string, error) {
|
||||
if debug {
|
||||
l.Debugf("loading %q", url)
|
||||
}
|
||||
@@ -114,17 +124,26 @@ func readTarGZ(url string, dir string) (string, error) {
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
gr, err := gzip.NewReader(resp.Body)
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
return readZip(dir, resp.Body)
|
||||
default:
|
||||
return readTarGz(dir, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
func readTarGz(dir string, r io.Reader) (string, error) {
|
||||
gr, err := gzip.NewReader(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
tr := tar.NewReader(gr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var tempName, actualMD5, expectedMD5 string
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
fileLoop:
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
@@ -134,26 +153,177 @@ func readTarGZ(url string, dir string) (string, error) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
shortName := path.Base(hdr.Name)
|
||||
|
||||
if debug {
|
||||
l.Debugf("considering file %q", hdr.Name)
|
||||
l.Debugf("considering file %q", shortName)
|
||||
}
|
||||
|
||||
if path.Base(hdr.Name) == "syncthing" {
|
||||
of, err := ioutil.TempFile(dir, "syncthing")
|
||||
switch shortName {
|
||||
case "syncthing":
|
||||
if debug {
|
||||
l.Debugln("writing and hashing binary")
|
||||
}
|
||||
tempName, actualMD5, err = writeBinary(dir, tr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
io.Copy(of, tr)
|
||||
err = of.Close()
|
||||
|
||||
if expectedMD5 != "" {
|
||||
// We're done
|
||||
break fileLoop
|
||||
}
|
||||
|
||||
case "syncthing.md5":
|
||||
bs, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
os.Remove(of.Name())
|
||||
return "", err
|
||||
}
|
||||
|
||||
os.Chmod(of.Name(), os.FileMode(hdr.Mode))
|
||||
return of.Name(), nil
|
||||
expectedMD5 = strings.TrimSpace(string(bs))
|
||||
if debug {
|
||||
l.Debugln("expected md5 is", actualMD5)
|
||||
}
|
||||
|
||||
if actualMD5 != "" {
|
||||
// We're done
|
||||
break fileLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if tempName != "" && actualMD5 != "" {
|
||||
// We found and saved something to disk.
|
||||
if expectedMD5 == "" {
|
||||
if debug {
|
||||
l.Debugln("there is no md5 to compare with")
|
||||
}
|
||||
} else if actualMD5 != expectedMD5 {
|
||||
// There was an md5 file included in the archive, and it doesn't
|
||||
// match what we just wrote to disk.
|
||||
return "", fmt.Errorf("incorrect MD5 checksum")
|
||||
}
|
||||
return tempName, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no upgrade found")
|
||||
}
|
||||
|
||||
func readZip(dir string, r io.Reader) (string, error) {
|
||||
body, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
archive, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var tempName, actualMD5, expectedMD5 string
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
fileLoop:
|
||||
for _, file := range archive.File {
|
||||
shortName := path.Base(file.Name)
|
||||
|
||||
if debug {
|
||||
l.Debugf("considering file %q", shortName)
|
||||
}
|
||||
|
||||
switch shortName {
|
||||
case "syncthing.exe":
|
||||
if debug {
|
||||
l.Debugln("writing and hashing binary")
|
||||
}
|
||||
|
||||
inFile, err := file.Open()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
tempName, actualMD5, err = writeBinary(dir, inFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if expectedMD5 != "" {
|
||||
// We're done
|
||||
break fileLoop
|
||||
}
|
||||
|
||||
case "syncthing.exe.md5":
|
||||
inFile, err := file.Open()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
bs, err := ioutil.ReadAll(inFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
expectedMD5 = strings.TrimSpace(string(bs))
|
||||
if debug {
|
||||
l.Debugln("expected md5 is", actualMD5)
|
||||
}
|
||||
|
||||
if actualMD5 != "" {
|
||||
// We're done
|
||||
break fileLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if tempName != "" && actualMD5 != "" {
|
||||
// We found and saved something to disk.
|
||||
if expectedMD5 == "" {
|
||||
if debug {
|
||||
l.Debugln("there is no md5 to compare with")
|
||||
}
|
||||
} else if actualMD5 != expectedMD5 {
|
||||
// There was an md5 file included in the archive, and it doesn't
|
||||
// match what we just wrote to disk.
|
||||
return "", fmt.Errorf("incorrect MD5 checksum")
|
||||
}
|
||||
return tempName, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("No upgrade found")
|
||||
}
|
||||
|
||||
func writeBinary(dir string, inFile io.Reader) (filename, md5sum string, err error) {
|
||||
outFile, err := ioutil.TempFile(dir, "syncthing")
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// Write the binary both to a temporary file and to the MD5 hasher.
|
||||
|
||||
h := md5.New()
|
||||
mw := io.MultiWriter(h, outFile)
|
||||
|
||||
_, err = io.Copy(mw, inFile)
|
||||
if err != nil {
|
||||
os.Remove(outFile.Name())
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
err = outFile.Close()
|
||||
if err != nil {
|
||||
os.Remove(outFile.Name())
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
err = os.Chmod(outFile.Name(), os.FileMode(0755))
|
||||
if err != nil {
|
||||
os.Remove(outFile.Name())
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
actualMD5 := fmt.Sprintf("%x", h.Sum(nil))
|
||||
if debug {
|
||||
l.Debugln("actual md5 is", actualMD5)
|
||||
}
|
||||
|
||||
return outFile.Name(), actualMD5, nil
|
||||
}
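The core of writeBinary is the hash-while-writing pattern: a MultiWriter sends every byte both to the destination file and to the MD5 state, so the checksum comes for free without a second read pass. The same idea in isolation (copyWithMD5 is an illustrative name, not part of the change; it assumes the crypto/md5, fmt, io and os imports already present in this file):

func copyWithMD5(dst *os.File, src io.Reader) (md5sum string, err error) {
	h := md5.New()
	mw := io.MultiWriter(h, dst) // every write goes to both the file and the hasher
	if _, err := io.Copy(mw, src); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}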
|
||||
|
||||
@@ -19,33 +19,37 @@ import "testing"
|
||||
|
||||
var testcases = []struct {
|
||||
a, b string
|
||||
r int
|
||||
r Relation
|
||||
}{
|
||||
{"0.1.2", "0.1.2", 0},
|
||||
{"0.1.3", "0.1.2", 1},
|
||||
{"0.1.1", "0.1.2", -1},
|
||||
{"0.3.0", "0.1.2", 1},
|
||||
{"0.0.9", "0.1.2", -1},
|
||||
{"1.1.2", "0.1.2", 1},
|
||||
{"0.1.2", "1.1.2", -1},
|
||||
{"0.1.10", "0.1.9", 1},
|
||||
{"0.10.0", "0.2.0", 1},
|
||||
{"30.10.0", "4.9.0", 1},
|
||||
{"0.9.0-beta7", "0.9.0-beta6", 1},
|
||||
{"1.0.0-alpha", "1.0.0-alpha.1", -1},
|
||||
{"1.0.0-alpha.1", "1.0.0-alpha.beta", -1},
|
||||
{"1.0.0-alpha.beta", "1.0.0-beta", -1},
|
||||
{"1.0.0-beta", "1.0.0-beta.2", -1},
|
||||
{"1.0.0-beta.2", "1.0.0-beta.11", -1},
|
||||
{"1.0.0-beta.11", "1.0.0-rc.1", -1},
|
||||
{"1.0.0-rc.1", "1.0.0", -1},
|
||||
{"1.0.0+45", "1.0.0+23-dev-foo", 0},
|
||||
{"1.0.0-beta.23+45", "1.0.0-beta.23+23-dev-foo", 0},
|
||||
{"1.0.0-beta.3+99", "1.0.0-beta.24+0", -1},
|
||||
{"0.1.2", "0.1.2", Equal},
|
||||
{"0.1.3", "0.1.2", Newer},
|
||||
{"0.1.1", "0.1.2", Older},
|
||||
{"0.3.0", "0.1.2", MajorNewer},
|
||||
{"0.0.9", "0.1.2", MajorOlder},
|
||||
{"1.3.0", "1.1.2", Newer},
|
||||
{"1.0.9", "1.1.2", Older},
|
||||
{"2.3.0", "1.1.2", MajorNewer},
|
||||
{"1.0.9", "2.1.2", MajorOlder},
|
||||
{"1.1.2", "0.1.2", MajorNewer},
|
||||
{"0.1.2", "1.1.2", MajorOlder},
|
||||
{"0.1.10", "0.1.9", Newer},
|
||||
{"0.10.0", "0.2.0", MajorNewer},
|
||||
{"30.10.0", "4.9.0", MajorNewer},
|
||||
{"0.9.0-beta7", "0.9.0-beta6", Newer},
|
||||
{"1.0.0-alpha", "1.0.0-alpha.1", Older},
|
||||
{"1.0.0-alpha.1", "1.0.0-alpha.beta", Older},
|
||||
{"1.0.0-alpha.beta", "1.0.0-beta", Older},
|
||||
{"1.0.0-beta", "1.0.0-beta.2", Older},
|
||||
{"1.0.0-beta.2", "1.0.0-beta.11", Older},
|
||||
{"1.0.0-beta.11", "1.0.0-rc.1", Older},
|
||||
{"1.0.0-rc.1", "1.0.0", Older},
|
||||
{"1.0.0+45", "1.0.0+23-dev-foo", Equal},
|
||||
{"1.0.0-beta.23+45", "1.0.0-beta.23+23-dev-foo", Equal},
|
||||
{"1.0.0-beta.3+99", "1.0.0-beta.24+0", Older},
|
||||
|
||||
{"v1.1.2", "1.1.2", 0},
|
||||
{"v1.1.2", "V1.1.2", 0},
|
||||
{"1.1.2", "V1.1.2", 0},
|
||||
{"v1.1.2", "1.1.2", Equal},
|
||||
{"v1.1.2", "V1.1.2", Equal},
|
||||
{"1.1.2", "V1.1.2", Equal},
|
||||
}
|
||||
|
||||
func TestCompareVersions(t *testing.T) {
|
||||
|
||||
@@ -17,7 +17,11 @@
|
||||
|
||||
package upgrade
|
||||
|
||||
func upgradeTo(path string, rel Release) error {
|
||||
func upgradeTo(binary string, rel Release) error {
|
||||
return ErrUpgradeUnsupported
|
||||
}
|
||||
|
||||
func upgradeToURL(binary, url string) error {
|
||||
return ErrUpgradeUnsupported
|
||||
}
|
||||
|
||||
|
||||
@@ -1,169 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build windows,!noupgrade
|
||||
|
||||
package upgrade
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Upgrade to the given release, saving the previous binary with a ".old" extension.
|
||||
func upgradeTo(path string, rel Release) error {
|
||||
expectedRelease := releaseName(rel.Tag)
|
||||
if debug {
|
||||
l.Debugf("expected release asset %q", expectedRelease)
|
||||
}
|
||||
for _, asset := range rel.Assets {
|
||||
if debug {
|
||||
l.Debugln("considering release", asset)
|
||||
}
|
||||
if strings.HasPrefix(asset.Name, expectedRelease) {
|
||||
if strings.HasSuffix(asset.Name, ".zip") {
|
||||
fname, err := readZip(asset.URL, filepath.Dir(path))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
old := path + ".old"
|
||||
|
||||
os.Remove(old)
|
||||
err = os.Rename(path, old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Rename(fname, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ErrVersionUnknown
|
||||
}
|
||||
|
||||
// Returns the latest release, including prereleases or not depending on the argument
|
||||
func LatestRelease(prerelease bool) (Release, error) {
|
||||
resp, err := http.Get("https://api.github.com/repos/syncthing/syncthing/releases?per_page=10")
|
||||
if err != nil {
|
||||
return Release{}, err
|
||||
}
|
||||
if resp.StatusCode > 299 {
|
||||
return Release{}, fmt.Errorf("API call returned HTTP error: %s", resp.Status)
|
||||
}
|
||||
|
||||
var rels []Release
|
||||
json.NewDecoder(resp.Body).Decode(&rels)
|
||||
resp.Body.Close()
|
||||
|
||||
if len(rels) == 0 {
|
||||
return Release{}, ErrVersionUnknown
|
||||
}
|
||||
|
||||
if prerelease {
|
||||
// We are a beta version. Use the latest.
|
||||
return rels[0], nil
|
||||
} else {
|
||||
// We are a regular release. Only consider non-prerelease versions for upgrade.
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease {
|
||||
return rel, nil
|
||||
}
|
||||
}
|
||||
return Release{}, ErrVersionUnknown
|
||||
}
|
||||
}
|
||||
|
||||
func readZip(url, dir string) (string, error) {
|
||||
if debug {
|
||||
l.Debugf("loading %q", url)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
req.Header.Add("Accept", "application/octet-stream")
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
archive, err := zip.NewReader(bytes.NewReader(body), resp.ContentLength)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for _, file := range archive.File {
|
||||
|
||||
if debug {
|
||||
l.Debugf("considering file %q", file.Name)
|
||||
}
|
||||
|
||||
if path.Base(file.Name) == "syncthing.exe" {
|
||||
infile, err := file.Open()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
outfile, err := ioutil.TempFile(dir, "syncthing")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, err = io.Copy(outfile, infile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = infile.Close()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = outfile.Close()
|
||||
if err != nil {
|
||||
os.Remove(outfile.Name())
|
||||
return "", err
|
||||
}
|
||||
|
||||
os.Chmod(outfile.Name(), file.Mode())
|
||||
return outfile.Name(), nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("No upgrade found")
|
||||
}
|
||||
@@ -158,7 +158,7 @@ Mx: %d
|
||||
var results []IGD
|
||||
resultChannel := make(chan IGD, 8)
|
||||
|
||||
socket, err := net.ListenUDP("udp4", &net.UDPAddr{})
|
||||
socket, err := net.ListenMulticastUDP("udp4", nil, &net.UDPAddr{IP: ssdp.IP})
|
||||
if err != nil {
|
||||
l.Infoln(err)
|
||||
return results
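The switch from ListenUDP to ListenMulticastUDP matters because a plain unicast socket does not receive traffic sent to the SSDP multicast group; joining the group is what makes unsolicited NOTIFY announcements visible. A minimal sketch, assuming the standard SSDP group 239.255.255.250:1900 (error handling left to the caller):

func listenSSDP() (*net.UDPConn, error) {
	// Join the well-known SSDP multicast group; a nil interface lets the
	// stack pick a default. Traffic sent to the group is then delivered
	// to this socket, not only direct unicast replies.
	group := &net.UDPAddr{IP: net.IPv4(239, 255, 255, 250), Port: 1900}
	return net.ListenMulticastUDP("udp4", nil, group)
}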
|
||||
|
||||
@@ -232,7 +232,9 @@ func (v Staggered) expire(versions []string) {
|
||||
|
||||
versionTime, err := time.Parse(TimeFormat, filenameTag(file))
|
||||
if err != nil {
|
||||
l.Infof("Versioner: file name %q is invalid: %v", file, err)
|
||||
if debug {
|
||||
l.Debugf("Versioner: file name %q is invalid: %v", file, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
age := int64(time.Since(versionTime).Seconds())
|
||||
|
||||
@@ -1,138 +0,0 @@
|
||||
Device Discovery Protocol v2
|
||||
==========================
|
||||
|
||||
Mode of Operation
|
||||
-----------------
|
||||
|
||||
There are two distinct modes: "local discovery", performed on a LAN
|
||||
segment (broadcast domain) and "global discovery" performed over the
|
||||
Internet in general with the support of a well known server.
|
||||
|
||||
Local discovery does not use Query packets. Instead Announcement packets
|
||||
are sent periodically and each participating device keeps a table of the
|
||||
announcements it has seen. On multihomed hosts the announcement packets
|
||||
should be sent on each interface on which syncthing will accept connections.
|
||||
|
||||
It is recommended that local discovery Announcement packets are sent on
|
||||
a 30 to 60 second interval, possibly with forced transmissions when a
|
||||
previously unknown device is discovered.
|
||||
|
||||
Global discovery is made possible by periodically updating a global server
|
||||
using Announcement packets identical to those transmitted for local
|
||||
discovery. The device performing discovery will transmit a Query packet to
|
||||
the global server and expect an Announcement packet in response. In case
|
||||
the global server has no knowledge of the queried device ID, there will be
|
||||
no response. A timeout is to be used to determine lookup failure.
|
||||
|
||||
There is no message to unregister from the global server; instead
|
||||
registrations are forgotten after 60 minutes. It is recommended to
|
||||
send Announcement packets to the global server on a 30 minute interval.
|
||||
|
||||
Packet Formats
|
||||
--------------
|
||||
|
||||
The Announcement packet has the following structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Magic (0x9D79BC39) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Device Structure \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of Extra Devices |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Zero or more Device Structures \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
Device Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of ID |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ ID (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of Addresses |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Zero or more Address Structures \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
Address Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of IP |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ IP (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Port | 0x0000 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
This is the XDR encoding of:
|
||||
|
||||
struct Announcement {
|
||||
unsigned int Magic;
|
||||
Device This;
|
||||
Device Extra<>;
|
||||
}
|
||||
|
||||
struct Device {
|
||||
string ID<>;
|
||||
Address Addresses<>;
|
||||
}
|
||||
|
||||
struct Address {
|
||||
opaque IP<>;
|
||||
unsigned short Port;
|
||||
}
|
||||
|
||||
The first Device structure contains information about the sending device.
|
||||
The following zero or more Extra devices contain information about other
|
||||
devices known to the sending device.
|
||||
|
||||
In the Address structure, the IP field can be of three different kinds:
|
||||
|
||||
- A zero length indicates that the IP address should be taken from the
|
||||
source address of the announcement packet, be it IPv4 or IPv6. The
|
||||
source address must be a valid unicast address. This is only valid
|
||||
in the first device structure, not in the list of extras.
|
||||
|
||||
- A four byte length indicates that the address is an IPv4 unicast
|
||||
address.
|
||||
|
||||
- A sixteen byte length indicates that the address is an IPv6 unicast
|
||||
address.
|
||||
|
||||
The Query packet has the following structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Magic Number (0x2CA856F5) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Device ID |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Device ID (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
This is the XDR encoding of:
|
||||
|
||||
struct Announcement {
|
||||
unsigned int MagicNumber;
|
||||
string DeviceID<>;
|
||||
}
|
||||
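The XDR layout above maps directly onto a byte encoder; the following Go sketch
builds a Query packet with encoding/binary. It is a minimal illustration, not
code from the reference implementation, and the helper names are invented for
this example.

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // xdrString appends an XDR string: a 32-bit big-endian length, the bytes
    // themselves, then zero padding up to a multiple of four bytes.
    func xdrString(buf *bytes.Buffer, s string) {
        binary.Write(buf, binary.BigEndian, uint32(len(s)))
        buf.WriteString(s)
        if pad := (4 - len(s)%4) % 4; pad > 0 {
            buf.Write(make([]byte, pad))
        }
    }

    // encodeQuery produces a Query packet: the magic number followed by the
    // device ID as an XDR string.
    func encodeQuery(deviceID string) []byte {
        var buf bytes.Buffer
        binary.Write(&buf, binary.BigEndian, uint32(0x2CA856F5))
        xdrString(&buf, deviceID)
        return buf.Bytes()
    }

    func main() {
        pkt := encodeQuery("I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA")
        fmt.Printf("% x\n", pkt)
    }
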
@@ -1,723 +0,0 @@
Block Exchange Protocol v1
==========================

Introduction and Definitions
----------------------------

BEP is used between two or more _devices_ thus forming a _cluster_. Each
device has one or more _folders_ of files described by the _local
model_, containing metadata and block hashes. The local model is sent to
the other devices in the cluster. The union of all files in the local
models, with files selected for highest change version, forms the
_global model_. Each device strives to get its folders in sync with
the global model by requesting missing or outdated blocks from the other
devices in the cluster.

File data is described and transferred in units of _blocks_, each being
128 KiB (131072 bytes) in size.

The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and
"OPTIONAL" in this document are to be interpreted as described in
RFC 2119.

Transport and Authentication
----------------------------

BEP is deployed as the highest level in a protocol stack, with the lower
level protocols providing encryption and authentication.

    +-----------------------------+
    |   Block Exchange Protocol   |
    |-----------------------------|
    | Encryption & Auth (TLS 1.2) |
    |-----------------------------|
    |             TCP             |
    |-----------------------------|
    v             ...             v

The encryption and authentication layer SHALL use TLS 1.2 or a higher
revision. A strong cipher suite SHALL be used, with "strong cipher
suite" being defined as being without known weaknesses and providing
Perfect Forward Secrecy (PFS). Examples of strong cipher suites are
given at the end of this document. This is not to be taken as an
exhaustive list of allowed cipher suites but represents best practices
at the time of writing.

The exact nature of the authentication is up to the application, however
it SHALL be based on the TLS certificate presented at the start of the
connection. Possibilities include certificates signed by a common
trusted CA, preshared certificates, preshared certificate fingerprints
or certificate pinning combined with some out of band first
verification. The reference implementation uses preshared certificate
fingerprints (SHA-256) referred to as "Device IDs".

There is no required order or synchronization among BEP messages except
as noted per message type - any message type may be sent at any time and
the sender need not await a response to one message before sending
another. Responses MUST however be sent in the same order as the
requests are received.

The underlying transport protocol MUST be TCP.

Messages
--------

Every message starts with one 32 bit word indicating the message
version, type and ID, followed by the length of the message. The header
is in network byte order, i.e. big endian.

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |  Ver  |       Message ID      |      Type     |  Reserved   |C|
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             Length                            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

For BEP v1 the Version field is set to zero. Future versions with
incompatible message formats will increment the Version field. A message
with an unknown version is a protocol error and MUST result in the
connection being terminated. A client supporting multiple versions MAY
retry with a different protocol version upon disconnection.

The Message ID is set to a unique value for each transmitted request
message. In response messages it is set to the Message ID of the
corresponding request message. The uniqueness requirement implies that
no more than 4096 messages may be outstanding at any given moment. The
ordering requirement implies that a response to a given message ID also
means that all preceding messages have been received, specifically those
which do not otherwise demand a response. Hence their message ID:s may
be reused.

The Type field indicates the type of data following the message header
and is one of the integers defined below. A message of an unknown type
is a protocol error and MUST result in the connection being terminated.

The Compression bit "C" indicates the compression used for the message.

For C=1:

* The Length field contains the length, in bytes, of the
  compressed message data plus a four byte uncompressed length field.

* The compressed message data is preceded by a 32 bit field denoting
  the length of the uncompressed message.

* The message data is compressed using the LZ4 format and algorithm
  described in https://code.google.com/p/lz4/.

For C=0:

* The Length field contains the length, in bytes, of the
  uncompressed message data.

* The message is not compressed.

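To make the header word concrete, here is a small illustrative Go sketch that
packs and unpacks the 32-bit header according to the diagram above (4 bits
version, 12 bits message ID, 8 bits type, the compression flag in the least
significant bit). The function names are invented for this example; the bit
positions follow from reading the big-endian diagram left to right.

    package main

    import "fmt"

    // packHeader packs version (4 bits), message ID (12 bits), type (8 bits)
    // and the compression flag into the 32-bit header word. The 7 reserved
    // bits between Type and C are left as zero.
    func packHeader(version, msgID, msgType uint32, compressed bool) uint32 {
        w := version<<28 | (msgID&0xFFF)<<16 | (msgType&0xFF)<<8
        if compressed {
            w |= 1
        }
        return w
    }

    // unpackHeader reverses packHeader.
    func unpackHeader(w uint32) (version, msgID, msgType uint32, compressed bool) {
        return w >> 28, (w >> 16) & 0xFFF, (w >> 8) & 0xFF, w&1 == 1
    }

    func main() {
        w := packHeader(0, 42, 2, false) // a Request (Type = 2) with message ID 42
        v, id, t, c := unpackHeader(w)
        fmt.Printf("word=%08x ver=%d id=%d type=%d compressed=%v\n", w, v, id, t, c)
    }
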
All data within the message (post decompression, if compression is
in use) MUST be in XDR (RFC 1014) encoding. All fields shorter than 32
bits and all variable length data MUST be padded to a multiple of 32
bits. The actual data types in use by BEP, in XDR naming convention, are
the following:

- (unsigned) int   -- (unsigned) 32 bit integer
- (unsigned) hyper -- (unsigned) 64 bit integer
- opaque<>         -- variable length opaque data
- string<>         -- variable length string

The transmitted length of string and opaque data is the length of actual
data, excluding any added padding. The encoding of opaque<> and string<>
is identical, the distinction being solely one of interpretation.
Opaque data should not be interpreted but can be compared bytewise to
other opaque data. All strings MUST use the Unicode UTF-8 encoding,
normalization form C.

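The padding rule is the part most easily gotten wrong; the sketch below shows
one way to read a single XDR opaque/string field in Go and discard the pad
bytes. It is an illustration only (a real implementation would also enforce the
length limits discussed later).

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "io"
    )

    // readXDRBytes reads a 32-bit big-endian length, then that many data
    // bytes, then up to three padding bytes so the total is a multiple of
    // four. The padding is read and discarded. No upper bound is enforced
    // on the length here.
    func readXDRBytes(r io.Reader) ([]byte, error) {
        var n uint32
        if err := binary.Read(r, binary.BigEndian, &n); err != nil {
            return nil, err
        }
        data := make([]byte, n)
        if _, err := io.ReadFull(r, data); err != nil {
            return nil, err
        }
        if pad := (4 - n%4) % 4; pad > 0 {
            if _, err := io.CopyN(io.Discard, r, int64(pad)); err != nil {
                return nil, err
            }
        }
        return data, nil
    }

    func main() {
        // "default" is 7 bytes, so one zero pad byte follows on the wire.
        wire := []byte{0, 0, 0, 7, 'd', 'e', 'f', 'a', 'u', 'l', 't', 0}
        s, err := readXDRBytes(bytes.NewReader(wire))
        fmt.Println(string(s), err)
    }
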
### Cluster Config (Type = 0)

This informational message provides information about the cluster
configuration as it pertains to the current connection. A Cluster Config
message MUST be the first message sent on a BEP connection. Additional
Cluster Config messages MUST NOT be sent after the initial exchange.

#### Graphical Representation

    ClusterConfigMessage Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                     Length of ClientName                      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                  ClientName (variable length)                 \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                    Length of ClientVersion                    |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                ClientVersion (variable length)                \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Number of Folders                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Zero or more Folder Structures                 \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Number of Options                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Zero or more Option Structures                 \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Folder Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of ID                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     ID (variable length)                      \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Number of Devices                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Zero or more Device Structures                 \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Device Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of ID                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     ID (variable length)                      \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             Flags                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                  Max Local Version (64 bits)                  +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Option Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Key                         |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     Key (variable length)                     \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Length of Value                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                    Value (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The ClientName and ClientVersion fields identify the implementation. The
values SHOULD be simple strings identifying the implementation name, as
a user would expect to see it, and the version string in the same
manner. An example ClientName is "syncthing" and an example
ClientVersion is "v0.7.2". The ClientVersion field SHOULD follow the
patterns laid out in the [Semantic Versioning](http://semver.org/)
standard.

The Folders field lists all folders that will be synchronized
over the current connection. Each folder has a list of participating
Devices. Each device has an associated Flags field to indicate the sharing
mode of that device for the folder in question. See the discussion on
Sharing Modes.

The Device Flags field contains the following single bit flags:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          Reserved         |Pri|        Reserved         |I|R|T|
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

- Bit 31 ("T", Trusted) is set for devices that participate in trusted
  mode.

- Bit 30 ("R", Read Only) is set for devices that participate in read
  only mode.

- Bit 29 ("I", Introducer) is set for devices that are trusted as cluster
  introducers.

- Bits 16 through 28 are reserved and MUST be set to zero.

- Bits 14-15 ("Pri") indicate the device's upload priority for this
  folder. Possible values are:

  - 00: The default. Normal priority.

  - 01: High priority. Other devices SHOULD favour requesting files from
    this device over devices with normal or low priority.

  - 10: Low priority. Other devices SHOULD avoid requesting files from
    this device when they are available from other devices.

  - 11: Sharing disabled. Other devices SHOULD NOT request files from
    this device.

- Bits 0 through 13 are reserved and MUST be set to zero.

Exactly one of the T and R bits MUST be set.

The per device Max Local Version field contains the highest local file
version number of the files already known to be in the index sent by
this device. If nothing is known about the index of a given device, this
field MUST be set to zero. When receiving a Cluster Config message with
a non-zero Max Local Version for the local device ID, a device MAY elect to
send an Index Update message containing only files with higher local
version numbers in place of the initial Index message.

The Options field contains option values to be used in an implementation
specific manner. The options list is conceptually a map of Key => Value
items, although it is transmitted in the form of a list of (Key, Value)
pairs, both of string type. Key ID:s are implementation specific. An
implementation MUST ignore unknown keys. An implementation MAY impose
limits on the length of keys and values. The options list may be used to
inform devices of relevant local configuration options such as rate
limiting or to make recommendations about request parallelism, device
priorities, etc. An empty options list is valid for devices not having any
such information to share. Devices MAY NOT make any assumptions about
peers acting in a specific manner as a result of sent options.

#### XDR

    struct ClusterConfigMessage {
        string ClientName<>;
        string ClientVersion<>;
        Folder Folders<>;
        Option Options<>;
    }

    struct Folder {
        string ID<>;
        Device Devices<>;
    }

    struct Device {
        string ID<>;
        unsigned int Flags;
        unsigned hyper MaxLocalVersion;
    }

    struct Option {
        string Key<>;
        string Value<>;
    }

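Read as bit masks over the XDR unsigned int (diagram bit 31 being the least
significant bit), the Flags semantics above can be expressed as in the
following illustrative Go sketch; the constant names are invented here and do
not claim to match the reference implementation.

    package main

    import "fmt"

    // Flag bits for the per-device Flags field, with diagram bit 31 mapped to
    // the least significant bit of the 32-bit word.
    const (
        FlagTrusted    uint32 = 1 << 0 // "T"
        FlagReadOnly   uint32 = 1 << 1 // "R"
        FlagIntroducer uint32 = 1 << 2 // "I"

        priorityShift = 16 // diagram bits 14-15
        priorityMask  = 0x3
    )

    // priority extracts the two-bit upload priority
    // (0 normal, 1 high, 2 low, 3 sharing disabled).
    func priority(flags uint32) uint32 {
        return (flags >> priorityShift) & priorityMask
    }

    // validShareMode reports whether exactly one of the T and R bits is set,
    // as the specification requires.
    func validShareMode(flags uint32) bool {
        t := flags&FlagTrusted != 0
        r := flags&FlagReadOnly != 0
        return t != r
    }

    func main() {
        flags := FlagReadOnly | FlagIntroducer | 1<<priorityShift // read only, introducer, high priority
        fmt.Println(priority(flags), validShareMode(flags))
    }
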
### Index (Type = 1) and Index Update (Type = 6)

The Index and Index Update messages define the contents of the sender's
folder. An Index message represents the full contents of the
folder and thus supersedes any previous index. An Index Update
amends an existing index with new information, not affecting any entries
not included in the message. An Index Update MAY NOT be sent unless
preceded by an Index, unless a non-zero Max Local Version has been
announced for the given folder by the peer device.

An Index or Index Update message MUST be sent for each folder
included in the Cluster Config message, and MUST be sent before any
other message referring to that folder. A device with no data to
advertise MUST send an empty Index message (a file list of zero length).
If the folder contents change from non-empty to empty, an empty
Index message MUST be sent. There is no response to the Index message.

#### Graphical Representation

    IndexMessage Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Length of Folder                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                   Folder (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Number of Files                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \               Zero or more FileInfo Structures                \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    FileInfo Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Name                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                    Name (variable length)                     \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             Flags                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                       Modified (64 bits)                      +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                       Version (64 bits)                       +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                    Local Version (64 bits)                    +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Number of Blocks                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \               Zero or more BlockInfo Structures               \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    BlockInfo Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                              Size                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Hash                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                    Hash (variable length)                     \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The Folder field identifies the folder that the index message
pertains to. For single folder implementations the device MAY send an
empty folder ID or use the string "default".

The Name is the file name path relative to the folder root. Like all
strings in BEP, the Name is always in UTF-8 NFC regardless of operating
system or file system specific conventions. The Name field uses the
slash character ("/") as path separator, regardless of the
implementation's operating system conventions. The combination of
Folder and Name uniquely identifies each file in a cluster.

The Version field is the value of a cluster wide Lamport clock
indicating when the change was detected. The clock ticks on every
detected and received change. The combination of Folder, Name and
Version uniquely identifies the contents of a file at a given point in
time.

The Local Version field is the value of a device local monotonic clock at
the time of last local database update to a file. The clock ticks on
every local database update.

The Flags field is made up of the following single bit flags:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |           Reserved          |U|S|P|I|D|   Unix Perm. & Mode   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

- The lower 12 bits hold the common Unix permission and mode bits. An
  implementation MAY ignore or interpret these as is suitable on the host
  operating system.

- Bit 19 ("D") is set when the file has been deleted. The block list
  SHALL be of length zero and the modification time indicates the time
  of deletion or, if the time of deletion is not reliably determinable,
  the last known modification time.

- Bit 18 ("I") is set when the file is invalid and unavailable for
  synchronization. A peer MAY set this bit to indicate that it can
  temporarily not serve data for the file.

- Bit 17 ("P") is set when there is no permission information for the
  file. This is the case when it originates on a non-permission-
  supporting file system. Changes to only permission bits SHOULD be
  disregarded on files with this bit set. The permissions bits MUST be
  set to the octal value 0666.

- Bit 16 ("S") is set when the file is a symbolic link. The block list
  SHALL consist of one or more blocks, since the target of the symlink is
  stored within the blocks of the file.

- Bit 15 ("U") is set when the symbolic link's target does not exist.
  On systems where symbolic links have types, this bit being set means
  that the default file symlink SHALL be used. If this bit is unset,
  bit 19 will decide the type of symlink to be created.

- Bits 0 through 14 are reserved for future use and SHALL be set to
  zero.

The hash algorithm is implied by the Hash length. Currently, the hash
MUST be 32 bytes long and computed by SHA256.

The Modified time is expressed as the number of seconds since the Unix
Epoch (1970-01-01 00:00:00 UTC).

In the rare case that a file is simultaneously and independently
modified by two devices in the same cluster and thus ends up with the same
Version number after modification, the Modified field is used as a tie
breaker (higher being better), followed by the hash values of the file
blocks (lower being better).

The Blocks list contains the size and hash for each block in the file.
Each block represents a 128 KiB slice of the file, except for the last
block which may represent a smaller amount of data.

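The tie-breaking rule amounts to a three-level comparison; the Go sketch below
illustrates the ordering only. The struct fields mirror the XDR that follows,
the block comparison simply takes the first differing hash, and behaviour when
the two entries are entirely identical is left out.

    package main

    import (
        "bytes"
        "fmt"
    )

    type BlockInfo struct {
        Size uint32
        Hash []byte
    }

    type FileInfo struct {
        Name     string
        Version  uint64
        Modified int64
        Blocks   []BlockInfo
    }

    // wins reports whether a should replace b in the global model: higher
    // Version wins; on equal Version higher Modified wins; on equal Modified
    // the lower block hashes win, compared block by block.
    func wins(a, b FileInfo) bool {
        if a.Version != b.Version {
            return a.Version > b.Version
        }
        if a.Modified != b.Modified {
            return a.Modified > b.Modified
        }
        for i := 0; i < len(a.Blocks) && i < len(b.Blocks); i++ {
            if c := bytes.Compare(a.Blocks[i].Hash, b.Blocks[i].Hash); c != 0 {
                return c < 0 // lower hash is "better"
            }
        }
        return false
    }

    func main() {
        a := FileInfo{Version: 7, Modified: 1418256000}
        b := FileInfo{Version: 7, Modified: 1418252400}
        fmt.Println(wins(a, b)) // true: same Version, later Modified
    }
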
#### XDR

    struct IndexMessage {
        string Folder<>;
        FileInfo Files<>;
    }

    struct FileInfo {
        string Name<>;
        unsigned int Flags;
        hyper Modified;
        unsigned hyper Version;
        unsigned hyper LocalVer;
        BlockInfo Blocks<>;
    }

    struct BlockInfo {
        unsigned int Size;
        opaque Hash<>;
    }

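Since the block size and hash algorithm are fixed, a sender's block list can be
computed by hashing 128 KiB slices of the file; the following Go sketch
illustrates this with crypto/sha256. How an empty file is represented is not
addressed here.

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "os"
    )

    const BlockSize = 128 << 10 // 128 KiB

    type BlockInfo struct {
        Size uint32
        Hash []byte // 32 bytes, SHA-256
    }

    // blocks hashes r in 128 KiB slices; the final block may be shorter.
    func blocks(r io.Reader) ([]BlockInfo, error) {
        var bs []BlockInfo
        buf := make([]byte, BlockSize)
        for {
            n, err := io.ReadFull(r, buf)
            if n > 0 {
                h := sha256.Sum256(buf[:n])
                bs = append(bs, BlockInfo{Size: uint32(n), Hash: h[:]})
            }
            if err == io.EOF || err == io.ErrUnexpectedEOF {
                return bs, nil
            }
            if err != nil {
                return nil, err
            }
        }
    }

    func main() {
        fd, err := os.Open("somefile")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer fd.Close()
        bs, err := blocks(fd)
        fmt.Println(len(bs), "blocks", err)
    }
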
### Request (Type = 2)

The Request message expresses the desire to receive a data block
corresponding to a part of a certain file in the peer's folder.

#### Graphical Representation

    RequestMessage Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Length of Folder                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                   Folder (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Name                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                    Name (variable length)                     \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                        Offset (64 bits)                       +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                              Size                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The Folder and Name fields are as documented for the Index message.
The Offset and Size fields specify the region of the file to be
transferred. This SHOULD equate to exactly one block as seen in an Index
message.

#### XDR

    struct RequestMessage {
        string Folder<>;
        string Name<>;
        unsigned hyper Offset;
        unsigned int Size;
    }

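A requester can derive the Request fields for block i straight from the
corresponding FileInfo entry, as in this small illustrative sketch (struct and
function names are invented for the example).

    package main

    import "fmt"

    const BlockSize = 128 << 10

    type RequestMessage struct {
        Folder string
        Name   string
        Offset uint64
        Size   uint32
    }

    // requestForBlock builds a Request for block index i of the named file;
    // blockSizes comes from the corresponding FileInfo block list, so the
    // last entry may be smaller than 128 KiB.
    func requestForBlock(folder, name string, i int, blockSizes []uint32) RequestMessage {
        return RequestMessage{
            Folder: folder,
            Name:   name,
            Offset: uint64(i) * BlockSize,
            Size:   blockSizes[i],
        }
    }

    func main() {
        sizes := []uint32{BlockSize, BlockSize, 4711}
        fmt.Printf("%+v\n", requestForBlock("default", "docs/spec.txt", 2, sizes))
    }
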
### Response (Type = 3)

The Response message is sent in response to a Request message.

#### Graphical Representation

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Data                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                    Data (variable length)                     \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The Data field contains either a full 128 KiB block, a shorter block in
the case of the last block in a file, or is empty (zero length) if the
requested block is not available.

#### XDR

    struct ResponseMessage {
        opaque Data<>
    }

### Ping (Type = 4)

The Ping message is used to determine that a connection is alive, and to
keep connections alive through state tracking network elements such as
firewalls and NAT gateways. The Ping message has no contents.

### Pong (Type = 5)

The Pong message is sent in response to a Ping. The Pong message has no
contents, but copies the Message ID from the Ping.

### Close (Type = 7)

The Close message MAY be sent to indicate that the connection will be
torn down due to an error condition. A Close message MUST NOT be
followed by further messages.

#### Graphical Representation

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Length of Reason                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                   Reason (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The Reason field contains a human readable description of the error
condition.

    struct CloseMessage {
        string Reason<1024>;
    }

Sharing Modes
-------------

### Trusted

Trusted mode is the default sharing mode. Updates are exchanged in both
directions.

    +------------+  Updates   /---------\
    |            | ---------> /         \
    |   Device   |            | Cluster |
    |            | <--------- \         /
    +------------+  Updates   \---------/

### Read Only

In read only mode, a device does not synchronize the local folder to
the cluster, but publishes changes to its local folder contents as
usual. The local folder can be seen as a "master copy" that is never
affected by the actions of other cluster devices.

    +------------+  Updates   /---------\
    |            | ---------> /         \
    |   Device   |            | Cluster |
    |            |            \         /
    +------------+            \---------/

Message Limits
--------------

An implementation MAY impose reasonable limits on the length of message
fields to aid robustness in the face of corruption or broken
implementations. These limits, if imposed, SHOULD NOT be more
restrictive than the following:

### Index and Index Update Messages

- Folder: 64 bytes
- Number of Files: 10.000.000
- Name: 1024 bytes
- Number of Blocks: 1.000.000
- Hash: 64 bytes

### Request Messages

- Folder: 64 bytes
- Name: 1024 bytes

### Response Messages

- Data: 256 KiB

### Options Message

- Number of Options: 64
- Key: 64 bytes
- Value: 1024 bytes

Example Exchange
----------------

          A            B
     1. Index->      <-Index
     2. Request->
     3. Request->
     4. Request->
     5. Request->
     6.              <-Response
     7.              <-Response
     8.              <-Response
     9.              <-Response
    10. Index Update->
        ...
    11. Ping->
    12.              <-Pong

The connection is established and at (1) both peers send Index records.
The Index records are received and both peers recompute their knowledge
of the data in the cluster. In this example, peer A has four missing or
outdated blocks. At 2 through 5 peer A sends requests for these blocks.
The requests are received by peer B, who retrieves the data from the
folder and transmits Response records (6 through 9). Device A updates
its folder contents and transmits an Index Update message (10).
Both peers enter idle state after 10. At some later time (11), peer A
determines that it has not seen data from B for some time and sends a
Ping request. A response is sent at 12.

Examples of Strong Cipher Suites
--------------------------------

* 0x009F DHE-RSA-AES256-GCM-SHA384 (TLSv1.2 DH RSA AESGCM(256) AEAD)
* 0x006B DHE-RSA-AES256-SHA256 (TLSv1.2 DH RSA AES(256) SHA256)
* 0xC030 ECDHE-RSA-AES256-GCM-SHA384 (TLSv1.2 ECDH RSA AESGCM(256) AEAD)
* 0xC028 ECDHE-RSA-AES256-SHA384 (TLSv1.2 ECDH RSA AES(256) SHA384)
* 0x009E DHE-RSA-AES128-GCM-SHA256 (TLSv1.2 DH RSA AESGCM(128) AEAD)
* 0x0067 DHE-RSA-AES128-SHA256 (TLSv1.2 DH RSA AES(128) SHA256)
* 0xC02F ECDHE-RSA-AES128-GCM-SHA256 (TLSv1.2 ECDH RSA AESGCM(128) AEAD)
* 0xC027 ECDHE-RSA-AES128-SHA256 (TLSv1.2 ECDH RSA AES(128) SHA256)

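As a practical note on the transport requirements above, the following Go
sketch restricts a crypto/tls configuration to TLS 1.2 and two of the listed
ECDHE suites that Go exposes, and computes the SHA-256 fingerprint of a peer
certificate. The raw hex fingerprint shown is illustrative; it is not the
reference implementation's device ID encoding.

    package main

    import (
        "crypto/sha256"
        "crypto/tls"
        "crypto/x509"
        "fmt"
    )

    // tlsConfig returns a configuration limited to TLS 1.2 and two of the
    // ECDHE suites from the list above.
    func tlsConfig(cert tls.Certificate) *tls.Config {
        return &tls.Config{
            Certificates: []tls.Certificate{cert},
            MinVersion:   tls.VersionTLS12,
            CipherSuites: []uint16{
                tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xC030
                tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xC02F
            },
        }
    }

    // fingerprint computes the SHA-256 digest of the peer's DER-encoded
    // certificate, which is what a "Device ID" is derived from.
    func fingerprint(cert *x509.Certificate) string {
        sum := sha256.Sum256(cert.Raw)
        return fmt.Sprintf("%x", sum)
    }

    func main() {
        cfg := tlsConfig(tls.Certificate{})
        fmt.Println(len(cfg.CipherSuites), "cipher suites allowed")
    }
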
2
protocol/README.md
Normal file
2
protocol/README.md
Normal file
@@ -0,0 +1,2 @@
Syncthing uses the protocols defined in
https://github.com/syncthing/protocol/.
@@ -1,7 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'

go test -tags integration -v -short
./test-merge.sh
./test-delupd.sh
@@ -15,11 +15,12 @@

// +build integration

package integration_test
package integration

import (
	"os"
	"os/exec"
	"path/filepath"
	"testing"
	"time"
)
@@ -54,6 +55,14 @@ func TestCLIReset(t *testing.T) {
			t.Errorf("%s still exists", dir)
		}
	}

	// Clean up

	dirs, err = filepath.Glob("*.syncthing-reset-*")
	if err != nil {
		t.Fatal(err)
	}
	removeAll(dirs...)
}

func TestCLIGenerate(t *testing.T) {
@@ -1,23 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID3jCCAkigAwIBAgIBADALBgkqhkiG9w0BAQUwFDESMBAGA1UEAxMJc3luY3Ro
|
||||
aW5nMB4XDTE0MDMxNDA3MDA1M1oXDTQ5MTIzMTIzNTk1OVowFDESMBAGA1UEAxMJ
|
||||
c3luY3RoaW5nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEArDOcd5ft
|
||||
R7SnalxF1ckU3lDQpgfMIPhFDU//4dvdSSFevrMuVDTbUYhyCfGtg/g+F5TmKhZg
|
||||
E2peYhllITupz5MP7OHGaO2GHf2XnUDD4QUO3E+KVAUw7dyFSwy09esqApVLzH3+
|
||||
ov+QXyyzmRWPsJe9u18BHU1Hob/RmBhS9m2CAJgzN6EJ8KGjApiW3iR8lD/hjVyi
|
||||
IVde8IRD6qYHEJYiPJuziTVcQpCblVYxTz3ScmmT190/O9UvViIpcOPQdwgOdewP
|
||||
NNMK35c9Edt0AH5flYp6jgrja9NkLQJ3+KOiro6yl9IUS5w87GMxI8qzI8SgCAZZ
|
||||
pYSoLbu1FJPvxV4p5eHwuprBCwmFYZWw6Y7rqH0sN52C+3TeObJCMNP9ilPadqRI
|
||||
+G0Q99TCaloeR022x33r/8D8SIn3FP35zrlFM+DvqlxoS6glbNb/Bj3p9vN0XONO
|
||||
RCuynOGe9F/4h/DaNnrbrRWqJOxBsZTsbbcJaKATfWU/Z9GcC+pUpPRhAgMBAAGj
|
||||
PzA9MA4GA1UdDwEB/wQEAwIAoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
|
||||
AwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQUDggGBAFF8dklGoC43fMrUZfb4
|
||||
6areRWG8quO6cSX6ATzRQVJ8WJ5VcC7OJk8/FeiYA+wcvUJ/1Zm/VHMYugtOz5M8
|
||||
CrWAF1r9D3Xfe5D8qfrEOYG2XjxD2nFHCnkbY4fP+SMSuXaDs7ixQnzw0UFh1wsV
|
||||
9Jy/QrgXFAIFZtu1Nz+rrvoAgw24gkDhY3557MbmYfmfPsJ8cw+WJ845sxGMPFF2
|
||||
c+5EN0jiSm0AwZK11BMJda36ke829UZctDkopbGEg1peydDR5LiyhiTAPtWn7uT/
|
||||
PkzHYLuaECAkVbWC3bZLocMGOP6F1pG+BMr00NJgVy05ASQzi4FPjcZQNNY8s69R
|
||||
ZgoCIBaJZq3ti1EsZQ1H0Ynm2c2NMVKdj4czoy8a9ZC+DCuhG7EV5Foh20VhCWgA
|
||||
RfPhlHVJthuimsWBx39X85gjSBR017uk0AxOJa6pzh/b/RPCRtUfX8EArInS3XCf
|
||||
RvRtdrnBZNI3tiREopZGt0SzgDZUs4uDVBUX8HnHzyFJrg==
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,32 +0,0 @@
|
||||
<configuration version="2">
|
||||
<folder id="default" directory="s1" ro="true" ignorePerms="false">
|
||||
<device id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></device>
|
||||
<device id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></device>
|
||||
<versioning></versioning>
|
||||
</folder>
|
||||
<device id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</device>
|
||||
<device id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</device>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8081</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>127.0.0.1:22001</listenAddress>
|
||||
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<parallelRequests>16</parallelRequests>
|
||||
<maxSendKbps>500</maxSendKbps>
|
||||
<rescanIntervalS>10</rescanIntervalS>
|
||||
<reconnectionIntervalS>5</reconnectionIntervalS>
|
||||
<maxChangeKbps>10000</maxChangeKbps>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>false</upnpEnabled>
|
||||
<urAccepted>-1</urAccepted>
|
||||
</options>
|
||||
</configuration>
|
||||
@@ -1,23 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID5TCCAk+gAwIBAgIIHozuBlC9RPswCwYJKoZIhvcNAQELMBQxEjAQBgNVBAMT
|
||||
CXN5bmN0aGluZzAeFw0xNDA5MTUwNTE0MDBaFw00OTEyMzEyMzU5NTlaMBQxEjAQ
|
||||
BgNVBAMTCXN5bmN0aGluZzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGB
|
||||
ALxVRAh6EWr1Vum1EDafEGNTxWYcsMFssl4lc18cgQ5SRFtdkDi2IxELqiAPT0K9
|
||||
11DkD563o7MIdCvB/XCA2hEaapfKKMiba/yH6tSnE7Fud5Wq8AtsAZV5weCjhGrc
|
||||
s2YL8Nm57tiJC4W0K7txB8Ob5Q9gxvSpzLak2eh/fqhNoO6DxvUF/iE3VsdhKbKb
|
||||
epXsTEvR1T3Qx0MUam7Dkz1eT4kXpwPGwLKfXY+BOxybxHAKM12qKV4R5Ebs/JZB
|
||||
5GajZqd6XpwgldIg7KHWpWWSSjH4ojnP4XmEy5WZA33t0o+xkrg+EcQbzSEFhRMD
|
||||
KT7fKT9+Evf+xB0j8FJ1+kL2ajTVfOPx3Fyg+YAPK9OVI6fr9OMJD6pLxZMLaRi9
|
||||
R8IHEgOLo4vLRNLCmJWVIqfMtUlsVkXLM+alDo0maPUesPDTXeCuMG9TpDyHFQI5
|
||||
6H4OYD3/9DOEOYMXXCCpPqpjk0CpS4GAtI6qXFG3dUY3EdfOcrKvnKi/DoRrpdzp
|
||||
7wIDAQABoz8wPTAOBgNVHQ8BAf8EBAMCAKAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
|
||||
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4IBgQB4MajA42Wp
|
||||
L1xFMb4Ba7+aW3o3j0N7wqJjHuU5HtbpwdYgv3tgmn+Ju6rvy6BwAQt8OsVNbai9
|
||||
ptwsHbR6epoXPzAGyqY/9A74PJr6GraYJS148zuEgCir2Q0TfzbPtd4rDYN3LE81
|
||||
STwtBvcaQsuAukQbeTQJkayobLQH8ve34BVX43XHUchEPLeZqAec5/btVyR9xwWz
|
||||
rwJyEfCtx1/YxkPRBPs1cBQOK0Edn9nBKF114ogpWKlKt1Ou1HZzsMS2usWPdme1
|
||||
IATpCR9i5/ZRIC7vhSK3se7IRaMrmOXc0kpgmbi9pr+Tg2sgJSC1Bvyfh3ReC53F
|
||||
R6WJrmqut+SqCJlefQh+6On4MaW9hnrv62OTkyBKZZ3ogCQ+FMNFs+jRJHpdtxs4
|
||||
ZVkuv1NZu8nB7NsY8i6vHKkOE6XCejg6HZL4R70iXoDDgo9E6A8TwTN7TZz7SdWh
|
||||
+Bh3GGJAMubzuHysmEkKBSYHYlrW8GuUwCbLCl+HcQnfOtN7ffJwJb4=
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,39 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG4wIBAAKCAYEAvFVECHoRavVW6bUQNp8QY1PFZhywwWyyXiVzXxyBDlJEW12Q
|
||||
OLYjEQuqIA9PQr3XUOQPnrejswh0K8H9cIDaERpql8ooyJtr/Ifq1KcTsW53larw
|
||||
C2wBlXnB4KOEatyzZgvw2bnu2IkLhbQru3EHw5vlD2DG9KnMtqTZ6H9+qE2g7oPG
|
||||
9QX+ITdWx2Epspt6lexMS9HVPdDHQxRqbsOTPV5PiRenA8bAsp9dj4E7HJvEcAoz
|
||||
XaopXhHkRuz8lkHkZqNmp3penCCV0iDsodalZZJKMfiiOc/heYTLlZkDfe3Sj7GS
|
||||
uD4RxBvNIQWFEwMpPt8pP34S9/7EHSPwUnX6QvZqNNV84/HcXKD5gA8r05Ujp+v0
|
||||
4wkPqkvFkwtpGL1HwgcSA4uji8tE0sKYlZUip8y1SWxWRcsz5qUOjSZo9R6w8NNd
|
||||
4K4wb1OkPIcVAjnofg5gPf/0M4Q5gxdcIKk+qmOTQKlLgYC0jqpcUbd1RjcR185y
|
||||
sq+cqL8OhGul3OnvAgMBAAECggGAZTGzgpKEdWIqNx1Q/uhtF9HVSU61MtlC5g9d
|
||||
dIeOWLGfhTA65B4JrYkE+oD/Z6812IMSWYf276XlNfXgRekWQwZcq/6190R7u48U
|
||||
gPrdPANNQiA9JwX7u+NWZ2u1JO49fuF/op2jVrocdNUggnDzaQmFBMRNYv0xwBnH
|
||||
9IM8/RXpGP+5kcKMkDB58luk2hFsxs3XGQ5AdByQVNzNa4KuxNS+C72nwgGzXMcA
|
||||
sLERoAeaf1Eb1IIwBBm8/NctyVbRhKvIQVjdLaHtckiJipXgS5byxW62Zd+Cxx/i
|
||||
WBQi0dW38VWfHLDkZd/YXMwGxNrl2hTwDNoatI5KkBLD+fCkXJWajfHR7ZKXcVAT
|
||||
4vgApnd6k3ii92//BB3Y8xD643Lg+iBWUnMBU04JOml5KKTHEZYqGM6nRqeW7Sg6
|
||||
8Xp1PY/XKJ+HViDMJGRECOtg2ZBuQSj8R2mA5fh9nQftngM78shcNFdaiIkR80t2
|
||||
hVsKaya1SvpjFJWmPSnGYnqDtIJRAoHBAOPnx9GaZs4VbMb47QXEi3nanUehm/P0
|
||||
vkiqdSDFu/Dy4iWVe2Fi5uVZHoGovvZBZOqrl5Qmdx7s18kd5bQj4OdX2z/o4Ude
|
||||
uOK7///x7pM0hc2GOkFdMmXYwyukqjdi4VvN/0acW0wmpj5W3MUNENGRP2VuIbHa
|
||||
yjKFIwdOwPIjIslFFTrKHMgOhb1KKErqUItMoeKeN93cXgQJcAcsIg72UYblSP7P
|
||||
bfpSrGBb25Dh6Wrwo1YgO3N+oEhGKEXAfQKBwQDTjKpFHBmqBVJzdsaTr4BXlAk9
|
||||
UlRVBYQQZUDsYaysoRIut+Uhq4NWpyCyM/bUD5tvrPltCeIGsDGi3NI2w8DjvXJr
|
||||
LDSPEEnnuwiSCt0nxn0PKuX5R8jS4109uy2GKXiNUzoP5yf7mh4w8yjGJpGYI49B
|
||||
WMW7goK01+ANmopzzXlNHw466DQ+IXNBM5PMkrl6z18PobU+OOENzucRu546anU8
|
||||
DxJfxWUjoqBHEoaSnkZSM/i/utjr+L0gF8dBa9sCgcEAnqTVX36PWZ1oXwkgVQd/
|
||||
347iNN62ZJdVbdfaOLnsHcm0ylzHyf7Co5vptG/2ngzfZsuTdDlialCL1R/Oqhrf
|
||||
j6qEoHRHfRresFYV2eBbJnVFPs/U9XMehe7hzRuOsYdPQEyhClIE63lr97EXdMOn
|
||||
lXn6G20SX2/hmFE9FPUpMmRq7pf8MzRF3KzfQ+i/K4b4Ej+B4PIqCXJAr6ayKQv7
|
||||
mVa1YaVxro5ODBZIj7rhmHTputtPl8BQIhFfGXBc0FExAoHAIDBjKCjibtBof1Ev
|
||||
XgFyUeEglsgUNOul8Ki3fEBQeeP4VEt+/eSPE3xSqUrm39WQHSoAueqrDcF5jAJ1
|
||||
qgeXLhABfPU4+hvMYwo+f5pPlGHLXad1XrzhfdVCtsXoY2WkBj0HtKvDlbEZrvEQ
|
||||
3zW3KaMfhR3w2Fs/cCz41pkRQBWfw3BaRfRXHq0QUHd8ocAhoOI04LgGT/VvqR42
|
||||
Yqhdpx3TwNO6RABRJ17zbF0RRPX4VUG7M9FGeIFcpal4lCfJAoHAabh+TWnKN8Vu
|
||||
rva9Kjl2x7+6Nkw/8CsDxUG3plhcbLnXW68JXig/OuhpLGAnMHqvcHFtvIbhTpI7
|
||||
g+Hga6wrV9vLsLMrI2zU+NvbfmqjoNy42eGYB77nUI8p7VRSBzAEzHM+PTJzyjNK
|
||||
E62uZN9MoU00hwyAcFZXSXxdsICwMajfwRVsdrbqb4MYy9KCVu/PC5U9vnKM3Gxw
|
||||
UMsAjtcHgyuOJXgFxcHHhjRxknp2pZsDFOD/zq4XiQKaf9fcqR1L
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -1,39 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG5AIBAAKCAYEArDOcd5ftR7SnalxF1ckU3lDQpgfMIPhFDU//4dvdSSFevrMu
|
||||
VDTbUYhyCfGtg/g+F5TmKhZgE2peYhllITupz5MP7OHGaO2GHf2XnUDD4QUO3E+K
|
||||
VAUw7dyFSwy09esqApVLzH3+ov+QXyyzmRWPsJe9u18BHU1Hob/RmBhS9m2CAJgz
|
||||
N6EJ8KGjApiW3iR8lD/hjVyiIVde8IRD6qYHEJYiPJuziTVcQpCblVYxTz3ScmmT
|
||||
190/O9UvViIpcOPQdwgOdewPNNMK35c9Edt0AH5flYp6jgrja9NkLQJ3+KOiro6y
|
||||
l9IUS5w87GMxI8qzI8SgCAZZpYSoLbu1FJPvxV4p5eHwuprBCwmFYZWw6Y7rqH0s
|
||||
N52C+3TeObJCMNP9ilPadqRI+G0Q99TCaloeR022x33r/8D8SIn3FP35zrlFM+Dv
|
||||
qlxoS6glbNb/Bj3p9vN0XONORCuynOGe9F/4h/DaNnrbrRWqJOxBsZTsbbcJaKAT
|
||||
fWU/Z9GcC+pUpPRhAgMBAAECggGAL8+Unc/c3Y/W+7zq1tShqqgdhjub/XtxEKUp
|
||||
kngNFITjXWc6cb7LNfQAVap4Vq/R7ZI15XGY80sRMYODhJqgJzXZshdtkyx/lEwY
|
||||
kFyvBgb1fU3IRlO6phAYIiJBDBZi75ysEvbYgEEcwJAUvWgzIQDAeQmDsbMHNG2h
|
||||
r+zw++Kjua6IaeWYcOsv60Safsr6m96wrSMPENrFTVor0TaPt5c3okRIsMvT9ddY
|
||||
mzn3Lt0nVQTjO4f+SoqCPhP2FZXqksfKlZlKlr6BLxXGt6b49OrLSXM5eQXIcIZn
|
||||
ZDRsO24X5z8156qPgM9cA8oNEjuSdnArUTreBOsTwNoSpf24Qadsv/uTZlaHM19V
|
||||
q6zQvkjH3ERcOpixmg48TKdIj8cPYxezvcbNqSbZmdyQuaVlgDbUxwYI8A4IhhWl
|
||||
6xhwpX3qPDgw/QHIEngFIWfiIfCk11EPY0SN4cGO6f1rLYug8kqxMPuIQ5Jz9Hhx
|
||||
eFSRnr/fWoJcVYG6bMDKn9YWObQBAoHBAM8NahsLbjl8mdT43LH1Od1tDmDch+0Y
|
||||
JM7TgiIN/GM3piZSpGMOFqToLAqvY+Gf3l4sPgNs10cqdPAEpMk8MJ/IXGmbKq38
|
||||
iVmMaqHTQorCxyUbc54q9AbFU4HKv//F6ZN6K1wSaJt2RBeZpYI+MyBXr5baFiBZ
|
||||
ddXtXlqoEcCFyNR0DhlXrlZPs+cnyM2ZDp++lpn9Wfy+zkv36+NWpAkXVnARjxdF
|
||||
l6M+L7OlurYAWiyJE4uHUjawAM82i5+w8QKBwQDU6RCN6/AMmVrYqPy+7QcnAq67
|
||||
tPDv25gzVExeMKLBAMoz1TkMS+jIF1NMp3cYg5GbLqvx8Qd27fjFbWe/GPeZvlgL
|
||||
qdQI/T8J60dHAySMeOFOB2QWXhI1kwh0b2X0SDkTgfdJBKGdrKVcLTuLyVE24exu
|
||||
yRc8cXpYwBtVkXNBYFd7XEM+tC4b1khO23OJXHJUen9+hgsmn8/zUjASAoq3+Zly
|
||||
J+OHwwXcDcTFLeok3kX3A9NuqIV/Fa9DOGYlenECgcEAvO1onDTZ5uqjE4nhFyDE
|
||||
JB+WtxuDi/wz2eV1IM3SNlZY7S8LgLciQmb3iOhxIzdVGGkWTNnLtcwv17LlCho5
|
||||
5BJXAKXtU8TTLzrJMdArL6J7RIi//tsCwAreH9h5SVG1yDP5zJGfkftgNoikVSuc
|
||||
Sy63sdZdyjbXJtTo+5/QUvPARNuA4e73zRn89jd/Kts2VNz7XpemvND+PKOEQnSU
|
||||
SRdab/gVsQ53RyU/MZVPwTKhFXIeu3pGsk/27RzAWn6BAoHBAMIRYwaKDffd/SHJ
|
||||
/v+lHEThvBXa21c26ae36hhc6q1UI/tVGrfrpVZldIdFilgs7RbvVsmksvIj/gMv
|
||||
M0bL4j0gdC7FcUF0XPaUoBbJdZIZSP0P3ZpJyv1MdYN0WxFsl6IBcD79WrdXPC8m
|
||||
B8XmDgIhsppU77onkaa+DOxVNSJdR8BpG95W7ERxcN14SPrm6ku4kOfqFNXzC+C1
|
||||
hJ2V9Y22lLiqRUplaLzpS/eTX36VoF6E/T87mtt5D5UNHoaA8QKBwH5sRqZXoatU
|
||||
X+vw1MHU5eptMwG7LXR0gw2xmvG3cCN4hbnnBp5YaXlWPiIMmaWhpvschgBIo1TP
|
||||
qGWUpMEETGES18NenLBym+tWIXlfuyZH3B4NUi4kItiZaKb09LzmTjFvzdfQzun4
|
||||
HzIeigTNBDHdS0rdicNIn83QLZ4pJaOZJHq79+mFYkp+9It7UUoWsws6DGl/qX8o
|
||||
0cj4NmJB6QiJa1QCzrGkaajbtThbFoQal9Twk2h3jHgJzX3FbwCpLw==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -1,23 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID3jCCAkigAwIBAgIBADALBgkqhkiG9w0BAQUwFDESMBAGA1UEAxMJc3luY3Ro
|
||||
aW5nMB4XDTE0MDMxNDA3MDEwNFoXDTQ5MTIzMTIzNTk1OVowFDESMBAGA1UEAxMJ
|
||||
c3luY3RoaW5nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAsIV0syyR
|
||||
O56BvIOro4bIqB6iFJsNc4zX8MiM4QPTWgqGlYwsKSVmNppTdlACZCJIqyzoscrF
|
||||
qJPto8/e2Fc3oaTdEREGIs7cmc7LSXfot/mAgPpy71SVWtb7xNmXro2JJPZjRBCS
|
||||
pl1ulPug+/8w7fSKQdLMjh4Hp2YlwVBfVu0bYEEW+7Vl9PZVTv+NbTqXYvYVc9R6
|
||||
QFIbN/njWAuo2wpjJlY7vqNnSYZyskAaaAC17fFJkVQKKblTeTk1C9PxTmVTB1j9
|
||||
yOoD3+V/6IrTYKXdTHGJ1MqdieTHj1jHXe5TOeSB+Hjgq4tr25mPfQ4ixXqDqIcx
|
||||
5390DAjInuSKNUJ5pqiFrVe9eIDmySZCg5/JIL3c8phy6g1bxiJN14+Dn0om/0+9
|
||||
UrHK8LVzWMmtFRVycWVUYmARWFY3EE10k0RXU2HtzmjfnBkRrl13b0ExizlA1qJ3
|
||||
3ngxF5rNEDSMpwf4og5uYOjRUPYuvCL9XtQKr254NFO/sg/qqPV4hFWTAgMBAAGj
|
||||
PzA9MA4GA1UdDwEB/wQEAwIAoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
|
||||
AwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQUDggGBAAZSU08zAzyuGqKqqU/c
|
||||
Pr+xML8oKiJqko5pb3ETDQC+uVw+qUHwiGYsvHI1cih4ix+tKvf+Yaiizp/35VkP
|
||||
qwls3a4ljq1Ww0Sf7J87QX0DumYpBGOfoCpmV4MacyjLhpLRKRGZHwIbOeFsmEu9
|
||||
oO38co+GvDy4CiAt3tuOdjBNs0gNOAdTTxqgm97raB9oXeg2i4Fb4MCT4UBUdXLM
|
||||
ZNLCifza+PWkBxmfBORvlKGeJBruLpXHBWnWEigZSLXIFjn3JJUy4fKd+/JMp063
|
||||
8Pjo6zUOckBCH8Lv90vzfrmdlQK555jWpcebN0l9neESEXw19l0OlqkJGVTr6JKq
|
||||
w5kjiL4eP7kpKKwCezhDSX3jf4P36wdF8MpOUBxVqfM+Oh5tHIcZctnurhYV7rXs
|
||||
jR70FMqWjHBmwemsXGrObNVt8c75yB+19U6DAulr2RhRw5GD74U1znP00eGZ8TJf
|
||||
RN1FYilUPCawMYeQoB8WIn9So7zIm0MfOl4KXNWDX02+Kw==
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,34 +0,0 @@
|
||||
<configuration version="2">
|
||||
<folder id="default" directory="s2" ro="false" ignorePerms="false">
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
|
||||
<versioning type="simple">
|
||||
<param key="keep" val="5"></param>
|
||||
</versioning>
|
||||
</folder>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="f1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="f2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</device>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8082</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>127.0.0.1:22002</listenAddress>
|
||||
<globalAnnounceServer>announce.syncthing.net:22026</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<parallelRequests>16</parallelRequests>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<rescanIntervalS>15</rescanIntervalS>
|
||||
<reconnectionIntervalS>5</reconnectionIntervalS>
|
||||
<maxChangeKbps>10000</maxChangeKbps>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>false</upnpEnabled>
|
||||
<urAccepted>-1</urAccepted>
|
||||
</options>
|
||||
</configuration>
|
||||
@@ -1,23 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID5TCCAk+gAwIBAgIIORN2TOZWOjwwCwYJKoZIhvcNAQELMBQxEjAQBgNVBAMT
|
||||
CXN5bmN0aGluZzAeFw0xNDA5MTUwNTE0MDJaFw00OTEyMzEyMzU5NTlaMBQxEjAQ
|
||||
BgNVBAMTCXN5bmN0aGluZzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGB
|
||||
ANizpGVpWvlhkwRddVkVHo68JFSRG74ostSECMUGZTt3vpoUFuCYBUsh138KOPUH
|
||||
qN59mlgGoCbvH0RVN54UDTjw32n3r5c/Pzm+ky842O1gkrexizxrODtVL/SQnp35
|
||||
nPsiU86GUZ8IJIfiqYmcjd+exHmi3iU0f+m39pHQuxjeHpSNBJ9VsV30pCouFcSu
|
||||
i2PI1VEsPWG/w9gL19al7JcOPhxrLQBcUzG8rTnXTFq0cxGaZre4NsVacQPC2IcE
|
||||
Jdis6M6b/eTCbHquPSVs+gd9NT8ubJy6CzX5w3FGODTsQAtW1lpI7qgOQ3DRT6+n
|
||||
LY3SP792le1ZEs/C0wd/xIP0gkM+PD6ZW070xmdDTbgSj48JvJIn1Xo3+XVTSVJ0
|
||||
bzEbGMfGwxstZR+d/vIH1XX8gu8M9reJsT1InTLvGvJDuiZjp1aHUo8z4E6ZPKRb
|
||||
I/d7h/V4nZmBaCZYjht5hxR4e3jcpzaEBq4Foh4/0e/iLnZn8JxgejDys2NqqUTx
|
||||
8wIDAQABoz8wPTAOBgNVHQ8BAf8EBAMCAKAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
|
||||
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4IBgQANaMq2yf7A
|
||||
R5r2ac1+MCwzgLSBFTMrdfperjhEvkkyC7ln73qU6FhJzbccU3NCR1pI6052X5sM
|
||||
68wFBf/opIEFJLm/KRmfAlNQ1I02a0gCmmpm8stjtfynC6Iu7fKdWjytZRfXY6F7
|
||||
bXCNoUlbQmePHOawIS26JP3QMauhnetRPUJt72/Yt+HFgt2cEdazzuWpbs/JVp1T
|
||||
UFI/GvVTKkAgDWcb93mm1dKkHd8pOA0ATURnV+zyfOOky85Gky3++WOMre6Cb9rQ
|
||||
PlooY9tQGvvuBqc17yY/RPcOBMtpUna6SKw+gW9D67fIjp529lIhcmInh1nuBjHS
|
||||
EbSIxeQBGlYQKHcMfOW2HQrax0QSsMT39ppIHuiJ7ZHBO26HhtdnryhJNMiSpEs3
|
||||
BK2/kGZev8CQCJrNTXCRMEPLhDHMKhHGHTSMkYJnAVEsKtzY9yFqfL7LiFA6k2Lu
|
||||
CPNNZ5Ftb1RpEV2rkiNK2Le/oqg15c3la+UbTy+rfMzjLvkV33kFYcQ=
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,39 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG5QIBAAKCAYEA2LOkZWla+WGTBF11WRUejrwkVJEbviiy1IQIxQZlO3e+mhQW
|
||||
4JgFSyHXfwo49Qeo3n2aWAagJu8fRFU3nhQNOPDfafevlz8/Ob6TLzjY7WCSt7GL
|
||||
PGs4O1Uv9JCenfmc+yJTzoZRnwgkh+KpiZyN357EeaLeJTR/6bf2kdC7GN4elI0E
|
||||
n1WxXfSkKi4VxK6LY8jVUSw9Yb/D2AvX1qXslw4+HGstAFxTMbytOddMWrRzEZpm
|
||||
t7g2xVpxA8LYhwQl2Kzozpv95MJseq49JWz6B301Py5snLoLNfnDcUY4NOxAC1bW
|
||||
WkjuqA5DcNFPr6ctjdI/v3aV7VkSz8LTB3/Eg/SCQz48PplbTvTGZ0NNuBKPjwm8
|
||||
kifVejf5dVNJUnRvMRsYx8bDGy1lH53+8gfVdfyC7wz2t4mxPUidMu8a8kO6JmOn
|
||||
VodSjzPgTpk8pFsj93uH9XidmYFoJliOG3mHFHh7eNynNoQGrgWiHj/R7+Iudmfw
|
||||
nGB6MPKzY2qpRPHzAgMBAAECggGBAL698Rhqke8smdGfyejtlAYjSP8+8uKAxFgX
|
||||
F/kE1hpwHk9VG4X5ib9GPH7QKq5TXarpd++/dTyQAj+NmvUDxVe3fY+yutYwj6Bu
|
||||
RPOt4BOhi8Mw/dPitI5VP27P1S5MRocvAgGpbTLEYhNRydUc/iw1fc9rMoohGe5J
|
||||
RTm4Ntd+vAAZ2FW/ge2nptCR3AtRb9QXNNzMSgM+Xk5Orl97kTKtELLHC8djfL8s
|
||||
ynU9MzIr35VBCOTxuxQftZaP7TN6y46obQFTpwNhX2ouXT5CH4QzGOLebo3Af2gT
|
||||
3CWFtW+o6ISKJyEsGUOsxuxCKWJX3X3FNQ4TeQm38hzV9ocZOfOXq7vQMTdkB8SP
|
||||
Y9LNFzaQwGAzBsBiY8aeDSWZ3TCOI+HyDIhkSjWE7ybYkcFcUv8z2yuB/gspIEgW
|
||||
VhYwBoorrIZYRQXVwrWd9SKzlt/nmLOuEQWLBifEt31zmMzJhZZkUaYEssyfBvhh
|
||||
/zi3BMbngkDhr2g+G6bQznwsoAQv8QKBwQDvgIdlWJbUTrfB5HBzAsW6z44phyBj
|
||||
utDlhyaflAHs4BOPYPkpeeYrn/LvhhwTyhR2nIbbKCpzorjnm9LvxLu0t96SD22p
|
||||
qnYJ5M7dMj6tfN7CYLqRSUkrDFm8MLDrOEjqFPriyYoITQNjbVFHwPv0k4yem8y2
|
||||
RoCfM0iU9mS9q+9sKcVN7HBY5dR9KDf+k6fljYE+bETcTrTkDiacxhJhPO/PKmp2
|
||||
0YvgQzb9MJaKIhZ+ovy0/0YtA9F8ZGonmf8CgcEA56ELGwfPYALv/IczCgUsheWX
|
||||
S4OO4Lukv5LtgqHylvog+ZeSGxNh6wDgDhVK5XIuzVW3GiTC8sxaynsSvpyGMSeS
|
||||
V7Crs85VqMryw20Con3NVttELslsYQ46ljtNNP4yFOdKIPMFrP8x7EwegqNHZ976
|
||||
1/fGI9h7CMrYbUU5sndducUZoVDU79shRILJ/+Z+UZvsaMznqPCiHo1w22WPxW2W
|
||||
Eo9zNnZ9t7L+jybevro0NVlKPMrHg/ZQf1WTfeANAoHAGdc1RJMFWwzPOMVL+KzA
|
||||
5sIEJajlrrz2Uv19BlSyzHr0wVCGMZpsYiKU1JEUsHHqOU30Ius3gVh6OMsQPDxu
|
||||
wDXidsHhZB/3MmQUibslFhTV+AT1vD06/sELYYmjXQ2qmE8BLrzt/q1Ig07FKUfC
|
||||
J4ZP8sD+mmAK+qJO33uiLPDDGVl8Z0bubDkH7yUKvZXy1Iqq+jA2UcrQK5b3RYz9
|
||||
aK5pdWGvMPi07dJyuWinpWm+IZW2TFUKnkq+LHytE27DAoHBAN4ZvKVhmsZMasOw
|
||||
/A66oVOOr8EX19PD+Zg8kYO2N//uvdm2LcHKlxSY1T6LyjIyh5AahaUK5Oedbd1D
|
||||
n9ioC8BsWlW9MRcLXXWpjJg5GdKnYFLNkxZty39Q/np5SHHs4CbNFHZ9sM6OMNeM
|
||||
saDAYcLGu66Ehjhu5qKqplY4j7eB35w203msIVIQw1iHNJws7qjgIxLmj6edfUZg
|
||||
h3vIadB8YO9RH790ZN3VQ2QOeH1X3KHfCWE7a44sjElczD1hrQKBwQDnnERBvdNS
|
||||
47FYK+OZ88eLMedXxFuWF2PRntnSvhHohY5FcqBEh1QXbrEldS5bj+lJwk29Skqd
|
||||
u+qfOKtG0VwzZcn9Rq/PG8D8cO3WwOYP+/Y1CFpDSQavVgzGgpt6DMv6lMDWD/LT
|
||||
H4s6kiSfcjVm7T9FeMUKHTFbE8A0VAXMsFyasRfycdaCKfmnSpI3mMXppM26pb4C
|
||||
Z/gKcbosQm2MANDrvUwnIaFeghRUtDRP3KK9kEQJX1xP6TcMxiMH58k=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -1,39 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG5AIBAAKCAYEAsIV0syyRO56BvIOro4bIqB6iFJsNc4zX8MiM4QPTWgqGlYws
|
||||
KSVmNppTdlACZCJIqyzoscrFqJPto8/e2Fc3oaTdEREGIs7cmc7LSXfot/mAgPpy
|
||||
71SVWtb7xNmXro2JJPZjRBCSpl1ulPug+/8w7fSKQdLMjh4Hp2YlwVBfVu0bYEEW
|
||||
+7Vl9PZVTv+NbTqXYvYVc9R6QFIbN/njWAuo2wpjJlY7vqNnSYZyskAaaAC17fFJ
|
||||
kVQKKblTeTk1C9PxTmVTB1j9yOoD3+V/6IrTYKXdTHGJ1MqdieTHj1jHXe5TOeSB
|
||||
+Hjgq4tr25mPfQ4ixXqDqIcx5390DAjInuSKNUJ5pqiFrVe9eIDmySZCg5/JIL3c
|
||||
8phy6g1bxiJN14+Dn0om/0+9UrHK8LVzWMmtFRVycWVUYmARWFY3EE10k0RXU2Ht
|
||||
zmjfnBkRrl13b0ExizlA1qJ33ngxF5rNEDSMpwf4og5uYOjRUPYuvCL9XtQKr254
|
||||
NFO/sg/qqPV4hFWTAgMBAAECggGAH6SMuuGuVyWe1BA2YGX06k4zd8Yjryb8Pql0
|
||||
t5Fb/bQNVBmAgQ+3NuqLM5Y8F38dz7GJNPXIYOPDoa3NoLJhwpQvHLQUiYDTgq7T
|
||||
OiRIj1ImevhqSgS7kUEgeLUYv62XfAy+1qCx6Siuff5taT7hooZHkm0bRg6UCKoC
|
||||
8phZvtdaJPMGD7EAydyuhi7BR2dNY+wBBHZ+Q7F0N6CP5GSSrFE8XM7wfsgD5+Y2
|
||||
AUYEdchK1JCAQ5DxEXGrSPu8SpZ/SuhMjLc3/JDwB8SZPT0C1jX7YMeUiPONy9VK
|
||||
J6Fdnl0FMhS9VJHocL4o5IU9OLoahAcpq/Z25arm9z7yyxUoO4nVUAl3H9N7+N7A
|
||||
cwpbSgMld15bQ9iPV8MCB/eVKzfgLbWuhpZr6h6oJF9pgIq9DDCK/mc9KYzSGd1J
|
||||
dOVuizi0dMYS+iOJRFR3kIrNW7dGCecniigZfrrprqqkycl7823VTi0zIU4CHbDm
|
||||
ypu/b8sbs+h6mHN71muWAlmChz3hAoHBAN5Cm3ZZeQJj/p7Kb3sn6WAXlRqnnDz9
|
||||
fJDaa3788o4VQ3ie4odDNzALF7bHhYnfovXWrh/4XGkjiW98GPczpEFEdYF9gCGO
|
||||
mAaHV/unvtjbGF7Wk3xjgaXwPeKXGU8vZrQ4y41u5eZWpA1fwSK3T+AQ79t4R2jr
|
||||
kRgFz7iIJ8iQGleI+F9X4PRjhoOSsdaUkJRB6pxvxcsiYIKxDi7VScTx8iD0pgwn
|
||||
tgcQ0do1A0ZQsnJMtBnfIj1/J0sSMHEE9QKBwQDLUVyLmVjv0apqDxnNCw0laIFm
|
||||
ofp7S/q4pXfDDg3SqrM05Wgm4CHijzKzoqvFLILQvI004LShRcNXTMAsAbbIRzcY
|
||||
YbEOYytHB+k9WfEjAFJkNM4qB4w8erELKwnvjflodLgBw9k97cybhYIZCnwWIIHp
|
||||
SwXPT9AI5Ck8E6wifo1nWjpgMZtg5PaH6yfGa+o+ahmetKoU+4ENzVeU95XuKbVa
|
||||
x/6UW+wNbPqo3oEfV/K25U6WGHoGfX2X8wn/m2cCgcEAnlABXi5i9GH3ZnG5MJcA
|
||||
M3L4wNCsiADirmb19LEFsFDTC2LY5hHpiG4OSSIbK1bBQ6zTwG/umvE2HtPdEI+X
|
||||
KuoxbLfRAZYJEXVsJROZ6+s7k6nxycMzANh7rB+GZpHT7QEbdDWOyh/ioKgY8Lpz
|
||||
yZ0mzEQDUWehpOPWzpElDUYfjURB7d+xm0Ic+TEPPVH7Ha9KBn3S/FsTNWQaPx+r
|
||||
eP4BQpoggD30+VlwsKXcHES0ppeeHWODhxxAB8f/+zDVAoHBALJY3GVYTruPn30J
|
||||
YgiK+S0nTttImwAs1fHCtBtV6KozMp/j3Ei9svuZwU/yEdsUAGw5+WO4+Lm/CGs7
|
||||
2BbCKiPk1F9+0mFcfEoCloZKr0uUrLFZ4L7dgBZNSaASUNTiJTWLrR1fPuEkB6ck
|
||||
pcpxeAew3ERYmvAPgt1JxyH737Mib8eJTkuzOCj2r4rqrClR4Fh/mZmtwMRHGh2R
|
||||
UpJJ3CreS0cmyBo7yAS+4+HdzEZCT5Y/73+aWO/4hIMVnl+pYQKBwCpUb85zm5zg
|
||||
UnZ8nBS22FLGTcvBs8hbyXUtioSNadNteuqk6jsN2F+Pwsh6eHbVHW4Lu9j6Gn+J
|
||||
S1ss/ztgGkErvQF/9DpxMeYt01FpvZaUJthThQVQ9xvr9i7utgthtdspNvQ0fux1
|
||||
9Xg2fhLnDz707PUt7OhmVW7d+XOfoc19mYZlN0IOHsqMUMphIW97Lp5QWlZXxr23
|
||||
Zrv2j5mTvv3Fq2TRDNfz5dwijFMvv7kpGfHA1950ZIbobQvYYsoC7A==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -15,13 +15,11 @@

// +build integration

// This currently fails; it should be fixed
package integration_test
package integration

import (
	"log"
	"os"
	"strings"
	"testing"
	"time"

@@ -128,7 +126,7 @@ func testFileTypeChange(t *testing.T) {
	for {
		comp, err := sender.peerCompletion()
		if err != nil {
			if strings.Contains(err.Error(), "use of closed network connection") {
			if isTimeout(err) {
				time.Sleep(time.Second)
				continue
			}
@@ -200,7 +198,7 @@ func testFileTypeChange(t *testing.T) {
	for {
		comp, err := sender.peerCompletion()
		if err != nil {
			if strings.Contains(err.Error(), "use of closed network connection") {
			if isTimeout(err) {
				time.Sleep(time.Second)
				continue
			}
128
test/genfiles.go
128
test/genfiles.go
@@ -1,128 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
func ReadRand(bs []byte) (int, error) {
|
||||
var r uint32
|
||||
for i := range bs {
|
||||
if i%4 == 0 {
|
||||
r = uint32(rand.Int63())
|
||||
}
|
||||
bs[i] = byte(r >> uint((i%4)*8))
|
||||
}
|
||||
return len(bs), nil
|
||||
}
|
||||
|
||||
func name() string {
|
||||
var b [16]byte
|
||||
ReadRand(b[:])
|
||||
return fmt.Sprintf("%x", b[:])
|
||||
}
|
||||
|
||||
func main() {
|
||||
var files int
|
||||
var maxexp int
|
||||
var srcname string
|
||||
var random bool
|
||||
|
||||
flag.IntVar(&files, "files", 1000, "Number of files")
|
||||
flag.IntVar(&maxexp, "maxexp", 20, "Maximum file size (max = 2^n + 128*1024 B)")
|
||||
flag.StringVar(&srcname, "src", "/usr/share/dict/words", "Source material")
|
||||
flag.BoolVar(&random, "random", true, "When false, always generate the same set of file")
|
||||
flag.Parse()
|
||||
|
||||
if random {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
} else {
|
||||
rand.Seed(42)
|
||||
}
|
||||
|
||||
fd, err := os.Open(srcname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < files; i++ {
|
||||
n := name()
|
||||
p0 := filepath.Join(string(n[0]), n[0:2])
|
||||
err = os.MkdirAll(p0, 0755)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
s := 1 << uint(rand.Intn(maxexp))
|
||||
a := 128 * 1024
|
||||
if a > s {
|
||||
a = s
|
||||
}
|
||||
s += rand.Intn(a)
|
||||
|
||||
src := io.LimitReader(&inifiteReader{fd}, int64(s))
|
||||
|
||||
p1 := filepath.Join(p0, n)
|
||||
dst, err := os.Create(p1)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = io.Copy(dst, src)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = dst.Close()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
|
||||
err = os.Chtimes(p1, t, t)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type inifiteReader struct {
|
||||
rd io.ReadSeeker
|
||||
}
|
||||
|
||||
func (i *inifiteReader) Read(bs []byte) (int, error) {
|
||||
n, err := i.rd.Read(bs)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
i.rd.Seek(0, 0)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@@ -43,8 +43,8 @@
	<localAnnounceEnabled>true</localAnnounceEnabled>
	<localAnnouncePort>21025</localAnnouncePort>
	<localAnnounceMCAddr>[ff32::5222]:21026</localAnnounceMCAddr>
	<maxSendKbps>50000</maxSendKbps>
	<maxRecvKbps>50000</maxRecvKbps>
	<maxSendKbps>0</maxSendKbps>
	<maxRecvKbps>0</maxRecvKbps>
	<reconnectionIntervalS>5</reconnectionIntervalS>
	<startBrowser>false</startBrowser>
	<upnpEnabled>true</upnpEnabled>

@@ -1,106 +1,9 @@
|
||||
<configuration version="7">
|
||||
<folder id="default" path="s2" ro="false" rescanIntervalS="15" ignorePerms="false">
|
||||
<device id="AF2HXQA-DOKKIMI-PKOG4RE-E25UTJ7-PSGQ7T5-WEY7YT5-SG6N7W5-NNA4BQM"></device>
|
||||
<device id="AND7GW6-DZN66F2-TKYJSTC-ACI7MYT-75X4T63-SE5S4MQ-GBSDHDL-4CLHJA6"></device>
|
||||
<device id="ATFGYHM-ZTA5Z7B-NYPY4OK-VJWDD6Y-SLCDKED-L2VDESD-TUBLMLH-CHAJEAI"></device>
|
||||
<device id="AUI2JXT-PXK3PMB-YATDUNI-RIGBTKQ-VBUJLUH-JLWZEEZ-3UV5X2G-AWKOVQH"></device>
|
||||
<device id="AZCVBDG-6GATBA3-XX7HOH3-LY5R3L2-SPCQOIT-KFVLUCC-GQ3KJTJ-ZSP3RAQ"></device>
|
||||
<device id="A4W7JO6-IM2ZWBX-AYYLCEP-NFTJKQV-2QRGZJ4-QGSP3HI-R7UHNYF-GAKDLAS"></device>
|
||||
<device id="BDZ662J-Q5IXRE3-CEPHNIM-V5D76GA-U26VZRI-UHTDERX-UABQEQ3-CKE5CQD"></device>
|
||||
<device id="BP6X6FF-2SEA3QR-RW2Y55O-B4X7LR6-5EXGA6D-KM725CC-CIDD7ZZ-7VNKXAT"></device>
|
||||
<device id="CDUKUIK-56DOHM3-M7VHCL6-5HGTZHB-QULOOBK-3DK6QXC-4DDDXKS-H5OOZAQ"></device>
|
||||
<device id="CJRFL4X-PBD3H6K-2GCPPYS-JCZ73GF-SN63HMU-WGYMZQZ-W2AWEDG-AZ623QB"></device>
|
||||
<device id="DDGR3XN-CKDGXS4-VR6G2T3-GQJVO7E-XDZGWUJ-733J3G7-NMGQFS5-ETFWKA6"></device>
|
||||
<device id="DFAWRIU-3MSLR65-L7DZE53-UNR6KEX-FTJZJ2B-NVYEPQH-ZYDYU6D-ROAKFAJ"></device>
|
||||
<device id="DMFDJGV-UDOMXZU-FKKYXCT-BEGE7K7-OVAFMQW-NBLQ46C-J6VORML-27T3SA2"></device>
|
||||
<device id="DM4ZS3J-WF2WJGP-YNPTRRZ-MKUEDUC-7N5Q6EX-IHLR7DM-VHMP3FE-KXGHDAG"></device>
|
||||
<device id="DPJWHYQ-S7N7D4C-P4CY65M-ZJ6MFOD-OR5CF7N-2TYSQ6T-IZQDK67-NYDMCQR"></device>
|
||||
<device id="DYVI4HB-DO3WULH-YC4CVUV-KOHXVYH-MSRQH5O-P4JX5T4-WCVDJK2-QPWZPQQ"></device>
|
||||
<device id="EAELU3R-WFMEOHV-VUF3ZQU-T6QLRUQ-S7HFGGG-447PNIB-Z32VNM6-3XRG2QM"></device>
|
||||
<device id="EHYW5K4-WLONYO4-LVJFY7T-7Y6W7EA-N3O5ADS-NPXYFBA-TOSC3X2-J4BYNAV"></device>
|
||||
<device id="E3KQXYD-ST7GNHR-EQVEBUK-I7X4NFR-J6JEGSL-XSU3236-PXDEOYF-SIGVJQD"></device>
|
||||
<device id="FDN276E-J7AQ32B-Y4CBY3Q-TWCQ2RV-QQTQ5NJ-NLLFCGR-UOXSYQN-VLHSHAK"></device>
|
||||
<device id="FOUTNQ4-M3X4ZL6-VPG3ZD6-3EFUKTT-XVNS6N6-G2E56OA-LYBDCXY-S52OLQJ"></device>
|
||||
<device id="F3CJABZ-VOJPUJJ-A6V6J2O-PQF32IV-5VZY45B-ASPFXOE-OMZOBYX-IIVGJQR"></device>
|
||||
<device id="F722I3J-JNESQSZ-NWHMLX7-OX5YQPY-Q4P7AKS-RAVRYFO-LUFHTBF-MV5ZEAW"></device>
|
||||
<device id="GRCZC6E-PRZ5EPJ-5XZK2WL-K7PBC4D-EU7J5K3-YHME4GD-AKHMR3U-HTGE3AJ"></device>
|
||||
<device id="GTZORAO-4U2BMRT-NJ7VGJV-DEFE7X4-GK7GOKP-56NADQB-DKRY64G-GK4JSAN"></device>
|
||||
<device id="HHKXJNI-2FOFGMU-KT55UTA-J7CDGBX-KE3RMHK-NDVQYCB-DPUAFTN-C62LTAJ"></device>
|
||||
<device id="HOA63TU-7NASFTP-7JKRNDD-CEBCBTL-3UB6GEH-LE3TBF6-UHBJUX2-B53JYQO"></device>
|
||||
<device id="H4IJPSS-SGRGAHF-4HVWJXK-AQ2EV4C-EMWQWG4-63ORWGX-CTBONEL-3IO2VA2"></device>
|
||||
<device id="IEDANYN-UEK237R-Z6J75BD-3SK2RPX-66FJ2F2-T347HN3-KYY2PW5-47ZAIAE"></device>
|
||||
<device id="IHFBE2Q-VJKGLJN-PYEX3SL-ORFUM6B-GUDQVUN-32TZFZO-GMYDUFI-VBICSA3"></device>
|
||||
<device id="ITFMT23-LPJ4LN3-Y6HQUYG-GO47ZU7-PO6SUJ6-7UO2JNG-SKI5CTG-WJCK2AP"></device>
|
||||
<device id="IWWUWMQ-3KMUGCT-JJII2HN-J6NN6OW-43AM5TT-MHJXRCR-D7MJVWE-HLAD4QV"></device>
|
||||
<device id="IW5I76B-NKCHGJ5-HAQNOBW-LRHKYSH-54VEQQC-HQAXHVC-DX4ME3O-C7XGUAQ"></device>
|
||||
<device id="I2VEKOT-P63JCHG-LKHJAOJ-XL4VNH2-Y7WQGAW-JKQQQQY-QHIOB77-Q7WLYAW"></device>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
|
||||
<device id="JWPMLRX-U5Q3LSU-LZJCM2Z-GIZLQO2-UBSZKUO-INZXYTZ-7E6PDK2-PPDQWA4"></device>
|
||||
<device id="KBBKAQ6-VINPVTG-LXSDXXF-T67N5DU-ONRJI3P-WIAZBY7-UCMAJJ4-274ZBA5"></device>
|
||||
<device id="KNOUKFP-JEJTAZO-FUI7KMO-4PQJOMB-CX255W3-U3SHR2I-LJ4LBHL-AY6BCA5"></device>
|
||||
<device id="KOUXI7Y-6PWCH2V-552D2Y4-6D3HQJZ-CEKC5IY-XVUV2KC-452GVYG-AJN4TQB"></device>
|
||||
<device id="K3Z2ESL-SKLK2UF-CLA2RLL-DWQVXMU-7PQQ5MV-2BWWSCI-MAOF7YR-277YSAL"></device>
|
||||
<device id="LW7D6SQ-3UIMICX-EEPGTNA-P6NNR2M-35WLLB5-QKW5KWV-3PGTWA3-QM65DA4"></device>
|
||||
<device id="LYYJOWF-WZWZ3Z6-O4JRBOM-44LPK33-YDB5EFZ-CTX2FNI-C5BNMOH-ZBREKAK"></device>
|
||||
<device id="L5GRUQX-3FPGQG6-CWJ3CQN-QJO7BFV-FDXYBXG-AKAZ465-3D37XCR-SHPHDQS"></device>
|
||||
<device id="L7L37HS-3STS6WA-G76FKTT-FAIFH4G-46EORMF-6PA5JFZ-RW4QJDM-SDSBIAM"></device>
|
||||
<device id="MNLYDAL-BTM5JJL-PYOD2C6-MWKXW5N-I7KGYQJ-TAJ2NQE-K2MI7C5-RIM7CA7"></device>
|
||||
<device id="NBO5E2S-OIKGPUN-AXQSTWG-YNSZTQD-YSQ3JGC-BAPD72J-5BUYGBC-4U75XQK"></device>
|
||||
<device id="NWKOEM4-T62TE6V-H4X75L2-GX75Q3C-XCA3OS7-X7MHSVV-IOS5V3U-6Y4IAAZ"></device>
|
||||
<device id="NZKXQ5V-GXMQEY4-77E6DUN-UJK5O3O-633AEIQ-2ZB6FJL-ZPT3AFY-IFMTVAS"></device>
|
||||
<device id="OYRDGZZ-BBBYTOG-PPOEKNF-RFOU5YJ-LOJXGDC-PZFHUMW-EHKAEGM-ZI5O6QX"></device>
|
||||
<device id="O5BUAKV-5PXZUBI-TGCAIHE-646RUCT-5KSKVSY-NMVEZNC-XWIDH4M-N4FWJQK"></device>
|
||||
<device id="PR7FK7P-O57IUUQ-L7NVOBM-BJ6B6HV-A4Q2ZM7-3ZB7YPB-2CEI225-VNE27QZ"></device>
|
||||
<device id="P74TJEI-WJADRBZ-J5ZWR3D-WNGTH7F-QNTKVBW-QJSYCKN-VSXWFFY-BFZXZQH"></device>
|
||||
<device id="QLQ4VDH-DCRQOCW-45ZU3NH-KGVNZ4K-RY5VH5M-E54B35V-3GDPECV-FATMQQN"></device>
|
||||
<device id="QTSMMK3-7LDPTPP-NFQJBKI-MXPNRLC-RBJ46YH-EUOIHQX-X5OBHY5-DSEHQAB"></device>
|
||||
<device id="RNEPSQ3-XBYBJX2-DC63XIJ-KOV6OWX-3RBQ245-SDO5NPG-7DNYUIJ-V3BVBAR"></device>
|
||||
<device id="RSPOEAQ-WEOFVAO-7F5OHVV-FPEK2XA-KG23ACJ-ISANINA-EVNGPFM-6GUGUAY"></device>
|
||||
<device id="SMFBW3X-ZG7GEVQ-Q5EY2HV-QTTJ2AK-CMKRW7K-4ZYN5KF-D5LHXBA-J2WJPQA"></device>
|
||||
<device id="SRC4EBU-57YZZF2-U72JRON-LWMXERT-NL3R4YY-7T3H6FI-VPK7KMQ-Q7LGBQE"></device>
|
||||
<device id="SVUEDXK-GOM6UEO-BK725GI-4R4GRA2-SL6EJQ7-2TQZDF4-IQEM34P-P32HLAN"></device>
|
||||
<device id="S257JIU-2LMK7R6-J4EPHQE-DHBSA2V-5S45UV5-NQNI6G4-UUPXTWK-5L645AB"></device>
|
||||
<device id="S475AJS-UF2H3WA-ZKSFH7J-YJC6Q4P-L6O7NDM-Q7LJKVE-CQ2DTVS-N6TY6QJ"></device>
|
||||
<device id="TE3RAAB-2F65ZF2-MHDELWZ-YJHF72E-5S7HGWF-NJ6CKZK-3LJHQ72-YGOPOAR"></device>
|
||||
<device id="TJEEOEJ-ADKANZG-T3RYCWO-FYJW3GQ-TX7FDLE-I3X3QLX-TQ37HW4-DQQZEAK"></device>
|
||||
<device id="TN64K4J-D7S33RD-55TMUBA-D2VKVRW-FAPQRGY-2VXAMMZ-F2RBYC7-DJAXFAI"></device>
|
||||
<device id="TP4ORAM-C2K6L3Z-3SFLGQF-NILNVZQ-3VSHEGT-LLKQU7K-BV66VQZ-TP65EAS"></device>
|
||||
<device id="T345SQH-CZNXG3Q-WVMQDHY-S524RKJ-C3AP767-QTYGY3X-GASFDCK-LURRWAC"></device>
|
||||
<device id="UMMN7QY-46JHZ4X-UJ5TWWV-EU6OKDA-4GM76QK-UUJJLPH-27H6P22-XQALWQ6"></device>
|
||||
<device id="UZPBG42-GSUBWXD-ALOS27M-AQAWWWJ-XP5DA46-3GWEZ4U-I5I2JAZ-5VPHYQI"></device>
|
||||
<device id="VU764LI-72YK4KV-APLQAXJ-4Q46MTT-FCQFB33-JTCH4E5-G7OJRFI-YGPJYAJ"></device>
|
||||
<device id="WDGE4EI-CO3MSS6-JM5ASRP-YHBCKBN-EWAT5KQ-GGQQTZE-OSDTGH2-P2BE3AA"></device>
|
||||
<device id="WGURZJA-JLZYF3P-KRQDIAL-6RGUYSZ-UZEJ3DO-U6JOGTO-PYQRA7K-NOEFSQ2"></device>
|
||||
<device id="WLTEXSS-RUQSDSJ-FWYVE4T-S25G37F-4XPT7GI-VLBSHCS-SLHXBEF-QSKCQAK"></device>
|
||||
<device id="WNK25O3-CDDKOM6-VDJKB2C-GZ6L6GA-HI7WHXU-G2SCLLF-EEDJX6N-AYY5OQM"></device>
|
||||
<device id="WTA46HO-WMIMK2N-GMNLWSQ-UOZI2O7-JVG3YY2-FWXQ3NJ-NWG5PLX-RN26AAG"></device>
|
||||
<device id="XG6FGXG-CCB54FX-AVREF7W-YMR66ED-66H4QQD-KXK5K7C-DVVYKU2-GF77LQK"></device>
|
||||
<device id="XSWE7HJ-AHTZGEZ-UNH6ZSK-WZHRS2C-55LQZDR-7GLSE6Z-VWEDFWT-ZS4DCAK"></device>
|
||||
<device id="XXXMH6A-3J53GPW-7BVBR76-5UJNW3E-6H2ACDZ-DY46Z6T-FD7GRPE-RGIQFAE"></device>
|
||||
<device id="X2J2EIR-6MCQUSZ-DZU37TI-Y5S4QUN-JPTDKHT-ANBGXU5-YRD3EGF-6VOEHQG"></device>
|
||||
<device id="X3WHUR4-N5BSQ4T-YFC3A3S-KI6KXQ2-6GL66YV-G2YGVKY-LEKVIXY-M62C6A7"></device>
|
||||
<device id="X4EX7JM-CQSNCZ5-H5UNVUW-RNWT4U7-VPFKPB4-P65IWZC-N74SCAM-RLLFUAT"></device>
|
||||
<device id="YEKECF3-5YAC2EZ-ADYLARL-THNGWI3-ZFU6UNK-3KCFZWB-IRYFH6X-LO5ZHQC"></device>
|
||||
<device id="YP66HGG-CIWZ6N2-A3T4PXT-KP47YWK-3ZVW5UX-MGTZRHL-YNB22IK-IEEWNAY"></device>
|
||||
<device id="YRG374E-AB2PK5W-UKR2MOA-E43VSQF-ICO72FN-FDZDX7F-Q75UE2H-EUCIFQJ"></device>
|
||||
<device id="ZCZPFDT-GHJKTPJ-22WKCWP-2UO7X33-NUD2YAK-P6MDHN6-2JVAETG-JPXXLAR"></device>
|
||||
<device id="2MDVGTI-EGVOHIO-SPFMLV6-NRFP2EC-HWMLWVO-QVLLTC3-DSQPHMY-JDEIQQ5"></device>
|
||||
<device id="22UBH4D-HGCP3EW-EHVJTZ4-6PB2HOB-LDPOC3X-ONY6PA6-2DFNBIX-D3X5UQT"></device>
|
||||
<device id="3OQ6R42-O76KQJM-JYN7EGL-PWKZUPG-276FWG2-CP3CTV3-SM545L3-KTTH3AG"></device>
|
||||
<device id="3QW353X-J52M5LW-3DUY2Z5-SG5JU7Z-6NSJFAM-74YPGMA-VHL2W3N-5PX2JQO"></device>
|
||||
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
|
||||
<device id="4IB56JJ-IXYAHRI-6W75NTN-TEIMPRE-25NAG6F-MYGBJBG-JB33DXY-L2WINQK"></device>
|
||||
<device id="4JSTGIZ-Q2UJXIG-V6QT62A-FSRBKLF-J6FWGIY-PFIAFC3-7GP7DOY-RVNGKQU"></device>
|
||||
<device id="5BKVB65-I2E5UCX-DWWPLNP-IQ7VRZY-I3P42KF-ZKAF7XO-TQTCLVU-TH45GA3"></device>
|
||||
<device id="5OOL3EO-A5DTJ5N-CZHPYVP-YH74NL4-DE3O7AU-ZDFSM6D-KG5IUWA-WJJXBQL"></device>
|
||||
<device id="5RXIYVZ-NOEGGYD-TY4WQTH-U6FQ5IR-LJTW3P3-HFQBN6C-RC4AB5V-MO44PQV"></device>
|
||||
<device id="5WGZMYP-FNTWGLG-PQKEB5V-PJ4LEPL-53SRHBR-HHSHHLD-5KFL3P2-W6MTRQY"></device>
|
||||
<device id="6I5KLVE-VVRXZ55-Z3IQIZH-3FBHKTZ-6MIPAYO-V4QX6K7-7J432VD-T5TY6AU"></device>
|
||||
<device id="6R2BAIE-VRYZJXA-ZKJT4ZK-BQTXA46-VBWMLNY-PINTWYG-TS4Y6GZ-7Z2KVQG"></device>
|
||||
<device id="7BT3IUI-RYS757D-YJPBP6U-WPL2ZI6-CMICBLW-UDYDSVT-NG62TB5-PM2O3A6"></device>
|
||||
<device id="7TVCUYX-MLZH6GF-GDLVMJ6-REPXXUD-DCLPXP2-67HS7VR-QTTABOA-4ZXJOQD"></device>
|
||||
<versioning></versioning>
|
||||
<versioning type="staggered"></versioning>
|
||||
<lenientMtimes>false</lenientMtimes>
|
||||
<copiers>1</copiers>
|
||||
<pullers>16</pullers>
|
||||
@@ -124,306 +27,15 @@
|
||||
<pullers>16</pullers>
|
||||
<finishers>1</finishers>
|
||||
</folder>
|
||||
<device id="AF2HXQA-DOKKIMI-PKOG4RE-E25UTJ7-PSGQ7T5-WEY7YT5-SG6N7W5-NNA4BQM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="AND7GW6-DZN66F2-TKYJSTC-ACI7MYT-75X4T63-SE5S4MQ-GBSDHDL-4CLHJA6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="ATFGYHM-ZTA5Z7B-NYPY4OK-VJWDD6Y-SLCDKED-L2VDESD-TUBLMLH-CHAJEAI" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="AUI2JXT-PXK3PMB-YATDUNI-RIGBTKQ-VBUJLUH-JLWZEEZ-3UV5X2G-AWKOVQH" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="AZCVBDG-6GATBA3-XX7HOH3-LY5R3L2-SPCQOIT-KFVLUCC-GQ3KJTJ-ZSP3RAQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="A4W7JO6-IM2ZWBX-AYYLCEP-NFTJKQV-2QRGZJ4-QGSP3HI-R7UHNYF-GAKDLAS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="BDZ662J-Q5IXRE3-CEPHNIM-V5D76GA-U26VZRI-UHTDERX-UABQEQ3-CKE5CQD" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="BP6X6FF-2SEA3QR-RW2Y55O-B4X7LR6-5EXGA6D-KM725CC-CIDD7ZZ-7VNKXAT" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="CDUKUIK-56DOHM3-M7VHCL6-5HGTZHB-QULOOBK-3DK6QXC-4DDDXKS-H5OOZAQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="CJRFL4X-PBD3H6K-2GCPPYS-JCZ73GF-SN63HMU-WGYMZQZ-W2AWEDG-AZ623QB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DDGR3XN-CKDGXS4-VR6G2T3-GQJVO7E-XDZGWUJ-733J3G7-NMGQFS5-ETFWKA6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DFAWRIU-3MSLR65-L7DZE53-UNR6KEX-FTJZJ2B-NVYEPQH-ZYDYU6D-ROAKFAJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DMFDJGV-UDOMXZU-FKKYXCT-BEGE7K7-OVAFMQW-NBLQ46C-J6VORML-27T3SA2" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DM4ZS3J-WF2WJGP-YNPTRRZ-MKUEDUC-7N5Q6EX-IHLR7DM-VHMP3FE-KXGHDAG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DPJWHYQ-S7N7D4C-P4CY65M-ZJ6MFOD-OR5CF7N-2TYSQ6T-IZQDK67-NYDMCQR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="DYVI4HB-DO3WULH-YC4CVUV-KOHXVYH-MSRQH5O-P4JX5T4-WCVDJK2-QPWZPQQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="EAELU3R-WFMEOHV-VUF3ZQU-T6QLRUQ-S7HFGGG-447PNIB-Z32VNM6-3XRG2QM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="EHYW5K4-WLONYO4-LVJFY7T-7Y6W7EA-N3O5ADS-NPXYFBA-TOSC3X2-J4BYNAV" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="E3KQXYD-ST7GNHR-EQVEBUK-I7X4NFR-J6JEGSL-XSU3236-PXDEOYF-SIGVJQD" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="FDN276E-J7AQ32B-Y4CBY3Q-TWCQ2RV-QQTQ5NJ-NLLFCGR-UOXSYQN-VLHSHAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="FOUTNQ4-M3X4ZL6-VPG3ZD6-3EFUKTT-XVNS6N6-G2E56OA-LYBDCXY-S52OLQJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="F3CJABZ-VOJPUJJ-A6V6J2O-PQF32IV-5VZY45B-ASPFXOE-OMZOBYX-IIVGJQR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="F722I3J-JNESQSZ-NWHMLX7-OX5YQPY-Q4P7AKS-RAVRYFO-LUFHTBF-MV5ZEAW" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="GRCZC6E-PRZ5EPJ-5XZK2WL-K7PBC4D-EU7J5K3-YHME4GD-AKHMR3U-HTGE3AJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="GTZORAO-4U2BMRT-NJ7VGJV-DEFE7X4-GK7GOKP-56NADQB-DKRY64G-GK4JSAN" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="HHKXJNI-2FOFGMU-KT55UTA-J7CDGBX-KE3RMHK-NDVQYCB-DPUAFTN-C62LTAJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="HOA63TU-7NASFTP-7JKRNDD-CEBCBTL-3UB6GEH-LE3TBF6-UHBJUX2-B53JYQO" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="H4IJPSS-SGRGAHF-4HVWJXK-AQ2EV4C-EMWQWG4-63ORWGX-CTBONEL-3IO2VA2" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IEDANYN-UEK237R-Z6J75BD-3SK2RPX-66FJ2F2-T347HN3-KYY2PW5-47ZAIAE" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IHFBE2Q-VJKGLJN-PYEX3SL-ORFUM6B-GUDQVUN-32TZFZO-GMYDUFI-VBICSA3" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="ITFMT23-LPJ4LN3-Y6HQUYG-GO47ZU7-PO6SUJ6-7UO2JNG-SKI5CTG-WJCK2AP" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IWWUWMQ-3KMUGCT-JJII2HN-J6NN6OW-43AM5TT-MHJXRCR-D7MJVWE-HLAD4QV" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="IW5I76B-NKCHGJ5-HAQNOBW-LRHKYSH-54VEQQC-HQAXHVC-DX4ME3O-C7XGUAQ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="I2VEKOT-P63JCHG-LKHJAOJ-XL4VNH2-Y7WQGAW-JKQQQQY-QHIOB77-Q7WLYAW" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</device>
|
||||
<device id="JWPMLRX-U5Q3LSU-LZJCM2Z-GIZLQO2-UBSZKUO-INZXYTZ-7E6PDK2-PPDQWA4" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="KBBKAQ6-VINPVTG-LXSDXXF-T67N5DU-ONRJI3P-WIAZBY7-UCMAJJ4-274ZBA5" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="KNOUKFP-JEJTAZO-FUI7KMO-4PQJOMB-CX255W3-U3SHR2I-LJ4LBHL-AY6BCA5" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="KOUXI7Y-6PWCH2V-552D2Y4-6D3HQJZ-CEKC5IY-XVUV2KC-452GVYG-AJN4TQB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="K3Z2ESL-SKLK2UF-CLA2RLL-DWQVXMU-7PQQ5MV-2BWWSCI-MAOF7YR-277YSAL" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="LW7D6SQ-3UIMICX-EEPGTNA-P6NNR2M-35WLLB5-QKW5KWV-3PGTWA3-QM65DA4" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="LYYJOWF-WZWZ3Z6-O4JRBOM-44LPK33-YDB5EFZ-CTX2FNI-C5BNMOH-ZBREKAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="L5GRUQX-3FPGQG6-CWJ3CQN-QJO7BFV-FDXYBXG-AKAZ465-3D37XCR-SHPHDQS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="L7L37HS-3STS6WA-G76FKTT-FAIFH4G-46EORMF-6PA5JFZ-RW4QJDM-SDSBIAM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="MNLYDAL-BTM5JJL-PYOD2C6-MWKXW5N-I7KGYQJ-TAJ2NQE-K2MI7C5-RIM7CA7" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="NBO5E2S-OIKGPUN-AXQSTWG-YNSZTQD-YSQ3JGC-BAPD72J-5BUYGBC-4U75XQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="NWKOEM4-T62TE6V-H4X75L2-GX75Q3C-XCA3OS7-X7MHSVV-IOS5V3U-6Y4IAAZ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="NZKXQ5V-GXMQEY4-77E6DUN-UJK5O3O-633AEIQ-2ZB6FJL-ZPT3AFY-IFMTVAS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="OYRDGZZ-BBBYTOG-PPOEKNF-RFOU5YJ-LOJXGDC-PZFHUMW-EHKAEGM-ZI5O6QX" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="O5BUAKV-5PXZUBI-TGCAIHE-646RUCT-5KSKVSY-NMVEZNC-XWIDH4M-N4FWJQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="PR7FK7P-O57IUUQ-L7NVOBM-BJ6B6HV-A4Q2ZM7-3ZB7YPB-2CEI225-VNE27QZ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="P74TJEI-WJADRBZ-J5ZWR3D-WNGTH7F-QNTKVBW-QJSYCKN-VSXWFFY-BFZXZQH" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="QLQ4VDH-DCRQOCW-45ZU3NH-KGVNZ4K-RY5VH5M-E54B35V-3GDPECV-FATMQQN" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="QTSMMK3-7LDPTPP-NFQJBKI-MXPNRLC-RBJ46YH-EUOIHQX-X5OBHY5-DSEHQAB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="RNEPSQ3-XBYBJX2-DC63XIJ-KOV6OWX-3RBQ245-SDO5NPG-7DNYUIJ-V3BVBAR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="RSPOEAQ-WEOFVAO-7F5OHVV-FPEK2XA-KG23ACJ-ISANINA-EVNGPFM-6GUGUAY" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="SMFBW3X-ZG7GEVQ-Q5EY2HV-QTTJ2AK-CMKRW7K-4ZYN5KF-D5LHXBA-J2WJPQA" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="SRC4EBU-57YZZF2-U72JRON-LWMXERT-NL3R4YY-7T3H6FI-VPK7KMQ-Q7LGBQE" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="SVUEDXK-GOM6UEO-BK725GI-4R4GRA2-SL6EJQ7-2TQZDF4-IQEM34P-P32HLAN" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="S257JIU-2LMK7R6-J4EPHQE-DHBSA2V-5S45UV5-NQNI6G4-UUPXTWK-5L645AB" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="S475AJS-UF2H3WA-ZKSFH7J-YJC6Q4P-L6O7NDM-Q7LJKVE-CQ2DTVS-N6TY6QJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TE3RAAB-2F65ZF2-MHDELWZ-YJHF72E-5S7HGWF-NJ6CKZK-3LJHQ72-YGOPOAR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TJEEOEJ-ADKANZG-T3RYCWO-FYJW3GQ-TX7FDLE-I3X3QLX-TQ37HW4-DQQZEAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TN64K4J-D7S33RD-55TMUBA-D2VKVRW-FAPQRGY-2VXAMMZ-F2RBYC7-DJAXFAI" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="TP4ORAM-C2K6L3Z-3SFLGQF-NILNVZQ-3VSHEGT-LLKQU7K-BV66VQZ-TP65EAS" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="T345SQH-CZNXG3Q-WVMQDHY-S524RKJ-C3AP767-QTYGY3X-GASFDCK-LURRWAC" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="UMMN7QY-46JHZ4X-UJ5TWWV-EU6OKDA-4GM76QK-UUJJLPH-27H6P22-XQALWQ6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="UZPBG42-GSUBWXD-ALOS27M-AQAWWWJ-XP5DA46-3GWEZ4U-I5I2JAZ-5VPHYQI" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="VU764LI-72YK4KV-APLQAXJ-4Q46MTT-FCQFB33-JTCH4E5-G7OJRFI-YGPJYAJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WDGE4EI-CO3MSS6-JM5ASRP-YHBCKBN-EWAT5KQ-GGQQTZE-OSDTGH2-P2BE3AA" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WGURZJA-JLZYF3P-KRQDIAL-6RGUYSZ-UZEJ3DO-U6JOGTO-PYQRA7K-NOEFSQ2" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WLTEXSS-RUQSDSJ-FWYVE4T-S25G37F-4XPT7GI-VLBSHCS-SLHXBEF-QSKCQAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WNK25O3-CDDKOM6-VDJKB2C-GZ6L6GA-HI7WHXU-G2SCLLF-EEDJX6N-AYY5OQM" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="WTA46HO-WMIMK2N-GMNLWSQ-UOZI2O7-JVG3YY2-FWXQ3NJ-NWG5PLX-RN26AAG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="XG6FGXG-CCB54FX-AVREF7W-YMR66ED-66H4QQD-KXK5K7C-DVVYKU2-GF77LQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="XSWE7HJ-AHTZGEZ-UNH6ZSK-WZHRS2C-55LQZDR-7GLSE6Z-VWEDFWT-ZS4DCAK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="XXXMH6A-3J53GPW-7BVBR76-5UJNW3E-6H2ACDZ-DY46Z6T-FD7GRPE-RGIQFAE" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="X2J2EIR-6MCQUSZ-DZU37TI-Y5S4QUN-JPTDKHT-ANBGXU5-YRD3EGF-6VOEHQG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="X3WHUR4-N5BSQ4T-YFC3A3S-KI6KXQ2-6GL66YV-G2YGVKY-LEKVIXY-M62C6A7" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="X4EX7JM-CQSNCZ5-H5UNVUW-RNWT4U7-VPFKPB4-P65IWZC-N74SCAM-RLLFUAT" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="YEKECF3-5YAC2EZ-ADYLARL-THNGWI3-ZFU6UNK-3KCFZWB-IRYFH6X-LO5ZHQC" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="YP66HGG-CIWZ6N2-A3T4PXT-KP47YWK-3ZVW5UX-MGTZRHL-YNB22IK-IEEWNAY" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="YRG374E-AB2PK5W-UKR2MOA-E43VSQF-ICO72FN-FDZDX7F-Q75UE2H-EUCIFQJ" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="ZCZPFDT-GHJKTPJ-22WKCWP-2UO7X33-NUD2YAK-P6MDHN6-2JVAETG-JPXXLAR" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="2MDVGTI-EGVOHIO-SPFMLV6-NRFP2EC-HWMLWVO-QVLLTC3-DSQPHMY-JDEIQQ5" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="22UBH4D-HGCP3EW-EHVJTZ4-6PB2HOB-LDPOC3X-ONY6PA6-2DFNBIX-D3X5UQT" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="3OQ6R42-O76KQJM-JYN7EGL-PWKZUPG-276FWG2-CP3CTV3-SM545L3-KTTH3AG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="3QW353X-J52M5LW-3DUY2Z5-SG5JU7Z-6NSJFAM-74YPGMA-VHL2W3N-5PX2JQO" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</device>
|
||||
<device id="4IB56JJ-IXYAHRI-6W75NTN-TEIMPRE-25NAG6F-MYGBJBG-JB33DXY-L2WINQK" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="4JSTGIZ-Q2UJXIG-V6QT62A-FSRBKLF-J6FWGIY-PFIAFC3-7GP7DOY-RVNGKQU" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5BKVB65-I2E5UCX-DWWPLNP-IQ7VRZY-I3P42KF-ZKAF7XO-TQTCLVU-TH45GA3" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5OOL3EO-A5DTJ5N-CZHPYVP-YH74NL4-DE3O7AU-ZDFSM6D-KG5IUWA-WJJXBQL" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5RXIYVZ-NOEGGYD-TY4WQTH-U6FQ5IR-LJTW3P3-HFQBN6C-RC4AB5V-MO44PQV" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="5WGZMYP-FNTWGLG-PQKEB5V-PJ4LEPL-53SRHBR-HHSHHLD-5KFL3P2-W6MTRQY" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="6I5KLVE-VVRXZ55-Z3IQIZH-3FBHKTZ-6MIPAYO-V4QX6K7-7J432VD-T5TY6AU" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="6R2BAIE-VRYZJXA-ZKJT4ZK-BQTXA46-VBWMLNY-PINTWYG-TS4Y6GZ-7Z2KVQG" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="7BT3IUI-RYS757D-YJPBP6U-WPL2ZI6-CMICBLW-UDYDSVT-NG62TB5-PM2O3A6" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="7TVCUYX-MLZH6GF-GDLVMJ6-REPXXUD-DCLPXP2-67HS7VR-QTTABOA-4ZXJOQD" compression="false" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8082</address>
|
||||
<apikey>abc123</apikey>
|
||||
@@ -439,7 +51,7 @@
     <maxRecvKbps>0</maxRecvKbps>
     <reconnectionIntervalS>5</reconnectionIntervalS>
     <startBrowser>false</startBrowser>
-    <upnpEnabled>true</upnpEnabled>
+    <upnpEnabled>false</upnpEnabled>
     <upnpLeaseMinutes>0</upnpLeaseMinutes>
     <upnpRenewalMinutes>30</upnpRenewalMinutes>
     <urAccepted>-1</urAccepted>
@@ -1,23 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID3jCCAkigAwIBAgIBADALBgkqhkiG9w0BAQswFDESMBAGA1UEAxMJc3luY3Ro
|
||||
aW5nMB4XDTE0MDUxMDAwNTM0N1oXDTQ5MTIzMTIzNTk1OVowFDESMBAGA1UEAxMJ
|
||||
c3luY3RoaW5nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA9MRyBtAr
|
||||
Sjt29azNoCWxx5xZF3RodBcQu+wv5sRR8lWozrr4brfUJLslcQHowqaAprOU1NP+
|
||||
BH12P5CSymsUrwAmCwSQ54CimXrNi5RiNMl7dtInJksk4Kp6nJgfyR7TqeQgqxtv
|
||||
+skVWdJY7ptxqpVuDfkf1JnNr68dbANw8hEJpPaGm3qOt81YvSg37R75HiOCzv+h
|
||||
FcSjKpPyFMvPARMCOHuZS0fYRJtI5nwmR0mWtKfnH/2204YNiQUne/8h2fgtkpxy
|
||||
OjxKOs2KJxbmpV6Uur/YyGyinb5+Aa0df3KCBuZmE+i/AsZcTsk0fgefe+bshWG/
|
||||
hzrNfV0wsX3TYjYOSBJ04+f/uQW00G1GGSxPwTsShGqVuwfJkTqkjAXX5wcH+PgJ
|
||||
ewG/dyMzKklMg19Y65WkhpWa/19o2KSZNw6TO8YM1arwT0STcMc+4fdrVB09lX6q
|
||||
NJA8UL8hUX+jbKBzatDY64h1d9E8PE0ODHYgYFO2Ko7e2GnWCQeijGmnAgMBAAGj
|
||||
PzA9MA4GA1UdDwEB/wQEAwIAoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
|
||||
AwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggGBANFiHcATP5Lm11o65wbh
|
||||
sKk7yteTapRohMoLNdW44YNyM8ZkELnrdNY8pe3CWSGy3spBH01+4jbUT+gSltQr
|
||||
KTLVxSZ7f91696Og5ag4BQCeFY6ghKD/G9+PlBSj6yb3Y98NZsx8huLfylH+XuJw
|
||||
2gP5Nqov4uXaKgYylx2gdaeCb2M+wM/br1DO2HCPCmgbZE5g8RM5JxzojGn/41Le
|
||||
IbCd39zdI6NKj9c7T1Bxmt20uzca4nRgXVVzJymedEoF+//sBRk6PQzqgjgn/r3S
|
||||
h9vrqo5j8ly/+ojFjBaVY7gq2XHM6/q0LTjeKkv2MUQw+vEEZX65GpBOgBZ8U0Wb
|
||||
/NMUUhhDjGE/0G6TCJgq/HdkjmsNaWjO5sWjhnwXNImYXBdH4OenhXIrHcLhcnxN
|
||||
2n5sPkDc6n0LVVV7VAjBPXcTmu2uOSK02yqNZLLWJygp1Wl6lbiqLS3bJgYrUv2m
|
||||
YkRaR+IqVPw5EPs/QlH0qLBeCyIasaSWUVZeitVwRmqIUA==
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,61 +0,0 @@
|
||||
<configuration version="7">
|
||||
<folder id="unique" path="s4" ro="false" rescanIntervalS="60" ignorePerms="false">
|
||||
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></device>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"></device>
|
||||
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU"></device>
|
||||
<versioning></versioning>
|
||||
<lenientMtimes>false</lenientMtimes>
|
||||
<copiers>1</copiers>
|
||||
<pullers>16</pullers>
|
||||
<finishers>1</finishers>
|
||||
</folder>
|
||||
<folder id="default" path="s4d" ro="false" rescanIntervalS="60" ignorePerms="false">
|
||||
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK"></device>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"></device>
|
||||
<versioning></versioning>
|
||||
<lenientMtimes>false</lenientMtimes>
|
||||
<copiers>1</copiers>
|
||||
<pullers>16</pullers>
|
||||
<finishers>1</finishers>
|
||||
</folder>
|
||||
<device id="EJHMPAQ-OGCVORE-ISB4IS3-SYYVJXF-TKJGLTU-66DIQPF-GJ5D2GX-GQ3OWQK" name="s4" compression="true" introducer="false">
|
||||
<address>dynamic</address>
|
||||
</device>
|
||||
<device id="I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU" name="s1" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</device>
|
||||
<device id="JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU" name="s2" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</device>
|
||||
<device id="373HSRP-QLPNLIE-JYKZVQF-P4PKZ63-R2ZE6K3-YD442U2-JHBGBQG-WWXAHAU" name="s3" compression="true" introducer="false">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</device>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8084</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>:22004</listenAddress>
|
||||
<globalAnnounceServer>udp4://announce.syncthing.net:22026</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>false</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<localAnnounceMCAddr>[ff32::5222]:21026</localAnnounceMCAddr>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<maxRecvKbps>0</maxRecvKbps>
|
||||
<reconnectionIntervalS>10</reconnectionIntervalS>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>false</upnpEnabled>
|
||||
<upnpLeaseMinutes>0</upnpLeaseMinutes>
|
||||
<upnpRenewalMinutes>30</upnpRenewalMinutes>
|
||||
<urAccepted>-1</urAccepted>
|
||||
<urUniqueID></urUniqueID>
|
||||
<restartOnWakeup>true</restartOnWakeup>
|
||||
<autoUpgradeIntervalH>12</autoUpgradeIntervalH>
|
||||
<keepTemporariesH>24</keepTemporariesH>
|
||||
<cacheIgnoredFiles>true</cacheIgnoredFiles>
|
||||
<progressUpdateIntervalS>5</progressUpdateIntervalS>
|
||||
<symlinksEnabled>true</symlinksEnabled>
|
||||
</options>
|
||||
</configuration>
|
||||
@@ -1,23 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID5TCCAk+gAwIBAgIISAohRYcPi4cwCwYJKoZIhvcNAQELMBQxEjAQBgNVBAMT
|
||||
CXN5bmN0aGluZzAeFw0xNDA5MTQyMjI0MTBaFw00OTEyMzEyMzU5NTlaMBQxEjAQ
|
||||
BgNVBAMTCXN5bmN0aGluZzCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGB
|
||||
ANqeoSRjMEnjCOTO6yAaAl3XeV3nAYlC1MK2qPPIBo4NuY8ilEXM0Q7BL1ux8f+k
|
||||
V7VPRL2QBizRai7/DqdsXdVQGPY38p4MKpIyp/PXQcPxLyIgZXPE8OQqX9sBltKV
|
||||
Xjbe9aJbiGnFrLeQye10DBkq+2UXCMeNX06KjVPaxNf7O4fdowgfYu28QsyMb1lw
|
||||
g72ve501lKvtjEobBN2+NAxm1vBKLVU9onbU6ewGZBMHtap9qE+gDulkS419U62p
|
||||
79HG/xvBcazWIGSWw9AsHNO6GOtvsjb1U2m2KUHqFAIKmmNPuiy/RupXHoHpgiec
|
||||
r+sZHx3OFL66gdnZWNRTMcDUrpflD8f/Mp6gba2+1CM2RB3bIzNaEiYbgippH5FM
|
||||
/Hpa9q931+Rucll+NMwPb3ORTG8XSAB7DoNqudixid1S+Grc1bqF2dq9ZzJUwIqH
|
||||
uJBtshb1U/p8t7I1pEOTGnWs1b/tasDOIHiHx1AWCKa8mMLzqqx6d9IuzUI5vcql
|
||||
xwIDAQABoz8wPTAOBgNVHQ8BAf8EBAMCAKAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
|
||||
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4IBgQCYFBRPZqpc
|
||||
1+A7CpfcYezH4/9Cd9yUMeYBFebGnB9NEsp6Q/BaLAHBaeDfxjed45etOH3WaYBJ
|
||||
IUru+bAuk8xKitYmpXi4AByPmYp3pI1gsldqYSZWS6ivpdGD/t4Yd6nOxNZokOOq
|
||||
Ygxdg7WCu1d5fgvReZ3GbBlRn6+uym7ZZuTcpgOqbdeMY3EFKwoBR5LtW9iaDeZ5
|
||||
gQXvTRk276TgFZuh9GPYv4iD3F3iAlDHdA8sQz4mUnYHxYrWoSViy7TOPZbB/NP0
|
||||
eAN5d4s1BhESNzDX3ODR47VL7IyZtVqbNURPdw0BVMnC7m1iKTsSAsh+b4zShNqg
|
||||
RNjBtv3Kew5BKP79qUZL5v/WSBgIl1RIko/jp+RwlghoFJGl5ku2A0syZLgUBor1
|
||||
VaTfL9Uv/fSYUscBnAd/2Yt+Gxm3Ng48I24FkXdL4ZWVP76+63ugoEGvgAiRTHBC
|
||||
Lls7vBQS7a/3TVayhogADkUo6XgZE/FfNTqLeLKoljBLLyYs3CIHE8s=
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,39 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG5AIBAAKCAYEA2p6hJGMwSeMI5M7rIBoCXdd5XecBiULUwrao88gGjg25jyKU
|
||||
RczRDsEvW7Hx/6RXtU9EvZAGLNFqLv8Op2xd1VAY9jfyngwqkjKn89dBw/EvIiBl
|
||||
c8Tw5Cpf2wGW0pVeNt71oluIacWst5DJ7XQMGSr7ZRcIx41fToqNU9rE1/s7h92j
|
||||
CB9i7bxCzIxvWXCDva97nTWUq+2MShsE3b40DGbW8EotVT2idtTp7AZkEwe1qn2o
|
||||
T6AO6WRLjX1Tranv0cb/G8FxrNYgZJbD0Cwc07oY62+yNvVTabYpQeoUAgqaY0+6
|
||||
LL9G6lcegemCJ5yv6xkfHc4UvrqB2dlY1FMxwNSul+UPx/8ynqBtrb7UIzZEHdsj
|
||||
M1oSJhuCKmkfkUz8elr2r3fX5G5yWX40zA9vc5FMbxdIAHsOg2q52LGJ3VL4atzV
|
||||
uoXZ2r1nMlTAioe4kG2yFvVT+ny3sjWkQ5MadazVv+1qwM4geIfHUBYIpryYwvOq
|
||||
rHp30i7NQjm9yqXHAgMBAAECggGBAMeWuxc1VwidtajvH8oW9MInzi3kkIp38TYy
|
||||
/NxTaWiXLyl2MFfpPZNy24GjW4RAzbJBxEgsDPct2Ps+8Gn5jVEJ50Aio+WWxebj
|
||||
SGJdyzTQJG/Lk9O1oRcteIXBVai7pWAC/c5UMp4eUijkjvWyVLlFfG42MVW9w504
|
||||
8P31ZHCqdRb9SbJItVDF51ZHgADvr9alNv23xRuRq9qcAD1RQMNxwBlwHyMLOh+z
|
||||
EjzhOMwG5dvZDKhlQDfj0PZDzPlnglGRIshyKyvogyP3tYSmZ3V9SUvS2gM+sPw+
|
||||
s7htKtxmiW91OuJ9v6ADDLax7CprkEDyd8OdlcdCANX2goB+F6kzXQH+6eGYVE2N
|
||||
PzYPkbzyc7i6kMgjOgScuEO1D/c7ILHjxwEyTfZtqa+gvfZ7AHTpTCUtPdXxfi5j
|
||||
9/LMk9+W8rFxMPIjOl16K2QyUzu2ym56TOD6qYqIX2qrNV/BByn6NjF2Tl15jnPp
|
||||
e0oi0R0d8qwfPzZBOXpXsTdvDOJ4QQKBwQDbLolatR2k4bj+62W/tp0LJmQBAeZF
|
||||
tjArlab/C0+t7vT8AM3az7ESOAp8cY8li/0O2dFdEH5XtdpnDAEfMrUr2Uc3uHf0
|
||||
QXwJuH4V4mNE1vTH3pA1qE7/7IElO9pByP0rVLCQhvA19uUQhHH37iQFPfg575PY
|
||||
uyJlOzTXr4gGX33DVvHOJcMIN5lwWDAnlonkpG8o0lPP290IsQHU7TGogc++wXkA
|
||||
6kf5VdVoOXEYEAHTf84ZqQephV/kOwsMRz0CgcEA/1frW9h0oB8xxmxPmdaONiiT
|
||||
F7JDpmJo67RYRAe5pHAZRyN57Wbu880lx2H2PcMW9hzmfd48di7BbbRr+bg5Uxy3
|
||||
ai/CgjtgpCpjeqJ1IMJMPdzE0S3m/N/YY2vTci0xQwZw9Fq2AwgirDrXTQ6/Ood9
|
||||
W43mL0hx4tKydy6LhppnsJO+MESm/hx6Ew0QngwYKPEohobnot4DaoZyzoflIEVG
|
||||
yOEwRSUAUPOmuRQZC+w496jMxTObyShMrpp9rpFTAoHAPGZam5CFlsZNQJKF+4rL
|
||||
RCNUM6LeXh+SrrAS0P3A+2F6SWe/UqkhVq/y09BHbkVhexIzS74b0vfeM79vH7XN
|
||||
j0PVCFnhVIInOFaLCGTWjkXeNqXyf5beDlCSVjxkLPTCL4qrDWjiETz0atTUw0nw
|
||||
yzEEkpKe337SP6tNKJLKnVb7RTVUdUaatEz+D6N9wasOXN+jclBjoEgqZRbCNncW
|
||||
1CTRpvOR8Nqe8urgYFRUAhmHJ0108kVOQzzp6+8JYFzRAoHBAIkTO6gMpV8oH+Jz
|
||||
VrAxPBra4UwBSMvTXJvcLt4mf4RFIWzNILFPZsu+v58vea9iQbtRfHLpkO+o3fH0
|
||||
v1pJiYySh+wbQ4ICOjknAExfVh2F8MPs9kONLsllqZaF1fcfR6jBlnW3FKq//U0U
|
||||
MWyOlB3pimRR4tZTP8ASd/f/JqvVzABA8AKdeEBGLUp44wjVWUrxW14MoeEO6iqP
|
||||
jqZM0bXnOr6wFOepm2fZxRDqNx/tag+ZsIPU1rbASZoaGYpTPQKBwBUZTt4EbiI3
|
||||
NTg0psWOsXm+HAtbgDSkw6A6JaQx7C6/XxRKUHfkSICzGnz2JsuoWdC7bCAu/nuy
|
||||
/5LTOaTDiA7MJuuYq5ofM/9M8ocLqRu3etXvTUwEnXXbUSMa/6YUIlY8Q42tSSto
|
||||
s0GkLkUynWv2kZY6z049f9qCZaF5RhzOxxZEoARf2Tirv+q4ow6t5UpEQKifvOW1
|
||||
pU7CLjlECJgQ23VdwEY0jDdTB6w3Hd4QXAwpQSxDUeaJjVy3L/v7dA==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -1,39 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG5AIBAAKCAYEA9MRyBtArSjt29azNoCWxx5xZF3RodBcQu+wv5sRR8lWozrr4
|
||||
brfUJLslcQHowqaAprOU1NP+BH12P5CSymsUrwAmCwSQ54CimXrNi5RiNMl7dtIn
|
||||
Jksk4Kp6nJgfyR7TqeQgqxtv+skVWdJY7ptxqpVuDfkf1JnNr68dbANw8hEJpPaG
|
||||
m3qOt81YvSg37R75HiOCzv+hFcSjKpPyFMvPARMCOHuZS0fYRJtI5nwmR0mWtKfn
|
||||
H/2204YNiQUne/8h2fgtkpxyOjxKOs2KJxbmpV6Uur/YyGyinb5+Aa0df3KCBuZm
|
||||
E+i/AsZcTsk0fgefe+bshWG/hzrNfV0wsX3TYjYOSBJ04+f/uQW00G1GGSxPwTsS
|
||||
hGqVuwfJkTqkjAXX5wcH+PgJewG/dyMzKklMg19Y65WkhpWa/19o2KSZNw6TO8YM
|
||||
1arwT0STcMc+4fdrVB09lX6qNJA8UL8hUX+jbKBzatDY64h1d9E8PE0ODHYgYFO2
|
||||
Ko7e2GnWCQeijGmnAgMBAAECggGBAIjKaLdqC2d3CCqQonJH3q0hsaCsC9wlL9L2
|
||||
UmbzfKCkQq0WTNUDo2nLtUcMvBpclzWS0zCGMUYtH7Kyh3bclTigKqKpsJnQiA6i
|
||||
VNEW4jOCDp//HqYGBNwSKmftlIX/1mbx+VfnA5PyYR5LsivXb5TX4iOpAKL+Obdf
|
||||
dF/zJGIEJ5GrvNqTicMq3dcI7Qh18N9pFSe+MTZLKK0Y9Yetx0hgaTNL0AYEZtcg
|
||||
uYMmCvZ4J+Namo6EanKYTmQvHzvq/tZVMvud9Gcr6uKKtVBcgex9S/R7IicaKg78
|
||||
oDTgH0nDrpI55pZCX8vuVGk8nVTXXLTsMR1XojOpiYjS6ucfTkPEw3fOW/YRhHg5
|
||||
93TrdDiWkqSWube5LNUF87q65t/aw/y2EH2aTNqcPD5OQ+EZRS8OGYPqOrJ4Ycbp
|
||||
j6CMSE+LX2IDMQyJ+9J0vPHtFsAviBKQkPoQ1L6mvhJuw6ksy34NQGykNDHz7nQK
|
||||
SeqvCJ6XCtaWNkq+00lC3UFaGsjuUQKBwQD8+y370co5G7G5GDLbLE3i+pguUN7L
|
||||
5YfDj5qqsM9hOJNqeKAHrKFP2ii0F9WxGw/ruY0k8k7zUt6LepgwkCI5BYfckRKJ
|
||||
g8YsNTizjqPLRGtiqL9Garjo+xPxFGj+TkTg9fYD4xTWFa1I15zzCu7Ye7xObeEH
|
||||
LRtcm3R4fU54JDrKtKDccoQmTEAzsxRdNXi9ifc7qgjGBH9W02guuGPY4ltT1aZR
|
||||
bcO5vpi44Fnl2h6d7N6iwCtFJ0CaT1pAZ4UCgcEA97Asf5DTDWKByZBhk+VvuT1b
|
||||
6nMYjqKxDNMmCaomCmk8Mif0w9SEJmAg0b/gbs/H6T78a+9WjbN5q9xHcDU91uax
|
||||
TdCenTq7H981AjgUG7OA7XwYn+AKy+hGSnsTJglMJzJm6TGt+Sq0oO9EahBRDlsP
|
||||
PiQRot2gyQfubwcl3rhdErRwaCM92BUyPkC2fy2OppAeZOOxxuzxrvHflDOuDGCZ
|
||||
KPCmy6U9HV0JOAO2FSNJeZdNLBixXa1Pk8TgbLY7AoHBAPG7lhn9Qg3Fz9H9NINH
|
||||
13jfWdFQB0SwJEWTEAiwgMj2ha6Eau5KX63s2V4VNGVSZakqmZtHSneppOuEjq5A
|
||||
2+K+zS7PFPaACzos9OxmjU7rJu2UL4m66sv9NvXzOcxev+RyQs0+DKfw+K8VEG0Q
|
||||
8l+8BJiw2AjCalXYWbfUjMmyXNdbOCbN6kaqL+L26KuUL7Z1gd/qPw3wODmgMvoJ
|
||||
yabxzLDUA2PlzdPMMyTdhCllfkILmEXN+MrQkiOhVa0a/QKBwGZjAhH9ePD4fnQm
|
||||
5d8wIb3uGlfRGh6kLBIEGp42IqF9HPASykBFUhdW91odOhY0eAv4CHpJpnrO7QXY
|
||||
+gLtT1HNbQ+gpGCUTZQAPbZcHhvRWQNSoA8+mtftfVj+hUzc3Qj68cWFzsfIGoDI
|
||||
R3ycoBUSGTvzxwKPIQ7Y43wr9UCa74Zy5mB16POw12MadxYda/F4c8f6w5taiRFr
|
||||
VKO7tT/Skp101U4rURcZRV1NU3BrdMz5eWI4FuGFafbIlIj7zwKBwHCt3VQt+JmZ
|
||||
OhCJR+8Q+jT0JvnMu1zi4CcMRiT8FbNdZDY/3B0wG4ySTNrEikFzIjihF4zIp2nv
|
||||
nD3qKQs+THl51GA8AnP9bNk7hknD7rXUuScndccTW58+PGrjqfwJp/1MEeOJQpoX
|
||||
0JML1w+dIKHzsKN0X6UL7Gyq8m+0SJKmQQguan3d3M8CMpnW0srgqOfJ+q1+bz8b
|
||||
6FuJeijoaN8+zyKkN+9R91Erw5pk+7vJRzEpDtkhprEE5tLNDKrXJw==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -15,7 +15,7 @@
 
 // +build integration
 
-package integration_test
+package integration
 
 import (
 	"encoding/json"
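Aside (not from the diff): the // +build integration constraint above means these files are only compiled when the integration build tag is supplied, e.g. go test -tags integration ./test. A minimal hypothetical file gated the same way, for illustration only:

// Hypothetical example, not part of the diff: compiled only when the
// integration build tag is set (go test -tags integration).

// +build integration

package integration

import "testing"

func TestExample(t *testing.T) {
	t.Log("only runs with -tags integration")
}

Note the blank line between the build constraint and the package clause; without it the comment is treated as ordinary documentation and the constraint is ignored.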
Some files were not shown because too many files have changed in this diff.