Compare commits

...

36 Commits

Author SHA1 Message Date
Audrius Butkevicius
ed747a2d3d Add identicons to device prompts 2015-01-03 23:34:15 +00:00
Jakob Borg
3a8ee4ce2e Merge pull request #1169 from syncthing/pullhash
Hash blocks after receipt, try multiple peers (fixes #1166)
2015-01-04 00:24:07 +01:00
Audrius Butkevicius
5ac01a3af4 Hash blocks after receipt, try multiple peers (fixes #1166) 2015-01-03 23:21:57 +00:00
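For illustration, "hash blocks after receipt" means verifying each received block against its expected digest before accepting it, so data from a bad peer is rejected and another peer can be tried. A hedged sketch (Syncthing uses SHA-256 block hashes; the function and variable names here are illustrative, not the project's actual code):

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// blockOK hashes the received block and compares it to the expected
// digest; a mismatch means the block must be re-requested elsewhere.
func blockOK(block, want []byte) bool {
    sum := sha256.Sum256(block)
    return bytes.Equal(sum[:], want)
}

func main() {
    data := []byte("block payload")
    want := sha256.Sum256(data)
    fmt.Println(blockOK(data, want[:]))                // true
    fmt.Println(blockOK([]byte("corrupted"), want[:])) // false
}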
Jakob Borg
46343f2f9e Merge pull request #1174 from AudriusButkevicius/intro
New device, folder prompts (fixes #120, fixes #330)
2015-01-04 00:16:10 +01:00
Audrius Butkevicius
56ccb5b2ab New device, folder prompts (fixes #120, fixes #330) 2015-01-03 23:06:41 +00:00
Jakob Borg
9a946eed80 Discourse -> Wiki for docs 2015-01-03 16:44:13 +01:00
Audrius Butkevicius
9c6cb0f630 Merge pull request #1172 from syncthing/random-scanintv
Add a random perturbation to the scan interval (fixes #1150)
2015-01-02 15:25:22 +00:00
Audrius Butkevicius
1b066d6965 Merge pull request #1171 from syncthing/jobqueue
Add job queue (replaces #1060)
2015-01-02 15:18:50 +00:00
Jakob Borg
54c3caad53 Add a random perturbation to the scan interval (fixes #1150) 2015-01-02 16:16:16 +01:00
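A scan-interval perturbation of this kind is typically a random multiplier on the configured interval, so folders sharing the same setting don't all rescan in lockstep. A minimal Go sketch; the ±25% spread and the function name are illustrative assumptions, not necessarily what the commit implements:

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// perturbedInterval returns the base interval scaled by a random factor
// in [0.75, 1.25), so periodic scans drift apart instead of firing at
// the same instant.
func perturbedInterval(base time.Duration) time.Duration {
    factor := 0.75 + rand.Float64()*0.5
    return time.Duration(float64(base) * factor)
}

func main() {
    for i := 0; i < 3; i++ {
        fmt.Println(perturbedInterval(60 * time.Second))
    }
}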
Jakob Borg
9b5e8aaf83 Repair buggy BringToFront 2015-01-02 15:54:04 +01:00
Jakob Borg
5143c09bcf Refactor / cleanup 2015-01-02 15:54:04 +01:00
Jakob Borg
2496185629 Only buffer file names, not full &FileInfo 2015-01-02 15:33:39 +01:00
Jakob Borg
34deb82aea Use slice instead of list, no map
benchmark                           old ns/op     new ns/op     delta
BenchmarkJobQueueBump               345           154498        +44682.03%
BenchmarkJobQueuePushPopDone10k     9437373       3258204       -65.48%

benchmark                           old allocs     new allocs     delta
BenchmarkJobQueueBump               0              0              +0.00%
BenchmarkJobQueuePushPopDone10k     10565          22             -99.79%

benchmark                           old bytes     new bytes     delta
BenchmarkJobQueueBump               0             0             +0.00%
BenchmarkJobQueuePushPopDone10k     1452498       385869        -73.43%
2015-01-02 15:33:39 +01:00
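Tables like the one above are produced by running go test -bench=. -benchmem before and after a change and comparing the two outputs (the benchcmp tool of that era emits exactly this delta format). A minimal sketch of such a benchmark; the queue type and its methods are stand-ins, not Syncthing's actual jobqueue API:

package jobqueue

import "testing"

// benchQueue is a hypothetical stand-in for the job queue under test.
type benchQueue struct{ queued []string }

func (q *benchQueue) Push(s string) { q.queued = append(q.queued, s) }

func (q *benchQueue) Pop() string {
    s := q.queued[0]
    q.queued = q.queued[1:]
    return s
}

func BenchmarkPushPop(b *testing.B) {
    q := &benchQueue{}
    b.ReportAllocs() // include allocs/op and bytes/op, as -benchmem does
    for i := 0; i < b.N; i++ {
        q.Push("filename")
        _ = q.Pop()
    }
}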
Jakob Borg
8f72ae9da2 Add some benchmarks 2015-01-02 15:33:39 +01:00
Audrius Butkevicius
b753f01ac1 Add tests 2015-01-02 15:33:39 +01:00
Audrius Butkevicius
fd0a147ae6 Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.

Also, this has a slightly strange effect: we pop a job off the queue, but
the copyChannel is still busy and blocks. The job gets moved to the
progress slice in the jobqueue and looks like it's in progress, which it
isn't, as it's still waiting to be picked up from the copyChan.

As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, yet it also cannot be replaced by a bump.

I guess I can fix the progress bar issue by moving the progressEmiter.Register
just before passing the file to the copyChan, but then we are back to the
initial problem of a file with a progress bar but no progress happening, as
it's stuck on the write to copyChan.

I checked if there is a way to check for channel writability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.

My ideal scenario would be to check if copyChan is writable, pop the job from
the queue and shove it down handleFile. This way jobs would stay in the queue
while they cannot be handled, meaning that the `Bump` could bring your file up
higher.
2015-01-02 15:33:39 +01:00
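Go has no pure "is this channel writable" predicate, but a select with a default arm gives an atomic try-send, which is the closest the language gets to the ideal scenario described above: the job is popped only if the send actually succeeds. A sketch under that assumption (the channel and job names are illustrative):

package main

import "fmt"

func main() {
    copyChan := make(chan string, 1)
    queue := []string{"fileA", "fileB", "fileC"}

    for len(queue) > 0 {
        select {
        case copyChan <- queue[0]:
            // Send succeeded: only now is the job really in progress.
            queue = queue[1:]
        default:
            // copyChan is full: leave the job queued so a Bump can
            // still reorder it.
            fmt.Println("copyChan busy;", len(queue), "jobs still bumpable")
            return
        }
    }
}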
Audrius Butkevicius
e94bd90782 Merge pull request #1164 from syncthing/ro-tempfiles
Handle read only temp files after crash/restart
2014-12-31 12:08:37 +00:00
Jakob Borg
ce4b897d0e Handle read only temp files after crash/restart 2014-12-31 13:06:28 +01:00
Jakob Borg
a7694029e2 Make sure to stop processes when exiting integration test 2014-12-31 13:04:06 +01:00
Jakob Borg
1e9110b763 Add debugging utility for manual directory comparison 2014-12-31 13:04:06 +01:00
Jakob Borg
6f3fbbbe49 Improve error checking in integration tests 2014-12-31 13:04:04 +01:00
Jakob Borg
d346ec7bfe Merge pull request #1160 from AudriusButkevicius/upnp
Use unique names for UPnP mappings (fixes #1100, fixes #1128)
2014-12-31 12:56:47 +01:00
Jakob Borg
26a3613397 Merge pull request #1162 from AudriusButkevicius/silence
Silence versioner warnings for unmatched files (fixes #1117)
2014-12-31 12:54:23 +01:00
Jakob Borg
e6318bddf3 Merge pull request #1161 from AudriusButkevicius/upnp2
Use ListenMulticastUDP for multicast sockets (potentially fixes #1113)
2014-12-31 12:53:56 +01:00
Audrius Butkevicius
514bb0beda Silence versioner warnings for unmatched files (fixes #1117) 2014-12-30 22:43:07 +00:00
Audrius Butkevicius
41b1bd2f05 Use ListenMulticastUDP for multicast sockets (potentially fixes #1113) 2014-12-30 22:27:47 +00:00
Audrius Butkevicius
bf40dadf04 Use unique names for UPnP mappings (fixes #1100, fixes #1128) 2014-12-30 21:47:12 +00:00
Jakob Borg
cb1678ebec Clean up folders after -reset test 2014-12-30 11:02:49 +01:00
Jakob Borg
0c1ac568b5 Fix tests with newer goleveldb 2014-12-29 14:50:24 +01:00
Audrius Butkevicius
0f9550c747 Merge pull request #1149 from syncthing/fix-1058
Also check file size when determining if file is unchanged (fixes #1058)
2014-12-29 13:29:00 +00:00
Audrius Butkevicius
b13ae17a47 Merge pull request #1147 from syncthing/fix-1118
Generate a random API key on initial setup (fixes #1118)
2014-12-29 13:28:38 +00:00
Jakob Borg
f762a12d18 Also check file size when determining if file is unchanged (fixes #1058) 2014-12-29 14:24:12 +01:00
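For illustration, the "unchanged" test this commit strengthens is typically a comparison of stored metadata against a fresh stat; the names below are assumptions, not Syncthing's actual scanner code:

package main

import (
    "fmt"
    "os"
)

// unchanged reports whether a file looks untouched: same modification
// time AND same size as last seen. Checking mtime alone (the pre-fix
// behavior) misses same-mtime edits that alter the length.
func unchanged(prev, cur os.FileInfo) bool {
    return prev.ModTime().Equal(cur.ModTime()) && prev.Size() == cur.Size()
}

func main() {
    fi1, err := os.Stat("example.txt")
    if err != nil {
        panic(err)
    }
    fi2, err := os.Stat("example.txt")
    if err != nil {
        panic(err)
    }
    fmt.Println(unchanged(fi1, fi2)) // true if the file wasn't touched in between
}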
Jakob Borg
20d30a80be Generate a random API key on initial setup (fixes #1118)
Also makes the javascript implementation use the same algorithm for
generating random strings.
2014-12-29 13:48:26 +01:00
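For illustration, generating a random string of this kind in Go usually means drawing indices from a fixed alphabet with crypto/rand; the alphabet and 32-character length below are assumptions, not necessarily the commit's exact parameters:

package main

import (
    "crypto/rand"
    "fmt"
    "math/big"
)

const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// randomString draws each character uniformly from the alphabet using
// the OS's cryptographic randomness source.
func randomString(n int) (string, error) {
    buf := make([]byte, n)
    max := big.NewInt(int64(len(alphabet)))
    for i := range buf {
        idx, err := rand.Int(rand.Reader, max)
        if err != nil {
            return "", err
        }
        buf[i] = alphabet[idx.Int64()]
    }
    return string(buf), nil
}

func main() {
    key, err := randomString(32)
    if err != nil {
        panic(err)
    }
    fmt.Println(key)
}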
Audrius Butkevicius
229b218203 Merge pull request #1146 from syncthing/fix-1047
Make auto upgrade careful about breaking changes (fixes #1047)
2014-12-29 11:41:10 +00:00
Jakob Borg
4b668aaca8 Make auto upgrade careful about breaking changes (fixes #1047) 2014-12-29 12:35:06 +01:00
Jakob Borg
8c7f1421c6 Update goleveldb 2014-12-29 12:23:07 +01:00
48 changed files with 2630 additions and 1618 deletions

Godeps/Godeps.json (generated)

@@ -1,6 +1,6 @@
 {
 "ImportPath": "github.com/syncthing/syncthing",
-"GoVersion": "go1.4rc1",
+"GoVersion": "go1.4",
 "Packages": [
 "./cmd/..."
 ],
@@ -31,7 +31,7 @@
 },
 {
 "ImportPath": "github.com/syndtr/goleveldb/leveldb",
-"Rev": "97e257099d2ab9578151ba85e2641e2cd14d3ca8"
+"Rev": "63c9e642efad852f49e20a6f90194cae112fd2ac"
 },
 {
 "ImportPath": "github.com/syndtr/gosnappy/snappy",
@@ -51,23 +51,19 @@
 },
 {
 "ImportPath": "golang.org/x/crypto/bcrypt",
-"Comment": "null-236",
-"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
+"Rev": "731db29863ea7213d9556d0170afb38987f401d4"
 },
 {
 "ImportPath": "golang.org/x/crypto/blowfish",
-"Comment": "null-236",
-"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
+"Rev": "731db29863ea7213d9556d0170afb38987f401d4"
 },
 {
 "ImportPath": "golang.org/x/text/transform",
-"Comment": "null-112",
-"Rev": "2f707e0ad64637ca1318279be7201f5ed19c4050"
+"Rev": "985ee5acfaf1ff6712c7c99438752f8e09416ccb"
 },
 {
 "ImportPath": "golang.org/x/text/unicode/norm",
-"Comment": "null-112",
-"Rev": "2f707e0ad64637ca1318279be7201f5ed19c4050"
+"Rev": "985ee5acfaf1ff6712c7c99438752f8e09416ccb"
 }
 ]
 }


@@ -8,152 +8,669 @@
package cache
import (
"sync"
"sync/atomic"
"unsafe"
"github.com/syndtr/goleveldb/leveldb/util"
)
// SetFunc is the function that will be called by Namespace.Get to create
// a cache object; if charge is less than one then the cache object will
// not be registered to the cache tree, and if value is nil then the cache
// object will not be created.
type SetFunc func() (charge int, value interface{})
// DelFin is the function that will be called as the result of a delete operation.
// Exist == true indicates that the object exists, and pending == true
// indicates that deletion has already happened but hasn't completed yet
// (waiting for all handles to be released). Exist == false means the object doesn't exist.
type DelFin func(exist, pending bool)
// PurgeFin is the function that will be called as the result of a purge operation.
type PurgeFin func(ns, key uint64)
// Cache is a cache tree. A cache instance must be goroutine-safe.
type Cache interface {
// SetCapacity sets cache tree capacity.
SetCapacity(capacity int)
// Capacity returns cache tree capacity.
// Cacher provides an interface for implementing caching functionality.
// An implementation must be goroutine-safe.
type Cacher interface {
// Capacity returns cache capacity.
Capacity() int
// Used returns used cache tree capacity.
Used() int
// SetCapacity sets cache capacity.
SetCapacity(capacity int)
// Size returns the total size of alive cache objects.
Size() int
// Promote promotes the 'cache node'.
Promote(n *Node)
// NumObjects returns number of alive objects.
NumObjects() int
// Ban evicts the 'cache node' and prevent subsequent 'promote'.
Ban(n *Node)
// GetNamespace gets cache namespace with the given id.
// GetNamespace never returns nil.
GetNamespace(id uint64) Namespace
// Evict evicts the 'cache node'.
Evict(n *Node)
// PurgeNamespace purges cache namespace with the given id from this cache tree.
// Also read Namespace.Purge.
PurgeNamespace(id uint64, fin PurgeFin)
// EvictNS evicts 'cache node' with the given namespace.
EvictNS(ns uint64)
// ZapNamespace detaches cache namespace with the given id from this cache tree.
// Also read Namespace.Zap.
ZapNamespace(id uint64)
// EvictAll evicts all 'cache node'.
EvictAll()
// Purge purges all cache namespaces from this cache tree.
// This behaves the same as calling the Namespace.Purge method on every cache namespace.
Purge(fin PurgeFin)
// Zap detaches all cache namespaces from this cache tree.
// This behaves the same as calling the Namespace.Zap method on every cache namespace.
Zap()
// Close closes the 'cache tree'
Close() error
}
// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
type Namespace interface {
// Get gets the cache object with the given key.
// If the cache object is not found and setf is not nil, Get atomically creates
// the cache object by calling setf. Otherwise Get returns nil.
//
// The returned cache handle should be released after use by calling the
// Release method.
Get(key uint64, setf SetFunc) Handle
// Value is a 'cacheable object'. It may implement util.Releaser; if
// so, the Release method will be called once the object is released.
type Value interface{}
// Delete removes the cache object with the given key from the cache tree.
// A deleted cache object will be released as soon as all of its handles have
// been released.
// Delete only happens once; a subsequent delete will consider the cache object
// nonexistent, even if it isn't released yet.
//
// If not nil, fin will be called if the cache object doesn't exist or when it
// is finally released.
//
// Delete returns true if such a cache object exists and has never been deleted.
Delete(key uint64, fin DelFin) bool
// Purge removes all cache objects within this namespace from the cache tree.
// This is the same as doing a delete on all cache objects.
//
// If not nil, fin will be called on each cache object when it is finally
// released.
Purge(fin PurgeFin)
// Zap detaches namespace from cache tree and release all its cache objects.
// A zapped namespace can never be filled again.
// Calling Get on zapped namespace will always return nil.
Zap()
type CacheGetter struct {
Cache *Cache
NS uint64
}
// Handle is a cache handle.
type Handle interface {
// Release releases this cache handle. This method can be safely called
// multiple times.
Release()
// Value returns the value of this cache handle.
// Value returns nil after this cache handle has been released.
Value() interface{}
func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
return g.Cache.Get(g.NS, key, setFunc)
}
// The hash table implementation is based on:
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
const (
DelNotExist = iota
DelExist
DelPendig
mInitialSize = 1 << 4
mOverflowThreshold = 1 << 5
mOverflowGrowThreshold = 1 << 7
)
// Namespace state.
type nsState int
const (
nsEffective nsState = iota
nsZapped
)
// Node state.
type nodeState int
const (
nodeZero nodeState = iota
nodeEffective
nodeEvicted
nodeDeleted
)
// Fake handle.
type fakeHandle struct {
value interface{}
fin func()
once uint32
type mBucket struct {
mu sync.Mutex
node []*Node
frozen bool
}
func (h *fakeHandle) Value() interface{} {
if atomic.LoadUint32(&h.once) == 0 {
return h.value
func (b *mBucket) freeze() []*Node {
b.mu.Lock()
defer b.mu.Unlock()
if !b.frozen {
b.frozen = true
}
return b.node
}
func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
b.mu.Lock()
if b.frozen {
b.mu.Unlock()
return
}
// Scan the node.
for _, n := range b.node {
if n.hash == hash && n.ns == ns && n.key == key {
atomic.AddInt32(&n.ref, 1)
b.mu.Unlock()
return true, false, n
}
}
// Get only.
if noset {
b.mu.Unlock()
return true, false, nil
}
// Create node.
n = &Node{
r: r,
hash: hash,
ns: ns,
key: key,
ref: 1,
}
// Add node to bucket.
b.node = append(b.node, n)
bLen := len(b.node)
b.mu.Unlock()
// Update counter.
grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
if bLen > mOverflowThreshold {
grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
}
// Grow.
if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
nhLen := len(h.buckets) << 1
nh := &mNode{
buckets: make([]unsafe.Pointer, nhLen),
mask: uint32(nhLen) - 1,
pred: unsafe.Pointer(h),
growThreshold: int32(nhLen * mOverflowThreshold),
shrinkThreshold: int32(nhLen >> 1),
}
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
if !ok {
panic("BUG: failed swapping head")
}
go nh.initBuckets()
}
return true, true, n
}
func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
b.mu.Lock()
if b.frozen {
b.mu.Unlock()
return
}
// Scan the node.
var (
n *Node
bLen int
)
for i := range b.node {
n = b.node[i]
if n.ns == ns && n.key == key {
if atomic.LoadInt32(&n.ref) == 0 {
deleted = true
// Call releaser.
if n.value != nil {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
n.value = nil
}
// Remove node from bucket.
b.node = append(b.node[:i], b.node[i+1:]...)
bLen = len(b.node)
}
break
}
}
b.mu.Unlock()
if deleted {
// Call OnDel.
for _, f := range n.onDel {
f()
}
// Update counter.
atomic.AddInt32(&r.size, int32(n.size)*-1)
shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
if bLen >= mOverflowThreshold {
atomic.AddInt32(&h.overflow, -1)
}
// Shrink.
if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
nhLen := len(h.buckets) >> 1
nh := &mNode{
buckets: make([]unsafe.Pointer, nhLen),
mask: uint32(nhLen) - 1,
pred: unsafe.Pointer(h),
growThreshold: int32(nhLen * mOverflowThreshold),
shrinkThreshold: int32(nhLen >> 1),
}
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
if !ok {
panic("BUG: failed swapping head")
}
go nh.initBuckets()
}
}
return true, deleted
}
type mNode struct {
buckets []unsafe.Pointer // []*mBucket
mask uint32
pred unsafe.Pointer // *mNode
resizeInProgess int32
overflow int32
growThreshold int32
shrinkThreshold int32
}
func (n *mNode) initBucket(i uint32) *mBucket {
if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
return b
}
p := (*mNode)(atomic.LoadPointer(&n.pred))
if p != nil {
var node []*Node
if n.mask > p.mask {
// Grow.
pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
if pb == nil {
pb = p.initBucket(i & p.mask)
}
m := pb.freeze()
// Split nodes.
for _, x := range m {
if x.hash&n.mask == i {
node = append(node, x)
}
}
} else {
// Shrink.
pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
if pb0 == nil {
pb0 = p.initBucket(i)
}
pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
if pb1 == nil {
pb1 = p.initBucket(i + uint32(len(n.buckets)))
}
m0 := pb0.freeze()
m1 := pb1.freeze()
// Merge nodes.
node = make([]*Node, 0, len(m0)+len(m1))
node = append(node, m0...)
node = append(node, m1...)
}
b := &mBucket{node: node}
if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
if len(node) > mOverflowThreshold {
atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
}
return b
}
}
return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
}
func (n *mNode) initBuckets() {
for i := range n.buckets {
n.initBucket(uint32(i))
}
atomic.StorePointer(&n.pred, nil)
}
// Cache is a 'cache map'.
type Cache struct {
mu sync.RWMutex
mHead unsafe.Pointer // *mNode
nodes int32
size int32
cacher Cacher
closed bool
}
// NewCache creates a new 'cache map'. The cacher is optional and
// may be nil.
func NewCache(cacher Cacher) *Cache {
h := &mNode{
buckets: make([]unsafe.Pointer, mInitialSize),
mask: mInitialSize - 1,
growThreshold: int32(mInitialSize * mOverflowThreshold),
shrinkThreshold: 0,
}
for i := range h.buckets {
h.buckets[i] = unsafe.Pointer(&mBucket{})
}
r := &Cache{
mHead: unsafe.Pointer(h),
cacher: cacher,
}
return r
}
func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
h := (*mNode)(atomic.LoadPointer(&r.mHead))
i := hash & h.mask
b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
if b == nil {
b = h.initBucket(i)
}
return h, b
}
func (r *Cache) delete(n *Node) bool {
for {
h, b := r.getBucket(n.hash)
done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
if done {
return deleted
}
}
return false
}
// Nodes returns the number of 'cache nodes' in the map.
func (r *Cache) Nodes() int {
return int(atomic.LoadInt32(&r.nodes))
}
// Size returns the sum of 'cache node' sizes in the map.
func (r *Cache) Size() int {
return int(atomic.LoadInt32(&r.size))
}
// Capacity returns cache capacity.
func (r *Cache) Capacity() int {
if r.cacher == nil {
return 0
}
return r.cacher.Capacity()
}
// SetCapacity sets cache capacity.
func (r *Cache) SetCapacity(capacity int) {
if r.cacher != nil {
r.cacher.SetCapacity(capacity)
}
}
// Get gets the 'cache node' with the given namespace and key.
// If the 'cache node' is not found and setFunc is not nil, Get atomically
// creates the 'cache node' by calling setFunc. Otherwise Get returns nil.
//
// The returned 'cache handle' should be released after use by calling the
// Release method.
func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return nil
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
if done {
if n != nil {
n.mu.Lock()
if n.value == nil {
if setFunc == nil {
n.mu.Unlock()
n.unref()
return nil
}
n.size, n.value = setFunc()
if n.value == nil {
n.size = 0
n.mu.Unlock()
n.unref()
return nil
}
atomic.AddInt32(&r.size, int32(n.size))
}
n.mu.Unlock()
if r.cacher != nil {
r.cacher.Promote(n)
}
return &Handle{unsafe.Pointer(n)}
}
break
}
}
return nil
}
func (h *fakeHandle) Release() {
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
// Delete removes and bans the 'cache node' with the given namespace and key.
// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
// applies only to that particular 'cache node', so when a 'cache node'
// is recreated it will not be banned.
//
// If onDel is not nil, then it will be executed if such a 'cache node'
// doesn't exist or once the 'cache node' is released.
//
// Delete returns true if such a 'cache node' exists.
func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return false
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, true)
if done {
if n != nil {
if onDel != nil {
n.mu.Lock()
n.onDel = append(n.onDel, onDel)
n.mu.Unlock()
}
if r.cacher != nil {
r.cacher.Ban(n)
}
n.unref()
return true
}
break
}
}
if onDel != nil {
onDel()
}
return false
}
// Evict evicts the 'cache node' with the given namespace and key. This will
// simply call Cacher.Evict.
//
// Evict returns true if such a 'cache node' exists.
func (r *Cache) Evict(ns, key uint64) bool {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return false
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, true)
if done {
if n != nil {
if r.cacher != nil {
r.cacher.Evict(n)
}
n.unref()
return true
}
break
}
}
return false
}
// EvictNS evicts 'cache node' with the given namespace. This will
// simply call Cacher.EvictNS.
func (r *Cache) EvictNS(ns uint64) {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return
}
if h.fin != nil {
h.fin()
h.fin = nil
if r.cacher != nil {
r.cacher.EvictNS(ns)
}
}
// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
func (r *Cache) EvictAll() {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return
}
if r.cacher != nil {
r.cacher.EvictAll()
}
}
// Close closes the 'cache map' and releases all 'cache node'.
func (r *Cache) Close() error {
r.mu.Lock()
if !r.closed {
r.closed = true
if r.cacher != nil {
if err := r.cacher.Close(); err != nil {
return err
}
}
h := (*mNode)(r.mHead)
h.initBuckets()
for i := range h.buckets {
b := (*mBucket)(h.buckets[i])
for _, n := range b.node {
// Call releaser.
if n.value != nil {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
n.value = nil
}
// Call OnDel.
for _, f := range n.onDel {
f()
}
}
}
}
r.mu.Unlock()
return nil
}
// Node is a 'cache node'.
type Node struct {
r *Cache
hash uint32
ns, key uint64
mu sync.Mutex
size int
value Value
ref int32
onDel []func()
CacheData unsafe.Pointer
}
// NS returns this 'cache node' namespace.
func (n *Node) NS() uint64 {
return n.ns
}
// Key returns this 'cache node' key.
func (n *Node) Key() uint64 {
return n.key
}
// Size returns this 'cache node' size.
func (n *Node) Size() int {
return n.size
}
// Value returns this 'cache node' value.
func (n *Node) Value() Value {
return n.value
}
// Ref returns this 'cache node' ref counter.
func (n *Node) Ref() int32 {
return atomic.LoadInt32(&n.ref)
}
// GetHandle returns a handle for this 'cache node'.
func (n *Node) GetHandle() *Handle {
if atomic.AddInt32(&n.ref, 1) <= 1 {
panic("BUG: Node.GetHandle on zero ref")
}
return &Handle{unsafe.Pointer(n)}
}
func (n *Node) unref() {
if atomic.AddInt32(&n.ref, -1) == 0 {
n.r.delete(n)
}
}
func (n *Node) unrefLocked() {
if atomic.AddInt32(&n.ref, -1) == 0 {
n.r.mu.RLock()
if !n.r.closed {
n.r.delete(n)
}
n.r.mu.RUnlock()
}
}
type Handle struct {
n unsafe.Pointer // *Node
}
func (h *Handle) Value() Value {
n := (*Node)(atomic.LoadPointer(&h.n))
if n != nil {
return n.value
}
return nil
}
func (h *Handle) Release() {
nPtr := atomic.LoadPointer(&h.n)
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
n := (*Node)(nPtr)
n.unrefLocked()
}
}
func murmur32(ns, key uint64, seed uint32) uint32 {
const (
m = uint32(0x5bd1e995)
r = 24
)
k1 := uint32(ns >> 32)
k2 := uint32(ns)
k3 := uint32(key >> 32)
k4 := uint32(key)
k1 *= m
k1 ^= k1 >> r
k1 *= m
k2 *= m
k2 ^= k2 >> r
k2 *= m
k3 *= m
k3 ^= k3 >> r
k3 *= m
k4 *= m
k4 ^= k4 >> r
k4 *= m
h := seed
h *= m
h ^= k1
h *= m
h ^= k2
h *= m
h ^= k3
h *= m
h ^= k4
h ^= h >> 13
h *= m
h ^= h >> 15
return h
}
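To make the reshaped API above concrete, here is a small usage sketch grounded in the code in this diff: namespaces are now plain uint64 arguments to Cache.Get instead of separate Namespace objects, the set function runs atomically on a miss, and every returned handle must be released:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
    c := cache.NewCache(cache.NewLRU(100)) // the LRU cacher is optional; nil works too
    defer c.Close()

    // Miss: the set function creates the node with size 1.
    h := c.Get(0, 42, func() (size int, value cache.Value) {
        return 1, "hello"
    })
    fmt.Println(h.Value()) // "hello"
    h.Release()            // handles must be released after use

    // Hit without a set function.
    if h := c.Get(0, 42, nil); h != nil {
        h.Release()
    }

    fmt.Println(c.Nodes(), c.Size()) // 1 1
}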


@@ -13,11 +13,26 @@ import (
"sync/atomic"
"testing"
"time"
"unsafe"
)
type int32o int32
func (o *int32o) acquire() {
if atomic.AddInt32((*int32)(o), 1) != 1 {
panic("BUG: invalid ref")
}
}
func (o *int32o) Release() {
if atomic.AddInt32((*int32)(o), -1) != 0 {
panic("BUG: invalid ref")
}
}
type releaserFunc struct {
fn func()
value interface{}
value Value
}
func (r releaserFunc) Release() {
@@ -26,8 +41,8 @@ func (r releaserFunc) Release() {
}
}
func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle {
return ns.Get(key, func() (int, interface{}) {
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
return c.Get(ns, key, func() (int, Value) {
if relf != nil {
return charge, releaserFunc{relf, value}
} else {
@@ -36,7 +51,246 @@ func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) H
})
}
func TestCache_HitMiss(t *testing.T) {
func TestCacheMap(t *testing.T) {
runtime.GOMAXPROCS(runtime.NumCPU())
nsx := []struct {
nobjects, nhandles, concurrent, repeat int
}{
{10000, 400, 50, 3},
{100000, 1000, 100, 10},
}
var (
objects [][]int32o
handles [][]unsafe.Pointer
)
for _, x := range nsx {
objects = append(objects, make([]int32o, x.nobjects))
handles = append(handles, make([]unsafe.Pointer, x.nhandles))
}
c := NewCache(nil)
wg := new(sync.WaitGroup)
var done int32
for ns, x := range nsx {
for i := 0; i < x.concurrent; i++ {
wg.Add(1)
go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
defer wg.Done()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for j := len(objects) * repeat; j >= 0; j-- {
key := uint64(r.Intn(len(objects)))
h := c.Get(uint64(ns), key, func() (int, Value) {
o := &objects[key]
o.acquire()
return 1, o
})
if v := h.Value().(*int32o); v != &objects[key] {
t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
}
if objects[key] != 1 {
t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
}
if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
h.Release()
}
}
}(ns, i, x.repeat, objects[ns], handles[ns])
}
go func(handles []unsafe.Pointer) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for atomic.LoadInt32(&done) == 0 {
i := r.Intn(len(handles))
h := (*Handle)(atomic.LoadPointer(&handles[i]))
if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
h.Release()
}
time.Sleep(time.Millisecond)
}
}(handles[ns])
}
go func() {
handles := make([]*Handle, 100000)
for atomic.LoadInt32(&done) == 0 {
for i := range handles {
handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
return 1, 1
})
}
for _, h := range handles {
h.Release()
}
}
}()
wg.Wait()
atomic.StoreInt32(&done, 1)
for _, handles0 := range handles {
for i := range handles0 {
h := (*Handle)(atomic.LoadPointer(&handles0[i]))
if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
h.Release()
}
}
}
for ns, objects0 := range objects {
for i, o := range objects0 {
if o != 0 {
t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
}
}
}
}
func TestCacheMap_NodesAndSize(t *testing.T) {
c := NewCache(nil)
if c.Nodes() != 0 {
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
}
if c.Size() != 0 {
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
}
set(c, 0, 1, 1, 1, nil)
set(c, 0, 2, 2, 2, nil)
set(c, 1, 1, 3, 3, nil)
set(c, 2, 1, 4, 1, nil)
if c.Nodes() != 4 {
t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
}
if c.Size() != 7 {
t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
}
}
func TestLRUCache_Capacity(t *testing.T) {
c := NewCache(NewLRU(10))
if c.Capacity() != 10 {
t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
}
set(c, 0, 1, 1, 1, nil).Release()
set(c, 0, 2, 2, 2, nil).Release()
set(c, 1, 1, 3, 3, nil).Release()
set(c, 2, 1, 4, 1, nil).Release()
set(c, 2, 2, 5, 1, nil).Release()
set(c, 2, 3, 6, 1, nil).Release()
set(c, 2, 4, 7, 1, nil).Release()
set(c, 2, 5, 8, 1, nil).Release()
if c.Nodes() != 7 {
t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
}
if c.Size() != 10 {
t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
}
c.SetCapacity(9)
if c.Capacity() != 9 {
t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
}
if c.Nodes() != 6 {
t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
}
if c.Size() != 8 {
t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
}
}
func TestCacheMap_NilValue(t *testing.T) {
c := NewCache(NewLRU(10))
h := c.Get(0, 0, func() (size int, value Value) {
return 1, nil
})
if h != nil {
t.Error("cache handle is non-nil")
}
if c.Nodes() != 0 {
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
}
if c.Size() != 0 {
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
}
}
func TestLRUCache_GetLatency(t *testing.T) {
runtime.GOMAXPROCS(runtime.NumCPU())
const (
concurrentSet = 30
concurrentGet = 3
duration = 3 * time.Second
delay = 3 * time.Millisecond
maxkey = 100000
)
var (
set, getHit, getAll int32
getMaxLatency, getDuration int64
)
c := NewCache(NewLRU(5000))
wg := &sync.WaitGroup{}
until := time.Now().Add(duration)
for i := 0; i < concurrentSet; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for time.Now().Before(until) {
c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
time.Sleep(delay)
atomic.AddInt32(&set, 1)
return 1, 1
}).Release()
}
}(i)
}
for i := 0; i < concurrentGet; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for {
mark := time.Now()
if mark.Before(until) {
h := c.Get(0, uint64(r.Intn(maxkey)), nil)
latency := int64(time.Now().Sub(mark))
m := atomic.LoadInt64(&getMaxLatency)
if latency > m {
atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
}
atomic.AddInt64(&getDuration, latency)
if h != nil {
atomic.AddInt32(&getHit, 1)
h.Release()
}
atomic.AddInt32(&getAll, 1)
} else {
break
}
}
}(i)
}
wg.Wait()
getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
if getAvglatency > delay/3 {
t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
}
}
func TestLRUCache_HitMiss(t *testing.T) {
cases := []struct {
key uint64
value string
@@ -54,14 +308,13 @@ func TestCache_HitMiss(t *testing.T) {
}
setfin := 0
c := NewLRUCache(1000)
ns := c.GetNamespace(0)
c := NewCache(NewLRU(1000))
for i, x := range cases {
set(ns, x.key, x.value, len(x.value), func() {
set(c, 0, x.key, x.value, len(x.value), func() {
setfin++
}).Release()
for j, y := range cases {
h := ns.Get(y.key, nil)
h := c.Get(0, y.key, nil)
if j <= i {
// should hit
if h == nil {
@@ -85,7 +338,7 @@ func TestCache_HitMiss(t *testing.T) {
for i, x := range cases {
finalizerOk := false
ns.Delete(x.key, func(exist, pending bool) {
c.Delete(0, x.key, func() {
finalizerOk = true
})
@@ -94,7 +347,7 @@ func TestCache_HitMiss(t *testing.T) {
}
for j, y := range cases {
h := ns.Get(y.key, nil)
h := c.Get(0, y.key, nil)
if j > i {
// should hit
if h == nil {
@@ -122,20 +375,19 @@ func TestCache_HitMiss(t *testing.T) {
}
func TestLRUCache_Eviction(t *testing.T) {
c := NewLRUCache(12)
ns := c.GetNamespace(0)
o1 := set(ns, 1, 1, 1, nil)
set(ns, 2, 2, 1, nil).Release()
set(ns, 3, 3, 1, nil).Release()
set(ns, 4, 4, 1, nil).Release()
set(ns, 5, 5, 1, nil).Release()
if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2
c := NewCache(NewLRU(12))
o1 := set(c, 0, 1, 1, 1, nil)
set(c, 0, 2, 2, 1, nil).Release()
set(c, 0, 3, 3, 1, nil).Release()
set(c, 0, 4, 4, 1, nil).Release()
set(c, 0, 5, 5, 1, nil).Release()
if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
h.Release()
}
set(ns, 9, 9, 10, nil).Release() // 5,2,9
set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
for _, key := range []uint64{9, 2, 5, 1} {
h := ns.Get(key, nil)
h := c.Get(0, key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
@@ -147,7 +399,7 @@ func TestLRUCache_Eviction(t *testing.T) {
}
o1.Release()
for _, key := range []uint64{1, 2, 5} {
h := ns.Get(key, nil)
h := c.Get(0, key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
@@ -158,7 +410,7 @@ func TestLRUCache_Eviction(t *testing.T) {
}
}
for _, key := range []uint64{3, 4, 9} {
h := ns.Get(key, nil)
h := c.Get(0, key, nil)
if h != nil {
t.Errorf("hit for key '%d'", key)
if x := h.Value().(int); x != int(key) {
@@ -169,487 +421,150 @@ func TestLRUCache_Eviction(t *testing.T) {
}
}
func TestLRUCache_SetGet(t *testing.T) {
c := NewLRUCache(13)
ns := c.GetNamespace(0)
for i := 0; i < 200; i++ {
n := uint64(rand.Intn(99999) % 20)
set(ns, n, n, 1, nil).Release()
if h := ns.Get(n, nil); h != nil {
if h.Value() == nil {
t.Errorf("key '%d' contains nil value", n)
func TestLRUCache_Evict(t *testing.T) {
c := NewCache(NewLRU(6))
set(c, 0, 1, 1, 1, nil).Release()
set(c, 0, 2, 2, 1, nil).Release()
set(c, 1, 1, 4, 1, nil).Release()
set(c, 1, 2, 5, 1, nil).Release()
set(c, 2, 1, 6, 1, nil).Release()
set(c, 2, 2, 7, 1, nil).Release()
for ns := 0; ns < 3; ns++ {
for key := 1; key < 3; key++ {
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
h.Release()
} else {
if x := h.Value().(uint64); x != n {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x)
}
t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
}
}
}
if ok := c.Evict(0, 1); !ok {
t.Error("first Cache.Evict on #0.1 return false")
}
if ok := c.Evict(0, 1); ok {
t.Error("second Cache.Evict on #0.1 return true")
}
if h := c.Get(0, 1, nil); h != nil {
t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
}
c.EvictNS(1)
if h := c.Get(1, 1, nil); h != nil {
t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
}
if h := c.Get(1, 2, nil); h != nil {
t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
}
c.EvictAll()
for ns := 0; ns < 3; ns++ {
for key := 1; key < 3; key++ {
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
}
}
}
}
func TestLRUCache_Delete(t *testing.T) {
delFuncCalled := 0
delFunc := func() {
delFuncCalled++
}
c := NewCache(NewLRU(2))
set(c, 0, 1, 1, 1, nil).Release()
set(c, 0, 2, 2, 1, nil).Release()
if ok := c.Delete(0, 1, delFunc); !ok {
t.Error("Cache.Delete on #1 return false")
}
if h := c.Get(0, 1, nil); h != nil {
t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
}
if ok := c.Delete(0, 1, delFunc); ok {
t.Error("Cache.Delete on #1 return true")
}
h2 := c.Get(0, 2, nil)
if h2 == nil {
t.Error("Cache.Get on #2 return nil")
}
if ok := c.Delete(0, 2, delFunc); !ok {
t.Error("(1) Cache.Delete on #2 return false")
}
if ok := c.Delete(0, 2, delFunc); !ok {
t.Error("(2) Cache.Delete on #2 return false")
}
set(c, 0, 3, 3, 1, nil).Release()
set(c, 0, 4, 4, 1, nil).Release()
c.Get(0, 2, nil).Release()
for key := 2; key <= 4; key++ {
if h := c.Get(0, uint64(key), nil); h != nil {
h.Release()
} else {
t.Errorf("key '%d' doesn't exist", n)
}
}
}
func TestLRUCache_Purge(t *testing.T) {
c := NewLRUCache(3)
ns1 := c.GetNamespace(0)
o1 := set(ns1, 1, 1, 1, nil)
o2 := set(ns1, 2, 2, 1, nil)
ns1.Purge(nil)
set(ns1, 3, 3, 1, nil).Release()
for _, key := range []uint64{1, 2, 3} {
h := ns1.Get(key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
h.Release()
}
}
o1.Release()
o2.Release()
for _, key := range []uint64{1, 2} {
h := ns1.Get(key, nil)
if h != nil {
t.Errorf("hit for key '%d'", key)
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
h.Release()
}
}
}
type testingCacheObjectCounter struct {
created uint
released uint
}
func (c *testingCacheObjectCounter) createOne() {
c.created++
}
func (c *testingCacheObjectCounter) releaseOne() {
c.released++
}
type testingCacheObject struct {
t *testing.T
cnt *testingCacheObjectCounter
ns, key uint64
releaseCalled bool
}
func (x *testingCacheObject) Release() {
if !x.releaseCalled {
x.releaseCalled = true
x.cnt.releaseOne()
} else {
x.t.Errorf("duplicate setfin NS#%d KEY#%d", x.ns, x.key)
}
}
func TestLRUCache_ConcurrentSetGet(t *testing.T) {
runtime.GOMAXPROCS(runtime.NumCPU())
seed := time.Now().UnixNano()
t.Logf("seed=%d", seed)
const (
N = 2000000
M = 4000
C = 3
)
var set, get uint32
wg := &sync.WaitGroup{}
c := NewLRUCache(M / 4)
for ni := uint64(0); ni < C; ni++ {
r0 := rand.New(rand.NewSource(seed + int64(ni)))
r1 := rand.New(rand.NewSource(seed + int64(ni) + 1))
ns := c.GetNamespace(ni)
wg.Add(2)
go func(ns Namespace, r *rand.Rand) {
for i := 0; i < N; i++ {
x := uint64(r.Int63n(M))
o := ns.Get(x, func() (int, interface{}) {
atomic.AddUint32(&set, 1)
return 1, x
})
if v := o.Value().(uint64); v != x {
t.Errorf("#%d invalid value, got=%d", x, v)
}
o.Release()
}
wg.Done()
}(ns, r0)
go func(ns Namespace, r *rand.Rand) {
for i := 0; i < N; i++ {
x := uint64(r.Int63n(M))
o := ns.Get(x, nil)
if o != nil {
atomic.AddUint32(&get, 1)
if v := o.Value().(uint64); v != x {
t.Errorf("#%d invalid value, got=%d", x, v)
}
o.Release()
}
}
wg.Done()
}(ns, r1)
}
wg.Wait()
t.Logf("set=%d get=%d", set, get)
}
func TestLRUCache_Finalizer(t *testing.T) {
const (
capacity = 100
goroutines = 100
iterations = 10000
keymax = 8000
)
cnt := &testingCacheObjectCounter{}
c := NewLRUCache(capacity)
type instance struct {
seed int64
rnd *rand.Rand
nsid uint64
ns Namespace
effective int
handles []Handle
handlesMap map[uint64]int
delete bool
purge bool
zap bool
wantDel int
delfinCalled int
delfinCalledAll int
delfinCalledEff int
purgefinCalled int
}
instanceGet := func(p *instance, key uint64) {
h := p.ns.Get(key, func() (charge int, value interface{}) {
to := &testingCacheObject{
t: t, cnt: cnt,
ns: p.nsid,
key: key,
}
p.effective++
cnt.createOne()
return 1, releaserFunc{func() {
to.Release()
p.effective--
}, to}
})
p.handles = append(p.handles, h)
p.handlesMap[key] = p.handlesMap[key] + 1
}
instanceRelease := func(p *instance, i int) {
h := p.handles[i]
key := h.Value().(releaserFunc).value.(*testingCacheObject).key
if n := p.handlesMap[key]; n == 0 {
t.Fatal("key ref == 0")
} else if n > 1 {
p.handlesMap[key] = n - 1
} else {
delete(p.handlesMap, key)
}
h.Release()
p.handles = append(p.handles[:i], p.handles[i+1:]...)
p.handles[len(p.handles) : len(p.handles)+1][0] = nil
}
seed := time.Now().UnixNano()
t.Logf("seed=%d", seed)
instances := make([]*instance, goroutines)
for i := range instances {
p := &instance{}
p.handlesMap = make(map[uint64]int)
p.seed = seed + int64(i)
p.rnd = rand.New(rand.NewSource(p.seed))
p.nsid = uint64(i)
p.ns = c.GetNamespace(p.nsid)
p.delete = i%6 == 0
p.purge = i%8 == 0
p.zap = i%12 == 0 || i%3 == 0
instances[i] = p
}
runr := rand.New(rand.NewSource(seed - 1))
run := func(rnd *rand.Rand, x []*instance, init func(p *instance) bool, fn func(p *instance, i int) bool) {
var (
rx []*instance
rn []int
)
if init == nil {
rx = append([]*instance{}, x...)
rn = make([]int, len(x))
} else {
for _, p := range x {
if init(p) {
rx = append(rx, p)
rn = append(rn, 0)
}
}
}
for len(rx) > 0 {
i := rand.Intn(len(rx))
if fn(rx[i], rn[i]) {
rn[i]++
} else {
rx = append(rx[:i], rx[i+1:]...)
rn = append(rn[:i], rn[i+1:]...)
}
t.Errorf("Cache.Get on #%d return nil", key)
}
}
// Get and release.
run(runr, instances, nil, func(p *instance, i int) bool {
if i < iterations {
if len(p.handles) == 0 || p.rnd.Int()%2 == 0 {
instanceGet(p, uint64(p.rnd.Intn(keymax)))
} else {
instanceRelease(p, p.rnd.Intn(len(p.handles)))
}
return true
} else {
return false
h2.Release()
if h := c.Get(0, 2, nil); h != nil {
t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
}
if delFuncCalled != 4 {
t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
}
}
func TestLRUCache_Close(t *testing.T) {
relFuncCalled := 0
relFunc := func() {
relFuncCalled++
}
delFuncCalled := 0
delFunc := func() {
delFuncCalled++
}
c := NewCache(NewLRU(2))
set(c, 0, 1, 1, 1, relFunc).Release()
set(c, 0, 2, 2, 1, relFunc).Release()
h3 := set(c, 0, 3, 3, 1, relFunc)
if h3 == nil {
t.Error("Cache.Get on #3 return nil")
}
if ok := c.Delete(0, 3, delFunc); !ok {
t.Error("Cache.Delete on #3 return false")
}
c.Close()
if relFuncCalled != 3 {
t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
}
if delFuncCalled != 1 {
t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
}
}
func BenchmarkLRUCache(b *testing.B) {
c := NewCache(NewLRU(10000))
b.SetParallelism(10)
b.RunParallel(func(pb *testing.PB) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for pb.Next() {
key := uint64(r.Intn(1000000))
c.Get(0, key, func() (int, Value) {
return 1, key
}).Release()
}
})
if used, cap := c.Used(), c.Capacity(); used > cap {
t.Errorf("Used > capacity, used=%d cap=%d", used, cap)
}
// Check effective objects.
for i, p := range instances {
if int(p.effective) < len(p.handlesMap) {
t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap))
}
}
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
// First delete.
run(runr, instances, func(p *instance) bool {
p.wantDel = p.effective
return p.delete
}, func(p *instance, i int) bool {
key := uint64(i)
if key < keymax {
_, wantExist := p.handlesMap[key]
gotExist := p.ns.Delete(key, func(exist, pending bool) {
p.delfinCalledAll++
if exist {
p.delfinCalledEff++
}
})
if !gotExist && wantExist {
t.Errorf("delete on NS#%d KEY#%d not found", p.nsid, key)
}
return true
} else {
return false
}
})
// Second delete.
run(runr, instances, func(p *instance) bool {
p.delfinCalled = 0
return p.delete
}, func(p *instance, i int) bool {
key := uint64(i)
if key < keymax {
gotExist := p.ns.Delete(key, func(exist, pending bool) {
if exist && !pending {
t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.nsid, key)
}
p.delfinCalled++
})
if gotExist {
t.Errorf("delete on NS#%d KEY#%d found", p.nsid, key)
}
return true
} else {
if p.delfinCalled != keymax {
t.Errorf("(2) NS#%d not all delete fin called, diff=%d", p.nsid, keymax-p.delfinCalled)
}
return false
}
})
// Purge.
run(runr, instances, func(p *instance) bool {
return p.purge
}, func(p *instance, i int) bool {
p.ns.Purge(func(ns, key uint64) {
p.purgefinCalled++
})
return false
})
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
// Release.
run(runr, instances, func(p *instance) bool {
return !p.zap
}, func(p *instance, i int) bool {
if len(p.handles) > 0 {
instanceRelease(p, len(p.handles)-1)
return true
} else {
return false
}
})
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
// Zap.
run(runr, instances, func(p *instance) bool {
return p.zap
}, func(p *instance, i int) bool {
p.ns.Zap()
p.handles = nil
p.handlesMap = nil
return false
})
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used {
t.Errorf("Invalid used value, want=%d got=%d", notrel, used)
}
c.Purge(nil)
for _, p := range instances {
if p.delete {
if p.delfinCalledAll != keymax {
t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.nsid, p.purge, p.zap, keymax-p.delfinCalledAll)
}
if p.delfinCalledEff != p.wantDel {
t.Errorf("#%d not all effective delete fin called, diff=%d", p.nsid, p.wantDel-p.delfinCalledEff)
}
if p.purge && p.purgefinCalled > 0 {
t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.nsid, p.delete, p.zap, p.purgefinCalled)
}
} else {
if p.purge {
if p.purgefinCalled != p.wantDel {
t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.nsid, p.delete, p.zap, p.wantDel-p.purgefinCalled)
}
}
}
}
if cnt.created != cnt.released {
t.Errorf("Some cache object weren't released, created=%d released=%d", cnt.created, cnt.released)
}
}
func BenchmarkLRUCache_Set(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, "", 1, nil)
}
}
func BenchmarkLRUCache_Get(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, "", 1, nil)
}
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
ns.Get(i, nil)
}
}
func BenchmarkLRUCache_Get2(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, "", 1, nil)
}
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
ns.Get(i, func() (charge int, value interface{}) {
return 0, nil
})
}
}
func BenchmarkLRUCache_Release(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
handles := make([]Handle, b.N)
for i := uint64(0); i < uint64(b.N); i++ {
handles[i] = set(ns, i, "", 1, nil)
}
b.ResetTimer()
for _, h := range handles {
h.Release()
}
}
func BenchmarkLRUCache_SetRelease(b *testing.B) {
capacity := b.N / 100
if capacity <= 0 {
capacity = 10
}
c := NewLRUCache(capacity)
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, "", 1, nil).Release()
}
}
func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
capacity := b.N / 100
if capacity <= 0 {
capacity = 10
}
c := NewLRUCache(capacity)
ns := c.GetNamespace(0)
b.ResetTimer()
na := b.N / 2
nb := b.N - na
for i := uint64(0); i < uint64(na); i++ {
set(ns, i, "", 1, nil).Release()
}
for i := uint64(0); i < uint64(nb); i++ {
set(ns, i, "", 1, nil).Release()
}
}


@@ -0,0 +1,195 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"sync"
"unsafe"
)
type lruNode struct {
n *Node
h *Handle
ban bool
next, prev *lruNode
}
func (n *lruNode) insert(at *lruNode) {
x := at.next
at.next = n
n.prev = at
n.next = x
x.prev = n
}
func (n *lruNode) remove() {
if n.prev != nil {
n.prev.next = n.next
n.next.prev = n.prev
n.prev = nil
n.next = nil
} else {
panic("BUG: removing removed node")
}
}
type lru struct {
mu sync.Mutex
capacity int
used int
recent lruNode
}
func (r *lru) reset() {
r.recent.next = &r.recent
r.recent.prev = &r.recent
r.used = 0
}
func (r *lru) Capacity() int {
r.mu.Lock()
defer r.mu.Unlock()
return r.capacity
}
func (r *lru) SetCapacity(capacity int) {
var evicted []*lruNode
r.mu.Lock()
r.capacity = capacity
for r.used > r.capacity {
rn := r.recent.prev
if rn == nil {
panic("BUG: invalid LRU used or capacity counter")
}
rn.remove()
rn.n.CacheData = nil
r.used -= rn.n.Size()
evicted = append(evicted, rn)
}
r.mu.Unlock()
for _, rn := range evicted {
rn.h.Release()
}
}
func (r *lru) Promote(n *Node) {
var evicted []*lruNode
r.mu.Lock()
if n.CacheData == nil {
if n.Size() <= r.capacity {
rn := &lruNode{n: n, h: n.GetHandle()}
rn.insert(&r.recent)
n.CacheData = unsafe.Pointer(rn)
r.used += n.Size()
for r.used > r.capacity {
rn := r.recent.prev
if rn == nil {
panic("BUG: invalid LRU used or capacity counter")
}
rn.remove()
rn.n.CacheData = nil
r.used -= rn.n.Size()
evicted = append(evicted, rn)
}
}
} else {
rn := (*lruNode)(n.CacheData)
if !rn.ban {
rn.remove()
rn.insert(&r.recent)
}
}
r.mu.Unlock()
for _, rn := range evicted {
rn.h.Release()
}
}
func (r *lru) Ban(n *Node) {
r.mu.Lock()
if n.CacheData == nil {
n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
} else {
rn := (*lruNode)(n.CacheData)
if !rn.ban {
rn.remove()
rn.ban = true
r.used -= rn.n.Size()
r.mu.Unlock()
rn.h.Release()
rn.h = nil
return
}
}
r.mu.Unlock()
}
func (r *lru) Evict(n *Node) {
r.mu.Lock()
rn := (*lruNode)(n.CacheData)
if rn == nil || rn.ban {
r.mu.Unlock()
return
}
n.CacheData = nil
r.mu.Unlock()
rn.h.Release()
}
func (r *lru) EvictNS(ns uint64) {
var evicted []*lruNode
r.mu.Lock()
for e := r.recent.prev; e != &r.recent; {
rn := e
e = e.prev
if rn.n.NS() == ns {
rn.remove()
rn.n.CacheData = nil
r.used -= rn.n.Size()
evicted = append(evicted, rn)
}
}
r.mu.Unlock()
for _, rn := range evicted {
rn.h.Release()
}
}
func (r *lru) EvictAll() {
r.mu.Lock()
back := r.recent.prev
for rn := back; rn != &r.recent; rn = rn.prev {
rn.n.CacheData = nil
}
r.reset()
r.mu.Unlock()
for rn := back; rn != &r.recent; rn = rn.prev {
rn.h.Release()
}
}
func (r *lru) Close() error {
return nil
}
// NewLRU creates a new LRU cache.
func NewLRU(capacity int) Cacher {
r := &lru{capacity: capacity}
r.reset()
return r
}
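The eviction behavior this implements, mirrored from the TestLRUCache_Capacity test earlier in the diff: once the summed node sizes exceed the capacity, the least recently promoted nodes are released first. A small sketch using the package's own constructors:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
    c := cache.NewCache(cache.NewLRU(3)) // capacity of 3 size units
    defer c.Close()

    for key := uint64(1); key <= 5; key++ {
        k := key
        c.Get(0, k, func() (int, cache.Value) { return 1, k }).Release()
    }

    // Five unit-sized inserts against capacity 3: the two oldest
    // nodes have been evicted and dropped from the map.
    fmt.Println(c.Nodes(), c.Size()) // 3 3
}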


@@ -1,622 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/util"
)
// The LLRB implementation was taken from https://github.com/petar/GoLLRB.
// Which contains the following header:
//
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// lruCache represents an LRU cache state.
type lruCache struct {
mu sync.Mutex
recent lruNode
table map[uint64]*lruNs
capacity int
used, size, alive int
}
// NewLRUCache creates a new initialized LRU cache with the given capacity.
func NewLRUCache(capacity int) Cache {
c := &lruCache{
table: make(map[uint64]*lruNs),
capacity: capacity,
}
c.recent.rNext = &c.recent
c.recent.rPrev = &c.recent
return c
}
func (c *lruCache) Capacity() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.capacity
}
func (c *lruCache) Used() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.used
}
func (c *lruCache) Size() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.size
}
func (c *lruCache) NumObjects() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.alive
}
// SetCapacity sets the cache capacity.
func (c *lruCache) SetCapacity(capacity int) {
c.mu.Lock()
c.capacity = capacity
c.evict()
c.mu.Unlock()
}
// GetNamespace returns the namespace object for the given id.
func (c *lruCache) GetNamespace(id uint64) Namespace {
c.mu.Lock()
defer c.mu.Unlock()
if ns, ok := c.table[id]; ok {
return ns
}
ns := &lruNs{lru: c, id: id}
c.table[id] = ns
return ns
}
func (c *lruCache) ZapNamespace(id uint64) {
c.mu.Lock()
if ns, exist := c.table[id]; exist {
ns.zapNB()
delete(c.table, id)
}
c.mu.Unlock()
}
func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
c.mu.Lock()
if ns, exist := c.table[id]; exist {
ns.purgeNB(fin)
}
c.mu.Unlock()
}
// Purge purges the entire cache.
func (c *lruCache) Purge(fin PurgeFin) {
c.mu.Lock()
for _, ns := range c.table {
ns.purgeNB(fin)
}
c.mu.Unlock()
}
func (c *lruCache) Zap() {
c.mu.Lock()
for _, ns := range c.table {
ns.zapNB()
}
c.table = make(map[uint64]*lruNs)
c.mu.Unlock()
}
func (c *lruCache) evict() {
top := &c.recent
for n := c.recent.rPrev; c.used > c.capacity && n != top; {
if n.state != nodeEffective {
panic("evicting non effective node")
}
n.state = nodeEvicted
n.rRemove()
n.derefNB()
c.used -= n.charge
n = c.recent.rPrev
}
}
type lruNs struct {
lru *lruCache
id uint64
rbRoot *lruNode
state nsState
}
func (ns *lruNs) rbGetOrCreateNode(h *lruNode, key uint64) (hn, n *lruNode) {
if h == nil {
n = &lruNode{ns: ns, key: key}
return n, n
}
if key < h.key {
hn, n = ns.rbGetOrCreateNode(h.rbLeft, key)
if hn != nil {
h.rbLeft = hn
} else {
return nil, n
}
} else if key > h.key {
hn, n = ns.rbGetOrCreateNode(h.rbRight, key)
if hn != nil {
h.rbRight = hn
} else {
return nil, n
}
} else {
return nil, h
}
if rbIsRed(h.rbRight) && !rbIsRed(h.rbLeft) {
h = rbRotLeft(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
h = rbRotRight(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
rbFlip(h)
}
return h, n
}
func (ns *lruNs) getOrCreateNode(key uint64) *lruNode {
hn, n := ns.rbGetOrCreateNode(ns.rbRoot, key)
if hn != nil {
ns.rbRoot = hn
ns.rbRoot.rbBlack = true
}
return n
}
func (ns *lruNs) rbGetNode(key uint64) *lruNode {
h := ns.rbRoot
for h != nil {
switch {
case key < h.key:
h = h.rbLeft
case key > h.key:
h = h.rbRight
default:
return h
}
}
return nil
}
func (ns *lruNs) getNode(key uint64) *lruNode {
return ns.rbGetNode(key)
}
func (ns *lruNs) rbDeleteNode(h *lruNode, key uint64) *lruNode {
if h == nil {
return nil
}
if key < h.key {
if h.rbLeft == nil { // key not present. Nothing to delete
return h
}
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
h = rbMoveLeft(h)
}
h.rbLeft = ns.rbDeleteNode(h.rbLeft, key)
} else {
if rbIsRed(h.rbLeft) {
h = rbRotRight(h)
}
// If @key equals @h.key and no right children at @h
if h.key == key && h.rbRight == nil {
return nil
}
if h.rbRight != nil && !rbIsRed(h.rbRight) && !rbIsRed(h.rbRight.rbLeft) {
h = rbMoveRight(h)
}
// If @key equals @h.key, and (from above) 'h.Right != nil'
if h.key == key {
var x *lruNode
h.rbRight, x = rbDeleteMin(h.rbRight)
if x == nil {
panic("logic")
}
x.rbLeft, h.rbLeft = h.rbLeft, nil
x.rbRight, h.rbRight = h.rbRight, nil
x.rbBlack = h.rbBlack
h = x
} else { // Else, @key is bigger than @h.key
h.rbRight = ns.rbDeleteNode(h.rbRight, key)
}
}
return rbFixup(h)
}
func (ns *lruNs) deleteNode(key uint64) {
ns.rbRoot = ns.rbDeleteNode(ns.rbRoot, key)
if ns.rbRoot != nil {
ns.rbRoot.rbBlack = true
}
}
func (ns *lruNs) rbIterateNodes(h *lruNode, pivot uint64, iter func(n *lruNode) bool) bool {
if h == nil {
return true
}
if h.key >= pivot {
if !ns.rbIterateNodes(h.rbLeft, pivot, iter) {
return false
}
if !iter(h) {
return false
}
}
return ns.rbIterateNodes(h.rbRight, pivot, iter)
}
func (ns *lruNs) iterateNodes(iter func(n *lruNode) bool) {
ns.rbIterateNodes(ns.rbRoot, 0, iter)
}
func (ns *lruNs) Get(key uint64, setf SetFunc) Handle {
ns.lru.mu.Lock()
defer ns.lru.mu.Unlock()
if ns.state != nsEffective {
return nil
}
var n *lruNode
if setf == nil {
n = ns.getNode(key)
if n == nil {
return nil
}
} else {
n = ns.getOrCreateNode(key)
}
switch n.state {
case nodeZero:
charge, value := setf()
if value == nil {
ns.deleteNode(key)
return nil
}
if charge < 0 {
charge = 0
}
n.value = value
n.charge = charge
n.state = nodeEvicted
ns.lru.size += charge
ns.lru.alive++
fallthrough
case nodeEvicted:
if n.charge == 0 {
break
}
// Insert to recent list.
n.state = nodeEffective
n.ref++
ns.lru.used += n.charge
ns.lru.evict()
fallthrough
case nodeEffective:
// Bump to front.
n.rRemove()
n.rInsert(&ns.lru.recent)
case nodeDeleted:
// Do nothing.
default:
panic("invalid state")
}
n.ref++
return &lruHandle{node: n}
}
func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
ns.lru.mu.Lock()
defer ns.lru.mu.Unlock()
if ns.state != nsEffective {
if fin != nil {
fin(false, false)
}
return false
}
n := ns.getNode(key)
if n == nil {
if fin != nil {
fin(false, false)
}
return false
}
switch n.state {
case nodeEffective:
ns.lru.used -= n.charge
n.state = nodeDeleted
n.delfin = fin
n.rRemove()
n.derefNB()
case nodeEvicted:
n.state = nodeDeleted
n.delfin = fin
case nodeDeleted:
if fin != nil {
fin(true, true)
}
return false
default:
panic("invalid state")
}
return true
}
func (ns *lruNs) purgeNB(fin PurgeFin) {
if ns.state == nsEffective {
var nodes []*lruNode
ns.iterateNodes(func(n *lruNode) bool {
nodes = append(nodes, n)
return true
})
for _, n := range nodes {
switch n.state {
case nodeEffective:
ns.lru.used -= n.charge
n.state = nodeDeleted
n.purgefin = fin
n.rRemove()
n.derefNB()
case nodeEvicted:
n.state = nodeDeleted
n.purgefin = fin
case nodeDeleted:
default:
panic("invalid state")
}
}
}
}
func (ns *lruNs) Purge(fin PurgeFin) {
ns.lru.mu.Lock()
ns.purgeNB(fin)
ns.lru.mu.Unlock()
}
func (ns *lruNs) zapNB() {
if ns.state == nsEffective {
ns.state = nsZapped
ns.iterateNodes(func(n *lruNode) bool {
if n.state == nodeEffective {
ns.lru.used -= n.charge
n.rRemove()
}
ns.lru.size -= n.charge
n.state = nodeDeleted
n.fin()
return true
})
ns.rbRoot = nil
}
}
func (ns *lruNs) Zap() {
ns.lru.mu.Lock()
ns.zapNB()
delete(ns.lru.table, ns.id)
ns.lru.mu.Unlock()
}
type lruNode struct {
ns *lruNs
rNext, rPrev *lruNode
rbLeft, rbRight *lruNode
rbBlack bool
key uint64
value interface{}
charge int
ref int
state nodeState
delfin DelFin
purgefin PurgeFin
}
func (n *lruNode) rInsert(at *lruNode) {
x := at.rNext
at.rNext = n
n.rPrev = at
n.rNext = x
x.rPrev = n
}
func (n *lruNode) rRemove() bool {
if n.rPrev == nil {
return false
}
n.rPrev.rNext = n.rNext
n.rNext.rPrev = n.rPrev
n.rPrev = nil
n.rNext = nil
return true
}
func (n *lruNode) fin() {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
if n.purgefin != nil {
if n.delfin != nil {
panic("conflicting delete and purge fin")
}
n.purgefin(n.ns.id, n.key)
n.purgefin = nil
} else if n.delfin != nil {
n.delfin(true, false)
n.delfin = nil
}
}
func (n *lruNode) derefNB() {
n.ref--
if n.ref == 0 {
if n.ns.state == nsEffective {
// Remove element.
n.ns.deleteNode(n.key)
n.ns.lru.size -= n.charge
n.ns.lru.alive--
n.fin()
}
n.value = nil
} else if n.ref < 0 {
panic("leveldb/cache: lruCache: negative node reference")
}
}
func (n *lruNode) deref() {
n.ns.lru.mu.Lock()
n.derefNB()
n.ns.lru.mu.Unlock()
}
type lruHandle struct {
node *lruNode
once uint32
}
func (h *lruHandle) Value() interface{} {
if atomic.LoadUint32(&h.once) == 0 {
return h.node.value
}
return nil
}
func (h *lruHandle) Release() {
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
return
}
h.node.deref()
h.node = nil
}
func rbIsRed(h *lruNode) bool {
if h == nil {
return false
}
return !h.rbBlack
}
func rbRotLeft(h *lruNode) *lruNode {
x := h.rbRight
if x.rbBlack {
panic("rotating a black link")
}
h.rbRight = x.rbLeft
x.rbLeft = h
x.rbBlack = h.rbBlack
h.rbBlack = false
return x
}
func rbRotRight(h *lruNode) *lruNode {
x := h.rbLeft
if x.rbBlack {
panic("rotating a black link")
}
h.rbLeft = x.rbRight
x.rbRight = h
x.rbBlack = h.rbBlack
h.rbBlack = false
return x
}
func rbFlip(h *lruNode) {
h.rbBlack = !h.rbBlack
h.rbLeft.rbBlack = !h.rbLeft.rbBlack
h.rbRight.rbBlack = !h.rbRight.rbBlack
}
func rbMoveLeft(h *lruNode) *lruNode {
rbFlip(h)
if rbIsRed(h.rbRight.rbLeft) {
h.rbRight = rbRotRight(h.rbRight)
h = rbRotLeft(h)
rbFlip(h)
}
return h
}
func rbMoveRight(h *lruNode) *lruNode {
rbFlip(h)
if rbIsRed(h.rbLeft.rbLeft) {
h = rbRotRight(h)
rbFlip(h)
}
return h
}
func rbFixup(h *lruNode) *lruNode {
if rbIsRed(h.rbRight) {
h = rbRotLeft(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
h = rbRotRight(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
rbFlip(h)
}
return h
}
func rbDeleteMin(h *lruNode) (hn, n *lruNode) {
if h == nil {
return nil, nil
}
if h.rbLeft == nil {
return nil, h
}
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
h = rbMoveLeft(h)
}
h.rbLeft, n = rbDeleteMin(h.rbLeft)
return rbFixup(h), n
}
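
For orientation, the state machine above is driven through a handle discipline: Get takes a reference (n.ref++), Release drops it, and derefNB finalizes the node once the count reaches zero. A minimal usage sketch, assuming the NewLRUCache constructor and Namespace interface shown elsewhere on this page (loadBlock is hypothetical):

c := cache.NewLRUCache(8 * 1024 * 1024)
ns := c.GetNamespace(1)
h := ns.Get(42, func() (charge int, value interface{}) {
	// Runs only when key 42 is absent; charge counts against capacity.
	return 128, loadBlock(42)
})
if h != nil {
	b := h.Value() // valid until Release
	_ = b
	h.Release() // drops the reference taken by Get
}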


@@ -9,14 +9,12 @@ package leveldb
import (
"bytes"
"fmt"
"io"
"math/rand"
"testing"
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"io"
"math/rand"
"testing"
)
const ctValSize = 1000
@@ -33,8 +31,8 @@ func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
return newDbCorruptHarnessWopt(t, &opt.Options{
BlockCache: cache.NewLRUCache(100),
Strict: opt.StrictJournalChecksum,
BlockCacheCapacity: 100,
Strict: opt.StrictJournalChecksum,
})
}
@@ -269,9 +267,9 @@ func TestCorruptDB_TableIndex(t *testing.T) {
func TestCorruptDB_MissingManifest(t *testing.T) {
rnd := rand.New(rand.NewSource(0x0badda7a))
h := newDbCorruptHarnessWopt(t, &opt.Options{
BlockCache: cache.NewLRUCache(100),
Strict: opt.StrictJournalChecksum,
WriteBuffer: 1000 * 60,
BlockCacheCapacity: 100,
Strict: opt.StrictJournalChecksum,
WriteBuffer: 1000 * 60,
})
h.build(1000)


@@ -823,8 +823,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
case p == "blockpool":
value = fmt.Sprintf("%v", db.s.tops.bpool)
case p == "cachedblock":
if bc := db.s.o.GetBlockCache(); bc != nil {
value = fmt.Sprintf("%d", bc.Size())
if db.s.tops.bcache != nil {
value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
} else {
value = "<nil>"
}


@@ -8,6 +8,7 @@ package leveldb
import (
"container/list"
"fmt"
"runtime"
"sync"
"sync/atomic"
@@ -89,6 +90,10 @@ func (db *DB) newSnapshot() *Snapshot {
return snap
}
func (snap *Snapshot) String() string {
return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
}
// Get gets the value for the given key. It returns ErrNotFound if
// the DB does not contain the key.
//


@@ -1271,7 +1271,7 @@ func TestDB_DeletionMarkers2(t *testing.T) {
}
func TestDB_CompactionTableOpenError(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1})
h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
defer h.close()
im := 10
@@ -1629,8 +1629,8 @@ func TestDB_ManualCompaction(t *testing.T) {
func TestDB_BloomFilter(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
BlockCache: opt.NoCache,
Filter: filter.NewBloomFilter(10),
DisableBlockCache: true,
Filter: filter.NewBloomFilter(10),
})
defer h.close()
@@ -2066,8 +2066,8 @@ func TestDB_GetProperties(t *testing.T) {
func TestDB_GoleveldbIssue72and83(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
WriteBuffer: 1 * opt.MiB,
CachedOpenFiles: 3,
WriteBuffer: 1 * opt.MiB,
OpenFilesCacheCapacity: 3,
})
defer h.close()
@@ -2200,7 +2200,7 @@ func TestDB_GoleveldbIssue72and83(t *testing.T) {
func TestDB_TransientError(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{
WriteBuffer: 128 * opt.KiB,
CachedOpenFiles: 3,
OpenFilesCacheCapacity: 3,
DisableCompactionBackoff: true,
})
defer h.close()
@@ -2410,7 +2410,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
CompactionTableSize: 43 * opt.KiB,
CompactionExpandLimitFactor: 1,
CompactionGPOverlapsFactor: 1,
BlockCache: opt.NoCache,
DisableBlockCache: true,
}
s, err := newSession(stor, o)
if err != nil {


@@ -112,9 +112,9 @@ func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
db.writeDelay += time.Since(start)
db.writeDelayN++
} else if db.writeDelayN > 0 {
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
db.writeDelay = 0
db.writeDelayN = 0
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
}
return
}


@@ -17,14 +17,14 @@ import (
var _ = testutil.Defer(func() {
Describe("Leveldb external", func() {
o := &opt.Options{
BlockCache: opt.NoCache,
BlockRestartInterval: 5,
BlockSize: 80,
Compression: opt.NoCompression,
CachedOpenFiles: -1,
Strict: opt.StrictAll,
WriteBuffer: 1000,
CompactionTableSize: 2000,
DisableBlockCache: true,
BlockRestartInterval: 5,
BlockSize: 80,
Compression: opt.NoCompression,
OpenFilesCacheCapacity: -1,
Strict: opt.StrictAll,
WriteBuffer: 1000,
CompactionTableSize: 2000,
}
Describe("write test", func() {


@@ -106,7 +106,7 @@ func (ik iKey) assert() {
panic("leveldb: nil iKey")
}
if len(ik) < 8 {
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", ik, len(ik)))
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
}
}
@@ -124,7 +124,7 @@ func (ik iKey) parseNum() (seq uint64, kt kType) {
num := ik.num()
seq, kt = uint64(num>>8), kType(num&0xff)
if kt > ktVal {
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", ik, len(ik), kt))
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
}
return
}


@@ -20,8 +20,9 @@ const (
GiB = MiB * 1024
)
const (
DefaultBlockCacheSize = 8 * MiB
var (
DefaultBlockCacher = LRUCacher
DefaultBlockCacheCapacity = 8 * MiB
DefaultBlockRestartInterval = 16
DefaultBlockSize = 4 * KiB
DefaultCompactionExpandLimitFactor = 25
@@ -33,7 +34,8 @@ const (
DefaultCompactionTotalSize = 10 * MiB
DefaultCompactionTotalSizeMultiplier = 10.0
DefaultCompressionType = SnappyCompression
DefaultCachedOpenFiles = 500
DefaultOpenFilesCacher = LRUCacher
DefaultOpenFilesCacheCapacity = 500
DefaultMaxMemCompationLevel = 2
DefaultNumLevel = 7
DefaultWriteBuffer = 4 * MiB
@@ -41,22 +43,33 @@ const (
DefaultWriteL0SlowdownTrigger = 8
)
type noCache struct{}
// Cacher is a caching algorithm.
type Cacher interface {
New(capacity int) cache.Cacher
}
func (noCache) SetCapacity(capacity int) {}
func (noCache) Capacity() int { return 0 }
func (noCache) Used() int { return 0 }
func (noCache) Size() int { return 0 }
func (noCache) NumObjects() int { return 0 }
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {}
func (noCache) ZapNamespace(id uint64) {}
func (noCache) Purge(fin cache.PurgeFin) {}
func (noCache) Zap() {}
type CacherFunc struct {
NewFunc func(capacity int) cache.Cacher
}
var NoCache cache.Cache = noCache{}
func (f *CacherFunc) New(capacity int) cache.Cacher {
if f.NewFunc != nil {
return f.NewFunc(capacity)
}
return nil
}
// Compression is the per-block compression algorithm to use.
func noCacher(int) cache.Cacher { return nil }
var (
// LRUCacher is the LRU-cache algorithm.
LRUCacher = &CacherFunc{cache.NewLRU}
// NoCacher is the value to disable the caching algorithm.
NoCacher = &CacherFunc{}
)
// Compression is the 'sorted table' block compression algorithm to use.
type Compression uint
func (c Compression) String() string {
@@ -133,16 +146,17 @@ type Options struct {
// The default value is nil
AltFilters []filter.Filter
// BlockCache provides per-block caching for LevelDB. Specify NoCache to
// disable block caching.
// BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
// Specify NoCacher to disable caching algorithm.
//
// By default LevelDB will create LRU-cache with capacity of BlockCacheSize.
BlockCache cache.Cache
// The default value is LRUCacher.
BlockCacher Cacher
// BlockCacheSize defines the capacity of the default 'block cache'.
// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
// Use -1 for zero; this has the same effect as specifying NoCacher for BlockCacher.
//
// The default value is 8MiB.
BlockCacheSize int
BlockCacheCapacity int
// BlockRestartInterval is the number of keys between restart points for
// delta encoding of keys.
@@ -156,13 +170,6 @@ type Options struct {
// The default value is 4KiB.
BlockSize int
// CachedOpenFiles defines number of open files to kept around when not
// in-use, the counting includes still in-use files.
// Set this to negative value to disable caching.
//
// The default value is 500.
CachedOpenFiles int
// CompactionExpandLimitFactor limits compaction size after expanded.
// This will be multiplied by table size limit at compaction target level.
//
@@ -237,11 +244,17 @@ type Options struct {
// The default value uses the same ordering as bytes.Compare.
Comparer comparer.Comparer
// Compression defines the per-block compression to use.
// Compression defines the 'sorted table' block compression to use.
//
// The default value (DefaultCompression) uses snappy compression.
Compression Compression
// DisableBlockCache disables the use of cache.Cache functionality on
// 'sorted table' blocks.
//
// The default value is false.
DisableBlockCache bool
// DisableCompactionBackoff disables compaction retry backoff.
//
// The default value is false.
@@ -288,6 +301,18 @@ type Options struct {
// The default is 7.
NumLevel int
// OpenFilesCacher provides cache algorithm for open files caching.
// Specify NoCacher to disable caching algorithm.
//
// The default value is LRUCacher.
OpenFilesCacher Cacher
// OpenFilesCacheCapacity defines the capacity of the open files caching.
// Use -1 for zero; this has the same effect as specifying NoCacher for OpenFilesCacher.
//
// The default value is 500.
OpenFilesCacheCapacity int
// Strict defines the DB strict level.
Strict Strict
@@ -320,18 +345,22 @@ func (o *Options) GetAltFilters() []filter.Filter {
return o.AltFilters
}
func (o *Options) GetBlockCache() cache.Cache {
if o == nil {
func (o *Options) GetBlockCacher() Cacher {
if o == nil || o.BlockCacher == nil {
return DefaultBlockCacher
} else if o.BlockCacher == NoCacher {
return nil
}
return o.BlockCache
return o.BlockCacher
}
func (o *Options) GetBlockCacheSize() int {
if o == nil || o.BlockCacheSize <= 0 {
return DefaultBlockCacheSize
func (o *Options) GetBlockCacheCapacity() int {
if o == nil || o.BlockCacheCapacity <= 0 {
return DefaultBlockCacheCapacity
} else if o.BlockCacheCapacity == -1 {
return 0
}
return o.BlockCacheSize
return o.BlockCacheCapacity
}
func (o *Options) GetBlockRestartInterval() int {
@@ -348,15 +377,6 @@ func (o *Options) GetBlockSize() int {
return o.BlockSize
}
func (o *Options) GetCachedOpenFiles() int {
if o == nil || o.CachedOpenFiles == 0 {
return DefaultCachedOpenFiles
} else if o.CachedOpenFiles < 0 {
return 0
}
return o.CachedOpenFiles
}
func (o *Options) GetCompactionExpandLimit(level int) int {
factor := DefaultCompactionExpandLimitFactor
if o != nil && o.CompactionExpandLimitFactor > 0 {
@@ -494,6 +514,25 @@ func (o *Options) GetNumLevel() int {
return o.NumLevel
}
func (o *Options) GetOpenFilesCacher() Cacher {
if o == nil || o.OpenFilesCacher == nil {
return DefaultOpenFilesCacher
}
if o.OpenFilesCacher == NoCacher {
return nil
}
return o.OpenFilesCacher
}
func (o *Options) GetOpenFilesCacheCapacity() int {
if o == nil || o.OpenFilesCacheCapacity <= 0 {
return DefaultOpenFilesCacheCapacity
} else if o.OpenFilesCacheCapacity == -1 {
return 0
}
return o.OpenFilesCacheCapacity
}
func (o *Options) GetStrict(strict Strict) bool {
if o == nil || o.Strict == 0 {
return DefaultStrict&strict != 0
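
Taken together, the changes in this file replace the old BlockCache, BlockCacheSize and CachedOpenFiles options with cacher-plus-capacity pairs. A minimal sketch of opening a database with the new fields (the path and sizes are illustrative):

o := &opt.Options{
	BlockCacher:            opt.LRUCacher, // the default; opt.NoCacher disables it
	BlockCacheCapacity:     16 * opt.MiB,  // -1 means zero, same as NoCacher
	OpenFilesCacher:        opt.LRUCacher,
	OpenFilesCacheCapacity: 100,
}
db, err := leveldb.OpenFile("path/to/db", o)
if err != nil {
	// handle the open error
}
defer db.Close()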


@@ -7,7 +7,6 @@
package leveldb
import (
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
)
@@ -32,13 +31,6 @@ func (s *session) setOptions(o *opt.Options) {
no.AltFilters[i] = &iFilter{filter}
}
}
// Block cache.
switch o.GetBlockCache() {
case nil:
no.BlockCache = cache.NewLRUCache(o.GetBlockCacheSize())
case opt.NoCache:
no.BlockCache = nil
}
// Comparer.
s.icmp = &iComparer{o.GetComparer()}
no.Comparer = s.icmp


@@ -73,7 +73,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
stCompPtrs: make([]iKey, o.GetNumLevel()),
}
s.setOptions(o)
s.tops = newTableOps(s, s.o.GetCachedOpenFiles())
s.tops = newTableOps(s)
s.setVersion(newVersion(s))
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
return
@@ -82,9 +82,6 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
// Close session.
func (s *session) close() {
s.tops.close()
if bc := s.o.GetBlockCache(); bc != nil {
bc.Purge(nil)
}
if s.manifest != nil {
s.manifest.Close()
}


@@ -221,7 +221,7 @@ func (fs *fileStorage) GetManifest() (f File, err error) {
fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
continue
}
if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil {
if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
continue
}


@@ -286,10 +286,10 @@ func (x *tFilesSortByNum) Less(i, j int) bool {
// Table operations.
type tOps struct {
s *session
cache cache.Cache
cacheNS cache.Namespace
bpool *util.BufferPool
s *session
cache *cache.Cache
bcache *cache.Cache
bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
@@ -338,26 +338,28 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
// Opens table. It returns a cache handle, which should
// be released after use.
func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
num := f.file.Num()
ch = t.cacheNS.Get(num, func() (charge int, value interface{}) {
ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
var r storage.Reader
r, err = f.file.Open()
if err != nil {
return 0, nil
}
var bcacheNS cache.Namespace
if bc := t.s.o.GetBlockCache(); bc != nil {
bcacheNS = bc.GetNamespace(num)
var bcache *cache.CacheGetter
if t.bcache != nil {
bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
}
var tr *table.Reader
tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcacheNS, t.bpool, t.s.o.Options)
tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
if err != nil {
r.Close()
return 0, nil
}
return 1, tr
})
if ch == nil && err == nil {
err = ErrClosed
@@ -412,16 +414,14 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
// no one uses the table.
func (t *tOps) remove(f *tFile) {
num := f.file.Num()
t.cacheNS.Delete(num, func(exist, pending bool) {
if !pending {
if err := f.file.Remove(); err != nil {
t.s.logf("table@remove removing @%d %q", num, err)
} else {
t.s.logf("table@remove removed @%d", num)
}
if bc := t.s.o.GetBlockCache(); bc != nil {
bc.ZapNamespace(num)
}
t.cache.Delete(0, num, func() {
if err := f.file.Remove(); err != nil {
t.s.logf("table@remove removing @%d %q", num, err)
} else {
t.s.logf("table@remove removed @%d", num)
}
if t.bcache != nil {
t.bcache.EvictNS(num)
}
})
}
@@ -429,18 +429,34 @@ func (t *tOps) remove(f *tFile) {
// Closes the table ops instance. It will close all tables,
// regardless of whether they are still in use.
func (t *tOps) close() {
t.cache.Zap()
t.bpool.Close()
t.cache.Close()
if t.bcache != nil {
t.bcache.Close()
}
}
// Creates new initialized table ops instance.
func newTableOps(s *session, cacheCap int) *tOps {
c := cache.NewLRUCache(cacheCap)
func newTableOps(s *session) *tOps {
var (
cacher cache.Cacher
bcache *cache.Cache
)
if s.o.GetOpenFilesCacheCapacity() > 0 {
cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
}
if !s.o.DisableBlockCache {
var bcacher cache.Cacher
if s.o.GetBlockCacheCapacity() > 0 {
bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
}
bcache = cache.NewCache(bcacher)
}
return &tOps{
s: s,
cache: c,
cacheNS: c.GetNamespace(0),
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
s: s,
cache: cache.NewCache(cacher),
bcache: bcache,
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
}
}
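
newTableOps now wires two caches: cache holds open table readers under namespace 0, keyed by file number, while the optional bcache holds decoded blocks namespaced per table file (evicted wholesale via EvictNS when the table is removed). A sketch of the lookup discipline implied by tOps.open above, with openTable standing in for the loader closure:

ch := t.cache.Get(0, fileNum, openTable) // nil handle only if the cache is closed
if ch != nil {
	tr := ch.Value().(*table.Reader)
	// ... read from tr ...
	ch.Release()
}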


@@ -509,7 +509,7 @@ type Reader struct {
mu sync.RWMutex
fi *storage.FileInfo
reader io.ReaderAt
cache cache.Namespace
cache *cache.CacheGetter
err error
bpool *util.BufferPool
// Options
@@ -613,18 +613,22 @@ func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error)
func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
if r.cache != nil {
var err error
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
if !fillCache {
return 0, nil
}
var b *block
b, err = r.readBlock(bh, verifyChecksum)
if err != nil {
return 0, nil
}
return cap(b.data), b
})
var (
err error
ch *cache.Handle
)
if fillCache {
ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
var b *block
b, err = r.readBlock(bh, verifyChecksum)
if err != nil {
return 0, nil
}
return cap(b.data), b
})
} else {
ch = r.cache.Get(bh.offset, nil)
}
if ch != nil {
b, ok := ch.Value().(*block)
if !ok {
@@ -667,18 +671,22 @@ func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
if r.cache != nil {
var err error
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
if !fillCache {
return 0, nil
}
var b *filterBlock
b, err = r.readFilterBlock(bh)
if err != nil {
return 0, nil
}
return cap(b.data), b
})
var (
err error
ch *cache.Handle
)
if fillCache {
ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
var b *filterBlock
b, err = r.readFilterBlock(bh)
if err != nil {
return 0, nil
}
return cap(b.data), b
})
} else {
ch = r.cache.Get(bh.offset, nil)
}
if ch != nil {
b, ok := ch.Value().(*filterBlock)
if !ok {
@@ -980,7 +988,7 @@ func (r *Reader) Release() {
// The fi, cache and bpool are optional and can be nil.
//
// The returned table reader instance is goroutine-safe.
func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
if f == nil {
return nil, errors.New("leveldb/table: nil file")
}
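
The reshaped readBlockCached and readFilterBlockCached now decide on fillCache before calling into the cache: passing a loader may populate the entry, while passing nil makes Get a pure lookup, mirroring the setf == nil branch of lruNs.Get at the top of this page. Schematically (loadAndSize is a placeholder for the closure above):

ch := r.cache.Get(bh.offset, loadAndSize) // may load and insert, charged by cap(b.data)
ch = r.cache.Get(bh.offset, nil)          // lookup only; nil handle if not already cached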


@@ -16,7 +16,7 @@ import (
"unicode/utf8"
)
type lowerCaseASCII struct{ transform.NopResetter }
type lowerCaseASCII struct{ NopResetter }
func (lowerCaseASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := len(src)
@@ -34,7 +34,7 @@ func (lowerCaseASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, er
var errYouMentionedX = errors.New("you mentioned X")
type dontMentionX struct{ transform.NopResetter }
type dontMentionX struct{ NopResetter }
func (dontMentionX) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
n := len(src)
@@ -52,7 +52,7 @@ func (dontMentionX) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
// doublerAtEOF is a strange Transformer that transforms "this" to "tthhiiss",
// but only if atEOF is true.
type doublerAtEOF struct{ transform.NopResetter }
type doublerAtEOF struct{ NopResetter }
func (doublerAtEOF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if !atEOF {
@@ -71,7 +71,7 @@ func (doublerAtEOF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
// rleDecode and rleEncode implement a toy run-length encoding: "aabbbbbbbbbb"
// is encoded as "2a10b". The decoding is assumed to not contain any numbers.
type rleDecode struct{ transform.NopResetter }
type rleDecode struct{ NopResetter }
func (rleDecode) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
loop:
@@ -104,7 +104,7 @@ loop:
}
type rleEncode struct {
transform.NopResetter
NopResetter
// allowStutter means that "xxxxxxxx" can be encoded as "5x3x"
// instead of always as "8x".


@@ -25,7 +25,8 @@ for incompatible changes.
Getting Started
---------------
Take a look at the [getting started guide](http://discourse.syncthing.net/t/46).
Take a look at the [getting started
guide](https://github.com/syncthing/syncthing/wiki/Getting-Started).
There are a few examples for keeping syncthing running in the background
on your system in [the etc directory](https://github.com/syncthing/syncthing/blob/master/etc).
@@ -37,7 +38,7 @@ Building
--------
Building Syncthing from source is easy, and there's a
[guide](http://discourse.syncthing.net/t/44)
[guide](https://github.com/syncthing/syncthing/wiki/Building).
that describes it for both Unix and Windows.
Signed Releases
@@ -52,8 +53,8 @@ Documentation
=============
The [syncthing
documentation](http://discourse.syncthing.net/category/documentation) is
on the discourse site.
documentation](https://github.com/syncthing/syncthing/wiki/) is on the
Github wiki.
All code is licensed under the
[GPL](https://github.com/syncthing/syncthing/blob/master/LICENSE), v3 or

cmd/stcompdirs/main.go Normal file

@@ -0,0 +1,177 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"crypto/md5"
"errors"
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
"github.com/syncthing/syncthing/internal/symlinks"
)
func main() {
flag.Parse()
log.Println(compareDirectories(flag.Args()...))
}
// Compare a number of directories. Returns nil if the contents are identical,
// otherwise an error describing the first found difference.
func compareDirectories(dirs ...string) error {
chans := make([]chan fileInfo, len(dirs))
for i := range chans {
chans[i] = make(chan fileInfo)
}
errcs := make([]chan error, len(dirs))
abort := make(chan struct{})
for i := range dirs {
errcs[i] = startWalker(dirs[i], chans[i], abort)
}
res := make([]fileInfo, len(dirs))
for {
numDone := 0
for i := range chans {
fi, ok := <-chans[i]
if !ok {
err, hasError := <-errcs[i]
if hasError {
close(abort)
return err
}
numDone++
}
res[i] = fi
}
for i := 1; i < len(res); i++ {
if res[i] != res[0] {
close(abort)
if res[i].name < res[0].name {
return fmt.Errorf("%s missing %v (present in %s)", dirs[0], res[i], dirs[i])
} else if res[i].name > res[0].name {
return fmt.Errorf("%s missing %v (present in %s)", dirs[i], res[0], dirs[0])
}
return fmt.Errorf("Mismatch; %v (%s) != %v (%s)", res[i], dirs[i], res[0], dirs[0])
}
}
if numDone == len(dirs) {
return nil
}
}
}
type fileInfo struct {
name string
mode os.FileMode
mod int64
hash [16]byte
}
func (f fileInfo) String() string {
return fmt.Sprintf("%s %04o %d %x", f.name, f.mode, f.mod, f.hash)
}
func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan error {
walker := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rn, _ := filepath.Rel(dir, path)
if rn == "." || rn == ".stfolder" {
return nil
}
if rn == ".stversions" {
return filepath.SkipDir
}
var f fileInfo
if info.Mode()&os.ModeSymlink != 0 {
f = fileInfo{
name: rn,
mode: os.ModeSymlink,
}
tgt, _, err := symlinks.Read(path)
if err != nil {
return err
}
h := md5.New()
h.Write([]byte(tgt))
hash := h.Sum(nil)
copy(f.hash[:], hash)
} else if info.IsDir() {
f = fileInfo{
name: rn,
mode: info.Mode(),
// hash and modtime zero for directories
}
} else {
f = fileInfo{
name: rn,
mode: info.Mode(),
mod: info.ModTime().Unix(),
}
sum, err := md5file(path)
if err != nil {
return err
}
f.hash = sum
}
select {
case res <- f:
return nil
case <-abort:
return errors.New("abort")
}
}
errc := make(chan error)
go func() {
err := filepath.Walk(dir, walker)
close(res)
if err != nil {
errc <- err
}
close(errc)
}()
return errc
}
func md5file(fname string) (hash [16]byte, err error) {
f, err := os.Open(fname)
if err != nil {
return
}
defer f.Close()
h := md5.New()
io.Copy(h, f)
hb := h.Sum(nil)
copy(hash[:], hb)
return
}
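
The tool walks each directory concurrently and compares the per-entry (name, mode, mtime, md5) tuples in lockstep, so a hypothetical invocation such as stcompdirs s1 s2 s3 logs <nil> when the trees match and the first difference otherwise. A usage sketch, under the same assumption:

// Hedged sketch; compareDirectories is the function defined above.
if err := compareDirectories("s1", "s2"); err != nil {
	log.Fatalln("trees differ:", err)
}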


@@ -149,6 +149,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
postRestMux.HandleFunc("/rest/bump", withModel(m, restPostBump))
// A handler that splits requests between the two above and disables
// caching
@@ -314,19 +315,12 @@ func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var folder = qs.Get("folder")
files := m.NeedFolderFilesLimited(folder, 100) // max 100 files
progress, queued, rest := m.NeedFolderFiles(folder, 100)
// Convert the struct to a more loose structure, and inject the size.
output := make([]map[string]interface{}, 0, len(files))
for _, file := range files {
output = append(output, map[string]interface{}{
"Name": file.Name,
"Flags": file.Flags,
"Modified": file.Modified,
"Version": file.Version,
"LocalVersion": file.LocalVersion,
"NumBlocks": file.NumBlocks,
"Size": protocol.BlocksToSize(file.NumBlocks),
})
output := map[string][]map[string]interface{}{
"progress": toNeedSlice(progress),
"queued": toNeedSlice(queued),
"rest": toNeedSlice(rest),
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
@@ -650,6 +644,14 @@ func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
}
}
func restPostBump(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
folder := qs.Get("folder")
file := qs.Get("file")
m.BringToFront(folder, file)
restGetNeed(m, w, r)
}
func getQR(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var text = qs.Get("text")
@@ -775,3 +777,19 @@ func mimeTypeForFile(file string) string {
return mime.TypeByExtension(ext)
}
}
func toNeedSlice(files []protocol.FileInfoTruncated) []map[string]interface{} {
output := make([]map[string]interface{}, len(files))
for i, file := range files {
output[i] = map[string]interface{}{
"Name": file.Name,
"Flags": file.Flags,
"Modified": file.Modified,
"Version": file.Version,
"LocalVersion": file.LocalVersion,
"NumBlocks": file.NumBlocks,
"Size": protocol.BlocksToSize(file.NumBlocks),
}
}
return output
}
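
A sketch of exercising the new endpoint; host, port and authentication handling are illustrative. The POST moves the file to the front of the folder's job queue and responds with the same JSON shape as /rest/need: an object with "progress", "queued" and "rest" arrays.

// Assumes imports "net/http", "io", "log" and "os".
resp, err := http.Post(
	"http://localhost:8080/rest/bump?folder=default&file=big.iso",
	"application/json", nil)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
io.Copy(os.Stdout, resp.Body) // {"progress":[...],"queued":[...],"rest":[...]}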


@@ -341,7 +341,7 @@ func main() {
if doUpgrade {
// Use leveldb database locks to protect against concurrent upgrades
_, err = leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
_, err = leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
if err != nil {
l.Fatalln("Cannot upgrade, database seems to be locked. Is another copy of Syncthing already running?")
}
@@ -489,7 +489,7 @@ func syncthingMain() {
readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
}
db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
if err != nil {
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}
@@ -795,7 +795,7 @@ func setupExternalPort(igd *upnp.IGD, port int) int {
for i := 0; i < 10; i++ {
r := 1024 + predictableRandom.Intn(65535-1024)
err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", cfg.Options().UPnPLease*60)
err := igd.AddPortMapping(upnp.TCP, r, port, fmt.Sprintf("syncthing-%d", r), cfg.Options().UPnPLease*60)
if err == nil {
return r
}
@@ -975,11 +975,16 @@ next:
}
}
events.Default.Log(events.DeviceRejected, map[string]string{
"device": remoteID.String(),
"address": conn.RemoteAddr().String(),
})
l.Infof("Connection from %s with unknown device ID %s; ignoring", conn.RemoteAddr(), remoteID)
if !cfg.IgnoredDevice(remoteID) {
events.Default.Log(events.DeviceRejected, map[string]string{
"device": remoteID.String(),
"address": conn.RemoteAddr().String(),
})
l.Infof("Connection from %s with unknown device ID %s", conn.RemoteAddr(), remoteID)
} else {
l.Infof("Connection from %s with ignored device ID %s", conn.RemoteAddr(), remoteID)
}
conn.Close()
}
}
@@ -1274,7 +1279,8 @@ func autoUpgrade() {
continue
}
if upgrade.CompareVersions(rel.Tag, Version) <= 0 {
if upgrade.CompareVersions(rel.Tag, Version) != upgrade.Newer {
// Skip equal, older or majorly newer (incompatible) versions
continue
}


@@ -1,8 +1,10 @@
{
"API Key": "API Key",
"About": "About",
"Add": "Add",
"Add Device": "Add Device",
"Add Folder": "Add Folder",
"Add new folder?": "Add new folder?",
"Address": "Address",
"Addresses": "Addresses",
"Allow Anonymous Usage Reporting?": "Allow Anonymous Usage Reporting?",
@@ -22,6 +24,7 @@
"Device ID": "Device ID",
"Device Identification": "Device Identification",
"Device Name": "Device Name",
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Device {{device}} ({{address}}) wants to connect. Add new device?",
"Disconnected": "Disconnected",
"Documentation": "Documentation",
"Download Rate": "Download Rate",
@@ -51,6 +54,7 @@
"Global Discovery Server": "Global Discovery Server",
"Global State": "Global State",
"Idle": "Idle",
"Ignore": "Ignore",
"Ignore Patterns": "Ignore Patterns",
"Ignore Permissions": "Ignore Permissions",
"Incoming Rate Limit (KiB/s)": "Incoming Rate Limit (KiB/s)",
@@ -59,12 +63,15 @@
"Keep Versions": "Keep Versions",
"Last File Synced": "Last File Synced",
"Last seen": "Last seen",
"Later": "Later",
"Latest Release": "Latest Release",
"Local Discovery": "Local Discovery",
"Local State": "Local State",
"Maximum Age": "Maximum Age",
"Multi level wildcard (matches multiple directory levels)": "Multi level wildcard (matches multiple directory levels)",
"Never": "Never",
"New Device": "New Device",
"New Folder": "New Folder",
"No": "No",
"No File Versioning": "No File Versioning",
"Notice": "Notice",
@@ -92,8 +99,11 @@
"Select the devices to share this folder with.": "Select the devices to share this folder with.",
"Select the folders to share with this device.": "Select the folders to share with this device.",
"Settings": "Settings",
"Share": "Share",
"Share Folder": "Share Folder",
"Share Folders With Device": "Share Folders With Device",
"Share With Devices": "Share With Devices",
"Share this folder?": "Share this folder?",
"Shared With": "Shared With",
"Short identifier for the folder. Must be the same on all cluster devices.": "Short identifier for the folder. Must be the same on all cluster devices.",
"Show ID": "Show ID",
@@ -149,5 +159,6 @@
"Yes": "Yes",
"You must keep at least one version.": "You must keep at least one version.",
"full documentation": "full documentation",
"items": "items"
"items": "items",
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} wants to share folder \"{{folder}}\"."
}


@@ -308,6 +308,73 @@
</div>
</div> <!-- /row -->
<!-- Device Rejections -->
<div ng-repeat="(device, event) in deviceRejections" class="row">
<div class="col-md-12">
<div class="panel panel-warning">
<div class="panel-heading">
<h3 class="panel-title">
<identicon data-value="device"></identicon>&emsp;<span translate>New Device</span>
</h3>
</div>
<div class="panel-body">
<p>
<small>{{ event.time | date:"H:mm:ss" }}:</small>
<span translate translate-value-device="{{ device }}" translate-value-address="{{ event.data.address }}">
Device {%device%} ({%address%}) wants to connect. Add new device?
</span>
</p>
</div>
<div class="panel-footer clearfix">
<div class="pull-right">
<button class="btn btn-sm btn-success" ng-click="addNewDeviceID(device)"><span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Add</span></button>
<button class="btn btn-sm btn-danger" ng-click="ignoreRejectedDevice(device)"><span class="glyphicon glyphicon-remove"></span>&emsp;<span translate>Ignore</span></button>
<button class="btn btn-sm btn-default" ng-click="dismissDeviceRejection(device)"><span class="glyphicon glyphicon-time"></span>&emsp;<span translate>Later</span></button>
</div>
</div>
</div>
</div>
</div>
<!-- Folder Rejections -->
<div ng-repeat="(key, event) in folderRejections" class="row reject">
<div class="col-md-12">
<div class="panel panel-warning">
<div class="panel-heading">
<h3 class="panel-title">
<span translate ng-if="!folders[event.data.folder]">New Folder</span>
<span translate ng-if="folders[event.data.folder]">Share Folder</span>
</h3>
</div>
<div class="panel-body">
<p>
<small>{{ event.time | date:"H:mm:ss" }}:</small>
<span translate translate-value-device="{{ deviceName(findDevice(event.data.device)) }}" translate-value-folder="{{ event.data.folder }}">
{%device%} wants to share folder "{%folder%}".
</span>
<span translate ng-if="folders[event.data.folder]">Share this folder?</span>
<span translate ng-if="!folders[event.data.folder]">Add new folder?</span>
</p>
</div>
<div class="panel-footer clearfix">
<div class="pull-right">
<button class="btn btn-sm btn-success" ng-click="addFolderAndShare(event.data.folder, event.data.device)" ng-if="!folders[event.data.folder]">
<span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Add</span>
</button>
<button class="btn btn-sm btn-success" ng-click="shareFolderWithDevice(event.data.folder, event.data.device)" ng-if="folders[event.data.folder]">
<span class="glyphicon glyphicon-ok"></span>&emsp;<span translate>Share</span>
</button>
<button class="btn btn-sm btn-default" ng-click="dismissFolderRejection(event.data.folder, event.data.device)">
<span class="glyphicon glyphicon-time"></span>&emsp;<span translate>Later</span>
</button>
</div>
</div>
</div>
</div>
</div>
<!-- Errors -->
<div ng-if="errorList().length > 0" class="row">
@@ -801,21 +868,37 @@
<hr/>
<table class="table table-striped table-condensed">
<tr ng-repeat="f in needed" ng-init="a = needAction(f)">
<tr ng-repeat="f in needed.progress" ng-init="a = needAction(f)">
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
<td title="{{f.Name}}">{{f.Name | basename}}</td>
<td>
<span ng-if="a == 'sync' && progress[neededFolder] && progress[neededFolder][f.Name]">
<div class="progress">
<div class="progress-bar progress-bar-success" style="width: {{progress[neededFolder][f.Name].Reused}}%"></div>
<div class="progress-bar" style="width: {{progress[neededFolder][f.Name].CopiedFromOrigin}}%"></div>
<div class="progress-bar progress-bar-info" style="width: {{progress[neededFolder][f.Name].CopiedFromElsewhere}}%"></div>
<div class="progress-bar progress-bar-warning" style="width: {{progress[neededFolder][f.Name].Pulled}}%"></div>
<div class="progress-bar progress-bar-danger progress-bar-striped active" style="width: {{progress[neededFolder][f.Name].Pulling}}%"></div>
<span class="show frontal">{{progress[neededFolder][f.Name].BytesDone | binary}}B / {{progress[neededFolder][f.Name].BytesTotal | binary}}B</span>
</div>
</span>
<td ng-if="a == 'sync' && progress[neededFolder] && progress[neededFolder][f.Name]">
<div class="progress">
<div class="progress-bar progress-bar-success" style="width: {{progress[neededFolder][f.Name].Reused}}%"></div>
<div class="progress-bar" style="width: {{progress[neededFolder][f.Name].CopiedFromOrigin}}%"></div>
<div class="progress-bar progress-bar-info" style="width: {{progress[neededFolder][f.Name].CopiedFromElsewhere}}%"></div>
<div class="progress-bar progress-bar-warning" style="width: {{progress[neededFolder][f.Name].Pulled}}%"></div>
<div class="progress-bar progress-bar-danger progress-bar-striped active" style="width: {{progress[neededFolder][f.Name].Pulling}}%"></div>
<span class="show frontal">
{{progress[neededFolder][f.Name].BytesDone | binary}}B / {{progress[neededFolder][f.Name].BytesTotal | binary}}B
</span>
</div>
</td>
<td class="text-right small-data" ng-if="a != 'sync' || !progress[neededFolder] || !progress[neededFolder][f.Name]">
<span ng-if="f.Size > 0">{{f.Size | binary}}B</span>
</td>
</tr>
<tr ng-repeat="f in needed.queued" ng-init="a = needAction(f)">
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
<td title="{{f.Name}}">{{f.Name | basename}}</td>
<td class="text-right small-data">
<span ng-if="$index != 0" class="glyphicon glyphicon-chevron-up" ng-click="bumpFile(neededFolder, f.Name)"></span>
<span ng-if="f.Size > 0">{{f.Size | binary}}B</span>
</td>
</tr>
<tr ng-repeat="f in needed.rest" ng-init="a = needAction(f)">
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
<td title="{{f.Name}}">{{f.Name | basename}}</td>
<td class="text-right small-data"><span ng-if="f.Size > 0">{{f.Size | binary}}B</span></td>
</tr>
</table>


@@ -104,15 +104,12 @@ function decimals(val, num) {
return decs;
}
function randomString(len, bits) {
bits = bits || 36;
var outStr = "",
newStr;
while (outStr.length < len) {
newStr = Math.random().toString(bits).slice(2);
outStr += newStr.slice(0, Math.min(newStr.length, (len - outStr.length)));
function randomString(len) {
var i, result = '', chars = '01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-';
for (i = 0; i < len; i++) {
result += chars[Math.round(Math.random() * (chars.length - 1))];
}
return outStr.toLowerCase();
return result;
}
function isEmptyObject(obj) {


@@ -47,6 +47,8 @@ angular.module('syncthing.core')
$scope.model = {};
$scope.myID = '';
$scope.devices = [];
$scope.deviceRejections = {};
$scope.folderRejections = {};
$scope.protocolChanged = false;
$scope.reportData = {};
$scope.reportPreview = false;
@@ -168,6 +170,14 @@ angular.module('syncthing.core')
}
});
$scope.$on('DeviceRejected', function (event, arg) {
$scope.deviceRejections[arg.data.device] = arg;
});
$scope.$on('FolderRejected', function (event, arg) {
$scope.folderRejections[arg.data.folder + "-" + arg.data.device] = arg;
});
$scope.$on('ConfigSaved', function (event, arg) {
updateLocalConfig(arg.data);
@@ -698,6 +708,11 @@ angular.module('syncthing.core')
return n.DeviceID !== $scope.currentDevice.DeviceID;
});
$scope.config.Devices = $scope.devices;
// In case we later added the device manually, remove the ignore
// record.
$scope.config.IgnoredDevices = $scope.config.IgnoredDevices.filter(function (id) {
return id !== $scope.currentDevice.DeviceID;
});
for (var id in $scope.folders) {
$scope.folders[id].Devices = $scope.folders[id].Devices.filter(function (n) {
@@ -709,10 +724,24 @@ angular.module('syncthing.core')
};
$scope.saveDevice = function () {
var deviceCfg, done, i;
$('#editDevice').modal('hide');
deviceCfg = $scope.currentDevice;
$scope.saveDeviceConfig($scope.currentDevice);
};
$scope.addNewDeviceID = function (device) {
var deviceCfg = {
DeviceID: device,
AddressesStr: 'dynamic',
Compression: true,
Introducer: false,
selectedFolders: {}
};
$scope.saveDeviceConfig(deviceCfg);
$scope.dismissDeviceRejection(device);
};
$scope.saveDeviceConfig = function (deviceCfg) {
var done, i;
deviceCfg.Addresses = deviceCfg.AddressesStr.split(',').map(function (x) {
return x.trim();
});
@@ -732,6 +761,11 @@ angular.module('syncthing.core')
$scope.devices.sort(deviceCompare);
$scope.config.Devices = $scope.devices;
// In case we are adding the device manually, remove the ignore
// record.
$scope.config.IgnoredDevices = $scope.config.IgnoredDevices.filter(function (id) {
return id !== deviceCfg.DeviceID;
});
if (!$scope.editingSelf) {
for (var id in deviceCfg.selectedFolders) {
@@ -749,7 +783,6 @@ angular.module('syncthing.core')
DeviceID: deviceCfg.DeviceID
});
}
continue
} else {
$scope.folders[id].Devices = $scope.folders[id].Devices.filter(function (n) {
return n.DeviceID != deviceCfg.DeviceID;
@@ -761,6 +794,16 @@ angular.module('syncthing.core')
$scope.saveConfig();
};
$scope.dismissDeviceRejection = function (device) {
delete $scope.deviceRejections[device];
};
$scope.ignoreRejectedDevice = function (device) {
$scope.config.IgnoredDevices.push(device);
$scope.saveConfig();
$scope.dismissDeviceRejection(device);
};
$scope.otherDevices = function () {
return $scope.devices.filter(function (n) {
return n.DeviceID !== $scope.myID;
@@ -817,8 +860,8 @@ angular.module('syncthing.core')
});
});
$scope.editFolder = function (deviceCfg) {
$scope.currentFolder = angular.copy(deviceCfg);
$scope.editFolder = function (folderCfg) {
$scope.currentFolder = angular.copy(folderCfg);
$scope.currentFolder.selectedDevices = {};
$scope.currentFolder.Devices.forEach(function (n) {
$scope.currentFolder.selectedDevices[n.DeviceID] = true;
@@ -867,6 +910,34 @@ angular.module('syncthing.core')
$('#editFolder').modal();
};
$scope.addFolderAndShare = function (folder, device) {
$scope.dismissFolderRejection(folder, device);
$scope.currentFolder = {
ID: folder,
selectedDevices: {}
};
$scope.currentFolder.selectedDevices[device] = true;
$scope.currentFolder.RescanIntervalS = 60;
$scope.currentFolder.FileVersioningSelector = "none";
$scope.currentFolder.simpleKeep = 5;
$scope.currentFolder.staggeredMaxAge = 365;
$scope.currentFolder.staggeredCleanInterval = 3600;
$scope.currentFolder.staggeredVersionsPath = "";
$scope.editingExisting = false;
$scope.folderEditor.$setPristine();
$('#editFolder').modal();
};
$scope.shareFolderWithDevice = function (folder, device) {
$scope.folders[folder].Devices.push({
DeviceID: device
});
$scope.config.Folders = folderList($scope.folders);
$scope.saveConfig();
$scope.dismissFolderRejection(folder, device);
};
$scope.saveFolder = function () {
var folderCfg, done, i;
@@ -916,6 +987,10 @@ angular.module('syncthing.core')
$scope.saveConfig();
};
$scope.dismissFolderRejection = function (folder, device) {
delete $scope.folderRejections[folder + "-" + device];
};
$scope.sharesFolder = function (folderCfg) {
var names = [];
folderCfg.Devices.forEach(function (device) {
@@ -994,7 +1069,7 @@ angular.module('syncthing.core')
};
$scope.setAPIKey = function (cfg) {
cfg.APIKey = randomString(30, 32);
cfg.APIKey = randomString(32);
};
$scope.showURPreview = function () {
@@ -1056,6 +1131,15 @@ angular.module('syncthing.core')
$http.post(urlbase + "/scan?folder=" + encodeURIComponent(folder));
};
$scope.bumpFile = function (folder, file) {
$http.post(urlbase + "/bump?folder=" + encodeURIComponent(folder) + "&file=" + encodeURIComponent(file)).success(function (data) {
if ($scope.neededFolder == folder) {
console.log("bumpFile", folder, data);
$scope.needed = data;
}
});
};
// pseudo main. called on all definitions assigned
initController();
});


File diff suppressed because one or more lines are too long


@@ -20,6 +20,7 @@ import (
"encoding/xml"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"reflect"
@@ -37,12 +38,13 @@ var l = logger.DefaultLogger
const CurrentVersion = 7
type Configuration struct {
Version int `xml:"version,attr"`
Folders []FolderConfiguration `xml:"folder"`
Devices []DeviceConfiguration `xml:"device"`
GUI GUIConfiguration `xml:"gui"`
Options OptionsConfiguration `xml:"options"`
XMLName xml.Name `xml:"configuration" json:"-"`
Version int `xml:"version,attr"`
Folders []FolderConfiguration `xml:"folder"`
Devices []DeviceConfiguration `xml:"device"`
GUI GUIConfiguration `xml:"gui"`
Options OptionsConfiguration `xml:"options"`
IgnoredDevices []protocol.DeviceID `xml:"ignoredDevice"`
XMLName xml.Name `xml:"configuration" json:"-"`
OriginalVersion int `xml:"-" json:"-"` // The version we read from disk, before any conversion
Deprecated_Repositories []FolderConfiguration `xml:"repository" json:"-"`
@@ -240,10 +242,13 @@ func (cfg *Configuration) WriteXML(w io.Writer) error {
func (cfg *Configuration) prepare(myID protocol.DeviceID) {
fillNilSlices(&cfg.Options)
// Initialize an empty slice for folders if the config has none
// Initialize empty slices
if cfg.Folders == nil {
cfg.Folders = []FolderConfiguration{}
}
if cfg.IgnoredDevices == nil {
cfg.IgnoredDevices = []protocol.DeviceID{}
}
// Check for missing, bad or duplicate folder ID:s
var seenFolders = map[string]*FolderConfiguration{}
@@ -369,6 +374,10 @@ func (cfg *Configuration) prepare(myID protocol.DeviceID) {
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
cfg.Options.GlobalAnnServers = uniqueStrings(cfg.Options.GlobalAnnServers)
if cfg.GUI.APIKey == "" {
cfg.GUI.APIKey = randomString(32)
}
}
// ChangeRequiresRestart returns true if updating the configuration requires a
@@ -674,3 +683,16 @@ func (l FolderDeviceConfigurationList) Swap(a, b int) {
func (l FolderDeviceConfigurationList) Len() int {
return len(l)
}
// randomCharset contains the characters that can make up a randomString().
const randomCharset = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
// randomString returns a string of random characters (taken from
// randomCharset) of the specified length.
func randomString(l int) string {
bs := make([]byte, l)
for i := range bs {
bs[i] = randomCharset[rand.Intn(len(randomCharset))]
}
return string(bs)
}


@@ -245,6 +245,19 @@ func (w *Wrapper) InvalidateFolder(id string, err string) {
}
}
// Returns whether or not connection attempts from the given device should be
// silently ignored.
func (w *Wrapper) IgnoredDevice(id protocol.DeviceID) bool {
w.mut.Lock()
defer w.mut.Unlock()
for _, device := range w.cfg.IgnoredDevices {
if device == id {
return true
}
}
return false
}
// Save writes the configuration to disk, and generates a ConfigSaved event.
func (w *Wrapper) Save() error {
fd, err := ioutil.TempFile(filepath.Dir(w.path), "cfg")


@@ -144,7 +144,7 @@ func TestConcurrentSetClear(t *testing.T) {
var wg sync.WaitGroup
os.RemoveAll("testdata/concurrent-set-clear.db")
db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{CachedOpenFiles: 10})
db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10})
if err != nil {
t.Fatal(err)
}
@@ -200,7 +200,7 @@ func TestConcurrentSetOnly(t *testing.T) {
var wg sync.WaitGroup
os.RemoveAll("testdata/concurrent-set-only.db")
db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{CachedOpenFiles: 10})
db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{OpenFilesCacheCapacity: 10})
if err != nil {
t.Fatal(err)
}


@@ -79,6 +79,8 @@ const (
type service interface {
Serve()
Stop()
Jobs() ([]string, []string) // In progress, Queued
BringToFront(string)
}
type Model struct {
@@ -189,6 +191,7 @@ func (m *Model) StartFolderRW(folder string) {
copiers: cfg.Copiers,
pullers: cfg.Pullers,
finishers: cfg.Finishers,
queue: newJobQueue(),
}
m.folderRunners[folder] = p
m.fmut.Unlock()
@@ -416,22 +419,50 @@ func (m *Model) NeedSize(folder string) (files int, bytes int64) {
return
}
// NeedFiles returns the list of currently needed files, stopping at maxFiles
// files. Limit <= 0 is ignored.
func (m *Model) NeedFolderFilesLimited(folder string, maxFiles int) []protocol.FileInfoTruncated {
// NeedFolderFiles returns the list of currently needed files in progress, queued,
// and to be queued on next puller iteration. Also takes a soft cap which is
// only respected when adding files from the model rather than the runner queue.
func (m *Model) NeedFolderFiles(folder string, max int) ([]protocol.FileInfoTruncated, []protocol.FileInfoTruncated, []protocol.FileInfoTruncated) {
defer m.leveldbPanicWorkaround()
m.fmut.RLock()
defer m.fmut.RUnlock()
if rf, ok := m.folderFiles[folder]; ok {
fs := make([]protocol.FileInfoTruncated, 0, maxFiles)
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
fs = append(fs, f.(protocol.FileInfoTruncated))
return maxFiles <= 0 || len(fs) < maxFiles
})
return fs
var progress, queued, rest []protocol.FileInfoTruncated
var seen map[string]bool
runner, ok := m.folderRunners[folder]
if ok {
progressNames, queuedNames := runner.Jobs()
progress = make([]protocol.FileInfoTruncated, len(progressNames))
queued = make([]protocol.FileInfoTruncated, len(queuedNames))
seen = make(map[string]bool, len(progressNames)+len(queuedNames))
for i, name := range progressNames {
progress[i] = rf.GetGlobal(name).ToTruncated() /// XXX: Should implement GetGlobalTruncated directly
seen[name] = true
}
for i, name := range queuedNames {
queued[i] = rf.GetGlobal(name).ToTruncated() /// XXX: Should implement GetGlobalTruncated directly
seen[name] = true
}
}
left := max - len(progress) - len(queued)
if max < 1 || left > 0 {
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
left--
ft := f.(protocol.FileInfoTruncated)
if !seen[ft.Name] {
rest = append(rest, ft)
}
return max < 1 || left > 0
})
}
return progress, queued, rest
}
return nil
return nil, nil, nil
}
// Index is called when a new device is connected and we receive their full index.
@@ -446,7 +477,7 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
"folder": folder,
"device": deviceID.String(),
})
l.Warnf("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
l.Infof("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
return
}
@@ -1336,9 +1367,9 @@ func (m *Model) RemoteLocalVersion(folder string) uint64 {
return ver
}
func (m *Model) availability(folder string, file string) []protocol.DeviceID {
func (m *Model) availability(folder, file string) []protocol.DeviceID {
// Acquire this lock first, as the value returned from foldersFiles can
// gen heavily modified on Close()
// get heavily modified on Close()
m.pmut.RLock()
defer m.pmut.RUnlock()
@@ -1359,6 +1390,17 @@ func (m *Model) availability(folder string, file string) []protocol.DeviceID {
return availableDevices
}
// Bump the given file's priority in the job queue
func (m *Model) BringToFront(folder, file string) {
m.pmut.RLock()
defer m.pmut.RUnlock()
runner, ok := m.folderRunners[folder]
if ok {
runner.BringToFront(file)
}
}
func (m *Model) String() string {
return fmt.Sprintf("model@%p", m)
}


@@ -16,11 +16,10 @@
package model
import (
"bytes"
"crypto/sha256"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync"
@@ -78,6 +77,7 @@ type Puller struct {
copiers int
pullers int
finishers int
queue *jobQueue
}
// Serve will run scans and pulls. It will return when Stop()ed or on a
@@ -156,16 +156,10 @@ loop:
}
p.model.setState(p.folder, FolderSyncing)
tries := 0
checksum := false
for {
tries++
// Last resort mode, to get around corrupt/invalid block maps.
if tries == 10 {
l.Infoln("Desperation mode ON")
checksum = true
}
changed := p.pullerIteration(checksum, curIgnores)
changed := p.pullerIteration(curIgnores)
if debug {
l.Debugln(p, "changed", changed)
}
@@ -224,10 +218,14 @@ loop:
}
p.model.setState(p.folder, FolderIdle)
if p.scanIntv > 0 {
// Sleep a random time between 3/4 and 5/4 of the configured interval.
sleepNanos := (p.scanIntv.Nanoseconds()*3 + rand.Int63n(2*p.scanIntv.Nanoseconds())) / 4
intv := time.Duration(sleepNanos) * time.Nanosecond
if debug {
l.Debugln(p, "next rescan in", p.scanIntv)
l.Debugln(p, "next rescan in", intv)
}
scanTimer.Reset(p.scanIntv)
scanTimer.Reset(intv)
}
if !initialScanCompleted {
l.Infoln("Completed initial scan (rw) of folder", p.folder)
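
Concretely, since rand.Int63n(2n) is uniform on [0, 2n), sleepNanos is a uniform draw from [3/4·scanIntv, 5/4·scanIntv): with a 60 s rescan interval the next scan fires between 45 s and 75 s. The jitter keeps many folders from rescanning in lockstep.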
@@ -249,7 +247,7 @@ func (p *Puller) String() string {
// returns the number of items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the folder.
func (p *Puller) pullerIteration(checksum bool, ignores *ignore.Matcher) int {
func (p *Puller) pullerIteration(ignores *ignore.Matcher) int {
pullChan := make(chan pullBlockState)
copyChan := make(chan copyBlocksState)
finisherChan := make(chan *sharedPullerState)
@@ -266,7 +264,7 @@ func (p *Puller) pullerIteration(checksum bool, ignores *ignore.Matcher) int {
copyWg.Add(1)
go func() {
// copierRoutine finishes when copyChan is closed
p.copierRoutine(copyChan, pullChan, finisherChan, checksum)
p.copierRoutine(copyChan, pullChan, finisherChan)
copyWg.Done()
}()
}
@@ -337,15 +335,23 @@ func (p *Puller) pullerIteration(checksum bool, ignores *ignore.Matcher) int {
p.handleDir(file)
default:
// A new or changed file or symlink. This is the only case where we
// do stuff in the background; the other three are done
// synchronously.
p.handleFile(file, copyChan, finisherChan)
// do stuff concurrently in the background
p.queue.Push(file.Name)
}
changed++
return true
})
for {
fileName, ok := p.queue.Pop()
if !ok {
break
}
f := p.model.CurrentGlobalFile(p.folder, fileName)
p.handleFile(f, copyChan, finisherChan)
}
// Signal copy and puller routines that we are done with the in data for
// this iteration. Wait for them to finish.
close(copyChan)
@@ -483,6 +489,7 @@ func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksSt
if debug {
l.Debugln(p, "taking shortcut on", file.Name)
}
p.queue.Done(file.Name)
if file.IsSymlink() {
p.shortcutSymlink(curFile, file)
} else {
@@ -598,7 +605,7 @@ func (p *Puller) shortcutSymlink(curFile, file protocol.FileInfo) {
// copierRoutine reads copierStates until the in channel closes and performs
// the relevant copies when possible, or passes it to the puller routine.
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState, checksum bool) {
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
buf := make([]byte, protocol.BlockSize)
nextFile:
@@ -634,7 +641,6 @@ nextFile:
}
p.model.fmut.RUnlock()
hasher := sha256.New()
for _, block := range state.blocks {
buf = buf[:int(block.Size)]
found := p.model.finder.Iterate(block.Hash, func(folder, file string, index uint32) bool {
@@ -658,12 +664,9 @@ nextFile:
return false
}
// Only done on second to last puller attempt
if checksum {
hasher.Write(buf)
hash := hasher.Sum(nil)
hasher.Reset()
if !bytes.Equal(hash, block.Hash) {
hash, err := scanner.VerifyBuffer(buf, block)
if err != nil {
if hash != nil {
if debug {
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
}
@@ -671,8 +674,10 @@ nextFile:
if err != nil {
l.Warnln("finder fix:", err)
}
return false
} else if debug {
l.Debugln("Finder failed to verify buffer", err)
}
return false
}
_, err = dstFd.WriteAt(buf, block.Offset)
@@ -707,20 +712,9 @@ nextFile:
}
func (p *Puller) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
nextBlock:
for state := range in {
if state.failed() != nil {
continue nextBlock
}
// Select the least busy device to pull the block from. If we found no
// feasible device at all, fail the block (and in the long run, the
// file).
potentialDevices := p.model.availability(p.folder, state.file.Name)
selected := activity.leastBusy(potentialDevices)
if selected == (protocol.DeviceID{}) {
state.earlyClose("pull", errNoDevice)
continue nextBlock
continue
}
// Get an fd to the temporary file. Technically we don't need it until
@@ -728,45 +722,58 @@ nextBlock:
// no point in issuing the request to the network.
fd, err := state.tempFile()
if err != nil {
continue nextBlock
continue
}
// Fetch the block, while marking the selected device as in use so that
// leastBusy can select another device when someone else asks.
activity.using(selected)
buf, err := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
activity.done(selected)
if err != nil {
state.earlyClose("pull", err)
continue nextBlock
}
var lastError error
potentialDevices := p.model.availability(p.folder, state.file.Name)
for {
// Select the least busy device to pull the block from. If we found no
// feasible device at all, fail the block (and in the long run, the
// file).
selected := activity.leastBusy(potentialDevices)
if selected == (protocol.DeviceID{}) {
if lastError != nil {
state.earlyClose("pull", lastError)
} else {
state.earlyClose("pull", errNoDevice)
}
break
}
// Save the block data we got from the cluster
_, err = fd.WriteAt(buf, state.block.Offset)
if err != nil {
state.earlyClose("save", err)
continue nextBlock
}
potentialDevices = removeDevice(potentialDevices, selected)
state.pullDone()
out <- state.sharedPullerState
// Fetch the block, while marking the selected device as in use so that
// leastBusy can select another device when someone else asks.
activity.using(selected)
// Assign (rather than declare with ":=") so the outer lastError
// survives the loop and can be reported by earlyClose when every
// device has failed.
var buf []byte
buf, lastError = p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
activity.done(selected)
if lastError != nil {
continue
}
// Verify that the received block matches the desired hash; if not,
// try pulling it from another device.
_, lastError = scanner.VerifyBuffer(buf, state.block)
if lastError != nil {
continue
}
// Save the block data we got from the cluster
_, err = fd.WriteAt(buf, state.block.Offset)
if err != nil {
state.earlyClose("save", err)
} else {
state.pullDone()
out <- state.sharedPullerState
}
break
}
}
}
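The loop above retries each block against every device that advertises the file, remembering the last error so it can be reported if all of them fail. Stripped of the syncthing plumbing, the pattern looks roughly like this (a minimal standalone sketch; fetch stands in for requestGlobal followed by VerifyBuffer, and errNoDevice for the sentinel used above):

package main

import (
	"errors"
	"fmt"
)

var errNoDevice = errors.New("no available source device")

// pullWithRetry tries each candidate once, in order, and returns the
// first successfully fetched block. If every candidate fails, the most
// recent fetch error is returned; if there were no candidates at all,
// errNoDevice is returned instead.
func pullWithRetry(devices []string, fetch func(string) ([]byte, error)) ([]byte, error) {
	var lastError error
	for _, d := range devices {
		buf, err := fetch(d)
		if err != nil {
			lastError = err
			continue
		}
		return buf, nil
	}
	if lastError == nil {
		lastError = errNoDevice
	}
	return nil, lastError
}

func main() {
	flaky := func(d string) ([]byte, error) {
		if d != "c" {
			return nil, fmt.Errorf("%s: connection reset", d)
		}
		return []byte("block data"), nil
	}
	buf, err := pullWithRetry([]string{"a", "b", "c"}, flaky)
	fmt.Println(string(buf), err) // "block data <nil>"
}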
func (p *Puller) performFinish(state *sharedPullerState) {
// Verify the file against expected hashes
fd, err := os.Open(state.tempName)
if err != nil {
l.Warnln("puller: final:", err)
return
}
err = scanner.Verify(fd, protocol.BlockSize, state.file.Blocks)
fd.Close()
if err != nil {
l.Infoln("puller:", state.file.Name, err, "(file changed during pull?)")
return
}
var err error
// Set the correct permission bits on the new file
if !p.ignorePerms {
err = os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777))
@@ -850,6 +857,7 @@ func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
continue
}
p.queue.Done(state.file.Name)
p.performFinish(state)
p.model.receivedFile(p.folder, state.file.Name)
if p.progressEmitter != nil {
@@ -859,6 +867,15 @@ func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
}
}
// Moves the given filename to the front of the job queue
func (p *Puller) BringToFront(filename string) {
p.queue.BringToFront(filename)
}
func (p *Puller) Jobs() ([]string, []string) {
return p.queue.Jobs()
}
func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
for i := range cfg.Folders {
folder := &cfg.Folders[i]
@@ -868,3 +885,13 @@ func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
}
}
}
func removeDevice(devices []protocol.DeviceID, device protocol.DeviceID) []protocol.DeviceID {
for i := range devices {
if devices[i] == device {
devices[i] = devices[len(devices)-1]
return devices[:len(devices)-1]
}
}
return devices
}
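removeDevice uses the classic swap-remove idiom: overwrite the found element with the last one and shrink the slice. That is O(1) per removal but does not preserve order, which is fine here since leastBusy scans the whole slice anyway. A standalone equivalent over strings, for illustration:

package main

import "fmt"

// removeString mirrors removeDevice above for plain strings.
func removeString(xs []string, x string) []string {
	for i := range xs {
		if xs[i] == x {
			xs[i] = xs[len(xs)-1] // move the last element into the hole
			return xs[:len(xs)-1] // shrink by one
		}
	}
	return xs // not found: unchanged
}

func main() {
	fmt.Println(removeString([]string{"a", "b", "c", "d"}, "b")) // [a d c]
}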

View File

@@ -221,7 +221,7 @@ func TestCopierFinder(t *testing.T) {
finisherChan := make(chan *sharedPullerState, 1)
// Run a single fetcher routine
go p.copierRoutine(copyChan, pullChan, finisherChan, false)
go p.copierRoutine(copyChan, pullChan, finisherChan)
p.handleFile(requiredFile, copyChan, finisherChan)
@@ -317,9 +317,8 @@ func TestCopierCleanup(t *testing.T) {
}
}
// On the 10th iteration, we start hashing the content which we receive by
// following blockfinder's instructions. Make sure that the copier routine
// hashes the content when asked, and pulls if it fails to find the block.
// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"}
cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}}
@@ -361,8 +360,8 @@ func TestLastResortPulling(t *testing.T) {
pullChan := make(chan pullBlockState, 1)
finisherChan := make(chan *sharedPullerState, 1)
// Run a single copier routine with checksumming enabled
go p.copierRoutine(copyChan, pullChan, finisherChan, true)
// Run a single copier routine
go p.copierRoutine(copyChan, pullChan, finisherChan)
p.handleFile(file, copyChan, finisherChan)

internal/model/queue.go Normal file
View File

@@ -0,0 +1,94 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package model
import "sync"
type jobQueue struct {
progress []string
queued []string
mut sync.Mutex
}
func newJobQueue() *jobQueue {
return &jobQueue{}
}
func (q *jobQueue) Push(file string) {
q.mut.Lock()
q.queued = append(q.queued, file)
q.mut.Unlock()
}
func (q *jobQueue) Pop() (string, bool) {
q.mut.Lock()
defer q.mut.Unlock()
if len(q.queued) == 0 {
return "", false
}
f := q.queued[0]
q.queued = q.queued[1:]
q.progress = append(q.progress, f)
return f, true
}
func (q *jobQueue) BringToFront(filename string) {
q.mut.Lock()
defer q.mut.Unlock()
for i, cur := range q.queued {
if cur == filename {
if i > 0 {
// Shift the elements before the selected element one step to
// the right, overwriting the selected element
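// e.g. with queued = [a b c d] and i == 2 ("c" selected), the copy
// yields [a a b d], and the assignment below then gives [c a b d]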
copy(q.queued[1:i+1], q.queued[0:])
// Put the selected element at the front
q.queued[0] = cur
}
return
}
}
}
func (q *jobQueue) Done(file string) {
q.mut.Lock()
defer q.mut.Unlock()
for i := range q.progress {
if q.progress[i] == file {
copy(q.progress[i:], q.progress[i+1:])
q.progress = q.progress[:len(q.progress)-1]
return
}
}
}
func (q *jobQueue) Jobs() ([]string, []string) {
q.mut.Lock()
defer q.mut.Unlock()
progress := make([]string, len(q.progress))
copy(progress, q.progress)
queued := make([]string, len(q.queued))
copy(queued, q.queued)
return progress, queued
}
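Taken together, the queue has a simple lifecycle: Push on discovery, Pop to start work, Done on completion, with BringToFront reordering whatever is still queued. A usage sketch of the API above (the real consumer is pullerIteration, shown earlier):

q := newJobQueue()
q.Push("a")
q.Push("b")
q.Push("c")

// A user-initiated bump: "c" will now be popped first.
q.BringToFront("c")

for {
	name, ok := q.Pop() // moves name from queued to progress
	if !ok {
		break // queue drained
	}
	// ... hand the file to handleFile / the copier here ...
	q.Done(name) // remove from progress once finished
}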

View File

@@ -0,0 +1,200 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
package model
import (
"fmt"
"reflect"
"testing"
)
func TestJobQueue(t *testing.T) {
// Some random actions
q := newJobQueue()
q.Push("f1")
q.Push("f2")
q.Push("f3")
q.Push("f4")
progress, queued := q.Jobs()
if len(progress) != 0 || len(queued) != 4 {
t.Fatal("Wrong length")
}
for i := 1; i < 5; i++ {
n, ok := q.Pop()
if !ok || n != fmt.Sprintf("f%d", i) {
t.Fatal("Wrong element")
}
progress, queued = q.Jobs()
if len(progress) != 1 || len(queued) != 3 {
t.Log(progress)
t.Log(queued)
t.Fatal("Wrong length")
}
q.Done(n)
progress, queued = q.Jobs()
if len(progress) != 0 || len(queued) != 3 {
t.Fatal("Wrong length", len(progress), len(queued))
}
q.Push(n)
progress, queued = q.Jobs()
if len(progress) != 0 || len(queued) != 4 {
t.Fatal("Wrong length")
}
q.Done("f5") // Does not exist
progress, queued = q.Jobs()
if len(progress) != 0 || len(queued) != 4 {
t.Fatal("Wrong length")
}
}
if len(q.progress) > 0 || len(q.queued) != 4 {
t.Fatal("Wrong length")
}
for i := 4; i > 0; i-- {
progress, queued = q.Jobs()
if len(progress) != 4-i || len(queued) != i {
t.Fatal("Wrong length")
}
s := fmt.Sprintf("f%d", i)
q.BringToFront(s)
progress, queued = q.Jobs()
if len(progress) != 4-i || len(queued) != i {
t.Fatal("Wrong length")
}
n, ok := q.Pop()
if !ok || n != s {
t.Fatal("Wrong element")
}
progress, queued = q.Jobs()
if len(progress) != 5-i || len(queued) != i-1 {
t.Fatal("Wrong length")
}
q.Done("f5") // Does not exist
progress, queued = q.Jobs()
if len(progress) != 5-i || len(queued) != i-1 {
t.Fatal("Wrong length")
}
}
_, ok := q.Pop()
if len(q.progress) != 4 || ok {
t.Fatal("Wrong length")
}
q.Done("f1")
q.Done("f2")
q.Done("f3")
q.Done("f4")
q.Done("f5") // Does not exist
_, ok = q.Pop()
if len(q.progress) != 0 || ok {
t.Fatal("Wrong length")
}
progress, queued = q.Jobs()
if len(progress) != 0 || len(queued) != 0 {
t.Fatal("Wrong length")
}
q.BringToFront("")
q.Done("f5") // Does not exist
progress, queued = q.Jobs()
if len(progress) != 0 || len(queued) != 0 {
t.Fatal("Wrong length")
}
}
func TestBringToFront(t *testing.T) {
q := newJobQueue()
q.Push("f1")
q.Push("f2")
q.Push("f3")
q.Push("f4")
_, queued := q.Jobs()
if !reflect.DeepEqual(queued, []string{"f1", "f2", "f3", "f4"}) {
t.Errorf("Incorrect order %v at start", queued)
}
q.BringToFront("f1") // corner case: does nothing
_, queued = q.Jobs()
if !reflect.DeepEqual(queued, []string{"f1", "f2", "f3", "f4"}) {
t.Errorf("Incorrect order %v", queued)
}
q.BringToFront("f3")
_, queued = q.Jobs()
if !reflect.DeepEqual(queued, []string{"f3", "f1", "f2", "f4"}) {
t.Errorf("Incorrect order %v", queued)
}
q.BringToFront("f2")
_, queued = q.Jobs()
if !reflect.DeepEqual(queued, []string{"f2", "f3", "f1", "f4"}) {
t.Errorf("Incorrect order %v", queued)
}
q.BringToFront("f4") // corner case: last element
_, queued = q.Jobs()
if !reflect.DeepEqual(queued, []string{"f4", "f2", "f3", "f1"}) {
t.Errorf("Incorrect order %v", queued)
}
}
func BenchmarkJobQueueBump(b *testing.B) {
files := genFiles(b.N)
q := newJobQueue()
for _, f := range files {
q.Push(f.Name)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
q.BringToFront(files[i].Name)
}
}
func BenchmarkJobQueuePushPopDone10k(b *testing.B) {
files := genFiles(10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
q := newJobQueue()
for _, f := range files {
q.Push(f.Name)
}
for range files {
n, _ := q.Pop()
q.Done(n)
}
}
}

View File

@@ -17,6 +17,7 @@ package model
import (
"fmt"
"math/rand"
"time"
)
@@ -63,7 +64,9 @@ func (s *Scanner) Serve() {
return
}
timer.Reset(s.intv)
// Sleep a random time between 3/4 and 5/4 of the configured interval.
sleepNanos := (s.intv.Nanoseconds()*3 + rand.Int63n(2*s.intv.Nanoseconds())) / 4
timer.Reset(time.Duration(sleepNanos) * time.Nanosecond)
}
}
}
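The expression maps a uniform draw over [0, 2·intv) into [3·intv/4, 5·intv/4), so a 60 s interval yields a sleep somewhere in [45 s, 75 s). A quick standalone check of the arithmetic:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	intv := 60 * time.Second
	for i := 0; i < 3; i++ {
		// (3*intv + U[0, 2*intv)) / 4  ==  U[0.75*intv, 1.25*intv)
		sleepNanos := (intv.Nanoseconds()*3 + rand.Int63n(2*intv.Nanoseconds())) / 4
		fmt.Println(time.Duration(sleepNanos)) // e.g. 52.3s, 71.9s, 46.0s
	}
}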
@@ -75,3 +78,9 @@ func (s *Scanner) Stop() {
func (s *Scanner) String() string {
return fmt.Sprintf("scanner/%s@%p", s.folder, s)
}
func (s *Scanner) BringToFront(string) {}
func (s *Scanner) Jobs() ([]string, []string) {
return nil, nil
}

View File

@@ -110,6 +110,17 @@ func (s *sharedPullerState) tempFile() (io.WriterAt, error) {
flags := os.O_WRONLY
if s.reused == 0 {
flags |= os.O_CREATE | os.O_EXCL
} else {
// With sufficiently bad luck when exiting or crashing, we may have
// had time to chmod the temp file to read-only state but not yet
// moved it to its final name. This leaves us with a read-only temp
// file that we're going to try to reuse. To handle that, we need to
// make sure we have write permissions on the file before opening it.
err := os.Chmod(s.tempName, 0644)
if err != nil {
s.earlyCloseLocked("dst create chmod", err)
return nil, err
}
}
fd, err := os.OpenFile(s.tempName, flags, 0644)
if err != nil {

View File

@@ -69,6 +69,17 @@ func (f FileInfo) HasPermissionBits() bool {
return f.Flags&FlagNoPermBits == 0
}
func (f FileInfo) ToTruncated() FileInfoTruncated {
return FileInfoTruncated{
Name: f.Name,
Flags: f.Flags,
Modified: f.Modified,
Version: f.Version,
LocalVersion: f.LocalVersion,
NumBlocks: uint32(len(f.Blocks)),
}
}
// Used for unmarshalling a FileInfo structure but skipping the actual block list
type FileInfoTruncated struct {
Name string // max:8192

View File

@@ -130,6 +130,24 @@ func Verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error {
return nil
}
func VerifyBuffer(buf []byte, block protocol.BlockInfo) ([]byte, error) {
if len(buf) != int(block.Size) {
return nil, fmt.Errorf("length mismatch %d != %d", len(buf), block.Size)
}
hf := sha256.New()
_, err := hf.Write(buf)
if err != nil {
return nil, err
}
hash := hf.Sum(nil)
if !bytes.Equal(hash, block.Hash) {
return hash, fmt.Errorf("hash mismatch %x != %x", hash, block.Hash)
}
return hash, nil
}
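Note that VerifyBuffer returns the computed hash even on mismatch, which is what lets the copier log "expected %q got %q" above. A self-contained equivalent of the hash check (the length check is omitted here for brevity):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// verifyBuffer mirrors the core of scanner.VerifyBuffer: recompute the
// SHA-256 of buf and compare it to the expected block hash, returning
// the computed hash either way so callers can log both values.
func verifyBuffer(buf, wantHash []byte) ([]byte, error) {
	sum := sha256.Sum256(buf)
	if !bytes.Equal(sum[:], wantHash) {
		return sum[:], fmt.Errorf("hash mismatch %x != %x", sum, wantHash)
	}
	return sum[:], nil
}

func main() {
	data := []byte("hello, world\n")
	want := sha256.Sum256(data)
	_, err := verifyBuffer(data, want[:])
	fmt.Println(err) // <nil>
	_, err = verifyBuffer(data[:5], want[:])
	fmt.Println(err) // hash mismatch ...
}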
// BlocksEqual returns whether two slices of blocks are exactly the same
// hash and index pairwise.
func BlocksEqual(src, tgt []protocol.BlockInfo) bool {

View File

@@ -254,9 +254,11 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
// - was not a directory previously (since it's a file now)
// - was not a symlink (since it's a file now)
// - was not invalid (since it looks valid now)
// - has the same size as previously
cf := w.CurrentFiler.CurrentFile(rn)
permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
if permUnchanged && !cf.IsDeleted() && cf.Modified == info.ModTime().Unix() && !cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() {
if permUnchanged && !cf.IsDeleted() && cf.Modified == info.ModTime().Unix() && !cf.IsDirectory() &&
!cf.IsSymlink() && !cf.IsInvalid() && cf.Size() == info.Size() {
return nil
}

View File

@@ -87,8 +87,18 @@ func ToURL(url string) error {
}
}
// Returns 1 if a>b, -1 if a<b and 0 if they are equal
func CompareVersions(a, b string) int {
type Relation int
const (
MajorOlder Relation = -2 // Older by a major version (x in x.y.z or 0.x.y).
Older = -1 // Older by a minor version (y or z in x.y.z, or y in 0.x.y).
Equal = 0 // Versions are semantically equal.
Newer = 1 // Newer by a minor version (y or z in x.y.z, or y in 0.x.y).
MajorNewer = 2 // Newer by a major version (x in x.y.z or 0.x.y).
)
// Returns a relation describing how a compares to b.
func CompareVersions(a, b string) Relation {
arel, apre := versionParts(a)
brel, bpre := versionParts(b)
@@ -100,27 +110,39 @@ func CompareVersions(a, b string) int {
// First compare major-minor-patch versions
for i := 0; i < minlen; i++ {
if arel[i] < brel[i] {
return -1
if i == 0 {
return MajorOlder
}
if i == 1 && arel[0] == 0 {
return MajorOlder
}
return Older
}
if arel[i] > brel[i] {
return 1
if i == 0 {
return MajorNewer
}
if i == 1 && arel[0] == 0 {
return MajorNewer
}
return Newer
}
}
// Longer version is newer when the preceding parts are equal
if len(arel) < len(brel) {
return -1
return Older
}
if len(arel) > len(brel) {
return 1
return Newer
}
// Prerelease versions are older if the versions are otherwise equal
if len(apre) == 0 && len(bpre) > 0 {
return 1
return Newer
}
if len(apre) > 0 && len(bpre) == 0 {
return -1
return Older
}
minlen = len(apre)
@@ -135,24 +157,24 @@ func CompareVersions(a, b string) int {
switch bv := bpre[i].(type) {
case int:
if av < bv {
return -1
return Older
}
if av > bv {
return 1
return Newer
}
case string:
return -1
return Older
}
case string:
switch bv := bpre[i].(type) {
case int:
return 1
return Newer
case string:
if av < bv {
return -1
return Older
}
if av > bv {
return 1
return Newer
}
}
}
@@ -160,14 +182,14 @@ func CompareVersions(a, b string) int {
// If all else is equal, longer prerelease string is newer
if len(apre) < len(bpre) {
return -1
return Older
}
if len(apre) > len(bpre) {
return 1
return Newer
}
// Looks like they're actually the same
return 0
return Equal
}
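With the Relation type, callers can react to how far apart two versions are rather than just to their ordering. A hedged sketch, written as if inside the same package (the exact import path is not shown in this diff); note that in 0.x.y versions the x component counts as major:

func describeUpgrade(remote, local string) string {
	switch CompareVersions(remote, local) {
	case MajorNewer:
		return "major upgrade available (possibly incompatible)"
	case Newer:
		return "minor upgrade available"
	case Equal:
		return "up to date"
	default: // Older, MajorOlder
		return "running a newer version than the release"
	}
}

// describeUpgrade("0.10.2", "0.9.18") == "major upgrade available (possibly incompatible)"
// because the second component counts as major when the first is 0.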
// Split a version into parts.

View File

@@ -19,33 +19,37 @@ import "testing"
var testcases = []struct {
a, b string
r int
r Relation
}{
{"0.1.2", "0.1.2", 0},
{"0.1.3", "0.1.2", 1},
{"0.1.1", "0.1.2", -1},
{"0.3.0", "0.1.2", 1},
{"0.0.9", "0.1.2", -1},
{"1.1.2", "0.1.2", 1},
{"0.1.2", "1.1.2", -1},
{"0.1.10", "0.1.9", 1},
{"0.10.0", "0.2.0", 1},
{"30.10.0", "4.9.0", 1},
{"0.9.0-beta7", "0.9.0-beta6", 1},
{"1.0.0-alpha", "1.0.0-alpha.1", -1},
{"1.0.0-alpha.1", "1.0.0-alpha.beta", -1},
{"1.0.0-alpha.beta", "1.0.0-beta", -1},
{"1.0.0-beta", "1.0.0-beta.2", -1},
{"1.0.0-beta.2", "1.0.0-beta.11", -1},
{"1.0.0-beta.11", "1.0.0-rc.1", -1},
{"1.0.0-rc.1", "1.0.0", -1},
{"1.0.0+45", "1.0.0+23-dev-foo", 0},
{"1.0.0-beta.23+45", "1.0.0-beta.23+23-dev-foo", 0},
{"1.0.0-beta.3+99", "1.0.0-beta.24+0", -1},
{"0.1.2", "0.1.2", Equal},
{"0.1.3", "0.1.2", Newer},
{"0.1.1", "0.1.2", Older},
{"0.3.0", "0.1.2", MajorNewer},
{"0.0.9", "0.1.2", MajorOlder},
{"1.3.0", "1.1.2", Newer},
{"1.0.9", "1.1.2", Older},
{"2.3.0", "1.1.2", MajorNewer},
{"1.0.9", "2.1.2", MajorOlder},
{"1.1.2", "0.1.2", MajorNewer},
{"0.1.2", "1.1.2", MajorOlder},
{"0.1.10", "0.1.9", Newer},
{"0.10.0", "0.2.0", MajorNewer},
{"30.10.0", "4.9.0", MajorNewer},
{"0.9.0-beta7", "0.9.0-beta6", Newer},
{"1.0.0-alpha", "1.0.0-alpha.1", Older},
{"1.0.0-alpha.1", "1.0.0-alpha.beta", Older},
{"1.0.0-alpha.beta", "1.0.0-beta", Older},
{"1.0.0-beta", "1.0.0-beta.2", Older},
{"1.0.0-beta.2", "1.0.0-beta.11", Older},
{"1.0.0-beta.11", "1.0.0-rc.1", Older},
{"1.0.0-rc.1", "1.0.0", Older},
{"1.0.0+45", "1.0.0+23-dev-foo", Equal},
{"1.0.0-beta.23+45", "1.0.0-beta.23+23-dev-foo", Equal},
{"1.0.0-beta.3+99", "1.0.0-beta.24+0", Older},
{"v1.1.2", "1.1.2", 0},
{"v1.1.2", "V1.1.2", 0},
{"1.1.2", "V1.1.2", 0},
{"v1.1.2", "1.1.2", Equal},
{"v1.1.2", "V1.1.2", Equal},
{"1.1.2", "V1.1.2", Equal},
}
func TestCompareVersions(t *testing.T) {

View File

@@ -158,7 +158,7 @@ Mx: %d
var results []IGD
resultChannel := make(chan IGD, 8)
socket, err := net.ListenUDP("udp4", &net.UDPAddr{})
socket, err := net.ListenMulticastUDP("udp4", nil, &net.UDPAddr{IP: ssdp.IP})
if err != nil {
l.Infoln(err)
return results

View File

@@ -232,7 +232,9 @@ func (v Staggered) expire(versions []string) {
versionTime, err := time.Parse(TimeFormat, filenameTag(file))
if err != nil {
l.Infof("Versioner: file name %q is invalid: %v", file, err)
if debug {
l.Debugf("Versioner: file name %q is invalid: %v", file, err)
}
continue
}
age := int64(time.Since(versionTime).Seconds())

View File

@@ -20,6 +20,7 @@ package integration
import (
"os"
"os/exec"
"path/filepath"
"testing"
"time"
)
@@ -54,6 +55,14 @@ func TestCLIReset(t *testing.T) {
t.Errorf("%s still exists", dir)
}
}
// Clean up
dirs, err = filepath.Glob("*.syncthing-reset-*")
if err != nil {
t.Fatal(err)
}
removeAll(dirs...)
}
func TestCLIGenerate(t *testing.T) {

View File

@@ -20,6 +20,7 @@ package integration
import (
"fmt"
"log"
"os"
"testing"
"time"
@@ -27,7 +28,7 @@ import (
"github.com/syncthing/syncthing/internal/protocol"
)
func TestSyncCluster(t *testing.T) {
func TestSyncClusterWithoutVersioning(t *testing.T) {
// Use no versioning
id, _ := protocol.DeviceIDFromString(id2)
cfg, _ := config.Load("h2/config.xml", id)
@@ -103,6 +104,20 @@ func testSyncCluster(t *testing.T) {
t.Fatal(err)
}
// We'll use this file for appending data without modifying the time stamp.
fd, err := os.Create("s1/appendfile")
if err != nil {
t.Fatal(err)
}
_, err = fd.WriteString("hello\n")
if err != nil {
t.Fatal(err)
}
err = fd.Close()
if err != nil {
t.Fatal(err)
}
err = generateFiles("s2", 1000, 21, "../LICENSE")
if err != nil {
t.Fatal(err)
@@ -117,18 +132,40 @@ func testSyncCluster(t *testing.T) {
t.Fatal(err)
}
// Prepare the expected state of folders after the sync
c1, err := directoryContents("s1")
if err != nil {
t.Fatal(err)
}
c2, err := directoryContents("s2")
if err != nil {
t.Fatal(err)
}
c3, err := directoryContents("s3")
if err != nil {
t.Fatal(err)
}
e1 := mergeDirectoryContents(c1, c2, c3)
e2, err := directoryContents("s12-1")
if err != nil {
t.Fatal(err)
}
e3, err := directoryContents("s23-2")
if err != nil {
t.Fatal(err)
}
expected := [][]fileInfo{e1, e2, e3}
// Start the syncers
p, err := scStartProcesses()
if err != nil {
t.Fatal(err)
}
// Prepare the expected state of folders after the sync
e1 := mergeDirectoryContents(directoryContents("s1"),
directoryContents("s2"),
directoryContents("s3"))
e2 := directoryContents("s12-1")
e3 := directoryContents("s23-2")
expected := [][]fileInfo{e1, e2, e3}
defer func() {
for i := range p {
p[i].stop()
}
}()
for count := 0; count < 5; count++ {
log.Println("Forcing rescan...")
@@ -170,15 +207,46 @@ func testSyncCluster(t *testing.T) {
break
}
// Prepare the expected state of folders after the sync
e1 = directoryContents("s1")
e2 = directoryContents("s12-1")
e3 = directoryContents("s23-2")
expected = [][]fileInfo{e1, e2, e3}
}
// Alter the "appendfile" without changing its modification time. Sneaky!
fi, err := os.Stat("s1/appendfile")
if err != nil {
t.Fatal(err)
}
fd, err := os.OpenFile("s1/appendfile", os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
t.Fatal(err)
}
_, err = fd.Seek(0, os.SEEK_END)
if err != nil {
t.Fatal(err)
}
_, err = fd.WriteString("more data\n")
if err != nil {
t.Fatal(err)
}
err = fd.Close()
if err != nil {
t.Fatal(err)
}
err = os.Chtimes("s1/appendfile", fi.ModTime(), fi.ModTime())
if err != nil {
t.Fatal(err)
}
for i := range p {
p[i].stop()
// Prepare the expected state of folders after the sync
e1, err = directoryContents("s1")
if err != nil {
t.Fatal(err)
}
e2, err = directoryContents("s12-1")
if err != nil {
t.Fatal(err)
}
e3, err = directoryContents("s23-2")
if err != nil {
t.Fatal(err)
}
expected = [][]fileInfo{e1, e2, e3}
}
}
@@ -260,21 +328,30 @@ mainLoop:
log.Println("Checking...")
for _, dir := range []string{"s1", "s2", "s3"} {
actual := directoryContents(dir)
actual, err := directoryContents(dir)
if err != nil {
return err
}
if err := compareDirectoryContents(actual, expected[0]); err != nil {
return fmt.Errorf("%s: %v", dir, err)
}
}
for _, dir := range []string{"s12-1", "s12-2"} {
actual := directoryContents(dir)
actual, err := directoryContents(dir)
if err != nil {
return err
}
if err := compareDirectoryContents(actual, expected[1]); err != nil {
return fmt.Errorf("%s: %v", dir, err)
}
}
for _, dir := range []string{"s23-2", "s23-3"} {
actual := directoryContents(dir)
actual, err := directoryContents(dir)
if err != nil {
return err
}
if err := compareDirectoryContents(actual, expected[2]); err != nil {
return fmt.Errorf("%s: %v", dir, err)
}

View File

@@ -227,10 +227,11 @@ func compareDirectories(dirs ...string) error {
for i := range chans {
chans[i] = make(chan fileInfo)
}
errcs := make([]chan error, len(dirs))
abort := make(chan struct{})
for i := range dirs {
startWalker(dirs[i], chans[i], abort)
errcs[i] = startWalker(dirs[i], chans[i], abort)
}
res := make([]fileInfo, len(dirs))
@@ -239,6 +240,11 @@ func compareDirectories(dirs ...string) error {
for i := range chans {
fi, ok := <-chans[i]
if !ok {
err, hasError := <-errcs[i]
if hasError {
close(abort)
return err
}
numDone++
}
res[i] = fi
@@ -257,16 +263,16 @@ func compareDirectories(dirs ...string) error {
}
}
func directoryContents(dir string) []fileInfo {
func directoryContents(dir string) ([]fileInfo, error) {
res := make(chan fileInfo)
startWalker(dir, res, nil)
errc := startWalker(dir, res, nil)
var files []fileInfo
for f := range res {
files = append(files, f)
}
return files
return files, <-errc
}
func mergeDirectoryContents(c ...[]fileInfo) []fileInfo {
@@ -311,6 +317,10 @@ type fileInfo struct {
hash [16]byte
}
func (f fileInfo) String() string {
return fmt.Sprintf("%s %04o %d %x", f.name, f.mode, f.mod, f.hash)
}
type fileInfoList []fileInfo
func (l fileInfoList) Len() int {
@@ -325,7 +335,7 @@ func (l fileInfoList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) {
func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan error {
walker := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -381,10 +391,17 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) {
return errors.New("abort")
}
}
errc := make(chan error)
go func() {
filepath.Walk(dir, walker)
err := filepath.Walk(dir, walker)
close(res)
if err != nil {
errc <- err
}
close(errc)
}()
return errc
}
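startWalker closes res before reporting on errc, so a consumer can drain all results first and then check for a terminal error; receiving from the closed errc yields nil on success. The same pattern in miniature (with errc buffered here so the producer never blocks even if nobody reads the error; the version above relies on directoryContents and compareDirectories always reading it):

package main

import (
	"errors"
	"fmt"
)

// produce streams values on res, then reports at most one error on the
// returned channel. res is closed before the error is sent, so the
// consumer can range over res and check errc afterwards.
func produce(res chan<- int, fail bool) <-chan error {
	errc := make(chan error, 1)
	go func() {
		for i := 0; i < 3; i++ {
			res <- i
		}
		close(res)
		if fail {
			errc <- errors.New("walk failed")
		}
		close(errc)
	}()
	return errc
}

func main() {
	res := make(chan int)
	errc := produce(res, true)
	for v := range res {
		fmt.Println(v)
	}
	fmt.Println(<-errc) // walk failed (or <nil> on success)
}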
func md5file(fname string) (hash [16]byte, err error) {