Mirror of https://github.com/syncthing/syncthing.git, synced 2026-01-31 09:11:26 -05:00

Compare commits: v1.4.0-rc. ... v1.4.0 (23 commits)
| Author | SHA1 | Date |
|---|---|---|
| | db02545ef3 | |
| | 52e72e0122 | |
| | d1e0a38c04 | |
| | 0b610017ea | |
| | 9a1df97c69 | |
| | ee61da5b6a | |
| | a5e12a0a3d | |
| | 4f29180e7c | |
| | 0fb2cd52ff | |
| | a4bd4d118a | |
| | bb375b1aff | |
| | 05e23f1991 | |
| | 71de6fe290 | |
| | 6a840a040b | |
| | c3637f2191 | |
| | ca90f4e6af | |
| | 51fa36d61f | |
| | d95a087829 | |
| | a728743c86 | |
| | ce27780a4c | |
| | 0df39ddc72 | |
| | b84aa114be | |
| | a596e5e2f0 | |
build.go (21 changed lines)
@@ -654,7 +654,11 @@ func shouldBuildSyso(dir string) (string, error) {
 	}

 	jsonPath := filepath.Join(dir, "versioninfo.json")
-	ioutil.WriteFile(jsonPath, bs, 0644)
+	err = ioutil.WriteFile(jsonPath, bs, 0644)
+	if err != nil {
+		return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
+	}

 	defer func() {
 		if err := os.Remove(jsonPath); err != nil {
 			log.Printf("Warning: unable to remove generated %s: %v. Please remove it manually.", jsonPath, err)
@@ -860,13 +864,22 @@ func getVersion() string {
 	return "unknown-dev"
 }

-func semanticVersion() (major, minor, patch, build string) {
+func semanticVersion() (major, minor, patch, build int) {
 	r := regexp.MustCompile(`v(?P<Major>\d+)\.(?P<Minor>\d+).(?P<Patch>\d+).*\+(?P<CommitsAhead>\d+)`)
 	matches := r.FindStringSubmatch(getVersion())
 	if len(matches) != 5 {
-		return "0", "0", "0", "0"
+		return 0, 0, 0, 0
 	}
-	return matches[1], matches[2], matches[3], matches[4]
+
+	var ints [4]int
+	for i := 1; i < 5; i++ {
+		value, err := strconv.Atoi(matches[i])
+		if err != nil {
+			return 0, 0, 0, 0
+		}
+		ints[i-1] = value
+	}
+	return ints[0], ints[1], ints[2], ints[3]
 }

 func getBranchSuffix() string {
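For illustration, here is a self-contained sketch of the same parse-then-convert pattern that the new semanticVersion uses: match the components as strings, then turn each one into an int with strconv.Atoi, falling back to all zeroes on any failure. The function name and sample version string below are invented for the example, not part of the patch.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// parseSemver extracts major.minor.patch and the commits-ahead count from a
// version string, returning zeroes when the string doesn't match or a
// component fails to convert.
func parseSemver(version string) (major, minor, patch, build int) {
	r := regexp.MustCompile(`v(\d+)\.(\d+)\.(\d+).*\+(\d+)`)
	matches := r.FindStringSubmatch(version)
	if len(matches) != 5 {
		return 0, 0, 0, 0
	}
	var ints [4]int
	for i := 1; i < 5; i++ {
		value, err := strconv.Atoi(matches[i])
		if err != nil {
			return 0, 0, 0, 0
		}
		ints[i-1] = value
	}
	return ints[0], ints[1], ints[2], ints[3]
}

func main() {
	fmt.Println(parseSemver("v1.4.0-rc.8+21-gdeadbee")) // 1 4 0 21
}
```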
@@ -7,6 +7,7 @@
 package main

 import (
+	"context"
 	"crypto/tls"
 	"errors"
 	"flag"
@@ -95,7 +96,7 @@ func checkServer(deviceID protocol.DeviceID, server string) checkResult {
 	})

 	go func() {
-		addresses, err := disco.Lookup(deviceID)
+		addresses, err := disco.Lookup(context.Background(), deviceID)
 		res <- checkResult{addresses: addresses, error: err}
 	}()
@@ -148,7 +148,7 @@ func idxck(ldb backend.Backend) (success bool) {
 		}
 	}

-	if fi.BlocksHash != nil {
+	if len(fi.Blocks) == 0 && len(fi.BlocksHash) != 0 {
 		key := string(fi.BlocksHash)
 		if _, ok := blocklists[key]; !ok {
 			fmt.Printf("Missing block list for file %q, block list hash %x\n", fi.Name, fi.BlocksHash)
@@ -119,8 +119,8 @@ are mostly useful for developers. Use with care.
                     "h", "m" and "s" abbreviations for hours minutes and seconds.
                     Valid values are like "720h", "30s", etc.

-STGCBLOCKSEVERY     Set to a time interval to override the default database
-                    block GC interval of 13 hours. Same format as the
+STGCINDIRECTEVERY   Set to a time interval to override the default database
+                    indirection GC interval of 13 hours. Same format as the
                     STRECHECKDBEVERY variable.

 GOMAXPROCS          Set the maximum number of CPU cores to use. Defaults to all
@@ -7,6 +7,7 @@
 package api

 import (
+	"context"
 	"time"

 	"github.com/syncthing/syncthing/lib/discover"
@@ -26,7 +27,7 @@ func (m *mockedCachingMux) Stop() {

 // from events.Finder

-func (m *mockedCachingMux) Lookup(deviceID protocol.DeviceID) (direct []string, err error) {
+func (m *mockedCachingMux) Lookup(ctx context.Context, deviceID protocol.DeviceID) (direct []string, err error) {
 	return nil, nil
 }
@@ -360,6 +360,12 @@ func (s *service) connect(ctx context.Context) {
 		var seen []string

 		for _, deviceCfg := range cfg.Devices {
+			select {
+			case <-ctx.Done():
+				return
+			default:
+			}
+
 			deviceID := deviceCfg.DeviceID
 			if deviceID == s.myID {
 				continue
@@ -380,7 +386,7 @@ func (s *service) connect(ctx context.Context) {
 			for _, addr := range deviceCfg.Addresses {
 				if addr == "dynamic" {
 					if s.discoverer != nil {
-						if t, err := s.discoverer.Lookup(deviceID); err == nil {
+						if t, err := s.discoverer.Lookup(ctx, deviceID); err == nil {
 							addrs = append(addrs, t...)
 						}
 					}
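The connect loop now checks for cancellation at the top of every iteration with a non-blocking select, so a shutdown doesn't have to wait for the whole device list to be walked. A minimal standalone sketch of that pattern (the work function and item list are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// processAll bails out between items as soon as the context is cancelled,
// using the same non-blocking select shown in the diff above.
func processAll(ctx context.Context, items []string) {
	for _, item := range items {
		select {
		case <-ctx.Done():
			return // cancelled; stop between items
		default:
			// not cancelled; fall through and do the work
		}
		fmt.Println("processing", item)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	processAll(ctx, []string{"a", "b", "c"})
}
```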
@@ -48,10 +48,15 @@ type ReadTransaction interface {
 // purposes of saving memory when transactions are in-RAM. Note that
 // transactions may be checkpointed *anyway* even if this is not called, due to
 // resource constraints, but this gives you a chance to decide when.
+//
+// Functions can be passed to Checkpoint. These are run if and only if the
+// checkpoint will result in a flush, and will run before the flush. The
+// transaction can be accessed via a closure. If an error is returned from
+// these functions the flush will be aborted and the error bubbled.
 type WriteTransaction interface {
 	ReadTransaction
 	Writer
-	Checkpoint() error
+	Checkpoint(...func() error) error
 	Commit() error
 }
@@ -37,14 +37,15 @@ func (b *leveldbBackend) NewReadTransaction() (ReadTransaction, error) {
 }

 func (b *leveldbBackend) newSnapshot() (leveldbSnapshot, error) {
-	snap, err := b.ldb.GetSnapshot()
-	if err != nil {
-		return leveldbSnapshot{}, wrapLeveldbErr(err)
-	}
 	rel, err := newReleaser(b.closeWG)
 	if err != nil {
 		return leveldbSnapshot{}, err
 	}
+	snap, err := b.ldb.GetSnapshot()
+	if err != nil {
+		rel.Release()
+		return leveldbSnapshot{}, wrapLeveldbErr(err)
+	}
 	return leveldbSnapshot{
 		snap: snap,
 		rel:  rel,
@@ -52,14 +53,15 @@ func (b *leveldbBackend) newSnapshot() (leveldbSnapshot, error) {
 }

 func (b *leveldbBackend) NewWriteTransaction() (WriteTransaction, error) {
-	snap, err := b.newSnapshot()
-	if err != nil {
-		return nil, err // already wrapped
-	}
 	rel, err := newReleaser(b.closeWG)
 	if err != nil {
 		return nil, err
 	}
+	snap, err := b.newSnapshot()
+	if err != nil {
+		rel.Release()
+		return nil, err // already wrapped
+	}
 	return &leveldbTransaction{
 		leveldbSnapshot: snap,
 		ldb:             b.ldb,
@@ -148,8 +150,8 @@ func (t *leveldbTransaction) Put(key, val []byte) error {
 	return t.checkFlush(dbFlushBatchMax)
 }

-func (t *leveldbTransaction) Checkpoint() error {
-	return t.checkFlush(dbFlushBatchMin)
+func (t *leveldbTransaction) Checkpoint(preFlush ...func() error) error {
+	return t.checkFlush(dbFlushBatchMin, preFlush...)
 }

 func (t *leveldbTransaction) Commit() error {
@@ -165,10 +167,15 @@ func (t *leveldbTransaction) Release() {
 }

 // checkFlush flushes and resets the batch if its size exceeds the given size.
-func (t *leveldbTransaction) checkFlush(size int) error {
+func (t *leveldbTransaction) checkFlush(size int, preFlush ...func() error) error {
 	if len(t.batch.Dump()) < size {
 		return nil
 	}
+	for _, hook := range preFlush {
+		if err := hook(); err != nil {
+			return err
+		}
+	}
 	return t.flush()
 }
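To make the pre-flush hook contract concrete, here is a toy write transaction, invented for this example, that follows the same rules as the Checkpoint change above: hooks run if and only if the checkpoint will actually flush, and an error from a hook aborts the flush.

```go
package main

import "fmt"

// tx is a toy transaction with a batch and a flush threshold, standing in
// for the leveldb transaction in the diff above.
type tx struct {
	batch []string
	max   int
}

func (t *tx) Put(v string) error {
	t.batch = append(t.batch, v)
	return t.checkFlush(t.max)
}

func (t *tx) Checkpoint(preFlush ...func() error) error {
	return t.checkFlush(1, preFlush...) // lower threshold than Put uses
}

func (t *tx) checkFlush(size int, preFlush ...func() error) error {
	if len(t.batch) < size {
		return nil // below the threshold; hooks intentionally do not run
	}
	for _, hook := range preFlush {
		if err := hook(); err != nil {
			return err // abort the flush, bubble the error
		}
	}
	fmt.Println("flushing", len(t.batch), "items")
	t.batch = t.batch[:0]
	return nil
}

func main() {
	t := &tx{max: 100}
	_ = t.Put("a")
	_ = t.Checkpoint(func() error {
		fmt.Println("persisting metadata before the flush")
		return nil
	})
}
```

This is the shape updateLocalFiles and updateRemoteFiles rely on further down: the folder metadata is written inside the hook, so it hits the database in the same flush as the file entries it describes.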
@@ -76,7 +76,7 @@ func addToBlockMap(db *Lowlevel, folder []byte, fs []protocol.FileInfo) error {
 			}
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

 func discardFromBlockMap(db *Lowlevel, folder []byte, fs []protocol.FileInfo) error {
@@ -101,7 +101,7 @@ func discardFromBlockMap(db *Lowlevel, folder []byte, fs []protocol.FileInfo) er
 			}
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

 func TestBlockMapAddUpdateWipe(t *testing.T) {
@@ -19,22 +19,25 @@ import (
 )

 const (
-	// We set the bloom filter capacity to handle 100k individual block lists
-	// with a false positive probability of 1% for the first pass. Once we know
-	// how many block lists we have we will use that number instead, if it's
-	// more than 100k. For fewer than 100k block lists we will just get better
-	// false positive rate instead.
-	blockGCBloomCapacity          = 100000
-	blockGCBloomFalsePositiveRate = 0.01 // 1%
-	blockGCDefaultInterval        = 13 * time.Hour
-	blockGCTimeKey                = "lastBlockGCTime"
+	// We set the bloom filter capacity to handle 100k individual items with
+	// a false positive probability of 1% for the first pass. Once we know
+	// how many items we have we will use that number instead, if it's more
+	// than 100k. For fewer than 100k items we will just get better false
+	// positive rate instead.
+	indirectGCBloomCapacity          = 100000
+	indirectGCBloomFalsePositiveRate = 0.01 // 1%
+	indirectGCDefaultInterval        = 13 * time.Hour
+	indirectGCTimeKey                = "lastIndirectGCTime"
+
+	// Use indirection for the block list when it exceeds this many entries
+	blocksIndirectionCutoff = 3
 )

-var blockGCInterval = blockGCDefaultInterval
+var indirectGCInterval = indirectGCDefaultInterval

 func init() {
-	if dur, err := time.ParseDuration(os.Getenv("STGCBLOCKSEVERY")); err == nil {
-		blockGCInterval = dur
+	if dur, err := time.ParseDuration(os.Getenv("STGCINDIRECTEVERY")); err == nil {
+		indirectGCInterval = dur
 	}
 }
@@ -124,12 +127,18 @@ func (db *Lowlevel) updateRemoteFiles(folder, device []byte, fs []protocol.FileI
 			return err
 		}

-		if err := t.Checkpoint(); err != nil {
+		if err := t.Checkpoint(func() error {
+			return meta.toDB(t, folder)
+		}); err != nil {
 			return err
 		}
 	}

-	return t.commit()
+	if err := meta.toDB(t, folder); err != nil {
+		return err
+	}
+
+	return t.Commit()
 }

 // updateLocalFiles adds fileinfos to the db, and updates the global versionlist,
@@ -227,12 +236,18 @@ func (db *Lowlevel) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta
 			}
 		}

-		if err := t.Checkpoint(); err != nil {
+		if err := t.Checkpoint(func() error {
+			return meta.toDB(t, folder)
+		}); err != nil {
 			return err
 		}
 	}

-	return t.commit()
+	if err := meta.toDB(t, folder); err != nil {
+		return err
+	}
+
+	return t.Commit()
 }

 func (db *Lowlevel) dropFolder(folder []byte) error {
@@ -290,7 +305,7 @@ func (db *Lowlevel) dropFolder(folder []byte) error {
 		return err
 	}

-	return t.commit()
+	return t.Commit()
 }

 func (db *Lowlevel) dropDeviceFolder(device, folder []byte, meta *metadataTracker) error {
@@ -343,7 +358,7 @@ func (db *Lowlevel) dropDeviceFolder(device, folder []byte, meta *metadataTracke
 			return err
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

 func (db *Lowlevel) checkGlobals(folder []byte, meta *metadataTracker) error {
@@ -411,7 +426,7 @@ func (db *Lowlevel) checkGlobals(folder []byte, meta *metadataTracker) error {
 	}

 	l.Debugf("db check completed for %q", folder)
-	return t.commit()
+	return t.Commit()
 }

 func (db *Lowlevel) getIndexID(device, folder []byte) (protocol.IndexID, error) {
@@ -469,22 +484,32 @@ func (db *Lowlevel) dropPrefix(prefix []byte) error {
 	if err := t.deleteKeyPrefix(prefix); err != nil {
 		return err
 	}
-	return t.commit()
+	return t.Commit()
 }

 func (db *Lowlevel) gcRunner() {
-	t := time.NewTimer(db.timeUntil(blockGCTimeKey, blockGCInterval))
+	// Calculate the time for the next GC run. Even if we should run GC
+	// directly, give the system a while to get up and running and do other
+	// stuff first. (We might have migrations and stuff which would be
+	// better off running before GC.)
+	next := db.timeUntil(indirectGCTimeKey, indirectGCInterval)
+	if next < time.Minute {
+		next = time.Minute
+	}
+
+	t := time.NewTimer(next)
 	defer t.Stop()

 	for {
 		select {
 		case <-db.gcStop:
 			return
 		case <-t.C:
-			if err := db.gcBlocks(); err != nil {
-				l.Warnln("Database block GC failed:", err)
+			if err := db.gcIndirect(); err != nil {
+				l.Warnln("Database indirection GC failed:", err)
 			}
-			db.recordTime(blockGCTimeKey)
-			t.Reset(db.timeUntil(blockGCTimeKey, blockGCInterval))
+			db.recordTime(indirectGCTimeKey)
+			t.Reset(db.timeUntil(indirectGCTimeKey, indirectGCInterval))
 		}
 	}
 }
@@ -509,15 +534,16 @@ func (db *Lowlevel) timeUntil(key string, every time.Duration) time.Duration {
 	return sleepTime
 }

-func (db *Lowlevel) gcBlocks() error {
-	// The block GC uses a bloom filter to track used block lists. This means
-	// iterating over all items, adding their block lists to the filter, then
-	// iterating over the block lists and removing those that don't match the
-	// filter. The filter will give false positives so we will keep around one
-	// percent of block lists that we don't really need (at most).
+func (db *Lowlevel) gcIndirect() error {
+	// The indirection GC uses bloom filters to track used block lists and
+	// versions. This means iterating over all items, adding their hashes to
+	// the filter, then iterating over the indirected items and removing
+	// those that don't match the filter. The filter will give false
+	// positives so we will keep around one percent of things that we don't
+	// really need (at most).
 	//
-	// Block GC needs to run when there are no modifications to the FileInfos or
-	// block lists.
+	// Indirection GC needs to run when there are no modifications to the
+	// FileInfos or indirected items.

 	db.gcMut.Lock()
 	defer db.gcMut.Unlock()
@@ -528,30 +554,32 @@ func (db *Lowlevel) gcBlocks() error {
 	}
 	defer t.Release()

-	// Set up the bloom filter with the initial capacity and false positive
-	// rate, or higher capacity if we've done this before and seen lots of block
-	// lists.
+	// Set up the bloom filters with the initial capacity and false positive
+	// rate, or higher capacity if we've done this before and seen lots of
+	// items. For simplicity's sake we track just one count, which is the
+	// highest of the various indirected items.

-	capacity := blockGCBloomCapacity
+	capacity := indirectGCBloomCapacity
 	if db.gcKeyCount > capacity {
 		capacity = db.gcKeyCount
 	}
-	filter := bloom.NewWithEstimates(uint(capacity), blockGCBloomFalsePositiveRate)
+	blockFilter := bloom.NewWithEstimates(uint(capacity), indirectGCBloomFalsePositiveRate)

-	// Iterate the FileInfos, unmarshal the blocks hashes and add them to
-	// the filter.
+	// Iterate the FileInfos, unmarshal the block and version hashes and
+	// add them to the filter.

-	it, err := db.NewPrefixIterator([]byte{KeyTypeDevice})
+	it, err := t.NewPrefixIterator([]byte{KeyTypeDevice})
 	if err != nil {
 		return err
 	}
 	defer it.Release()
 	for it.Next() {
 		var bl BlocksHashOnly
 		if err := bl.Unmarshal(it.Value()); err != nil {
 			return err
 		}
 		if len(bl.BlocksHash) > 0 {
-			filter.Add(bl.BlocksHash)
+			blockFilter.Add(bl.BlocksHash)
 		}
 	}
 	it.Release()
@@ -562,15 +590,16 @@ func (db *Lowlevel) gcBlocks() error {
 	// Iterate over block lists, removing keys with hashes that don't match
 	// the filter.

-	it, err = db.NewPrefixIterator([]byte{KeyTypeBlockList})
+	it, err = t.NewPrefixIterator([]byte{KeyTypeBlockList})
 	if err != nil {
 		return err
 	}
-	matched := 0
 	defer it.Release()
+	matchedBlocks := 0
 	for it.Next() {
 		key := blockListKey(it.Key())
-		if filter.Test(key.BlocksHash()) {
-			matched++
+		if blockFilter.Test(key.BlocksHash()) {
+			matchedBlocks++
 			continue
 		}
 		if err := t.Delete(key); err != nil {
@@ -583,7 +612,7 @@ func (db *Lowlevel) gcBlocks() error {
 	}

 	// Remember the number of unique keys we kept until the next pass.
-	db.gcKeyCount = matched
+	db.gcKeyCount = matchedBlocks

 	if err := t.Commit(); err != nil {
 		return err
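The GC here is a bloom-filter mark-and-sweep: mark every hash that is still referenced, then sweep stored entries that don't test positive. A false positive only means a dead entry survives one more cycle; there are no false negatives, so live data is never deleted. A minimal sketch of that two-phase shape, assuming the willf/bloom package (the bloom library syncthing used around this time; the hash values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/willf/bloom"
)

func main() {
	// Mark phase: add every hash that is still referenced.
	filter := bloom.NewWithEstimates(100000, 0.01)
	live := [][]byte{[]byte("hash-a"), []byte("hash-b")}
	for _, h := range live {
		filter.Add(h)
	}

	// Sweep phase: delete stored entries whose hash does not test positive.
	stored := [][]byte{[]byte("hash-a"), []byte("hash-b"), []byte("hash-c")}
	for _, h := range stored {
		if filter.Test(h) {
			continue // (possibly) still referenced; keep it
		}
		fmt.Printf("deleting %s\n", h) // definitely unreferenced
	}
}
```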
@@ -28,8 +28,8 @@ type metadataTracker struct {
 }

 type metaKey struct {
-	dev   protocol.DeviceID
-	flags uint32
+	dev  protocol.DeviceID
+	flag uint32
 }

 func newMetadataTracker() *metadataTracker {
@@ -62,8 +62,8 @@ func (m *metadataTracker) Marshal() ([]byte, error) {

 // toDB saves the marshalled metadataTracker to the given db, under the key
 // corresponding to the given folder
-func (m *metadataTracker) toDB(db *Lowlevel, folder []byte) error {
-	key, err := db.keyer.GenerateFolderMetaKey(nil, folder)
+func (m *metadataTracker) toDB(t readWriteTransaction, folder []byte) error {
+	key, err := t.keyer.GenerateFolderMetaKey(nil, folder)
 	if err != nil {
 		return err
 	}
@@ -79,7 +79,7 @@ func (m *metadataTracker) toDB(db *Lowlevel, folder []byte) error {
 	if err != nil {
 		return err
 	}
-	err = db.Put(key, bs)
+	err = t.Put(key, bs)
 	if err == nil {
 		m.dirty = false
 	}
@@ -103,14 +103,18 @@ func (m *metadataTracker) fromDB(db *Lowlevel, folder []byte) error {

 // countsPtr returns a pointer to the corresponding Counts struct, if
 // necessary allocating one in the process
-func (m *metadataTracker) countsPtr(dev protocol.DeviceID, flags uint32) *Counts {
+func (m *metadataTracker) countsPtr(dev protocol.DeviceID, flag uint32) *Counts {
 	// must be called with the mutex held

-	key := metaKey{dev, flags}
+	if bits.OnesCount32(flag) > 1 {
+		panic("incorrect usage: set at most one bit in flag")
+	}
+
+	key := metaKey{dev, flag}
 	idx, ok := m.indexes[key]
 	if !ok {
 		idx = len(m.counts.Counts)
-		m.counts.Counts = append(m.counts.Counts, Counts{DeviceID: dev[:], LocalFlags: flags})
+		m.counts.Counts = append(m.counts.Counts, Counts{DeviceID: dev[:], LocalFlags: flag})
 		m.indexes[key] = idx
 	}
 	return &m.counts.Counts[idx]
@@ -157,8 +161,8 @@ func (m *metadataTracker) updateSeqLocked(dev protocol.DeviceID, f FileIntf) {
 	}
 }

-func (m *metadataTracker) addFileLocked(dev protocol.DeviceID, flags uint32, f FileIntf) {
-	cp := m.countsPtr(dev, flags)
+func (m *metadataTracker) addFileLocked(dev protocol.DeviceID, flag uint32, f FileIntf) {
+	cp := m.countsPtr(dev, flag)

 	switch {
 	case f.IsDeleted():
@@ -196,8 +200,8 @@ func (m *metadataTracker) removeFile(dev protocol.DeviceID, f FileIntf) {
 	}
 }

-func (m *metadataTracker) removeFileLocked(dev protocol.DeviceID, flags uint32, f FileIntf) {
-	cp := m.countsPtr(dev, f.FileLocalFlags())
+func (m *metadataTracker) removeFileLocked(dev protocol.DeviceID, flag uint32, f FileIntf) {
+	cp := m.countsPtr(dev, flag)

 	switch {
 	case f.IsDeleted():
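countsPtr now insists that its argument names at most one flag bit, and popcount makes that guard cheap. A tiny standalone sketch of the check (the function name is invented for the example):

```go
package main

import (
	"fmt"
	"math/bits"
)

// assertSingleFlag mirrors the countsPtr guard above: a "flag" argument must
// have at most one bit set, which bits.OnesCount32 verifies in one call.
func assertSingleFlag(flag uint32) {
	if bits.OnesCount32(flag) > 1 {
		panic("incorrect usage: set at most one bit in flag")
	}
}

func main() {
	assertSingleFlag(0)      // ok: no bit set
	assertSingleFlag(1 << 3) // ok: exactly one bit
	fmt.Println("both checks passed")
	// assertSingleFlag(3) would panic: two bits set
}
```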
@@ -11,6 +11,8 @@ import (
 	"sort"
 	"testing"

+	"github.com/syncthing/syncthing/lib/db/backend"
+	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -101,3 +103,75 @@ func TestMetaSequences(t *testing.T) {
 		t.Error("sequence of first device should be 4, not", seq)
 	}
 }
+
+func TestRecalcMeta(t *testing.T) {
+	ldb := NewLowlevel(backend.OpenMemory())
+	defer ldb.Close()
+
+	// Add some files
+	s1 := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, "fake"), ldb)
+	files := []protocol.FileInfo{
+		{Name: "a", Size: 1000},
+		{Name: "b", Size: 2000},
+	}
+	s1.Update(protocol.LocalDeviceID, files)
+
+	// Verify local/global size
+	snap := s1.Snapshot()
+	ls := snap.LocalSize()
+	gs := snap.GlobalSize()
+	snap.Release()
+	if ls.Bytes != 3000 {
+		t.Fatalf("Wrong initial local byte count, %d != 3000", ls.Bytes)
+	}
+	if gs.Bytes != 3000 {
+		t.Fatalf("Wrong initial global byte count, %d != 3000", gs.Bytes)
+	}
+
+	// Reach into the database to make the metadata tracker intentionally
+	// wrong and out of date
+	curSeq := s1.meta.Sequence(protocol.LocalDeviceID)
+	tran, err := ldb.newReadWriteTransaction()
+	if err != nil {
+		t.Fatal(err)
+	}
+	s1.meta.mut.Lock()
+	s1.meta.countsPtr(protocol.LocalDeviceID, 0).Sequence = curSeq - 1 // too low
+	s1.meta.countsPtr(protocol.LocalDeviceID, 0).Bytes = 1234          // wrong
+	s1.meta.countsPtr(protocol.GlobalDeviceID, 0).Bytes = 1234         // wrong
+	s1.meta.dirty = true
+	s1.meta.mut.Unlock()
+	if err := s1.meta.toDB(tran, []byte("test")); err != nil {
+		t.Fatal(err)
+	}
+	if err := tran.Commit(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify that our bad data "took"
+	snap = s1.Snapshot()
+	ls = snap.LocalSize()
+	gs = snap.GlobalSize()
+	snap.Release()
+	if ls.Bytes != 1234 {
+		t.Fatalf("Wrong changed local byte count, %d != 1234", ls.Bytes)
+	}
+	if gs.Bytes != 1234 {
+		t.Fatalf("Wrong changed global byte count, %d != 1234", gs.Bytes)
+	}
+
+	// Create a new fileset, which will realize the inconsistency and recalculate
+	s2 := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, "fake"), ldb)
+
+	// Verify local/global size
+	snap = s2.Snapshot()
+	ls = snap.LocalSize()
+	gs = snap.GlobalSize()
+	snap.Release()
+	if ls.Bytes != 3000 {
+		t.Fatalf("Wrong fixed local byte count, %d != 3000", ls.Bytes)
+	}
+	if gs.Bytes != 3000 {
+		t.Fatalf("Wrong fixed global byte count, %d != 3000", gs.Bytes)
+	}
+}
@@ -7,9 +7,11 @@
 package db

 import (
+	"bytes"
 	"fmt"
+	"strings"

 	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -23,11 +25,17 @@ import (
 // 6: v0.14.50
 // 7: v0.14.53
 // 8: v1.4.0
+// 9: v1.4.0
 const (
-	dbVersion             = 8
+	dbVersion             = 9
 	dbMinSyncthingVersion = "v1.4.0"
 )

+var (
+	errFolderIdxMissing = fmt.Errorf("folder db index missing")
+	errDeviceIdxMissing = fmt.Errorf("device db index missing")
+)
+
 type databaseDowngradeError struct {
 	minSyncthingVersion string
 }
@@ -49,6 +57,11 @@ type schemaUpdater struct {
 }

 func (db *schemaUpdater) updateSchema() error {
+	// Updating the schema can touch any and all parts of the database. Make
+	// sure we do not run GC concurrently with schema migrations.
+	db.gcMut.Lock()
+	defer db.gcMut.Unlock()
+
 	miscDB := NewMiscDataNamespace(db.Lowlevel)
 	prevVersion, _, err := miscDB.Int64("dbVersion")
 	if err != nil {
@@ -80,7 +93,7 @@ func (db *schemaUpdater) updateSchema() error {
 		{5, db.updateSchemaTo5},
 		{6, db.updateSchema5to6},
 		{7, db.updateSchema6to7},
-		{8, db.updateSchema7to8},
+		{9, db.updateSchemato9},
 	}

 	for _, m := range migrations {
@@ -204,7 +217,7 @@ func (db *schemaUpdater) updateSchema0to1(_ int) error {
 			return err
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

 // updateSchema1to2 introduces a sequenceKey->deviceKey bucket for local items
@@ -240,7 +253,7 @@ func (db *schemaUpdater) updateSchema1to2(_ int) error {
 			return err
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

 // updateSchema2to3 introduces a needKey->nil bucket for locally needed files.
@@ -288,7 +301,7 @@ func (db *schemaUpdater) updateSchema2to3(_ int) error {
 			return err
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

 // updateSchemaTo5 resets the need bucket due to bugs existing in the v0.14.49
@@ -314,7 +327,7 @@ func (db *schemaUpdater) updateSchemaTo5(prevVersion int) error {
 			return err
 		}
 	}
-	if err := t.commit(); err != nil {
+	if err := t.Commit(); err != nil {
 		return err
 	}

@@ -361,7 +374,7 @@ func (db *schemaUpdater) updateSchema5to6(_ int) error {
 			return err
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

 // updateSchema6to7 checks whether all currently locally needed files are really
@@ -418,11 +431,12 @@ func (db *schemaUpdater) updateSchema6to7(_ int) error {
 			return err
 		}
 	}
-	return t.commit()
+	return t.Commit()
 }

-func (db *schemaUpdater) updateSchema7to8(_ int) error {
+func (db *schemaUpdater) updateSchemato9(prev int) error {
 	// Loads and rewrites all files with blocks, to deduplicate block lists.
+	// Checks for missing or incorrect sequence entries and rewrites those.

 	t, err := db.newReadWriteTransaction()
 	if err != nil {
@@ -430,15 +444,69 @@
 	}
 	defer t.close()

+	var sk []byte
 	it, err := t.NewPrefixIterator([]byte{KeyTypeDevice})
 	if err != nil {
 		return err
 	}
+	metas := make(map[string]*metadataTracker)
 	for it.Next() {
-		var fi protocol.FileInfo
-		if err := fi.Unmarshal(it.Value()); err != nil {
+		intf, err := t.unmarshalTrunc(it.Value(), false)
+		if backend.IsNotFound(err) {
+			// Unmarshal error due to missing parts (block list), probably
+			// due to a bad migration in a previous RC. Drop this key, as
+			// getFile would anyway return this as a "not found" in the
+			// normal flow of things.
+			if err := t.Delete(it.Key()); err != nil {
+				return err
+			}
+			continue
+		} else if err != nil {
 			return err
 		}
+		fi := intf.(protocol.FileInfo)
+		device, ok := t.keyer.DeviceFromDeviceFileKey(it.Key())
+		if !ok {
+			return errDeviceIdxMissing
+		}
+		if bytes.Equal(device, protocol.LocalDeviceID[:]) {
+			folder, ok := t.keyer.FolderFromDeviceFileKey(it.Key())
+			if !ok {
+				return errFolderIdxMissing
+			}
+			if sk, err = t.keyer.GenerateSequenceKey(sk, folder, fi.Sequence); err != nil {
+				return err
+			}
+			switch dk, err := t.Get(sk); {
+			case err != nil:
+				if !backend.IsNotFound(err) {
+					return err
+				}
+				fallthrough
+			case !bytes.Equal(it.Key(), dk):
+				folderStr := string(folder)
+				meta, ok := metas[folderStr]
+				if !ok {
+					meta = loadMetadataTracker(db.Lowlevel, folderStr)
+					metas[folderStr] = meta
+				}
+				fi.Sequence = meta.nextLocalSeq()
+				if sk, err = t.keyer.GenerateSequenceKey(sk, folder, fi.Sequence); err != nil {
+					return err
+				}
+				if err := t.Put(sk, it.Key()); err != nil {
+					return err
+				}
+				if err := t.putFile(it.Key(), fi); err != nil {
+					return err
+				}
+				continue
+			}
+		}
+		if prev == 8 {
+			// The transition to 8 already did the changes below.
+			continue
+		}
 		if fi.Blocks == nil {
 			continue
 		}
@@ -451,7 +519,13 @@
 		return err
 	}

-	db.recordTime(blockGCTimeKey)
+	for folder, meta := range metas {
+		if err := meta.toDB(t, []byte(folder)); err != nil {
+			return err
+		}
+	}
+
+	db.recordTime(indirectGCTimeKey)

-	return t.commit()
+	return t.Commit()
 }
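The schema updater drives all of this through a table of (target version, migration func) pairs, applied in order, skipping steps the database has already passed; the previous version is passed in so a migration such as updateSchemato9 can skip work a prior step (the 7-to-8 transition) already did. A minimal sketch of that runner shape, with invented names and print statements standing in for real migrations:

```go
package main

import "fmt"

// migration pairs a target schema version with the function that gets the
// database there; the function receives the version we are coming from.
type migration struct {
	targetVersion int64
	apply         func(prev int) error
}

// runMigrations applies each pending migration in order, recording the new
// version after every successful step.
func runMigrations(prev int64, migrations []migration) (int64, error) {
	for _, m := range migrations {
		if prev >= m.targetVersion {
			continue // already at or past this version
		}
		if err := m.apply(int(prev)); err != nil {
			return prev, err
		}
		prev = m.targetVersion
	}
	return prev, nil
}

func main() {
	migs := []migration{
		{8, func(prev int) error { fmt.Println("migrating to 8 from", prev); return nil }},
		{9, func(prev int) error { fmt.Println("migrating to 9 from", prev); return nil }},
	}
	v, err := runMigrations(7, migs)
	fmt.Println(v, err) // 9 <nil>
}
```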
lib/db/set.go (134 changed lines)
@@ -71,56 +71,105 @@ func init() {
 }

 func NewFileSet(folder string, fs fs.Filesystem, db *Lowlevel) *FileSet {
-	var s = FileSet{
+	return &FileSet{
 		folder:      folder,
 		fs:          fs,
 		db:          db,
-		meta:        newMetadataTracker(),
+		meta:        loadMetadataTracker(db, folder),
 		updateMutex: sync.NewMutex(),
 	}
-
-	if err := s.meta.fromDB(db, []byte(folder)); err != nil {
-		l.Infof("No stored folder metadata for %q: recalculating", folder)
-		if err := s.recalcCounts(); backend.IsClosed(err) {
-			return nil
-		} else if err != nil {
-			panic(err)
-		}
-	} else if age := time.Since(s.meta.Created()); age > databaseRecheckInterval {
-		l.Infof("Stored folder metadata for %q is %v old; recalculating", folder, age)
-		if err := s.recalcCounts(); backend.IsClosed(err) {
-			return nil
-		} else if err != nil {
-			panic(err)
-		}
-	}
-
-	return &s
 }

-func (s *FileSet) recalcCounts() error {
-	s.meta = newMetadataTracker()
-
-	if err := s.db.checkGlobals([]byte(s.folder), s.meta); err != nil {
-		return err
+func loadMetadataTracker(db *Lowlevel, folder string) *metadataTracker {
+	recalc := func() *metadataTracker {
+		meta, err := recalcMeta(db, folder)
+		if backend.IsClosed(err) {
+			return nil
+		} else if err != nil {
+			panic(err)
+		}
+		return meta
+	}
+
+	meta := newMetadataTracker()
+	if err := meta.fromDB(db, []byte(folder)); err != nil {
+		l.Infof("No stored folder metadata for %q; recalculating", folder)
+		return recalc()
+	}
+
+	curSeq := meta.Sequence(protocol.LocalDeviceID)
+	if metaOK := verifyLocalSequence(curSeq, db, folder); !metaOK {
+		l.Infof("Stored folder metadata for %q is out of date after crash; recalculating", folder)
+		return recalc()
+	}
+
+	if age := time.Since(meta.Created()); age > databaseRecheckInterval {
+		l.Infof("Stored folder metadata for %q is %v old; recalculating", folder, age)
+		return recalc()
+	}
+
+	return meta
+}
+
+func recalcMeta(db *Lowlevel, folder string) (*metadataTracker, error) {
+	meta := newMetadataTracker()
+	if err := db.checkGlobals([]byte(folder), meta); err != nil {
+		return nil, err
 	}

-	t, err := s.db.newReadWriteTransaction()
+	t, err := db.newReadWriteTransaction()
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer t.close()

 	var deviceID protocol.DeviceID
-	err = t.withAllFolderTruncated([]byte(s.folder), func(device []byte, f FileInfoTruncated) bool {
+	err = t.withAllFolderTruncated([]byte(folder), func(device []byte, f FileInfoTruncated) bool {
 		copy(deviceID[:], device)
-		s.meta.addFile(deviceID, f)
+		meta.addFile(deviceID, f)
 		return true
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}

-	s.meta.SetCreated()
-	return s.meta.toDB(s.db, []byte(s.folder))
+	meta.SetCreated()
+	if err := meta.toDB(t, []byte(folder)); err != nil {
+		return nil, err
+	}
+	if err := t.Commit(); err != nil {
+		return nil, err
+	}
+	return meta, nil
+}
+
+// Verify the local sequence number from actual sequence entries. Returns
+// true if it was all good, or false if a fixup was necessary.
+func verifyLocalSequence(curSeq int64, db *Lowlevel, folder string) bool {
+	// Walk the sequence index from the current (supposedly) highest
+	// sequence number and raise the alarm if we get anything. This recovers
+	// from the occasion where we have written sequence entries to disk but
+	// not yet written new metadata to disk.
+	//
+	// Note that we can have the same thing happen for remote devices but
+	// there it's not a problem -- we'll simply advertise a lower sequence
+	// number than we've actually seen and receive some duplicate updates
+	// and then be in sync again.
+
+	t, err := db.newReadOnlyTransaction()
+	if err != nil {
+		panic(err)
+	}
+	ok := true
+	if err := t.withHaveSequence([]byte(folder), curSeq+1, func(fi FileIntf) bool {
+		ok = false // we got something, which we should not have
+		return false
+	}); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
+	t.close()

+	return ok
+}

 func (s *FileSet) Drop(device protocol.DeviceID) {
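loadMetadataTracker follows a load, validate, recalculate-on-doubt progression: use the stored value only if it loads cleanly, passes a consistency check, and is not too old. A stripped-down sketch of that shape with placeholder types and checks (nothing here is from the patch except the flow itself):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

type meta struct {
	created time.Time
	bytes   int64
}

// loadOrRecalc sketches the loadMetadataTracker flow: fall back to an
// expensive recalculation whenever the cached value is missing,
// inconsistent (e.g. after a crash), or stale.
func loadOrRecalc(load func() (*meta, error), verify func(*meta) bool, recalc func() *meta, maxAge time.Duration) *meta {
	m, err := load()
	if err != nil {
		return recalc() // nothing stored, or unreadable
	}
	if !verify(m) {
		return recalc() // stored value fails the consistency check
	}
	if time.Since(m.created) > maxAge {
		return recalc() // stored value is old; periodic full recheck
	}
	return m
}

func main() {
	m := loadOrRecalc(
		func() (*meta, error) { return nil, errors.New("not found") },
		func(*meta) bool { return true },
		func() *meta { fmt.Println("recalculating"); return &meta{created: time.Now()} },
		30*24*time.Hour,
	)
	fmt.Println(m.bytes)
}
```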
@@ -150,7 +199,20 @@ func (s *FileSet) Drop(device protocol.DeviceID) {
 		s.meta.resetAll(device)
 	}

-	if err := s.meta.toDB(s.db, []byte(s.folder)); backend.IsClosed(err) {
+	t, err := s.db.newReadWriteTransaction()
+	if backend.IsClosed(err) {
+		return
+	} else if err != nil {
+		panic(err)
+	}
+	defer t.close()
+
+	if err := s.meta.toDB(t, []byte(s.folder)); backend.IsClosed(err) {
 		return
 	} else if err != nil {
 		panic(err)
 	}
+	if err := t.Commit(); backend.IsClosed(err) {
+		return
+	} else if err != nil {
+		panic(err)
@@ -168,12 +230,6 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
 	s.updateMutex.Lock()
 	defer s.updateMutex.Unlock()

-	defer func() {
-		if err := s.meta.toDB(s.db, []byte(s.folder)); err != nil && !backend.IsClosed(err) {
-			panic(err)
-		}
-	}()
-
 	if device == protocol.LocalDeviceID {
 		// For the local device we have a bunch of metadata to track.
 		if err := s.db.updateLocalFiles([]byte(s.folder), fs, s.meta); err != nil && !backend.IsClosed(err) {
@@ -129,27 +129,36 @@ func (f FileInfoTruncated) FileModifiedBy() protocol.ShortID {
 }

 func (f FileInfoTruncated) ConvertToIgnoredFileInfo(by protocol.ShortID) protocol.FileInfo {
-	return protocol.FileInfo{
-		Name:         f.Name,
-		Type:         f.Type,
-		ModifiedS:    f.ModifiedS,
-		ModifiedNs:   f.ModifiedNs,
-		ModifiedBy:   by,
-		Version:      f.Version,
-		RawBlockSize: f.RawBlockSize,
-		LocalFlags:   protocol.FlagLocalIgnored,
-	}
+	file := f.copyToFileInfo()
+	file.SetIgnored(by)
+	return file
 }

-func (f FileInfoTruncated) ConvertToDeletedFileInfo(by protocol.ShortID, localFlags uint32) protocol.FileInfo {
+func (f FileInfoTruncated) ConvertToDeletedFileInfo(by protocol.ShortID) protocol.FileInfo {
+	file := f.copyToFileInfo()
+	file.SetDeleted(by)
+	return file
+}
+
+// copyToFileInfo just copies all members of FileInfoTruncated to protocol.FileInfo
+func (f FileInfoTruncated) copyToFileInfo() protocol.FileInfo {
 	return protocol.FileInfo{
-		Name:       f.Name,
-		Type:       f.Type,
-		ModifiedS:  time.Now().Unix(),
-		ModifiedBy: by,
-		Deleted:    true,
-		Version:    f.Version.Update(by),
-		LocalFlags: localFlags,
+		Name:          f.Name,
+		Size:          f.Size,
+		ModifiedS:     f.ModifiedS,
+		ModifiedBy:    f.ModifiedBy,
+		Version:       f.Version,
+		Sequence:      f.Sequence,
+		SymlinkTarget: f.SymlinkTarget,
+		BlocksHash:    f.BlocksHash,
+		Type:          f.Type,
+		Permissions:   f.Permissions,
+		ModifiedNs:    f.ModifiedNs,
+		RawBlockSize:  f.RawBlockSize,
+		LocalFlags:    f.LocalFlags,
+		Deleted:       f.Deleted,
+		RawInvalid:    f.RawInvalid,
+		NoPermissions: f.NoPermissions,
 	}
 }
@@ -78,30 +78,34 @@ func (t readOnlyTransaction) unmarshalTrunc(bs []byte, trunc bool) (FileIntf, er
 		return tf, nil
 	}

-	var tf protocol.FileInfo
-	if err := tf.Unmarshal(bs); err != nil {
+	var fi protocol.FileInfo
+	if err := fi.Unmarshal(bs); err != nil {
 		return nil, err
 	}
-	if err := t.fillBlockList(&tf); err != nil {
+	if err := t.fillFileInfo(&fi); err != nil {
 		return nil, err
 	}
-	return tf, nil
+	return fi, nil
 }

-func (t readOnlyTransaction) fillBlockList(fi *protocol.FileInfo) error {
-	if len(fi.BlocksHash) == 0 {
-		return nil
+// fillFileInfo follows the (possible) indirection of blocks and fills it out.
+func (t readOnlyTransaction) fillFileInfo(fi *protocol.FileInfo) error {
+	var key []byte
+
+	if len(fi.Blocks) == 0 && len(fi.BlocksHash) != 0 {
+		// The blocks list is indirected and we need to load it.
+		key = t.keyer.GenerateBlockListKey(key, fi.BlocksHash)
+		bs, err := t.Get(key)
+		if err != nil {
+			return err
+		}
+		var bl BlockList
+		if err := bl.Unmarshal(bs); err != nil {
+			return err
+		}
+		fi.Blocks = bl.Blocks
 	}
-	blocksKey := t.keyer.GenerateBlockListKey(nil, fi.BlocksHash)
-	bs, err := t.Get(blocksKey)
-	if err != nil {
-		return err
-	}
-	var bl BlockList
-	if err := bl.Unmarshal(bs); err != nil {
-		return err
-	}
-	fi.Blocks = bl.Blocks

 	return nil
 }
@@ -443,7 +447,7 @@ func (db *Lowlevel) newReadWriteTransaction() (readWriteTransaction, error) {
 	}, nil
 }

-func (t readWriteTransaction) commit() error {
+func (t readWriteTransaction) Commit() error {
 	t.readOnlyTransaction.close()
 	return t.WriteTransaction.Commit()
 }
@@ -453,26 +457,33 @@ func (t readWriteTransaction) close() {
 	t.WriteTransaction.Release()
 }

-func (t readWriteTransaction) putFile(key []byte, fi protocol.FileInfo) error {
-	if fi.Blocks != nil {
-		if fi.BlocksHash == nil {
-			fi.BlocksHash = protocol.BlocksHash(fi.Blocks)
-		}
-		blocksKey := t.keyer.GenerateBlockListKey(nil, fi.BlocksHash)
-		if _, err := t.Get(blocksKey); backend.IsNotFound(err) {
+func (t readWriteTransaction) putFile(fkey []byte, fi protocol.FileInfo) error {
+	var bkey []byte
+
+	// Always set the blocks hash when there are blocks.
+	if len(fi.Blocks) > 0 {
+		fi.BlocksHash = protocol.BlocksHash(fi.Blocks)
+	} else {
+		fi.BlocksHash = nil
+	}
+
+	// Indirect the blocks if the block list is large enough.
+	if len(fi.Blocks) > blocksIndirectionCutoff {
+		bkey = t.keyer.GenerateBlockListKey(bkey, fi.BlocksHash)
+		if _, err := t.Get(bkey); backend.IsNotFound(err) {
 			// Marshal the block list and save it
 			blocksBs := mustMarshal(&BlockList{Blocks: fi.Blocks})
-			if err := t.Put(blocksKey, blocksBs); err != nil {
+			if err := t.Put(bkey, blocksBs); err != nil {
 				return err
 			}
 		} else if err != nil {
 			return err
 		}
+		fi.Blocks = nil
 	}

-	fi.Blocks = nil
 	fiBs := mustMarshal(&fi)
-	return t.Put(key, fiBs)
+	return t.Put(fkey, fiBs)
 }

 // updateGlobal adds this device+version to the version list for the given
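putFile and fillFileInfo together implement content-addressed indirection: block lists above the cutoff are stored once, keyed by their hash, and followed on read. A simplified sketch of that round trip, with a map standing in for the block-list keyspace; hashing fmt.Sprint of the slice is purely illustrative (the real code hashes the marshalled block list via protocol.BlocksHash):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

const cutoff = 3 // indirect only lists big enough to be worth a second lookup

var blockStore = map[[32]byte][]string{} // stand-in for the KeyTypeBlockList keyspace

type file struct {
	blocks     []string
	blocksHash [32]byte
	indirected bool
}

// put moves a large block list out of the file record and into the shared
// store, deduplicating identical lists across files.
func put(f *file) {
	if len(f.blocks) > cutoff {
		f.blocksHash = sha256.Sum256([]byte(fmt.Sprint(f.blocks)))
		if _, ok := blockStore[f.blocksHash]; !ok {
			blockStore[f.blocksHash] = f.blocks // stored once, shared
		}
		f.blocks = nil // the record itself no longer carries the list
		f.indirected = true
	}
}

// get follows the indirection on read, mirroring fillFileInfo above.
func get(f *file) []string {
	if f.indirected && len(f.blocks) == 0 {
		return blockStore[f.blocksHash]
	}
	return f.blocks
}

func main() {
	f := &file{blocks: []string{"b1", "b2", "b3", "b4"}}
	put(f)
	fmt.Println(get(f)) // [b1 b2 b3 b4]
}
```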
@@ -723,15 +734,12 @@ func (t *readWriteTransaction) withAllFolderTruncated(folder []byte, fn func(dev
 			}
 			continue
 		}
-		var f FileInfoTruncated
-		// The iterator function may keep a reference to the unmarshalled
-		// struct, which in turn references the buffer it was unmarshalled
-		// from. dbi.Value() just returns an internal slice that it reuses, so
-		// we need to copy it.
-		err := f.Unmarshal(append([]byte{}, dbi.Value()...))
+
+		intf, err := t.unmarshalTrunc(dbi.Value(), true)
 		if err != nil {
 			return err
 		}
+		f := intf.(FileInfoTruncated)

 		switch f.Name {
 		case "", ".", "..", "/": // A few obviously invalid filenames
@@ -755,10 +763,7 @@ func (t *readWriteTransaction) withAllFolderTruncated(folder []byte, fn func(dev
 			return nil
 		}
 	}
-	if err := dbi.Error(); err != nil {
-		return err
-	}
-	return t.commit()
+	return dbi.Error()
 }

 type marshaller interface {
@@ -7,6 +7,7 @@
 package discover

 import (
+	"context"
 	"sort"
 	stdsync "sync"
 	"time"
@@ -73,7 +74,7 @@ func (m *cachingMux) Add(finder Finder, cacheTime, negCacheTime time.Duration) {

 // Lookup attempts to resolve the device ID using any of the added Finders,
 // while obeying the cache settings.
-func (m *cachingMux) Lookup(deviceID protocol.DeviceID) (addresses []string, err error) {
+func (m *cachingMux) Lookup(ctx context.Context, deviceID protocol.DeviceID) (addresses []string, err error) {
 	m.mut.RLock()
 	for i, finder := range m.finders {
 		if cacheEntry, ok := m.caches[i].Get(deviceID); ok {
@@ -99,7 +100,7 @@ func (m *cachingMux) Lookup(deviceID protocol.DeviceID) (addresses []string, err
 		}

 		// Perform the actual lookup and cache the result.
-		if addrs, err := finder.Lookup(deviceID); err == nil {
+		if addrs, err := finder.Lookup(ctx, deviceID); err == nil {
 			l.Debugln("lookup for", deviceID, "at", finder)
 			l.Debugln("  addresses:", addrs)
 			addresses = append(addresses, addrs...)
@@ -7,6 +7,7 @@
 package discover

 import (
+	"context"
 	"reflect"
 	"testing"
 	"time"
@@ -39,7 +40,9 @@ func TestCacheUnique(t *testing.T) {
 	f1 := &fakeDiscovery{addresses0}
 	c.Add(f1, time.Minute, 0)

-	addr, err := c.Lookup(protocol.LocalDeviceID)
+	ctx := context.Background()
+
+	addr, err := c.Lookup(ctx, protocol.LocalDeviceID)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -53,7 +56,7 @@ func TestCacheUnique(t *testing.T) {
 	f2 := &fakeDiscovery{addresses1}
 	c.Add(f2, time.Minute, 0)

-	addr, err = c.Lookup(protocol.LocalDeviceID)
+	addr, err = c.Lookup(ctx, protocol.LocalDeviceID)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -66,7 +69,7 @@ type fakeDiscovery struct {
 	addresses []string
 }

-func (f *fakeDiscovery) Lookup(deviceID protocol.DeviceID) (addresses []string, err error) {
+func (f *fakeDiscovery) Lookup(_ context.Context, deviceID protocol.DeviceID) (addresses []string, err error) {
 	return f.addresses, nil
 }
@@ -96,7 +99,7 @@ func TestCacheSlowLookup(t *testing.T) {
 	// Start a lookup, which will take at least a second

 	t0 := time.Now()
-	go c.Lookup(protocol.LocalDeviceID)
+	go c.Lookup(context.Background(), protocol.LocalDeviceID)
 	<-started // The slow lookup method has been called so we're inside the lock

 	// It should be possible to get ChildErrors while it's running
@@ -116,7 +119,7 @@ type slowDiscovery struct {
 	started chan struct{}
 }

-func (f *slowDiscovery) Lookup(deviceID protocol.DeviceID) (addresses []string, err error) {
+func (f *slowDiscovery) Lookup(_ context.Context, deviceID protocol.DeviceID) (addresses []string, err error) {
 	close(f.started)
 	time.Sleep(f.delay)
 	return nil, nil
@@ -7,6 +7,7 @@
 package discover

 import (
+	"context"
 	"time"

 	"github.com/syncthing/syncthing/lib/protocol"
@@ -15,7 +16,7 @@

 // A Finder provides lookup services of some kind.
 type Finder interface {
-	Lookup(deviceID protocol.DeviceID) (address []string, err error)
+	Lookup(ctx context.Context, deviceID protocol.DeviceID) (address []string, err error)
 	Error() error
 	String() string
 	Cache() map[protocol.DeviceID]CacheEntry
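Every Finder implementation now receives a context as the first Lookup argument. A toy implementation of that shape, honouring cancellation before answering; it uses a plain string device ID and omits the interface's other methods (Error, String, Cache) for brevity:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// staticFinder answers lookups from a fixed address list, checking the
// context first in the same non-blocking style used elsewhere in the diff.
type staticFinder struct {
	addresses []string
}

func (f *staticFinder) Lookup(ctx context.Context, deviceID string) ([]string, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	if len(f.addresses) == 0 {
		return nil, errors.New("unknown device")
	}
	return f.addresses, nil
}

func main() {
	f := &staticFinder{addresses: []string{"tcp://192.0.2.1:22000"}}
	addrs, err := f.Lookup(context.Background(), "device-1")
	fmt.Println(addrs, err)
}
```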
@@ -41,8 +41,8 @@ type globalClient struct {
 }

 type httpClient interface {
-	Get(url string) (*http.Response, error)
-	Post(url, ctype string, data io.Reader) (*http.Response, error)
+	Get(ctx context.Context, url string) (*http.Response, error)
+	Post(ctx context.Context, url, ctype string, data io.Reader) (*http.Response, error)
 }

 const (
@@ -89,7 +89,7 @@ func NewGlobal(server string, cert tls.Certificate, addrList AddressLister, evLo
 	// The http.Client used for announcements. It needs to have our
 	// certificate to prove our identity, and may or may not verify the server
 	// certificate depending on the insecure setting.
-	var announceClient httpClient = &http.Client{
+	var announceClient httpClient = &contextClient{&http.Client{
 		Timeout: requestTimeout,
 		Transport: &http.Transport{
 			DialContext: dialer.DialContext,
@@ -99,14 +99,14 @@ func NewGlobal(server string, cert tls.Certificate, addrList AddressLister, evLo
 				Certificates: []tls.Certificate{cert},
 			},
 		},
-	}
+	}}
 	if opts.id != "" {
 		announceClient = newIDCheckingHTTPClient(announceClient, devID)
 	}

 	// The http.Client used for queries. We don't need to present our
 	// certificate here, so lets not include it. May be insecure if requested.
-	var queryClient httpClient = &http.Client{
+	var queryClient httpClient = &contextClient{&http.Client{
 		Timeout: requestTimeout,
 		Transport: &http.Transport{
 			DialContext: dialer.DialContext,
@@ -115,7 +115,7 @@ func NewGlobal(server string, cert tls.Certificate, addrList AddressLister, evLo
 				InsecureSkipVerify: opts.insecure,
 			},
 		},
-	}
+	}}
 	if opts.id != "" {
 		queryClient = newIDCheckingHTTPClient(queryClient, devID)
 	}
@@ -139,7 +139,7 @@ func NewGlobal(server string, cert tls.Certificate, addrList AddressLister, evLo
 }

 // Lookup returns the list of addresses where the given device is available
-func (c *globalClient) Lookup(device protocol.DeviceID) (addresses []string, err error) {
+func (c *globalClient) Lookup(ctx context.Context, device protocol.DeviceID) (addresses []string, err error) {
 	if c.noLookup {
 		return nil, lookupError{
 			error: errors.New("lookups not supported"),
@@ -156,7 +156,7 @@ func (c *globalClient) Lookup(device protocol.DeviceID) (addresses []string, err
 	q.Set("device", device.String())
 	qURL.RawQuery = q.Encode()

-	resp, err := c.queryClient.Get(qURL.String())
+	resp, err := c.queryClient.Get(ctx, qURL.String())
 	if err != nil {
 		l.Debugln("globalClient.Lookup", qURL, err)
 		return nil, err
@@ -211,7 +211,7 @@ func (c *globalClient) serve(ctx context.Context) {
 			timer.Reset(2 * time.Second)

 		case <-timer.C:
-			c.sendAnnouncement(timer)
+			c.sendAnnouncement(ctx, timer)

 		case <-ctx.Done():
 			return
@@ -219,7 +219,7 @@ func (c *globalClient) serve(ctx context.Context) {
 	}
 }

-func (c *globalClient) sendAnnouncement(timer *time.Timer) {
+func (c *globalClient) sendAnnouncement(ctx context.Context, timer *time.Timer) {
 	var ann announcement
 	if c.addrList != nil {
 		ann.Addresses = c.addrList.ExternalAddresses()
@@ -239,7 +239,7 @@ func (c *globalClient) sendAnnouncement(timer *time.Timer) {

 	l.Debugf("Announcement: %s", postData)

-	resp, err := c.announceClient.Post(c.server, "application/json", bytes.NewReader(postData))
+	resp, err := c.announceClient.Post(ctx, c.server, "application/json", bytes.NewReader(postData))
 	if err != nil {
 		l.Debugln("announce POST:", err)
 		c.setError(err)
@@ -362,8 +362,8 @@ func (c *idCheckingHTTPClient) check(resp *http.Response) error {
 	return nil
 }

-func (c *idCheckingHTTPClient) Get(url string) (*http.Response, error) {
-	resp, err := c.httpClient.Get(url)
+func (c *idCheckingHTTPClient) Get(ctx context.Context, url string) (*http.Response, error) {
+	resp, err := c.httpClient.Get(ctx, url)
 	if err != nil {
 		return nil, err
 	}
@@ -374,8 +374,8 @@ func (c *idCheckingHTTPClient) Get(url string) (*http.Response, error) {
 	return resp, nil
 }

-func (c *idCheckingHTTPClient) Post(url, ctype string, data io.Reader) (*http.Response, error) {
-	resp, err := c.httpClient.Post(url, ctype, data)
+func (c *idCheckingHTTPClient) Post(ctx context.Context, url, ctype string, data io.Reader) (*http.Response, error) {
+	resp, err := c.httpClient.Post(ctx, url, ctype, data)
 	if err != nil {
 		return nil, err
 	}
@@ -403,3 +403,32 @@ func (e *errorHolder) Error() error {
 	e.mut.Unlock()
 	return err
 }
+
+type contextClient struct {
+	*http.Client
+}
+
+func (c *contextClient) Get(ctx context.Context, url string) (*http.Response, error) {
+	// For <go1.13 compatibility. Use the following commented line once that
+	// isn't required anymore.
+	// req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Cancel = ctx.Done()
+	return c.Client.Do(req)
+}
+
+func (c *contextClient) Post(ctx context.Context, url, ctype string, data io.Reader) (*http.Response, error) {
+	// For <go1.13 compatibility. Use the following commented line once that
+	// isn't required anymore.
+	// req, err := http.NewRequestWithContext(ctx, "POST", url, data)
+	req, err := http.NewRequest("POST", url, data)
+	if err != nil {
+		return nil, err
+	}
+	req.Cancel = ctx.Done()
+	req.Header.Set("Content-Type", ctype)
+	return c.Client.Do(req)
+}
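contextClient works around the lack of http.NewRequestWithContext before Go 1.13 by wiring the context's done channel into the request's (now-deprecated) Cancel field. A self-contained usage sketch of the same trick (the URL is an example value):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// ctxGet issues a GET that is aborted when the context is cancelled, using
// req.Cancel as contextClient does. On Go 1.13+ you would instead build the
// request with http.NewRequestWithContext.
func ctxGet(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Cancel = ctx.Done()
	return client.Do(req)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	resp, err := ctxGet(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```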
@@ -7,6 +7,7 @@
 package discover

 import (
+	"context"
 	"crypto/tls"
 	"io/ioutil"
 	"net"
@@ -225,7 +226,7 @@ func testLookup(url string) ([]string, error) {
 	go disco.Serve()
 	defer disco.Stop()

-	return disco.Lookup(protocol.LocalDeviceID)
+	return disco.Lookup(context.Background(), protocol.LocalDeviceID)
 }

 type fakeDiscoveryServer struct {
@@ -91,7 +91,7 @@ func NewLocal(id protocol.DeviceID, addr string, addrList AddressLister, evLogge
 }

 // Lookup returns a list of addresses the device is available at.
-func (c *localClient) Lookup(device protocol.DeviceID) (addresses []string, err error) {
+func (c *localClient) Lookup(_ context.Context, device protocol.DeviceID) (addresses []string, err error) {
 	if cache, ok := c.Get(device); ok {
 		if time.Since(cache.when) < CacheLifeTime {
 			addresses = cache.Addresses
@@ -52,9 +52,10 @@ const randomBlockShift = 14 // 128k
 // - Two fakefs:s pointing at the same root path see the same files.
 //
 type fakefs struct {
-	mut    sync.Mutex
-	root   *fakeEntry
-	insens bool
+	mut         sync.Mutex
+	root        *fakeEntry
+	insens      bool
+	withContent bool
 }

 var (
@@ -93,9 +94,9 @@ func newFakeFilesystem(root string) *fakefs {
 	sizeavg, _ := strconv.Atoi(params.Get("sizeavg"))
 	seed, _ := strconv.Atoi(params.Get("seed"))

-	if params.Get("insens") == "true" {
-		fs.insens = true
-	}
+	fs.insens = params.Get("insens") == "true"
+	fs.withContent = params.Get("content") == "true"

 	if sizeavg == 0 {
 		sizeavg = 1 << 20
 	}
@@ -151,6 +152,7 @@ type fakeEntry struct {
 	gid      int
 	mtime    time.Time
 	children map[string]*fakeEntry
+	content  []byte
 }

 func (fs *fakefs) entryForName(name string) *fakeEntry {
@@ -227,6 +229,10 @@ func (fs *fakefs) create(name string) (*fakeEntry, error) {
 		entry.size = 0
 		entry.mtime = time.Now()
 		entry.mode = 0666
+		entry.content = nil
+		if fs.withContent {
+			entry.content = make([]byte, 0)
+		}
 		return entry, nil
 	}
@@ -246,6 +252,10 @@ func (fs *fakefs) create(name string) (*fakeEntry, error) {
 		base = UnicodeLowercase(base)
 	}

+	if fs.withContent {
+		new.content = make([]byte, 0)
+	}
+
 	entry.children[base] = new
 	return new, nil
 }
@@ -417,6 +427,9 @@ func (fs *fakefs) OpenFile(name string, flags int, mode FileMode) (File, error)
 		mode:  mode,
 		mtime: time.Now(),
 	}
+	if fs.withContent {
+		newEntry.content = make([]byte, 0)
+	}

 	entry.children[key] = newEntry
 	return &fakeFile{fakeEntry: newEntry}, nil
@@ -660,6 +673,12 @@ func (f *fakeFile) readShortAt(p []byte, offs int64) (int, error) {
 		return 0, io.EOF
 	}

+	if f.content != nil {
+		n := copy(p, f.content[int(offs):])
+		f.offset = offs + int64(n)
+		return n, nil
+	}
+
 	// Lazily calculate our main seed, a simple 64 bit FNV hash our file
 	// name.
 	if f.seed == 0 {
@@ -746,6 +765,15 @@ func (f *fakeFile) WriteAt(p []byte, off int64) (int, error) {
 		return 0, errors.New("is a directory")
 	}

+	if f.content != nil {
+		if len(f.content) < int(off)+len(p) {
+			newc := make([]byte, int(off)+len(p))
+			copy(newc, f.content)
+			f.content = newc
+		}
+		copy(f.content[int(off):], p)
+	}
+
 	f.rng = nil
 	f.offset = off + int64(len(p))
 	if f.offset > f.size {
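The WriteAt change uses a grow-on-write buffer: when a write lands past the current end, the backing slice is reallocated to cover it, leaving the gap zero-filled. A standalone sketch of just that pattern (the helper is invented for the example):

```go
package main

import "fmt"

// writeAt grows the backing slice to cover off+len(p) before copying, the
// same pattern the fake file's WriteAt uses for its in-memory content.
func writeAt(content []byte, p []byte, off int) []byte {
	if len(content) < off+len(p) {
		newc := make([]byte, off+len(p))
		copy(newc, content)
		content = newc // bytes between the old end and off stay zero
	}
	copy(content[off:], p)
	return content
}

func main() {
	var content []byte
	content = writeAt(content, []byte("foo"), 0)
	content = writeAt(content, []byte("bar"), 5)
	fmt.Printf("%q\n", content) // "foo\x00\x00bar"
}
```

The sparse region between the two writes reads back as zero bytes, which is exactly what the TestReadWriteContent test further down asserts.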
@@ -765,6 +793,9 @@ func (f *fakeFile) Truncate(size int64) error {
 	f.mut.Lock()
 	defer f.mut.Unlock()

+	if f.content != nil {
+		f.content = f.content[:int(size)]
+	}
 	f.rng = nil
 	f.size = size
 	if f.offset > size {
@@ -896,6 +896,35 @@ func testFakeFSCreateInsens(t *testing.T, fs Filesystem) {
 	assertDir(t, fs, "/", []string{"FOO"})
 }

+func TestReadWriteContent(t *testing.T) {
+	fs := newFakeFilesystem("foo?content=true")
+	fd, err := fs.Create("file")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := fd.Write([]byte("foo")); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := fd.WriteAt([]byte("bar"), 5); err != nil {
+		t.Fatal(err)
+	}
+	expected := []byte("foo\x00\x00bar")
+
+	buf := make([]byte, len(expected)-1)
+	n, err := fd.ReadAt(buf, 1) // note offset one byte
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != len(expected)-1 {
+		t.Fatal("wrong number of bytes read")
+	}
+	if !bytes.Equal(buf[:n], expected[1:]) {
+		fmt.Printf("%d %q\n", n, buf[:n])
+		t.Error("wrong data in file")
+	}
+}
+
 func cleanup(fs Filesystem) error {
 	filenames, _ := fs.DirNames("/")
 	for _, filename := range filenames {
@@ -532,7 +532,8 @@ func (f *folder) scanSubdirs(subDirs []string) error {
 					}
 					return true
 				}
-				nf := file.ConvertToDeletedFileInfo(f.shortID, f.localFlags)
+				nf := file.ConvertToDeletedFileInfo(f.shortID)
+				nf.LocalFlags = f.localFlags
 				if file.ShouldConflict() {
 					// We do not want to override the global version with
 					// the deleted file. Setting to an empty version makes
@@ -104,14 +104,8 @@ func (f *receiveOnlyFolder) Revert() {
 				return true // continue
 			}

-			fi = protocol.FileInfo{
-				Name:       fi.Name,
-				Type:       fi.Type,
-				ModifiedS:  time.Now().Unix(),
-				ModifiedBy: f.shortID,
-				Deleted:    true,
-				Version:    protocol.Vector{}, // if this file ever resurfaces anywhere we want our delete to be strictly older
-			}
+			fi.SetDeleted(f.shortID)
+			fi.Version = protocol.Vector{} // if this file ever resurfaces anywhere we want our delete to be strictly older
 		} else {
 			// Revert means to throw away our local changes. We reset the
 			// version to the empty vector, which is strictly older than any
@@ -9,8 +9,6 @@ package model
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -28,18 +26,18 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
|
||||
|
||||
// Get us a model up and running
|
||||
|
||||
m, f := setupROFolder()
|
||||
m, f := setupROFolder(t)
|
||||
ffs := f.Filesystem()
|
||||
defer cleanupModelAndRemoveDir(m, ffs.URI())
|
||||
defer cleanupModel(m)
|
||||
|
||||
// Create some test data
|
||||
|
||||
for _, dir := range []string{".stfolder", "ignDir", "unknownDir"} {
|
||||
must(t, ffs.MkdirAll(dir, 0755))
|
||||
}
|
||||
must(t, ioutil.WriteFile(filepath.Join(ffs.URI(), "ignDir/ignFile"), []byte("hello\n"), 0644))
|
||||
must(t, ioutil.WriteFile(filepath.Join(ffs.URI(), "unknownDir/unknownFile"), []byte("hello\n"), 0644))
|
||||
must(t, ioutil.WriteFile(filepath.Join(ffs.URI(), ".stignore"), []byte("ignDir\n"), 0644))
|
||||
must(t, writeFile(ffs, "ignDir/ignFile", []byte("hello\n"), 0644))
|
||||
must(t, writeFile(ffs, "unknownDir/unknownFile", []byte("hello\n"), 0644))
|
||||
must(t, writeFile(ffs, ".stignore", []byte("ignDir\n"), 0644))
|
||||
|
||||
knownFiles := setupKnownFiles(t, ffs, []byte("hello\n"))
|
||||
|
||||
@@ -48,15 +46,18 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
|
||||
m.Index(device1, "ro", knownFiles)
|
||||
f.updateLocalsFromScanning(knownFiles)
|
||||
|
||||
size := globalSize(t, m, "ro")
|
||||
m.fmut.RLock()
|
||||
snap := m.folderFiles["ro"].Snapshot()
|
||||
m.fmut.RUnlock()
|
||||
size := snap.GlobalSize()
|
||||
snap.Release()
|
||||
if size.Files != 1 || size.Directories != 1 {
|
||||
t.Fatalf("Global: expected 1 file and 1 directory: %+v", size)
|
||||
}
|
||||
|
||||
// Start the folder. This will cause a scan, should discover the other stuff in the folder
|
||||
// Scan, should discover the other stuff in the folder
|
||||
|
||||
m.startFolder("ro")
|
||||
m.ScanFolder("ro")
|
||||
must(t, m.ScanFolder("ro"))
|
||||
|
||||
// We should now have two files and two directories.
|
||||
|
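The test now reads counts through an explicit database snapshot instead of the old globalSize helper. Condensed from the hunk above, the recurring pattern is: take the snapshot under the model's read lock, read what you need, and release it promptly:

	m.fmut.RLock()
	snap := m.folderFiles["ro"].Snapshot()
	m.fmut.RUnlock()

	size := snap.GlobalSize() // db.Counts: Files, Directories, Bytes, ...
	snap.Release()            // release promptly; the snapshot pins db state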
@@ -109,9 +110,9 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {

	// Get us a model up and running

	m, f := setupROFolder()
	m, f := setupROFolder(t)
	ffs := f.Filesystem()
	defer cleanupModelAndRemoveDir(m, ffs.URI())
	defer cleanupModel(m)

	// Create some test data

@@ -124,10 +125,9 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
	m.Index(device1, "ro", knownFiles)
	f.updateLocalsFromScanning(knownFiles)

	// Start the folder. This will cause a scan.
	// Scan the folder.

	m.startFolder("ro")
	m.ScanFolder("ro")
	must(t, m.ScanFolder("ro"))

	// Everything should be in sync.

@@ -151,7 +151,7 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
	// Update the file.

	newData := []byte("totally different data\n")
	must(t, ioutil.WriteFile(filepath.Join(ffs.URI(), "knownDir/knownFile"), newData, 0644))
	must(t, writeFile(ffs, "knownDir/knownFile", newData, 0644))

	// Rescan.

@@ -196,13 +196,11 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
}

func TestRecvOnlyUndoChanges(t *testing.T) {
	testOs := &fatalOs{t}

	// Get us a model up and running

	m, f := setupROFolder()
	m, f := setupROFolder(t)
	ffs := f.Filesystem()
	defer cleanupModelAndRemoveDir(m, ffs.URI())
	defer cleanupModel(m)

	// Create some test data

@@ -210,20 +208,14 @@ func TestRecvOnlyUndoChanges(t *testing.T) {
	oldData := []byte("hello\n")
	knownFiles := setupKnownFiles(t, ffs, oldData)

	m.fmut.Lock()
	fset := m.folderFiles["ro"]
	m.fmut.Unlock()
	folderFs := fset.MtimeFS()

	// Send and index update for the known stuff
	// Send an index update for the known stuff

	m.Index(device1, "ro", knownFiles)
	f.updateLocalsFromScanning(knownFiles)

	// Start the folder. This will cause a scan.
	// Scan the folder.

	m.startFolder("ro")
	m.ScanFolder("ro")
	must(t, m.ScanFolder("ro"))

	// Everything should be in sync.

@@ -246,12 +238,11 @@ func TestRecvOnlyUndoChanges(t *testing.T) {

	// Create a file and modify another

	file := filepath.Join(ffs.URI(), "foo")
	must(t, ioutil.WriteFile(file, []byte("hello\n"), 0644))
	const file = "foo"
	must(t, writeFile(ffs, file, []byte("hello\n"), 0644))
	must(t, writeFile(ffs, "knownDir/knownFile", []byte("bye\n"), 0644))

	must(t, ioutil.WriteFile(filepath.Join(ffs.URI(), "knownDir/knownFile"), []byte("bye\n"), 0644))

	m.ScanFolder("ro")
	must(t, m.ScanFolder("ro"))

	size = receiveOnlyChangedSize(t, m, "ro")
	if size.Files != 2 {

@@ -260,11 +251,11 @@ func TestRecvOnlyUndoChanges(t *testing.T) {

	// Remove the file again and undo the modification

	testOs.Remove(file)
	must(t, ioutil.WriteFile(filepath.Join(ffs.URI(), "knownDir/knownFile"), oldData, 0644))
	folderFs.Chtimes("knownDir/knownFile", knownFiles[1].ModTime(), knownFiles[1].ModTime())
	must(t, ffs.Remove(file))
	must(t, writeFile(ffs, "knownDir/knownFile", oldData, 0644))
	must(t, ffs.Chtimes("knownDir/knownFile", knownFiles[1].ModTime(), knownFiles[1].ModTime()))

	m.ScanFolder("ro")
	must(t, m.ScanFolder("ro"))

	size = receiveOnlyChangedSize(t, m, "ro")
	if size.Files+size.Directories+size.Deleted != 0 {

@@ -276,7 +267,7 @@ func setupKnownFiles(t *testing.T, ffs fs.Filesystem, data []byte) []protocol.Fi
	t.Helper()

	must(t, ffs.MkdirAll("knownDir", 0755))
	must(t, ioutil.WriteFile(filepath.Join(ffs.URI(), "knownDir/knownFile"), data, 0644))
	must(t, writeFile(ffs, "knownDir/knownFile", data, 0644))

	t0 := time.Now().Add(-1 * time.Minute)
	must(t, ffs.Chtimes("knownDir/knownFile", t0, t0))

@@ -310,30 +301,38 @@ func setupKnownFiles(t *testing.T, ffs fs.Filesystem, data []byte) []protocol.Fi
	return knownFiles
}

func setupROFolder() (*model, *sendOnlyFolder) {
func setupROFolder(t *testing.T) (*model, *receiveOnlyFolder) {
	t.Helper()

	w := createTmpWrapper(defaultCfg)
	fcfg := testFolderConfigTmp()
	fcfg := testFolderConfigFake()
	fcfg.ID = "ro"
	fcfg.Label = "ro"
	fcfg.Type = config.FolderTypeReceiveOnly
	w.SetFolder(fcfg)

	m := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil)

	m.ServeBackground()

	// Folder should only be added, not started.
	m.removeFolder(fcfg)
	m.addFolder(fcfg)
	must(t, m.ScanFolder("ro"))

	m.fmut.RLock()
	f := &sendOnlyFolder{
		folder: folder{
			stateTracker:        newStateTracker(fcfg.ID, m.evLogger),
			fset:                m.folderFiles[fcfg.ID],
			FolderConfiguration: fcfg,
		},
	}
	m.fmut.RUnlock()
	defer m.fmut.RUnlock()
	f := m.folderRunners["ro"].(*receiveOnlyFolder)

	return m, f
}

func writeFile(fs fs.Filesystem, filename string, data []byte, perm fs.FileMode) error {
	fd, err := fs.Create(filename)
	if err != nil {
		return err
	}
	_, err = fd.Write(data)
	if err != nil {
		return err
	}
	if err := fd.Close(); err != nil {
		return err
	}
	return fs.Chmod(filename, perm)
}
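The new writeFile helper replaces ioutil.WriteFile plus filepath.Join(ffs.URI(), ...) so the tests stay inside the fs.Filesystem abstraction, which is what lets them run against the fake filesystem introduced above (a fake folder has no real on-disk path to join against). Typical use, as in the tests:

	// Write through the folder's own filesystem, real or fake alike.
	must(t, writeFile(ffs, "knownDir/knownFile", []byte("hello\n"), 0644))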
@@ -118,10 +118,7 @@ func (f *sendOnlyFolder) Override() {
	}
	if !ok || have.Name != need.Name {
		// We are missing the file
		need.Deleted = true
		need.Blocks = nil
		need.Version = need.Version.Update(f.shortID)
		need.Size = 0
		need.SetDeleted(f.shortID)
	} else {
		// We have the file, replace with our version
		have.Version = have.Version.Merge(need.Version).Update(f.shortID)

@@ -15,6 +15,7 @@ import (
	"github.com/thejerf/suture"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"

@@ -77,28 +78,37 @@ func (c *folderSummaryService) String() string {
func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, error) {
	var res = make(map[string]interface{})

	snap, err := c.model.DBSnapshot(folder)
	if err != nil {
	var local, global, need, ro db.Counts
	var ourSeq, remoteSeq int64
	errors, err := c.model.FolderErrors(folder)
	if err == nil {
		var snap *db.Snapshot
		if snap, err = c.model.DBSnapshot(folder); err == nil {
			global = snap.GlobalSize()
			local = snap.LocalSize()
			need = snap.NeedSize()
			ro = snap.ReceiveOnlyChangedSize()
			ourSeq = snap.Sequence(protocol.LocalDeviceID)
			remoteSeq = snap.Sequence(protocol.GlobalDeviceID)
			snap.Release()
		}
	}
	// For API backwards compatibility (SyncTrayzor needs it) an empty folder
	// summary is returned for not running folders, an error might actually be
	// more appropriate
	if err != nil && err != ErrFolderPaused && err != errFolderNotRunning {
		return nil, err
	}

	errors, err := c.model.FolderErrors(folder)
	if err != nil && err != ErrFolderPaused && err != errFolderNotRunning {
		// Stats from the db can still be obtained if the folder is just paused/being started
		return nil, err
	}
	res["errors"] = len(errors)
	res["pullErrors"] = len(errors) // deprecated

	res["invalid"] = "" // Deprecated, retains external API for now

	global := snap.GlobalSize()
	res["globalFiles"], res["globalDirectories"], res["globalSymlinks"], res["globalDeleted"], res["globalBytes"], res["globalTotalItems"] = global.Files, global.Directories, global.Symlinks, global.Deleted, global.Bytes, global.TotalItems()

	local := snap.LocalSize()
	res["localFiles"], res["localDirectories"], res["localSymlinks"], res["localDeleted"], res["localBytes"], res["localTotalItems"] = local.Files, local.Directories, local.Symlinks, local.Deleted, local.Bytes, local.TotalItems()

	need := snap.NeedSize()
	need.Bytes -= c.model.FolderProgressBytesCompleted(folder)
	// This may happen if we are in progress of pulling files that were
	// deleted globally after the pull started.
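The practical effect of the rework is that callers no longer need to special-case paused or not-yet-started folders: statistics are gathered on a best-effort basis and only unexpected errors fail the call. A hedged usage sketch, mirroring TestSummaryPausedNoError further down in this compare:

	fss := NewFolderSummaryService(wcfg, m, myID, events.NoopLogger)
	sum, err := fss.Summary("ro") // folder paused: no error, zero counts
	if err != nil {
		// only unexpected errors arrive here
	}
	_ = sum["globalBytes"] // 0 for a paused, unscanned folder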
@@ -116,7 +126,6 @@ func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, e
	if ok && fcfg.Type == config.FolderTypeReceiveOnly {
		// Add statistics for things that have changed locally in a receive
		// only folder.
		ro := snap.ReceiveOnlyChangedSize()
		res["receiveOnlyChangedFiles"] = ro.Files
		res["receiveOnlyChangedDirectories"] = ro.Directories
		res["receiveOnlyChangedSymlinks"] = ro.Symlinks

@@ -132,9 +141,6 @@ func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, e
		res["error"] = err.Error()
	}

	ourSeq := snap.Sequence(protocol.LocalDeviceID)
	remoteSeq := snap.Sequence(protocol.GlobalDeviceID)

	res["version"] = ourSeq + remoteSeq  // legacy
	res["sequence"] = ourSeq + remoteSeq // new name

@@ -264,7 +270,12 @@ func (c *folderSummaryService) calculateSummaries(ctx context.Context) {
	case <-pump.C:
		t0 := time.Now()
		for _, folder := range c.foldersToHandle() {
			c.sendSummary(folder)
			select {
			case <-ctx.Done():
				return
			default:
			}
			c.sendSummary(ctx, folder)
		}

		// We don't want to spend all our time calculating summaries. Let's

@@ -274,7 +285,7 @@ func (c *folderSummaryService) calculateSummaries(ctx context.Context) {
		pump.Reset(wait)

	case folder := <-c.immediate:
		c.sendSummary(folder)
		c.sendSummary(ctx, folder)

	case <-ctx.Done():
		return

@@ -307,7 +318,7 @@ func (c *folderSummaryService) foldersToHandle() []string {
}

// sendSummary sends the summary events for a single folder
func (c *folderSummaryService) sendSummary(folder string) {
func (c *folderSummaryService) sendSummary(ctx context.Context, folder string) {
	// The folder summary contains how many bytes, files etc
	// are in the folder and how in sync we are.
	data, err := c.Summary(folder)

@@ -320,6 +331,12 @@ func (c *folderSummaryService) sendSummary(folder string) {
	})

	for _, devCfg := range c.cfg.Folders()[folder].Devices {
		select {
		case <-ctx.Done():
			return
		default:
		}

		if devCfg.DeviceID.Equals(c.id) {
			// We already know about ourselves.
			continue
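Both loops now thread the context through so a shutdown is noticed between folders and between devices, not just between pump ticks. The non-blocking cancellation check is a standard Go idiom; as a standalone sketch (process and items are placeholders, not names from the patch):

	for _, item := range items {
		select {
		case <-ctx.Done():
			return // stop promptly on shutdown
		default: // not cancelled: fall through and keep working
		}
		process(item)
	}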
@@ -840,10 +840,11 @@ func (m *model) Completion(device protocol.DeviceID, folder string) FolderComple
// DBSnapshot returns a snapshot of the database content relevant to the given folder.
func (m *model) DBSnapshot(folder string) (*db.Snapshot, error) {
	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	err := m.checkFolderRunningLocked(folder)
	rf := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return nil, errFolderMissing
	if err != nil {
		return nil, err
	}
	return rf.Snapshot(), nil
}

@@ -2319,10 +2320,11 @@ func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly

func (m *model) GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error) {
	m.fmut.RLock()
	ver, ok := m.folderVersioners[folder]
	err := m.checkFolderRunningLocked(folder)
	ver := m.folderVersioners[folder]
	m.fmut.RUnlock()
	if !ok {
		return nil, errFolderMissing
	if err != nil {
		return nil, err
	}
	if ver == nil {
		return nil, errNoVersioner

@@ -2332,16 +2334,13 @@ func (m *model) GetFolderVersions(folder string) (map[string][]versioner.FileVer
}

func (m *model) RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]string, error) {
	fcfg, ok := m.cfg.Folder(folder)
	if !ok {
		return nil, errFolderMissing
	}

	m.fmut.RLock()
	err := m.checkFolderRunningLocked(folder)
	fcfg := m.folderCfgs[folder]
	ver := m.folderVersioners[folder]
	m.fmut.RUnlock()
	if !ok {
		return nil, errFolderMissing
	if err != nil {
		return nil, err
	}
	if ver == nil {
		return nil, errNoVersioner
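All three accessors above converge on the same shape: validate the folder under the read lock, capture what is needed, unlock, then report the precise error (missing, paused, or not running) instead of a generic errFolderMissing. The recurring pattern, condensed from the hunks:

	m.fmut.RLock()
	err := m.checkFolderRunningLocked(folder) // distinguishes missing/paused/not running
	ver := m.folderVersioners[folder]         // zero value if absent; err covers that case
	m.fmut.RUnlock()
	if err != nil {
		return nil, err
	}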
@@ -3479,3 +3479,48 @@ func TestNewLimitedRequestResponse(t *testing.T) {
		t.Error("Bytes weren't returned in a timely fashion")
	}
}

func TestSummaryPausedNoError(t *testing.T) {
	wcfg, fcfg := tmpDefaultWrapper()
	fcfg.Paused = true
	wcfg.SetFolder(fcfg)
	m := setupModel(wcfg)
	defer cleanupModel(m)

	fss := NewFolderSummaryService(wcfg, m, myID, events.NoopLogger)
	if _, err := fss.Summary(fcfg.ID); err != nil {
		t.Error("Expected no error getting a summary for a paused folder:", err)
	}
}

func TestFolderAPIErrors(t *testing.T) {
	wcfg, fcfg := tmpDefaultWrapper()
	fcfg.Paused = true
	wcfg.SetFolder(fcfg)
	m := setupModel(wcfg)
	defer cleanupModel(m)

	methods := []func(folder string) error{
		m.ScanFolder,
		func(folder string) error {
			return m.ScanFolderSubdirs(folder, nil)
		},
		func(folder string) error {
			_, err := m.GetFolderVersions(folder)
			return err
		},
		func(folder string) error {
			_, err := m.RestoreFolderVersions(folder, nil)
			return err
		},
	}

	for i, method := range methods {
		if err := method(fcfg.ID); err != ErrFolderPaused {
			t.Errorf(`Expected "%v", got "%v" (method no %v)`, ErrFolderPaused, err, i)
		}
		if err := method("notexisting"); err != errFolderMissing {
			t.Errorf(`Expected "%v", got "%v" (method no %v)`, errFolderMissing, err, i)
		}
	}
}

@@ -18,6 +18,7 @@ import (
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/rand"
)

var (

@@ -86,6 +87,13 @@ func testFolderConfig(path string) config.FolderConfiguration {
	return cfg
}

func testFolderConfigFake() config.FolderConfiguration {
	cfg := config.NewFolderConfiguration(myID, "default", "default", fs.FilesystemTypeFake, rand.String(32)+"?content=true")
	cfg.FSWatcherEnabled = false
	cfg.Devices = append(cfg.Devices, config.FolderDeviceConfiguration{DeviceID: device1})
	return cfg
}

func setupModelWithConnection() (*model, *fakeConnection, config.FolderConfiguration) {
	w, fcfg := tmpDefaultWrapper()
	m, fc := setupModelWithConnectionFromWrapper(w)

@@ -266,24 +266,36 @@ func BlocksEqual(a, b []BlockInfo) bool {
}

func (f *FileInfo) SetMustRescan(by ShortID) {
	f.LocalFlags = FlagLocalMustRescan
	f.ModifiedBy = by
	f.Blocks = nil
	f.Sequence = 0
	f.setLocalFlags(by, FlagLocalMustRescan)
}

func (f *FileInfo) SetIgnored(by ShortID) {
	f.LocalFlags = FlagLocalIgnored
	f.ModifiedBy = by
	f.Blocks = nil
	f.Sequence = 0
	f.setLocalFlags(by, FlagLocalIgnored)
}

func (f *FileInfo) SetUnsupported(by ShortID) {
	f.LocalFlags = FlagLocalUnsupported
	f.setLocalFlags(by, FlagLocalUnsupported)
}

func (f *FileInfo) SetDeleted(by ShortID) {
	f.ModifiedBy = by
	f.Deleted = true
	f.Version = f.Version.Update(by)
	f.ModifiedS = time.Now().Unix()
	f.setNoContent()
}

func (f *FileInfo) setLocalFlags(by ShortID, flags uint32) {
	f.RawInvalid = false
	f.LocalFlags = flags
	f.ModifiedBy = by
	f.setNoContent()
}

func (f *FileInfo) setNoContent() {
	f.Blocks = nil
	f.Sequence = 0
	f.BlocksHash = nil
	f.Size = 0
}

func (b BlockInfo) String() string {
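The refactor funnels all "this file no longer carries data" bookkeeping through setNoContent, so a field like the new BlocksHash cannot be forgotten by one of the setters. A hypothetical check of the observable effect (myShortID stands in for any protocol.ShortID; not a name from the patch):

	fi := protocol.FileInfo{Name: "a", Size: 3, Blocks: []protocol.BlockInfo{{Size: 3}}}
	fi.SetDeleted(myShortID) // myShortID: placeholder ShortID
	// Now: fi.Deleted == true, fi.Blocks == nil, fi.BlocksHash == nil,
	// fi.Size == 0, fi.Sequence == 0, and fi.Version has been bumped.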
@@ -160,12 +160,6 @@ USER-AGENT: syncthing/1.0
	}
	defer socket.Close() // Make sure our socket gets closed

	err = socket.SetDeadline(time.Now().Add(timeout))
	if err != nil {
		l.Debugln("UPnP discovery: setting socket deadline:", err)
		return
	}

	l.Debugln("Sending search request for device type", deviceType, "on", intf.Name)

	_, err = socket.WriteTo(search, ssdp)

@@ -178,16 +172,33 @@ USER-AGENT: syncthing/1.0

	l.Debugln("Listening for UPnP response for device type", deviceType, "on", intf.Name)

	// Listen for responses until a timeout is reached
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	// Listen for responses until a timeout is reached or the context is
	// cancelled
	resp := make([]byte, 65536)
loop:
	for {
		resp := make([]byte, 65536)
		n, _, err := socket.ReadFrom(resp)
		if err != nil {
			if e, ok := err.(net.Error); !ok || !e.Timeout() {
				l.Infoln("UPnP read:", err) // legitimate error, not a timeout.
			}
		if err := socket.SetDeadline(time.Now().Add(250 * time.Millisecond)); err != nil {
			l.Infoln("UPnP socket:", err)
			break
		}

		n, _, err := socket.ReadFrom(resp)
		if err != nil {
			select {
			case <-ctx.Done():
				break loop
			default:
			}
			if e, ok := err.(net.Error); ok && e.Timeout() {
				continue // continue reading
			}
			l.Infoln("UPnP read:", err) // legitimate error, not a timeout.
			break
		}

		igds, err := parseResponse(ctx, deviceType, resp[:n])
		if err != nil {
			switch err.(type) {
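The rewritten loop swaps one long socket deadline for many short ones and hoists the response buffer out of the loop, so cancellation of ctx is observed within 250 ms instead of only when the overall timeout fires. The shape of that technique as a standalone sketch (socket and handle are placeholders, not names from the patch):

	resp := make([]byte, 65536) // allocated once, outside the loop
	for {
		if err := socket.SetDeadline(time.Now().Add(250 * time.Millisecond)); err != nil {
			return err
		}
		n, _, err := socket.ReadFrom(resp)
		if err != nil {
			select {
			case <-ctx.Done():
				return ctx.Err() // cancelled, or overall timeout reached
			default:
			}
			if e, ok := err.(net.Error); ok && e.Timeout() {
				continue // short deadline expired; poll ctx and read again
			}
			return err // a real read error
		}
		handle(resp[:n])
	}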
@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "STDISCOSRV" "1" "Feb 02, 2020" "v1" "Syncthing"
.TH "STDISCOSRV" "1" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
stdiscosrv \- Syncthing Discovery Server
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "STRELAYSRV" "1" "Feb 02, 2020" "v1" "Syncthing"
.TH "STRELAYSRV" "1" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
strelaysrv \- Syncthing Relay Server
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-BEP" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-BEP" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-bep \- Block Exchange Protocol v1
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-CONFIG" "5" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-CONFIG" "5" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-config \- Syncthing Configuration
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-DEVICE-IDS" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-DEVICE-IDS" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-device-ids \- Understanding Device IDs
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-EVENT-API" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-EVENT-API" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-event-api \- Event API
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-FAQ" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-FAQ" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-faq \- Frequently Asked Questions
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-GLOBALDISCO" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-GLOBALDISCO" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-globaldisco \- Global Discovery Protocol v3
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-LOCALDISCO" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-LOCALDISCO" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-localdisco \- Local Discovery Protocol v4
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-NETWORKING" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-NETWORKING" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-networking \- Firewall Setup
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-RELAY" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-RELAY" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-relay \- Relay Protocol v1
.
@@ -227,7 +227,7 @@ _
.SH SESSION MODE
.sp
The first and only message the client sends in the session mode is the
SessionInvitation message which contains the session key identifying which
JoinSessionRequest message which contains the session key identifying which
session you are trying to join. The relay responds with one of the following
Response messages:
.INDENT 0.0
@@ -578,7 +578,7 @@ Empty/all zero IP should be replaced with the relay’s public IP address that
was used when establishing the protocol mode connection.
.TP
.B : Port
An optional port on which the relay server is expecting you to connect,
The port on which the relay server is expecting you to connect,
in order to start a connection in session mode.
.TP
.B : Server Socket

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-REST-API" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-REST-API" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-rest-api \- REST API
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-SECURITY" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-SECURITY" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-security \- Security Principles
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-STIGNORE" "5" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-STIGNORE" "5" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-stignore \- Prevent files from being synchronized to other nodes
.
@@ -43,7 +43,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.UNINDENT
.SH DESCRIPTION
.sp
If some files should not be synchronized to other devices, a file called
If some files should not be synchronized to (or from) other devices, a file called
\fB\&.stignore\fP can be created containing file patterns to ignore. The
\fB\&.stignore\fP file must be placed in the root of the folder. The
\fB\&.stignore\fP file itself will never be synced to other devices, although it can

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-VERSIONING" "7" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING-VERSIONING" "7" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing-versioning \- Keep automatic backups of deleted files by other nodes
.

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING" "1" "Feb 02, 2020" "v1" "Syncthing"
.TH "SYNCTHING" "1" "Feb 10, 2020" "v1" "Syncthing"
.SH NAME
syncthing \- Syncthing
.