Compare commits

..

5 Commits

Author SHA1 Message Date
Simon Frei
8c74177699 lib/db: Initialize need meta on first dev occurrence (fixes #6668) (#6669) 2020-05-20 11:04:45 +02:00
Simon Frei
9a5f7fbadf lib/db: Don't panic on seq. corruption when debugging (#6662) 2020-05-20 11:04:38 +02:00
Jakob Borg
22c222bf75 lib/model: Partial revert of rename fix (fixes #6653) (#6656)
We can't just drop the snap because it's in use elsewhere. This should
be equally functional.
2020-05-16 23:06:25 +02:00
Audrius Butkevicius
258341f8bf lib/model: Fix rename handling (ref #6650) (#6652) 2020-05-16 14:40:20 +02:00
Simon Frei
f5ca213682 lib/db: Filter repeat files in one update (ref #6650) (#6651) 2020-05-16 14:40:20 +02:00
9 changed files with 277 additions and 18 deletions

View File

@@ -553,3 +553,44 @@ func TestUpdateTo10(t *testing.T) {
t.Error("vl.Versions[1] not deleted for c")
}
}
func TestDropDuplicates(t *testing.T) {
names := []string{
"foo",
"bar",
"dcxvoijnds",
"3d/dsfase/4/ss2",
}
tcs := []struct{ in, out []int }{
{[]int{0}, []int{0}},
{[]int{0, 1}, []int{0, 1}},
{[]int{0, 1, 0, 1}, []int{0, 1}},
{[]int{0, 1, 1, 1, 1}, []int{0, 1}},
{[]int{0, 0, 0, 1}, []int{0, 1}},
{[]int{0, 1, 2, 3}, []int{0, 1, 2, 3}},
{[]int{3, 2, 1, 0, 0, 1, 2, 3}, []int{0, 1, 2, 3}},
{[]int{0, 1, 1, 3, 0, 1, 0, 1, 2, 3}, []int{0, 1, 2, 3}},
}
for tci, tc := range tcs {
inp := make([]protocol.FileInfo, len(tc.in))
expSeq := make(map[string]int)
for i, j := range tc.in {
inp[i] = protocol.FileInfo{Name: names[j], Sequence: int64(i)}
expSeq[names[j]] = i
}
outp := normalizeFilenamesAndDropDuplicates(inp)
if len(outp) != len(tc.out) {
t.Errorf("tc %v: Expected %v entries, got %v", tci, len(tc.out), len(outp))
continue
}
for i, f := range outp {
if exp := names[tc.out[i]]; exp != f.Name {
t.Errorf("tc %v: Got file %v at pos %v, expected %v", tci, f.Name, i, exp)
}
if exp := int64(expSeq[outp[i].Name]); exp != f.Sequence {
t.Errorf("tc %v: Got sequence %v at pos %v, expected %v", tci, f.Sequence, i, exp)
}
}
}
}

View File

@@ -198,7 +198,7 @@ func (db *Lowlevel) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta
blocksHashSame := ok && bytes.Equal(ef.BlocksHash, f.BlocksHash)
if ok {
- if len(ef.Blocks) != 0 && !ef.IsInvalid() {
+ if len(ef.Blocks) != 0 && !ef.IsInvalid() && ef.Size > 0 {
for _, block := range ef.Blocks {
keyBuf, err = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
if err != nil {
@@ -259,7 +259,7 @@ func (db *Lowlevel) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta
}
l.Debugf("adding sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name)
- if len(f.Blocks) != 0 && !f.IsInvalid() {
+ if len(f.Blocks) != 0 && !f.IsInvalid() && f.Size > 0 {
for i, block := range f.Blocks {
binary.BigEndian.PutUint32(blockBuf, uint32(i))
keyBuf, err = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)

View File

@@ -118,9 +118,16 @@ func (m *metadataTracker) countsPtr(dev protocol.DeviceID, flag uint32) *Counts
idx = len(m.counts.Counts)
m.counts.Counts = append(m.counts.Counts, Counts{DeviceID: dev[:], LocalFlags: flag})
m.indexes[key] = idx
- if flag == needFlag {
- // Initially a new device needs everything, except deletes
- m.counts.Counts[idx] = m.allNeededCounts(dev)
- }
+ // Need bucket must be initialized when a device first occurs in
+ // the metadatatracker, even if there's no change to the need
+ // bucket itself.
+ nkey := metaKey{dev, needFlag}
+ nidx, ok := m.indexes[nkey]
+ if !ok {
+ // Initially a new device needs everything, except deletes
+ nidx = len(m.counts.Counts)
+ m.counts.Counts = append(m.counts.Counts, m.allNeededCounts(dev))
+ m.indexes[nkey] = nidx
+ }
}
return &m.counts.Counts[idx]
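
To make the intent of the comment above concrete, here is a small, self-contained illustration of the seeding idea with hypothetical types (not the real metadataTracker or its allNeededCounts): counters live in per-(device, flag) buckets, and the need bucket is created the first time a device is seen at all, so later need queries never read a bucket that was never initialized.

package main

import "fmt"

type counts struct{ Files int }

type metaKey struct {
	device string
	flag   uint32
}

const needFlag uint32 = 1

type tracker struct{ buckets map[metaKey]*counts }

// bucket returns the counts for (device, flag), creating it on first use.
// When a device shows up for the first time, the need bucket is seeded as
// well, even if this lookup was for a different flag.
func (t *tracker) bucket(device string, flag uint32) *counts {
	k := metaKey{device, flag}
	if c, ok := t.buckets[k]; ok {
		return c
	}
	c := &counts{}
	t.buckets[k] = c
	nk := metaKey{device, needFlag}
	if _, ok := t.buckets[nk]; !ok {
		// Stand-in for "initially a new device needs everything".
		t.buckets[nk] = &counts{Files: 3}
	}
	return c
}

func main() {
	t := &tracker{buckets: make(map[metaKey]*counts)}
	t.bucket("remote0", 0)                                     // first contact happens via a non-need bucket
	fmt.Println(t.buckets[metaKey{"remote0", needFlag}].Files) // 3, not a missing bucket
}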

View File

@@ -124,7 +124,13 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
// do not modify fs in place, it is still used in outer scope
fs = append([]protocol.FileInfo(nil), fs...)
- normalizeFilenames(fs)
+ // If one file info is present multiple times, only keep the last.
+ // Updating the same file multiple times is problematic, because the
+ // previous updates won't yet be represented in the db when we update it
+ // again. Additionally even if that problem was taken care of, it would
+ // be pointless because we remove the previously added file info again
+ // right away.
+ fs = normalizeFilenamesAndDropDuplicates(fs)
s.updateMutex.Lock()
defer s.updateMutex.Unlock()
@@ -470,10 +476,24 @@ func DropDeltaIndexIDs(db *Lowlevel) {
}
}
- func normalizeFilenames(fs []protocol.FileInfo) {
- for i := range fs {
- fs[i].Name = osutil.NormalizedFilename(fs[i].Name)
- }
- }
+ func normalizeFilenamesAndDropDuplicates(fs []protocol.FileInfo) []protocol.FileInfo {
+ positions := make(map[string]int, len(fs))
+ for i, f := range fs {
+ norm := osutil.NormalizedFilename(f.Name)
+ if pos, ok := positions[norm]; ok {
+ fs[pos] = protocol.FileInfo{}
+ }
+ positions[norm] = i
+ fs[i].Name = norm
+ }
+ for i := 0; i < len(fs); {
+ if fs[i].Name == "" {
+ fs = append(fs[:i], fs[i+1:]...)
+ continue
+ }
+ i++
+ }
+ return fs
+ }
func nativeFileIterator(fn Iterator) Iterator {
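
For readers skimming the diff: the new helper keeps only the last occurrence of a repeated name and preserves the order in which those surviving entries appear. A minimal standalone sketch of that idea (simplified types and names, not the db package's own code, and without the filename normalization step):

package main

import "fmt"

type fileInfo struct {
	Name     string
	Sequence int64
}

// dropDuplicates keeps only the last occurrence of each name, in the order
// those last occurrences appear, mirroring what the helper above does after
// normalizing the names.
func dropDuplicates(fs []fileInfo) []fileInfo {
	positions := make(map[string]int, len(fs))
	for i, f := range fs {
		if pos, ok := positions[f.Name]; ok {
			fs[pos] = fileInfo{} // blank out the earlier occurrence
		}
		positions[f.Name] = i
	}
	out := fs[:0]
	for _, f := range fs {
		if f.Name != "" {
			out = append(out, f)
		}
	}
	return out
}

func main() {
	in := []fileInfo{{"foo", 1}, {"bar", 2}, {"foo", 3}}
	fmt.Println(dropDuplicates(in)) // [{bar 2} {foo 3}]
}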

View File

@@ -484,11 +484,11 @@ func TestUpdateToInvalid(t *testing.T) {
f := db.NewBlockFinder(ldb)
localHave := fileList{
protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), LocalFlags: protocol.FlagLocalIgnored},
protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, LocalFlags: protocol.FlagLocalIgnored},
protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1), Size: 1},
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2), Size: 1},
protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), LocalFlags: protocol.FlagLocalIgnored, Size: 1},
protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7), Size: 1},
protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, LocalFlags: protocol.FlagLocalIgnored, Size: 1},
}
replace(s, protocol.LocalDeviceID, localHave)
@@ -1615,6 +1615,62 @@ func TestIgnoreAfterReceiveOnly(t *testing.T) {
}
}
// https://github.com/syncthing/syncthing/issues/6650
func TestUpdateWithOneFileTwice(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory())
defer ldb.Close()
file := "foo"
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
fs := fileList{{
Name: file,
Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}},
Sequence: 1,
}}
s.Update(protocol.LocalDeviceID, fs)
fs = append(fs, fs[0])
for i := range fs {
fs[i].Sequence++
fs[i].Version = fs[i].Version.Update(myID)
}
fs[1].Sequence++
fs[1].Version = fs[1].Version.Update(myID)
s.Update(protocol.LocalDeviceID, fs)
snap := s.Snapshot()
defer snap.Release()
count := 0
snap.WithHaveSequence(0, func(f db.FileIntf) bool {
count++
return true
})
if count != 1 {
t.Error("Expected to have one file, got", count)
}
}
// https://github.com/syncthing/syncthing/issues/6668
func TestNeedRemoteOnly(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory())
defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
remote0Have := fileList{
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
}
s.Update(remoteDevice0, remote0Have)
need := needSize(s, remoteDevice0)
if !need.Equal(db.Counts{}) {
t.Error("Expected nothing needed, got", need)
}
}
func replace(fs *db.FileSet, device protocol.DeviceID, files []protocol.FileInfo) {
fs.Drop(device)
fs.Update(device, files)

View File

@@ -233,8 +233,7 @@ func (t *readOnlyTransaction) withHaveSequence(folder []byte, startSeq int64, fn
if shouldDebug() {
if seq := t.keyer.SequenceFromSequenceKey(dbi.Key()); f.Sequence != seq {
l.Warnf("Sequence index corruption (folder %v, file %v): sequence %d != expected %d", string(folder), f.Name, f.Sequence, seq)
panic("sequence index corruption")
l.Debugf("Sequence index corruption (folder %v, file %v): sequence %d != expected %d", string(folder), f.Name, f.Sequence, seq)
}
}
if !fn(f) {

View File

@@ -453,11 +453,13 @@ func (f *folder) scanSubdirs(subDirs []string) error {
}()
f.clearScanErrors(subDirs)
+ alreadyUsed := make(map[string]struct{})
for res := range fchan {
if res.Err != nil {
f.newScanError(res.Path, res.Err)
continue
}
if err := batch.flushIfFull(); err != nil {
return err
}
@@ -466,7 +468,7 @@ func (f *folder) scanSubdirs(subDirs []string) error {
changes++
if f.localFlags&protocol.FlagLocalReceiveOnly == 0 {
- if nf, ok := f.findRename(snap, mtimefs, res.File); ok {
+ if nf, ok := f.findRename(snap, mtimefs, res.File, alreadyUsed); ok {
batch.append(nf)
changes++
}
@@ -622,8 +624,8 @@ func (f *folder) scanSubdirs(subDirs []string) error {
return nil
}
- func (f *folder) findRename(snap *db.Snapshot, mtimefs fs.Filesystem, file protocol.FileInfo) (protocol.FileInfo, bool) {
- if len(file.Blocks) == 0 {
+ func (f *folder) findRename(snap *db.Snapshot, mtimefs fs.Filesystem, file protocol.FileInfo, alreadyUsed map[string]struct{}) (protocol.FileInfo, bool) {
+ if len(file.Blocks) == 0 || file.Size == 0 {
return protocol.FileInfo{}, false
}
@@ -639,6 +641,10 @@ func (f *folder) findRename(snap *db.Snapshot, mtimefs fs.Filesystem, file proto
default:
}
+ if _, ok := alreadyUsed[fi.Name]; ok {
+ return true
+ }
if fi.ShouldConflict() {
return true
}
@@ -658,6 +664,8 @@ func (f *folder) findRename(snap *db.Snapshot, mtimefs fs.Filesystem, file proto
return true
}
+ alreadyUsed[fi.Name] = struct{}{}
nf = fi
nf.SetDeleted(f.shortID)
nf.LocalFlags = f.localFlags
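
As a side note on the alreadyUsed map introduced above, a condensed standalone sketch of the claim-once pattern (hypothetical types, not the folder's actual scanning code): a rename candidate is rejected if the scanned file is empty or if an earlier file in the same scan already claimed that candidate, and the chosen candidate is recorded once it is used.

package main

import "fmt"

// dbFile is a simplified stand-in for a rename-source candidate from the db.
type dbFile struct {
	Name string
	Size int64
}

// pickRenameSource mimics the guard pattern: reject empty files and
// candidates already claimed earlier in the same scan, and record the chosen
// candidate so later files can't reuse it.
func pickRenameSource(candidates []dbFile, size int64, alreadyUsed map[string]struct{}) (dbFile, bool) {
	if size == 0 {
		return dbFile{}, false
	}
	for _, c := range candidates {
		if _, ok := alreadyUsed[c.Name]; ok {
			continue
		}
		alreadyUsed[c.Name] = struct{}{}
		return c, true
	}
	return dbFile{}, false
}

func main() {
	used := make(map[string]struct{})
	cands := []dbFile{{"old-name", 4}}
	fmt.Println(pickRenameSource(cands, 4, used)) // {old-name 4} true
	fmt.Println(pickRenameSource(cands, 4, used)) // zero value and false: candidate already claimed
}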

View File

@@ -213,6 +213,7 @@ func TestCopierFinder(t *testing.T) {
existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
existingFile := setupFile(fs.TempName("file"), existingBlocks)
+ existingFile.Size = 1
requiredFile := existingFile
requiredFile.Blocks = blocks[1:]
requiredFile.Name = "file2"
@@ -422,6 +423,7 @@ func TestCopierCleanup(t *testing.T) {
// Create a file
file := setupFile("test", []int{0})
+ file.Size = 1
m, f := setupSendReceiveFolder(file)
defer cleanupSRFolder(f, m)

View File

@@ -3609,6 +3609,132 @@ func TestRenameSequenceOrder(t *testing.T) {
}
}
func TestRenameSameFile(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg)
defer cleanupModel(m)
ffs := fcfg.Filesystem()
must(t, writeFile(ffs, "file", []byte("file"), 0644))
m.ScanFolders()
count := 0
snap := dbSnapshot(t, m, "default")
snap.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool {
count++
return true
})
snap.Release()
if count != 1 {
t.Errorf("Unexpected count: %d != %d", count, 1)
}
must(t, ffs.Rename("file", "file1"))
must(t, osutil.Copy(ffs, ffs, "file1", "file0"))
must(t, osutil.Copy(ffs, ffs, "file1", "file2"))
must(t, osutil.Copy(ffs, ffs, "file1", "file3"))
must(t, osutil.Copy(ffs, ffs, "file1", "file4"))
m.ScanFolders()
snap = dbSnapshot(t, m, "default")
defer snap.Release()
prevSeq := int64(0)
seen := false
snap.WithHaveSequence(0, func(i db.FileIntf) bool {
if i.SequenceNo() <= prevSeq {
t.Fatalf("non-increasing sequences: %d <= %d", i.SequenceNo(), prevSeq)
}
if i.FileName() == "file" {
if seen {
t.Fatal("already seen file")
}
seen = true
}
prevSeq = i.SequenceNo()
return true
})
}
func TestRenameEmptyFile(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg)
defer cleanupModel(m)
ffs := fcfg.Filesystem()
must(t, writeFile(ffs, "file", []byte("data"), 0644))
must(t, writeFile(ffs, "empty", nil, 0644))
m.ScanFolders()
snap := dbSnapshot(t, m, "default")
defer snap.Release()
empty, eok := snap.Get(protocol.LocalDeviceID, "empty")
if !eok {
t.Fatal("failed to find empty file")
}
file, fok := snap.Get(protocol.LocalDeviceID, "file")
if !fok {
t.Fatal("failed to find non-empty file")
}
count := 0
snap.WithBlocksHash(empty.BlocksHash, func(_ db.FileIntf) bool {
count++
return true
})
if count != 0 {
t.Fatalf("Found %d entries for empty file, expected 0", count)
}
count = 0
snap.WithBlocksHash(file.BlocksHash, func(_ db.FileIntf) bool {
count++
return true
})
if count != 1 {
t.Fatalf("Found %d entries for non-empty file, expected 1", count)
}
must(t, ffs.Rename("file", "new-file"))
must(t, ffs.Rename("empty", "new-empty"))
// Scan
m.ScanFolders()
snap = dbSnapshot(t, m, "default")
defer snap.Release()
count = 0
snap.WithBlocksHash(empty.BlocksHash, func(_ db.FileIntf) bool {
count++
return true
})
if count != 0 {
t.Fatalf("Found %d entries for empty file, expected 0", count)
}
count = 0
snap.WithBlocksHash(file.BlocksHash, func(i db.FileIntf) bool {
count++
if i.FileName() != "new-file" {
t.Fatalf("unexpected file name %s, expected new-file", i.FileName())
}
return true
})
if count != 1 {
t.Fatalf("Found %d entries for non-empty file, expected 1", count)
}
}
func TestBlockListMap(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg)