feat: make block indexing configurable (#10608)

This adds a new folder-level configuration `FullBlockIndex`. It controls
whether we maintain the block index for a given folder -- currently
that's always true, now it becomes possible to turn off. The block index
is used for lookup of blocks across files and folders. Effectively, when
syncing a change, for each block, we check:

1. Is the block already present in the old version of the file? If so,
we can reuse (copy) it without network transfer. **This check is always
possible.**
2. Is the block already present in any other file in this folder or
other folders? If so we can copy it. **This check is only possible with
the full block index.**
3. We must transfer the block over the network.

Maintaining the full block index is costly in time, I/O and database
size. With this PR, maintaining the full block index becomes the default
for send-receive and receive-only folders only, with it disabled for
send-only and receive-encrypted folders. The block index is never useful
for encrypted folders, as blocks are encrypted separately for each file.
It is also not useful for send-only folders by themselves, though the
data in the send-only folder could be reused by other receive-type
folders if it were enabled.

For very large folders it may make sense to disable the full block index
regardless of folder type and just accept the resulting decrease in data
reuse.

Disabling or enabling the option in the GUI causes the index to be
destroyed or rebuilt accordingly.

https://github.com/syncthing/docs/pull/1005

---------

Signed-off-by: Jakob Borg <jakob@kastelo.net>
This commit is contained in:
Jakob Borg
2026-04-26 11:58:09 +02:00
committed by GitHub
parent 84c6b37913
commit 86ac4e5017
19 changed files with 449 additions and 36 deletions

View File

@@ -50,6 +50,7 @@
"Automatically create or share folders that this device advertises at the default path.": "Automatically create or share folders that this device advertises at the default path.",
"Available debug logging facilities:": "Available debug logging facilities:",
"Be careful!": "Be careful!",
"Block Indexing": "Block Indexing",
"Body:": "Body:",
"Bugs": "Bugs",
"Cancel": "Cancel",
@@ -251,6 +252,7 @@
"Log tailing paused. Scroll to the bottom to continue.": "Log tailing paused. Scroll to the bottom to continue.",
"Login failed, see Syncthing logs for details.": "Login failed, see Syncthing logs for details.",
"Logs": "Logs",
"Maintain an index of all blocks in the folder, enabling reuse of blocks from other files when syncing changes. Disable to reduce database size at the cost of not being able to reuse blocks across files.": "Maintain an index of all blocks in the folder, enabling reuse of blocks from other files when syncing changes. Disable to reduce database size at the cost of not being able to reuse blocks across files.",
"Major Upgrade": "Major Upgrade",
"Mass actions": "Mass actions",
"Maximum Age": "Maximum Age",
@@ -394,6 +396,7 @@
"Staggered": "Staggered",
"Staggered File Versioning": "Staggered File Versioning",
"Start Browser": "Start Browser",
"Starting": "Starting",
"Statistics": "Statistics",
"Stay logged in": "Stay logged in",
"Stopped": "Stopped",

View File

@@ -382,7 +382,7 @@
<div class="col-md-6" aria-labelledby="folder_list" role="region" >
<h3 id="folder_list"><span translate>Folders</span><span ng-if="folderList().length > 1"> ({{folderList().length}})</span></h3>
<div ng-repeat="(folderGroupName, groupedFolders) in foldersGrouped">
<h4 ng-if="folderGroupName !== ''">{{ folderGroupName }}
<h4 ng-if="folderGroupName !== ''">{{ folderGroupName }}
<span ng-if="groupedFolders.length > 1 && folderGroupName.length > 0"> ({{groupedFolders.length}})</span>
</h4>
<div class="panel-group" id="folders-{{ $index }}">
@@ -501,6 +501,13 @@
<span ng-if="folder.type == 'receiveencrypted'" translate>Receive Encrypted</span>
</td>
</tr>
<tr>
<th><span class="far fa-fw fa-book"></span>&nbsp;<span translate>Block Indexing</span></th>
<td class="text-right">
<span ng-if="folder.blockIndexing" translate>Yes</span>
<span ng-if="!folder.blockIndexing" translate>No</span>
</td>
</tr>
<tr ng-if="folder.ignorePerms">
<th><span class="far fa-fw fa-minus-square"></span>&nbsp;<span translate>Ignore Permissions</span></th>
<td class="text-right">
@@ -776,7 +783,7 @@
<!-- Remote devices -->
<h3><span translate>Remote Devices</span> <span ng-if="otherDevices().length > 1"> ({{otherDevices().length}})</span></h3>
<div ng-repeat="(deviceGroupName, groupedDevices) in devicesGrouped">
<h4>{{ deviceGroupName }}
<h4>{{ deviceGroupName }}
<span ng-if="groupedDevices.length > 1 && deviceGroupName.length > 0"> ({{groupedDevices.length}})</span>
</h4>
<div class="panel-group" id="devices-{{ $index }}">

View File

@@ -571,7 +571,7 @@ angular.module('syncthing.core')
};
// myID is watched as $scope.otherDevices() relies on this
// and it can potentially not be loaded due to this function
// and it can potentially not be loaded due to this function
// scope being called in a nondeterministic manner
$scope.$watch('myID', function(myID) {
if (myID) {
@@ -579,7 +579,7 @@ angular.module('syncthing.core')
const otherDevices = $scope.otherDevices();
for (var id in otherDevices) {
if ($scope.devicesGrouped[otherDevices[id].group] === undefined) {
$scope.devicesGrouped[otherDevices[id].group] = [];
$scope.devicesGrouped[otherDevices[id].group] = [];
}
$scope.devicesGrouped[otherDevices[id].group].push(otherDevices[id]);
};
@@ -595,7 +595,7 @@ angular.module('syncthing.core')
$scope.folders[folder].devices.forEach(function (deviceCfg) {
refreshCompletion(deviceCfg.deviceID, folder);
});
if ($scope.foldersGrouped[$scope.folders[folder].group] === undefined) {
$scope.foldersGrouped[$scope.folders[folder].group] = [];
}
@@ -612,7 +612,7 @@ angular.module('syncthing.core')
}
}
// Sort firstly by the top level key of the object and then by
// Sort firstly by the top level key of the object and then by
// prop name provided for the array of objects for each key.
// If the prop returns has an empty value, then use the
// fallback prop provided.
@@ -1122,7 +1122,7 @@ angular.module('syncthing.core')
if (status == 'paused') {
return 'default';
}
if (status === 'syncing' || status === 'sync-preparing' || status === 'scanning' || status === 'cleaning') {
if (status === 'syncing' || status === 'sync-preparing' || status === 'scanning' || status === 'cleaning' || status === 'starting') {
return 'primary';
}
if (status === 'unknown') {
@@ -1320,6 +1320,7 @@ angular.module('syncthing.core')
case 'scan-waiting':
case 'sync-preparing':
case 'sync-waiting':
case 'starting':
return 'fa-hourglass-half';
case 'cleaning':
return 'fa-recycle';
@@ -1355,6 +1356,8 @@ angular.module('syncthing.core')
return $translate.instant('Failed Items');
case 'idle':
return $translate.instant('Up to Date');
case 'starting':
return $translate.instant('Starting');
case 'localadditions':
return $translate.instant('Local Additions');
case 'localunencrypted':
@@ -2334,6 +2337,12 @@ angular.module('syncthing.core')
} else {
$scope.currentFolder.fsWatcherEnabled = true;
}
var type = $scope.currentFolder.type;
if ($scope.currentFolder._editing !== 'existing') {
// Never automatically change block indexing, only suggest
// the value on new folder creation.
$scope.currentFolder.blockIndexing = (type === 'sendreceive' || type === 'receiveonly');
}
$scope.setFSWatcherIntervalDefault();
};

View File

@@ -387,6 +387,17 @@
</div>
</div>
<div class="row">
<div class="col-md-6 form-group">
<label>
<input type="checkbox" ng-model="currentFolder.blockIndexing" /> <span translate>Block Indexing</span>
</label>
<p translate class="help-block">
Maintain an index of all blocks in the folder, enabling reuse of blocks from other files when syncing changes. Disable to reduce database size at the cost of not being able to reuse blocks across files.
</p>
</div>
</div>
<div class="row" ng-if="currentFolder.syncXattrs || currentFolder.sendXattrs">
<div class="col-md-12">
<p>

View File

@@ -27,13 +27,29 @@ type DBService interface {
LastMaintenanceTime() time.Time
}
// UpdateOption is a functional option that adjusts the behavior of a
// single DB Update call.
type UpdateOption func(*UpdateOptions)

// UpdateOptions collects the resolved settings for a DB Update call.
type UpdateOptions struct {
	SkipBlockIndex bool
}

// WithSkipBlockIndex returns an option that suppresses insertion of
// per-block rows into the block index (the "blocks" table); the
// blocklists themselves are still stored.
func WithSkipBlockIndex() UpdateOption {
	return func(opts *UpdateOptions) { opts.SkipBlockIndex = true }
}
type DB interface {
// Create a service that performs database maintenance periodically (no
// more often than the requested interval)
Service(maintenanceInterval time.Duration) DBService
// Basics
Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error
Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo, opts ...UpdateOption) error
Close() error
// Single files
@@ -57,6 +73,10 @@ type DB interface {
AllNeededGlobalFiles(folder string, device protocol.DeviceID, order config.PullOrder, limit, offset int) (iter.Seq[protocol.FileInfo], func() error)
AllLocalBlocksWithHash(folder string, hash []byte) (iter.Seq[BlockMapEntry], func() error)
// Block index management
DropBlockIndex(folder string) error
PopulateBlockIndex(folder string) error
// Cleanup
DropAllFiles(folder string, device protocol.DeviceID) error
DropDevice(device protocol.DeviceID) error

View File

@@ -198,10 +198,10 @@ func (m metricsDB) SetIndexID(folder string, device protocol.DeviceID, id protoc
return m.DB.SetIndexID(folder, device, id)
}
func (m metricsDB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error {
func (m metricsDB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo, opts ...UpdateOption) error {
defer m.account(folder, "Update")()
defer metricTotalFilesUpdatedCount.WithLabelValues(folder).Add(float64(len(fs)))
return m.DB.Update(folder, device, fs)
return m.DB.Update(folder, device, fs, opts...)
}
func (m metricsDB) GetKV(key string) ([]byte, error) {

View File

@@ -92,12 +92,35 @@ func (s *DB) getFolderDB(folder string, create bool) (*folderDB, error) {
return fdb, nil
}
func (s *DB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo) error {
func (s *DB) Update(folder string, device protocol.DeviceID, fs []protocol.FileInfo, opts ...db.UpdateOption) error {
fdb, err := s.getFolderDB(folder, true)
if err != nil {
return err
}
return fdb.Update(device, fs)
var options db.UpdateOptions
for _, o := range opts {
o(&options)
}
return fdb.Update(device, fs, options)
}
// DropBlockIndex removes the block index for the given folder. A folder
// for which no database exists is treated as a successful no-op.
func (s *DB) DropBlockIndex(folder string) error {
	fdb, err := s.getFolderDB(folder, false)
	switch {
	case errors.Is(err, errNoSuchFolder):
		// Nothing to drop for an unknown folder.
		return nil
	case err != nil:
		return err
	}
	return fdb.DropBlockIndex()
}
// PopulateBlockIndex rebuilds the block index for the given folder,
// creating the folder database if it does not exist yet.
func (s *DB) PopulateBlockIndex(folder string) error {
	fdb, err := s.getFolderDB(folder, true)
	if err == nil {
		err = fdb.PopulateBlockIndex()
	}
	return err
}
func (s *DB) GetDeviceFile(folder string, device protocol.DeviceID, file string) (protocol.FileInfo, bool, error) {

View File

@@ -9,6 +9,7 @@ package sqlite
import (
"testing"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/itererr"
"github.com/syncthing/syncthing/lib/protocol"
)
@@ -138,6 +139,211 @@ func TestBlocksDeleted(t *testing.T) {
}
}
// TestDropBlockIndex verifies that DropBlockIndex removes all entries
// from the block index, and that dropping is idempotent — including for
// a folder that does not exist at all.
func TestDropBlockIndex(t *testing.T) {
	t.Parallel()
	sdb, err := Open(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { sdb.Close() })
	// Insert files with blocks
	files := []protocol.FileInfo{
		genFile("a", 3, 0),
		genFile("b", 2, 0),
	}
	if err := sdb.Update(folderID, protocol.LocalDeviceID, files); err != nil {
		t.Fatal(err)
	}
	// Verify blocks exist (a plain Update indexes local blocks)
	hits, err := itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, files[0].Blocks[0].Hash))
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) == 0 {
		t.Fatal("expected block hits before drop")
	}
	// Drop the block index
	if err := sdb.DropBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
	// Verify blocks are gone
	hits, err = itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, files[0].Blocks[0].Hash))
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) != 0 {
		t.Fatal("expected no block hits after drop")
	}
	// Dropping again should be a no-op (already empty)
	if err := sdb.DropBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
	// Dropping a nonexistent folder should be fine
	if err := sdb.DropBlockIndex("nonexistent"); err != nil {
		t.Fatal(err)
	}
}
// TestPopulateBlockIndex verifies that PopulateBlockIndex restores the
// block index from stored blocklists after a drop, and that populating
// an already-populated index is a no-op.
func TestPopulateBlockIndex(t *testing.T) {
	t.Parallel()
	sdb, err := Open(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { sdb.Close() })
	// Insert files with blocks
	files := []protocol.FileInfo{
		genFile("a", 3, 0),
		genFile("b", 2, 0),
	}
	if err := sdb.Update(folderID, protocol.LocalDeviceID, files); err != nil {
		t.Fatal(err)
	}
	// Collect the original block entries for comparison
	origHitsA, err := itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, files[0].Blocks[0].Hash))
	if err != nil {
		t.Fatal(err)
	}
	if len(origHitsA) != 1 {
		t.Fatal("expected one hit for block a[0]")
	}
	// Drop the block index
	if err := sdb.DropBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
	// Populate it back from existing blocklists
	if err := sdb.PopulateBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
	// Verify all blocks are back
	for i, f := range files {
		for j, b := range f.Blocks {
			hits, err := itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, b.Hash))
			if err != nil {
				t.Fatal(err)
			}
			if len(hits) == 0 {
				t.Errorf("file %d block %d: expected hits after populate", i, j)
			}
		}
	}
	// Populating again should be a no-op (not empty)
	if err := sdb.PopulateBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
}
// TestPopulateBlockIndexSkipsRemoteFiles verifies that repopulating the
// block index only indexes blocks for local files; remote files' blocks
// must stay out of the index.
func TestPopulateBlockIndexSkipsRemoteFiles(t *testing.T) {
	t.Parallel()
	sdb, err := Open(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { sdb.Close() })
	// Insert a local file (blocks indexed) and a remote file (blocks not indexed)
	localFile := genFile("local", 2, 0)
	if err := sdb.Update(folderID, protocol.LocalDeviceID, []protocol.FileInfo{localFile}); err != nil {
		t.Fatal(err)
	}
	remoteFile := genFile("remote", 2, 1)
	if err := sdb.Update(folderID, protocol.DeviceID{42}, []protocol.FileInfo{remoteFile}); err != nil {
		t.Fatal(err)
	}
	// Drop and repopulate
	if err := sdb.DropBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
	if err := sdb.PopulateBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
	// Local file blocks should be present
	hits, err := itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, localFile.Blocks[0].Hash))
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) == 0 {
		t.Error("expected hits for local file blocks")
	}
	// Remote file blocks should not be present (blocks are only
	// indexed for local files)
	hits, err = itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, remoteFile.Blocks[0].Hash))
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) != 0 {
		t.Error("expected no hits for remote file blocks")
	}
}
// TestSkipBlockIndexOnUpdate verifies that an Update with
// WithSkipBlockIndex stores the blocklist (file metadata keeps its
// blocks) without adding rows to the block index, and that a later
// PopulateBlockIndex backfills the skipped blocks.
func TestSkipBlockIndexOnUpdate(t *testing.T) {
	t.Parallel()
	sdb, err := Open(t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { sdb.Close() })
	// Insert a file with SkipBlockIndex
	file := genFile("a", 3, 0)
	if err := sdb.Update(folderID, protocol.LocalDeviceID, []protocol.FileInfo{file}, db.WithSkipBlockIndex()); err != nil {
		t.Fatal(err)
	}
	// Blocks should not be indexed
	hits, err := itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, file.Blocks[0].Hash))
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) != 0 {
		t.Fatal("expected no block hits with SkipBlockIndex")
	}
	// The blocklist should still be stored (file info is retrievable with blocks)
	fi, ok, err := sdb.GetDeviceFile(folderID, protocol.LocalDeviceID, "a")
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("file not found")
	}
	if len(fi.Blocks) != 3 {
		t.Fatalf("expected 3 blocks in file info, got %d", len(fi.Blocks))
	}
	// Populate should fill in the blocks
	if err := sdb.PopulateBlockIndex(folderID); err != nil {
		t.Fatal(err)
	}
	hits, err = itererr.Collect(sdb.AllLocalBlocksWithHash(folderID, file.Blocks[0].Hash))
	if err != nil {
		t.Fatal(err)
	}
	if len(hits) != 1 {
		t.Fatal("expected one hit after populate")
	}
}
func TestRemoteSequence(t *testing.T) {
t.Parallel()

View File

@@ -14,6 +14,7 @@ import (
"slices"
"github.com/jmoiron/sqlx"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/gen/dbproto"
"github.com/syncthing/syncthing/internal/itererr"
"github.com/syncthing/syncthing/internal/slogutil"
@@ -30,7 +31,7 @@ const (
updatePointsThreshold = 250_000
)
func (s *folderDB) Update(device protocol.DeviceID, fs []protocol.FileInfo) error {
func (s *folderDB) Update(device protocol.DeviceID, fs []protocol.FileInfo, options db.UpdateOptions) error {
s.updateLock.Lock()
defer s.updateLock.Unlock()
@@ -151,7 +152,7 @@ func (s *folderDB) Update(device protocol.DeviceID, fs []protocol.FileInfo) erro
}
if _, err := insertBlockListStmt.Exec(f.BlocksHash, bs); err != nil {
return wrap(err, "insert blocklist")
} else if device == protocol.LocalDeviceID {
} else if device == protocol.LocalDeviceID && !options.SkipBlockIndex {
// Insert all blocks
if err := s.insertBlocksLocked(txp, f.BlocksHash, f.Blocks); err != nil {
return wrap(err, "insert blocks")
@@ -303,6 +304,86 @@ func (s *folderDB) DropFilesNamed(device protocol.DeviceID, names []string) erro
return wrap(tx.Commit())
}
// blockIndexEmpty reports whether the "blocks" table currently holds no
// rows at all.
func (s *folderDB) blockIndexEmpty() (bool, error) {
	var haveRows bool
	if err := s.sql.Get(&haveRows, `SELECT EXISTS (SELECT 1 FROM blocks LIMIT 1)`); err != nil {
		return false, wrap(err)
	}
	return !haveRows, nil
}
// DropBlockIndex deletes every row from the "blocks" table and then
// compacts the database. When the index is already empty it returns
// immediately without touching the database.
func (s *folderDB) DropBlockIndex() error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()

	empty, err := s.blockIndexEmpty()
	if err != nil {
		return err
	}
	if empty {
		// Nothing to delete; skip the vacuum as well.
		return nil
	}
	if _, err := s.sql.Exec(`DELETE FROM blocks`); err != nil {
		return wrap(err)
	}
	return s.vacuumAndOptimize()
}
// PopulateBlockIndex rebuilds the "blocks" table from the blocklists of
// all local files, inside a single transaction. It is a no-op when the
// index already contains rows, so repeated calls are cheap.
func (s *folderDB) PopulateBlockIndex() error {
	s.updateLock.Lock()
	defer s.updateLock.Unlock()
	// Only populate an empty index; a non-empty index is left as-is.
	empty, err := s.blockIndexEmpty()
	if err != nil || !empty {
		return err
	}
	tx, err := s.sql.BeginTxx(context.Background(), nil)
	if err != nil {
		return wrap(err)
	}
	// Rollback is a harmless no-op after a successful Commit.
	defer tx.Rollback()
	txp := &txPreparedStmts{Tx: tx}
	// Iterate all local files that have a blocklist
	rows, err := tx.Queryx(`
		SELECT f.blocklist_hash, bl.blprotobuf FROM files f
		INNER JOIN blocklists bl ON bl.blocklist_hash = f.blocklist_hash
		WHERE f.device_idx = ? AND f.blocklist_hash IS NOT NULL
	`, s.localDeviceIdx)
	if err != nil {
		return wrap(err)
	}
	defer rows.Close()
	for rows.Next() {
		var blocklistHash []byte
		var blProtobuf []byte
		if err := rows.Scan(&blocklistHash, &blProtobuf); err != nil {
			return wrap(err)
		}
		// Blocklists are stored as serialized protobuf; decode the wire
		// blocks back into protocol.BlockInfo before indexing them.
		var bl dbproto.BlockList
		if err := proto.Unmarshal(blProtobuf, &bl); err != nil {
			return wrap(err, "unmarshal blocklist")
		}
		blocks := make([]protocol.BlockInfo, len(bl.Blocks))
		for i, b := range bl.Blocks {
			blocks[i] = protocol.BlockInfoFromWire(b)
		}
		if err := s.insertBlocksLocked(txp, blocklistHash, blocks); err != nil {
			return wrap(err, "insert blocks")
		}
	}
	// Surface any error encountered during row iteration.
	if err := rows.Err(); err != nil {
		return wrap(err)
	}
	return wrap(tx.Commit())
}
func (*folderDB) insertBlocksLocked(tx *txPreparedStmts, blocklistHash []byte, blocks []protocol.BlockInfo) error {
if len(blocks) == 0 {
return nil

View File

@@ -126,6 +126,7 @@ func TestDefaultValues(t *testing.T) {
MaxSingleEntrySize: 1024,
MaxTotalSize: 4096,
},
BlockIndexing: true,
},
Device: DeviceConfiguration{
Addresses: []string{"dynamic"},
@@ -204,6 +205,7 @@ func TestDeviceConfig(t *testing.T) {
MaxTotalSize: 4096,
Entries: []XattrFilterEntry{},
},
BlockIndexing: true,
},
}

View File

@@ -87,6 +87,7 @@ type FolderConfiguration struct {
SendOwnership bool `json:"sendOwnership" xml:"sendOwnership"`
SyncXattrs bool `json:"syncXattrs" xml:"syncXattrs"`
SendXattrs bool `json:"sendXattrs" xml:"sendXattrs"`
BlockIndexing bool `json:"blockIndexing" xml:"blockIndexing" default:"true"`
XattrFilter XattrFilter `json:"xattrFilter" xml:"xattrFilter"`
// Legacy deprecated
DeprecatedReadOnly bool `json:"-" xml:"ro,attr,omitempty"` // Deprecated: Do not use.

View File

@@ -153,12 +153,19 @@ func (f *folder) Serve(ctx context.Context) error {
f.sl.DebugContext(ctx, "Folder starting")
defer f.sl.DebugContext(ctx, "Folder exiting")
f.setState(FolderStarting)
defer func() {
f.scanTimer.Stop()
f.versionCleanupTimer.Stop()
f.setState(FolderIdle)
}()
if err := f.reconcileBlockIndex(ctx); err != nil {
f.setError(ctx, err)
return err // will get restarted by suture
}
if f.FSWatcherEnabled && f.getHealthErrorAndLoadIgnores() == nil {
f.startWatch(ctx)
}
@@ -175,6 +182,8 @@ func (f *folder) Serve(ctx context.Context) error {
pullTimer := time.NewTimer(0)
pullTimer.Stop()
f.setState(FolderIdle)
for {
var err error
@@ -256,6 +265,15 @@ func (f *folder) Serve(ctx context.Context) error {
}
}
// reconcileBlockIndex brings the database block index in line with the
// folder's BlockIndexing configuration: the index is (re)populated when
// indexing is enabled and dropped when it is disabled.
func (f *folder) reconcileBlockIndex(ctx context.Context) error {
	if f.BlockIndexing {
		f.sl.DebugContext(ctx, "Populating block index if empty")
		return f.db.PopulateBlockIndex(f.folderID)
	}
	f.sl.DebugContext(ctx, "Dropping block index (block indexing disabled)")
	return f.db.DropBlockIndex(f.folderID)
}
// BringToFront is a no-op on the base folder type.
func (*folder) BringToFront(string) {}

// Override is a no-op on the base folder type.
func (*folder) Override() {}
@@ -1273,7 +1291,11 @@ func (f *folder) updateLocalsFromPulling(fs []protocol.FileInfo) error {
}
func (f *folder) updateLocals(fs []protocol.FileInfo) error {
if err := f.db.Update(f.folderID, protocol.LocalDeviceID, fs); err != nil {
var opts []db.UpdateOption
if !f.BlockIndexing {
opts = append(opts, db.WithSkipBlockIndex())
}
if err := f.db.Update(f.folderID, protocol.LocalDeviceID, fs, opts...); err != nil {
return err
}

View File

@@ -1166,6 +1166,7 @@ func (f *sendReceiveFolder) handleFile(ctx context.Context, file protocol.FileIn
blocks: blocks,
have: len(have),
}
copyChan <- cs
return nil
}
@@ -1322,7 +1323,7 @@ func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo, dbUpdateChan ch
func (f *sendReceiveFolder) copierRoutine(ctx context.Context, in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
otherFolderFilesystems := make(map[string]fs.Filesystem)
for folder, cfg := range f.model.cfg.Folders() {
if folder == f.ID {
if folder == f.ID || !cfg.BlockIndexing {
continue
}
otherFolderFilesystems[folder] = cfg.Filesystem()
@@ -1390,13 +1391,26 @@ func (f *sendReceiveFolder) copyBlock(ctx context.Context, block protocol.BlockI
buf := protocol.BufferPool.Get(block.Size)
defer protocol.BufferPool.Put(buf)
// Hope that it's usually in the same folder, so start with that
// one. Also possibly more efficient copy (same filesystem).
if f.copyBlockFromFolder(ctx, f.ID, block, state, f.mtimefs, buf) {
return true
// Check for the block in the current version of the file
if idx, ok := state.curFileBlocks[string(block.Hash)]; ok {
if f.copyBlockFromFile(ctx, state.curFile.Name, state.curFile.Blocks[idx].Offset, state, f.mtimefs, block, buf) {
state.copiedFromOrigin(block.Size)
return true
}
if state.failed() != nil {
return false
}
}
if state.failed() != nil {
return false
if f.folder.BlockIndexing {
// Hope that it's usually in the same folder, so start with that
// one. Also possibly more efficient copy (same filesystem).
if f.copyBlockFromFolder(ctx, f.ID, block, state, f.mtimefs, buf) {
return true
}
if state.failed() != nil {
return false
}
}
for folderID, ffs := range otherFolderFilesystems {

View File

@@ -27,12 +27,15 @@ const (
FolderCleaning
FolderCleanWaiting
FolderError
FolderStarting
)
func (s folderState) String() string {
switch s {
case FolderIdle:
return "idle"
case FolderStarting:
return "starting"
case FolderScanning:
return "scanning"
case FolderScanWaiting:

View File

@@ -1667,8 +1667,6 @@ func waitForState(t *testing.T, sub events.Subscription, folder, expected string
}
if err == expected {
return
} else {
t.Error(ev)
}
}
case <-timeout:

View File

@@ -25,18 +25,19 @@ import (
// updated along the way.
type sharedPullerState struct {
// Immutable, does not require locking
file protocol.FileInfo // The new file (desired end state)
fs fs.Filesystem
folder string
tempName string
realName string
reused int // Number of blocks reused from temporary file
ignorePerms bool
hasCurFile bool // Whether curFile is set
curFile protocol.FileInfo // The file as it exists now in our database
sparse bool
created time.Time
fsync bool
file protocol.FileInfo // The new file (desired end state)
fs fs.Filesystem
folder string
tempName string
realName string
reused int // Number of blocks reused from temporary file
ignorePerms bool
hasCurFile bool // Whether curFile is set
curFile protocol.FileInfo // The file as it exists now in our database
curFileBlocks map[string]int // block hash to index in curFile
sparse bool
created time.Time
fsync bool
// Mutable, must be locked for access
err error // The first error we hit
@@ -54,6 +55,12 @@ type sharedPullerState struct {
}
func newSharedPullerState(file protocol.FileInfo, fs fs.Filesystem, folderID, tempName string, blocks []protocol.BlockInfo, reused []int, ignorePerms, hasCurFile bool, curFile protocol.FileInfo, sparse bool, fsync bool) *sharedPullerState {
// Map the existing blocks by hash to block index in the current file
blocksMap := make(map[string]int, len(curFile.Blocks))
for idx, block := range curFile.Blocks {
blocksMap[string(block.Hash)] = idx
}
return &sharedPullerState{
file: file,
fs: fs,
@@ -69,6 +76,7 @@ func newSharedPullerState(file protocol.FileInfo, fs fs.Filesystem, folderID, te
ignorePerms: ignorePerms,
hasCurFile: hasCurFile,
curFile: curFile,
curFileBlocks: blocksMap,
sparse: sparse,
fsync: fsync,
created: time.Now(),

View File

@@ -110,6 +110,7 @@ func newFolderConfig() config.FolderConfiguration {
cfg.FSWatcherEnabled = false
cfg.PullerDelayS = 0
cfg.Devices = append(cfg.Devices, config.FolderDeviceConfiguration{DeviceID: device1})
cfg.BlockIndexing = true
return cfg
}

View File

@@ -127,6 +127,7 @@ type Report struct {
SyncXattrs int `json:"syncXattrs,omitempty" metric:"folder_feature{feature=SyncXattrs},summary" since:"3"`
SendOwnership int `json:"sendOwnership,omitempty" metric:"folder_feature{feature=SendOwnership},summary" since:"3"`
SyncOwnership int `json:"syncOwnership,omitempty" metric:"folder_feature{feature=SyncOwnership},summary" since:"3"`
NoBlockIndexing int `json:"noBlockIndexing,omitempty" metric:"folder_feature{feature=NoBlockIndexing},summary" since:"3"`
} `json:"folderUsesV3,omitzero" since:"3"`
DeviceUsesV3 struct {

View File

@@ -280,6 +280,9 @@ func (s *Service) reportData(ctx context.Context, urVersion int, preview bool) (
if cfg.SyncOwnership {
report.FolderUsesV3.SyncOwnership++
}
if !cfg.BlockIndexing {
report.FolderUsesV3.NoBlockIndexing++
}
}
slices.Sort(report.FolderUsesV3.FsWatcherDelays)