mirror of
https://github.com/syncthing/syncthing.git
synced 2025-12-23 22:18:14 -05:00
fix(model): consider MaxFolderConcurrency when calculating number of hashers (#10285)
Currently, the number of hashers, with the exception of some specific operating systems or when defined manually, equals the number of CPU cores divided by the overall number of folders, and it does not take into account the value of MaxFolderConcurrency at all. This leads to artificial performance limits even when MaxFolderConcurrency is set to values lower than the number of cores. For example, let's say that the number of folders is 50 and MaxFolderConcurrency is set to a value of 4 on a 16-core CPU. With the old calculation, the number of hashers would still end up being just 1 due to the large number of folders. However, with the new calculation, the number of hashers in this case will be 4, leading to better hashing performance per folder. Signed-off-by: Tomasz Wilczyński <twilczynski@naver.com> Co-authored-by: Jakob Borg <jakob@kastelo.net>
This commit is contained in:
committed by
GitHub
parent
958f51ace6
commit
6ed4cca691
@@ -2548,6 +2548,12 @@ func (m *model) numHashers(folder string) int {
|
||||
m.mut.RLock()
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
numFolders := max(1, len(m.folderCfgs))
|
||||
// MaxFolderConcurrency already limits the number of scanned folders, so
|
||||
// prefer it over the overall number of folders to avoid limiting performance
|
||||
// further for no reason.
|
||||
if concurrency := m.cfg.Options().MaxFolderConcurrency(); concurrency > 0 {
|
||||
numFolders = min(numFolders, concurrency)
|
||||
}
|
||||
m.mut.RUnlock()
|
||||
|
||||
if folderCfg.Hashers > 0 {
|
||||
|
||||
Reference in New Issue
Block a user