chore(model): refactor context handling for folder type (#10472)
Signed-off-by: Jakob Borg <jakob@kastelo.net>
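The refactor removes the context.Context field that lived on the folder struct (flagged by the containedctx linter) and instead threads the ctx that Serve receives through every method and closure that needs it. A minimal, self-contained sketch of the before/after idea — illustrative only, not the actual Syncthing code:

package main

import (
    "context"
    "fmt"
    "time"
)

// Before the refactor, a struct like this would hold the context and every
// method would read s.ctx. After (shown here), Serve receives ctx and passes
// it down explicitly, so cancellation flows through ordinary parameters.
type service struct {
    work chan func(context.Context) error
}

func (s *service) Serve(ctx context.Context) error {
    for {
        select {
        case <-ctx.Done():
            return nil
        case fn := <-s.work:
            if err := fn(ctx); err != nil {
                fmt.Println("task failed:", err)
            }
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    s := &service{work: make(chan func(context.Context) error, 1)}
    s.work <- func(ctx context.Context) error {
        fmt.Println("running while the context is live:", ctx.Err() == nil)
        return nil
    }
    _ = s.Serve(ctx) // returns once the context expires
}

The benefit is that cancellation is visible in every signature, and no helper can accidentally outlive the serve loop's context.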

go.mod (8 changed lines)
@@ -76,7 +76,7 @@ require (
 github.com/jmespath/go-jmespath v0.4.0 // indirect
 github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
 github.com/mattn/go-isatty v0.0.20 // indirect
-github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3 // indirect
+github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0 // indirect
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 github.com/ncruces/go-strftime v0.1.9 // indirect
 github.com/nxadm/tail v1.4.11 // indirect
@@ -97,10 +97,10 @@ require (
 github.com/tklauser/numcpus v0.10.0 // indirect
 github.com/yusufpapurcu/wmi v1.2.4 // indirect
 go.yaml.in/yaml/v2 v2.4.2 // indirect
-golang.org/x/mod v0.29.0 // indirect
+golang.org/x/mod v0.30.0 // indirect
 golang.org/x/sync v0.18.0 // indirect
-golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect
+golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
-golang.org/x/tools v0.38.0 // indirect
+golang.org/x/tools v0.39.0 // indirect
 gopkg.in/yaml.v3 v3.0.1 // indirect
 modernc.org/libc v1.66.3 // indirect
 modernc.org/mathutil v1.7.1 // indirect

go.sum (24 changed lines)
@@ -163,8 +163,8 @@ github.com/maruel/panicparse/v2 v2.5.0/go.mod h1:DA2fDiBk63bKfBf4CVZP9gb4fuvzdPb
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3 h1:Eaq36EIyJNp7b3qDhjV7jmDVq/yPeW2v4pTqzGbOGB4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0 h1:aOeI7xAOVdK+R6xbVsZuU9HmCZYmQVmZgPf9xJUd2Sg=
-github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3/go.mod h1:6KKUoQBZBW6PDXJtNfqeEjPXMj/ITTk+cWK9t9uS5+E=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0/go.mod h1:0hZWbtfeCYUQeAQdPLUzETiBhUSns7O6LDj9vH88xKA=
 github.com/maxmind/geoipupdate/v6 v6.1.0 h1:sdtTHzzQNJlXF5+fd/EoPTucRHyMonYt/Cok8xzzfqA=
 github.com/maxmind/geoipupdate/v6 v6.1.0/go.mod h1:cZYCDzfMzTY4v6dKRdV7KTB6SStxtn3yFkiJ1btTGGc=
 github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 h1:cUVxyR+UfmdEAZGJ8IiKld1O0dbGotEnkMolG5hfMSY=
@@ -187,8 +187,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
-github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
 github.com/oschwald/geoip2-golang v1.13.0 h1:Q44/Ldc703pasJeP5V9+aFSZFmBN7DKHbNsSFzQATJI=
 github.com/oschwald/geoip2-golang v1.13.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
 github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
@@ -279,8 +279,8 @@ go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
 go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
 go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
 go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
-go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
-go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -289,8 +289,8 @@ golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvm
 golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4=
 golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
-golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -330,8 +330,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
 golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
+golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo=
-golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
+golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -345,8 +345,8 @@ golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

shared folder implementation (file name not shown in this view):
@@ -55,8 +55,7 @@ type folder struct {
 ignores *ignore.Matcher
 mtimefs fs.Filesystem
 modTimeWindow time.Duration
-ctx context.Context //nolint:containedctx // used internally, only accessible on serve lifetime
 done chan struct{} // used externally, accessible regardless of serve
 sl *slog.Logger

 scanInterval time.Duration
@@ -94,12 +93,12 @@ type folder struct {
 }

 type syncRequest struct {
-fn func() error
+fn func(context.Context) error
 err chan error
 }

 type puller interface {
-pull() (bool, error) // true when successful and should not be retried
+pull(ctx context.Context) (bool, error) // true when successful and should not be retried
 }

 func newFolder(model *model, ignores *ignore.Matcher, cfg config.FolderConfiguration, evLogger events.Logger, ioLimiter *semaphore.Semaphore, ver versioner.Versioner) *folder {
@@ -151,10 +150,8 @@ func (f *folder) Serve(ctx context.Context) error {
 f.model.foldersRunning.Add(1)
 defer f.model.foldersRunning.Add(-1)

-f.ctx = ctx
-
-l.Debugln(f, "starting")
-defer l.Debugln(f, "exiting")
+f.sl.DebugContext(ctx, "Folder starting")
+defer f.sl.DebugContext(ctx, "Folder exiting")

 defer func() {
 f.scanTimer.Stop()
@@ -163,7 +160,7 @@ func (f *folder) Serve(ctx context.Context) error {
 }()

 if f.FSWatcherEnabled && f.getHealthErrorAndLoadIgnores() == nil {
-f.startWatch()
+f.startWatch(ctx)
 }

 // If we're configured to not do version cleanup, or we don't have a
@@ -182,7 +179,7 @@ func (f *folder) Serve(ctx context.Context) error {
 var err error

 select {
-case <-f.ctx.Done():
+case <-ctx.Done():
 close(f.done)
 return nil

@@ -197,16 +194,16 @@ func (f *folder) Serve(ctx context.Context) error {
 }
 pullTimer.Reset(time.Duration(float64(time.Second) * f.PullerDelayS))
 } else {
-_, err = f.pull()
+_, err = f.pull(ctx)
 }

 case <-pullTimer.C:
 f.setState(FolderIdle)
-_, err = f.pull()
+_, err = f.pull(ctx)

 case <-f.pullFailTimer.C:
 var success bool
-success, err = f.pull()
+success, err = f.pull(ctx)
 if (err != nil || !success) && f.pullPause < 60*f.pullBasePause() {
 // Back off from retrying to pull
 f.pullPause *= 2
@@ -215,46 +212,46 @@ func (f *folder) Serve(ctx context.Context) error {
 case <-initialCompleted:
 // Initial scan has completed, we should do a pull
 initialCompleted = nil // never hit this case again
-_, err = f.pull()
+_, err = f.pull(ctx)

 case <-f.forcedRescanRequested:
-err = f.handleForcedRescans()
+err = f.handleForcedRescans(ctx)

 case <-f.scanTimer.C:
-l.Debugln(f, "Scanning due to timer")
-err = f.scanTimerFired()
+f.sl.DebugContext(ctx, "Scanning due to timer")
+err = f.scanTimerFired(ctx)

 case req := <-f.doInSyncChan:
-l.Debugln(f, "Running something due to request")
-err = req.fn()
+f.sl.DebugContext(ctx, "Running something due to request")
+err = req.fn(ctx)
 req.err <- err

 case next := <-f.scanDelay:
-l.Debugln(f, "Delaying scan")
+f.sl.DebugContext(ctx, "Delaying scan")
 f.scanTimer.Reset(next)

 case <-f.scanScheduled:
-l.Debugln(f, "Scan was scheduled")
+f.sl.DebugContext(ctx, "Scan was scheduled")
 f.scanTimer.Reset(0)

 case fsEvents := <-f.watchChan:
-l.Debugln(f, "Scan due to watcher")
-err = f.scanSubdirs(fsEvents)
+f.sl.DebugContext(ctx, "Scan due to watcher")
+err = f.scanSubdirs(ctx, fsEvents)

 case <-f.restartWatchChan:
-l.Debugln(f, "Restart watcher")
-err = f.restartWatch()
+f.sl.DebugContext(ctx, "Restart watcher")
+err = f.restartWatch(ctx)

 case <-f.versionCleanupTimer.C:
-l.Debugln(f, "Doing version cleanup")
-f.versionCleanupTimerFired()
+f.sl.DebugContext(ctx, "Doing version cleanup")
+f.versionCleanupTimerFired(ctx)
 }

 if err != nil {
 if svcutil.IsFatal(err) {
 return err
 }
-f.setError(err)
+f.setError(ctx, err)
 }
 }
 }
@@ -303,12 +300,14 @@ func (*folder) Jobs(_, _ int) ([]string, []string, int) {

 func (f *folder) Scan(subdirs []string) error {
 <-f.initialScanFinished
-return f.doInSync(func() error { return f.scanSubdirs(subdirs) })
+return f.doInSync(func(ctx context.Context) error {
+return f.scanSubdirs(ctx, subdirs)
+})
 }

 // doInSync allows to run functions synchronously in folder.serve from exported,
 // asynchronously called methods.
-func (f *folder) doInSync(fn func() error) error {
+func (f *folder) doInSync(fn func(context.Context) error) error {
 req := syncRequest{
 fn: fn,
 err: make(chan error, 1),
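Scan and the other exported entry points now hand doInSync a closure that takes a context; the serve loop supplies its own ctx when it runs the request, so externally submitted work is cancelled together with the folder. A rough sketch of that request pattern (the names here are illustrative, not the real implementation, and the real code additionally keeps a done channel so callers are not left blocked once the loop has exited):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

type syncReq struct {
    fn  func(context.Context) error
    err chan error
}

type runner struct{ reqs chan syncReq }

// doInSync queues fn and waits for the serve loop to run it.
func (r *runner) doInSync(fn func(context.Context) error) error {
    req := syncReq{fn: fn, err: make(chan error, 1)}
    r.reqs <- req
    return <-req.err
}

// serve owns the context and passes it into every queued request.
func (r *runner) serve(ctx context.Context) {
    for {
        select {
        case <-ctx.Done():
            return
        case req := <-r.reqs:
            req.err <- req.fn(ctx)
        }
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    r := &runner{reqs: make(chan syncReq)}
    go r.serve(ctx)
    err := r.doInSync(func(ctx context.Context) error {
        if ctx.Err() != nil {
            return errors.New("already cancelled")
        }
        fmt.Println("ran inside the serve loop")
        return nil
    })
    fmt.Println("result:", err)
    cancel()
    time.Sleep(10 * time.Millisecond) // let serve exit
}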
@@ -329,7 +328,7 @@ func (f *folder) Reschedule() {
 // Sleep a random time between 3/4 and 5/4 of the configured interval.
 sleepNanos := (f.scanInterval.Nanoseconds()*3 + rand.Int63n(2*f.scanInterval.Nanoseconds())) / 4 //nolint:gosec
 interval := time.Duration(sleepNanos) * time.Nanosecond
-l.Debugln(f, "next rescan in", interval)
+f.sl.Debug("Next rescan scheduled", slog.Duration("interval", interval))
 f.scanTimer.Reset(interval)
 }

@@ -365,7 +364,7 @@ func (f *folder) getHealthErrorWithoutIgnores() error {
 return nil
 }

-func (f *folder) pull() (success bool, err error) {
+func (f *folder) pull(ctx context.Context) (success bool, err error) {
 f.pullFailTimer.Stop()
 select {
 case <-f.pullFailTimer.C:
@@ -402,7 +401,7 @@ func (f *folder) pull() (success bool, err error) {
 // Abort early (before acquiring a token) if there's a folder error
 err = f.getHealthErrorWithoutIgnores()
 if err != nil {
-l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
+f.sl.DebugContext(ctx, "Skipping pull due to folder error", slogutil.Error(err))
 return false, err
 }

@@ -411,7 +410,7 @@ func (f *folder) pull() (success bool, err error) {
 if f.Type != config.FolderTypeSendOnly {
 f.setState(FolderSyncWaiting)

-if err := f.ioLimiter.TakeWithContext(f.ctx, 1); err != nil {
+if err := f.ioLimiter.TakeWithContext(ctx, 1); err != nil {
 return true, err
 }
 defer f.ioLimiter.Give(1)
@@ -428,12 +427,12 @@ func (f *folder) pull() (success bool, err error) {
 }()
 err = f.getHealthErrorAndLoadIgnores()
 if err != nil {
-l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
+f.sl.DebugContext(ctx, "Skipping pull due to folder error", slogutil.Error(err))
 return false, err
 }
-f.setError(nil)
+f.setError(ctx, nil)

-success, err = f.puller.pull()
+success, err = f.puller.pull(ctx)

 if success && err == nil {
 return true, nil
@@ -441,14 +440,14 @@ func (f *folder) pull() (success bool, err error) {

 // Pulling failed, try again later.
 delay := f.pullPause + time.Since(startTime)
-f.sl.Info("Folder failed to sync, will be retried", slog.String("wait", stringutil.NiceDurationString(delay)))
+f.sl.InfoContext(ctx, "Folder failed to sync, will be retried", slog.String("wait", stringutil.NiceDurationString(delay)))
 f.pullFailTimer.Reset(delay)

 return false, err
 }

-func (f *folder) scanSubdirs(subDirs []string) error {
+func (f *folder) scanSubdirs(ctx context.Context, subDirs []string) error {
-l.Debugf("%v scanning", f)
+f.sl.DebugContext(ctx, "Scanning")

 oldHash := f.ignores.Hash()

@@ -456,14 +455,14 @@ func (f *folder) scanSubdirs(subDirs []string) error {
 if err != nil {
 return err
 }
-f.setError(nil)
+f.setError(ctx, nil)

 // Check on the way out if the ignore patterns changed as part of scanning
 // this folder. If they did we should schedule a pull of the folder so that
 // we request things we might have suddenly become unignored and so on.
 defer func() {
 if f.ignores.Hash() != oldHash {
-l.Debugln("Folder", f.Description(), "ignore patterns change detected while scanning; triggering puller")
+f.sl.DebugContext(ctx, "Ignore patterns change detected while scanning; triggering puller")
 f.ignoresUpdated()
 f.SchedulePull()
 }
@@ -472,15 +471,15 @@ func (f *folder) scanSubdirs(subDirs []string) error {
 f.setState(FolderScanWaiting)
 defer f.setState(FolderIdle)

-if err := f.ioLimiter.TakeWithContext(f.ctx, 1); err != nil {
+if err := f.ioLimiter.TakeWithContext(ctx, 1); err != nil {
 return err
 }
 defer f.ioLimiter.Give(1)

 metricFolderScans.WithLabelValues(f.ID).Inc()
-ctx, cancel := context.WithCancel(f.ctx)
+scanCtx, cancel := context.WithCancel(ctx)
 defer cancel()
-go addTimeUntilCancelled(ctx, metricFolderScanSeconds.WithLabelValues(f.ID))
+go addTimeUntilCancelled(scanCtx, metricFolderScanSeconds.WithLabelValues(f.ID))

 for i := range subDirs {
 sub := osutil.NativeFilename(subDirs[i])
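scanSubdirs now derives its child context (scanCtx) from the caller's ctx instead of a stored f.ctx and hands it to addTimeUntilCancelled, so the scan-seconds metric stops accumulating when the scan returns (via the deferred cancel) or the folder is stopped. The helper's implementation is not shown in this diff; a plausible, simplified sketch of the idea, with a stand-in counter type rather than the real metric:

package main

import (
    "context"
    "fmt"
    "time"
)

// adder is the minimal surface we need from a counter-style metric.
type adder interface{ Add(delta float64) }

type counter struct{ total float64 }

func (c *counter) Add(d float64) { c.total += d }

// addTimeUntilCancelled periodically adds the elapsed seconds to the
// counter until ctx is cancelled, then adds the final remainder.
func addTimeUntilCancelled(ctx context.Context, c adder) {
    t := time.NewTicker(time.Second)
    defer t.Stop()
    last := time.Now()
    for {
        select {
        case now := <-t.C:
            c.Add(now.Sub(last).Seconds())
            last = now
        case <-ctx.Done():
            c.Add(time.Since(last).Seconds())
            return
        }
    }
}

func main() {
    c := &counter{}
    ctx, cancel := context.WithCancel(context.Background())
    go addTimeUntilCancelled(ctx, c)
    time.Sleep(50 * time.Millisecond) // simulated scan work
    cancel()                          // the real code defers cancel()
    time.Sleep(10 * time.Millisecond)
    fmt.Printf("recorded ~%.2fs of scan time\n", c.total)
}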
@@ -512,13 +511,13 @@ func (f *folder) scanSubdirs(subDirs []string) error {
 // changes.
 changes := 0
 defer func() {
-l.Debugf("%v finished scanning, detected %v changes", f, changes)
+f.sl.DebugContext(ctx, "Finished scanning", slog.Int("changes", changes))
 if changes > 0 {
 f.SchedulePull()
 }
 }()

-changesHere, err := f.scanSubdirsChangedAndNew(subDirs, batch)
+changesHere, err := f.scanSubdirsChangedAndNew(ctx, subDirs, batch)
 changes += changesHere
 if err != nil {
 return err
@@ -537,7 +536,7 @@ func (f *folder) scanSubdirs(subDirs []string) error {
 // Do a scan of the database for each prefix, to check for deleted and
 // ignored files.

-changesHere, err = f.scanSubdirsDeletedAndIgnored(subDirs, batch)
+changesHere, err = f.scanSubdirsDeletedAndIgnored(ctx, subDirs, batch)
 changes += changesHere
 if err != nil {
 return err
@@ -566,7 +565,7 @@ func (f *folder) newScanBatch() *scanBatch {
 }
 b.updateBatch = NewFileInfoBatch(func(fs []protocol.FileInfo) error {
 if err := b.f.getHealthErrorWithoutIgnores(); err != nil {
-l.Debugf("Stopping scan of folder %s due to: %s", b.f.Description(), err)
+b.f.sl.Debug("Stopping scan due to folder error", slogutil.Error(err))
 return err
 }
 b.f.updateLocalsFromScanning(fs)
@@ -627,7 +626,7 @@ func (b *scanBatch) Update(fi protocol.FileInfo) (bool, error) {
 // Our item is deleted and the global item is our own receive only
 // file. No point in keeping track of that.
 b.Remove(fi.Name)
-l.Debugf("%v scanning: deleting deleted receive-only local-changed file: %v", b.f, fi)
+b.f.sl.Debug("Deleting deleted receive-only local-changed file", slogutil.FilePath(fi.Name))
 return true, nil
 }
 case (b.f.Type == config.FolderTypeReceiveOnly || b.f.Type == config.FolderTypeReceiveEncrypted) &&
@@ -640,19 +639,19 @@ func (b *scanBatch) Update(fi protocol.FileInfo) (bool, error) {
 IgnoreXattrs: !b.f.SyncXattrs && !b.f.SendXattrs,
 }):
 // What we have locally is equivalent to the global file.
-l.Debugf("%v scanning: Merging identical locally changed item with global: %v", b.f, fi)
+b.f.sl.Debug("Merging identical locally changed item with global", slogutil.FilePath(fi.Name))
 fi = gf
 }
 b.updateBatch.Append(fi)
 return true, nil
 }

-func (f *folder) scanSubdirsChangedAndNew(subDirs []string, batch *scanBatch) (int, error) {
+func (f *folder) scanSubdirsChangedAndNew(ctx context.Context, subDirs []string, batch *scanBatch) (int, error) {
 changes := 0

 // If we return early e.g. due to a folder health error, the scan needs
 // to be cancelled.
-scanCtx, scanCancel := context.WithCancel(f.ctx)
+scanCtx, scanCancel := context.WithCancel(ctx)
 defer scanCancel()

 scanConfig := scanner.Config{
@@ -706,7 +705,7 @@ func (f *folder) scanSubdirsChangedAndNew(subDirs []string, batch *scanBatch) (i
 switch f.Type {
 case config.FolderTypeReceiveOnly, config.FolderTypeReceiveEncrypted:
 default:
-if nf, ok := f.findRename(res.File, alreadyUsedOrExisting); ok {
+if nf, ok := f.findRename(ctx, res.File, alreadyUsedOrExisting); ok {
 if ok, err := batch.Update(nf); err != nil {
 return 0, err
 } else if ok {
@@ -719,7 +718,7 @@ func (f *folder) scanSubdirsChangedAndNew(subDirs []string, batch *scanBatch) (i
 return changes, nil
 }

-func (f *folder) scanSubdirsDeletedAndIgnored(subDirs []string, batch *scanBatch) (int, error) {
+func (f *folder) scanSubdirsDeletedAndIgnored(ctx context.Context, subDirs []string, batch *scanBatch) (int, error) {
 var toIgnore []protocol.FileInfo
 ignoredParent := ""
 changes := 0
@@ -732,7 +731,7 @@ outer:
 }

 select {
-case <-f.ctx.Done():
+case <-ctx.Done():
 break outer
 default:
 }
@@ -743,7 +742,7 @@ outer:

 if ignoredParent != "" && !fs.IsParent(fi.Name, ignoredParent) {
 for _, file := range toIgnore {
-l.Debugln("marking file as ignored", file)
+f.sl.DebugContext(ctx, "Marking file as ignored", slogutil.FilePath(file.Name))
 nf := file
 nf.SetIgnored()
 if ok, err := batch.Update(nf); err != nil {
@@ -775,7 +774,7 @@ outer:
 continue
 }

-l.Debugln("marking file as ignored", fi)
+f.sl.DebugContext(ctx, "Marking file as ignored", slogutil.FilePath(fi.Name))
 nf := fi
 nf.SetIgnored()
 if ok, err := batch.Update(nf); err != nil {
@@ -810,7 +809,7 @@ outer:
 // sure the file gets in sync on the following pull.
 nf.Version = protocol.Vector{}
 }
-l.Debugln("marking file as deleted", nf)
+f.sl.DebugContext(ctx, "Marking file as deleted", slogutil.FilePath(nf.Name))
 if ok, err := batch.Update(nf); err != nil {
 return 0, err
 } else if ok {
@@ -824,13 +823,13 @@ outer:
 return 0, err
 case !ok:
 case gf.IsReceiveOnlyChanged():
-l.Debugln("removing deleted, receive-only item that is globally receive-only from db", fi)
+f.sl.DebugContext(ctx, "Removing deleted receive-only item that is globally receive-only from db", slogutil.FilePath(fi.Name))
 batch.Remove(fi.Name)
 changes++
 case gf.IsDeleted():
 // Our item is deleted and the global item is deleted too. We just
 // pretend it is a normal deleted file (nobody cares about that).
-l.Debugf("%v scanning: Marking globally deleted item as not locally changed: %v", f, fi.Name)
+f.sl.DebugContext(ctx, "Marking globally deleted item as not locally changed", slogutil.FilePath(fi.Name))
 fi.LocalFlags &^= protocol.FlagLocalReceiveOnly
 if ok, err := batch.Update(fi); err != nil {
 return 0, err
@@ -842,7 +841,7 @@ outer:
 // No need to bump the version for a file that was and is
 // deleted and just the folder type/local flags changed.
 fi.LocalFlags &^= protocol.FlagLocalReceiveOnly
-l.Debugln("removing receive-only flag on deleted item", fi)
+f.sl.DebugContext(ctx, "Removing receive-only flag on deleted item", slogutil.FilePath(fi.Name))
 if ok, err := batch.Update(fi); err != nil {
 return 0, err
 } else if ok {
@@ -853,14 +852,14 @@ outer:
 }

 select {
-case <-f.ctx.Done():
-return changes, f.ctx.Err()
+case <-ctx.Done():
+return changes, ctx.Err()
 default:
 }

 if len(toIgnore) > 0 {
 for _, file := range toIgnore {
-l.Debugln("marking file as ignored", file)
+f.sl.DebugContext(ctx, "Marking file as ignored", slogutil.FilePath(file.Name))
 nf := file
 nf.SetIgnored()
 if ok, err := batch.Update(nf); err != nil {
@@ -879,7 +878,7 @@ outer:
 return changes, nil
 }

-func (f *folder) findRename(file protocol.FileInfo, alreadyUsedOrExisting map[string]struct{}) (protocol.FileInfo, bool) {
+func (f *folder) findRename(ctx context.Context, file protocol.FileInfo, alreadyUsedOrExisting map[string]struct{}) (protocol.FileInfo, bool) {
 if len(file.Blocks) == 0 || file.Size == 0 {
 return protocol.FileInfo{}, false
 }
@@ -894,7 +893,7 @@ loop:
 }

 select {
-case <-f.ctx.Done():
+case <-ctx.Done():
 break loop
 default:
 }
@@ -943,16 +942,16 @@ loop:
 return nf, found
 }

-func (f *folder) scanTimerFired() error {
+func (f *folder) scanTimerFired(ctx context.Context) error {
-err := f.scanSubdirs(nil)
+err := f.scanSubdirs(ctx, nil)

 select {
 case <-f.initialScanFinished:
 default:
 if err != nil {
-f.sl.Error("Failed initial scan", slogutil.Error(err))
+f.sl.ErrorContext(ctx, "Failed initial scan", slogutil.Error(err))
 } else {
-f.sl.Info("Completed initial scan")
+f.sl.InfoContext(ctx, "Completed initial scan")
 }
 close(f.initialScanFinished)
 }
@@ -962,19 +961,19 @@ func (f *folder) scanTimerFired() error {
 return err
 }

-func (f *folder) versionCleanupTimerFired() {
+func (f *folder) versionCleanupTimerFired(ctx context.Context) {
 f.setState(FolderCleanWaiting)
 defer f.setState(FolderIdle)

-if err := f.ioLimiter.TakeWithContext(f.ctx, 1); err != nil {
+if err := f.ioLimiter.TakeWithContext(ctx, 1); err != nil {
 return
 }
 defer f.ioLimiter.Give(1)

 f.setState(FolderCleaning)

-if err := f.versioner.Clean(f.ctx); err != nil {
+if err := f.versioner.Clean(ctx); err != nil {
-f.sl.Warn("Failed to clean versions", slogutil.Error(err))
+f.sl.WarnContext(ctx, "Failed to clean versions", slogutil.Error(err))
 }

 f.versionCleanupTimer.Reset(f.versionCleanupInterval)
@@ -1008,21 +1007,21 @@ func (f *folder) scheduleWatchRestart() {

 // restartWatch should only ever be called synchronously. If you want to use
 // this asynchronously, you should probably use scheduleWatchRestart instead.
-func (f *folder) restartWatch() error {
+func (f *folder) restartWatch(ctx context.Context) error {
 f.stopWatch()
-f.startWatch()
+f.startWatch(ctx)
-return f.scanSubdirs(nil)
+return f.scanSubdirs(ctx, nil)
 }

 // startWatch should only ever be called synchronously. If you want to use
 // this asynchronously, you should probably use scheduleWatchRestart instead.
-func (f *folder) startWatch() {
+func (f *folder) startWatch(ctx context.Context) {
-ctx, cancel := context.WithCancel(f.ctx)
+watchCtx, cancel := context.WithCancel(ctx)
 f.watchMut.Lock()
 f.watchChan = make(chan []string)
 f.watchCancel = cancel
 f.watchMut.Unlock()
-go f.monitorWatch(ctx)
+go f.monitorWatch(watchCtx)
 }

 // monitorWatch starts the filesystem watching and retries every minute on failure.
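startWatch now accepts the serve context and derives watchCtx from it, storing the cancel function on the folder so stopWatch (and thus restartWatch) can tear down just the watcher, while a cancelled parent context still stops everything. A small illustrative sketch of that derive-and-store pattern (hypothetical names, not the real watcher code):

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

type watcher struct {
    mut    sync.Mutex
    cancel context.CancelFunc
}

// start derives a child context and remembers how to cancel it later.
func (w *watcher) start(ctx context.Context) {
    watchCtx, cancel := context.WithCancel(ctx)
    w.mut.Lock()
    w.cancel = cancel
    w.mut.Unlock()
    go w.monitor(watchCtx)
}

// stop cancels only the watcher, not the parent context.
func (w *watcher) stop() {
    w.mut.Lock()
    defer w.mut.Unlock()
    if w.cancel != nil {
        w.cancel()
    }
}

func (w *watcher) monitor(ctx context.Context) {
    <-ctx.Done()
    fmt.Println("watcher stopped:", ctx.Err())
}

func main() {
    w := &watcher{}
    w.start(context.Background())
    w.stop() // stops just the watcher; a cancelled parent would do the same
    time.Sleep(10 * time.Millisecond)
}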
@@ -1066,7 +1065,7 @@ func (f *folder) monitorWatch(ctx context.Context) {
 }
 lastWatch = time.Now()
 watchaggregator.Aggregate(aggrCtx, eventChan, f.watchChan, f.FolderConfiguration, f.model.cfg, f.evLogger)
-l.Debugln("Started filesystem watcher for folder", f.Description())
+f.sl.DebugContext(ctx, "Started filesystem watcher")
 case err = <-errChan:
 var next time.Duration
 if dur := time.Since(lastWatch); dur > pause {
@@ -1148,9 +1147,9 @@ func (f *folder) scanOnWatchErr() {
 }
 }

-func (f *folder) setError(err error) {
+func (f *folder) setError(ctx context.Context, err error) {
 select {
-case <-f.ctx.Done():
+case <-ctx.Done():
 return
 default:
 }
@@ -1162,12 +1161,12 @@ func (f *folder) setError(err error) {

 if err != nil {
 if oldErr == nil {
-f.sl.Warn("Error on folder", slogutil.Error(err))
+f.sl.WarnContext(ctx, "Error on folder", slogutil.Error(err))
 } else {
-f.sl.Info("Folder error changed", slogutil.Error(err), slog.Any("previously", oldErr))
+f.sl.InfoContext(ctx, "Folder error changed", slogutil.Error(err), slog.Any("previously", oldErr))
 }
 } else {
-f.sl.Info("Folder error cleared")
+f.sl.InfoContext(ctx, "Folder error cleared")
 f.SchedulePull()
 }

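A recurring change above is the switch from the package-level l.Debugln/Debugf logger and plain f.sl.Info/Warn calls to the context-aware log/slog variants (DebugContext, InfoContext, WarnContext, ErrorContext). These pass the context through to the slog handler, which may attach request- or trace-scoped data; with the standard handlers the rendered output is unchanged. A minimal example of the two forms:

package main

import (
    "context"
    "log/slog"
    "os"
)

func main() {
    sl := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
    ctx := context.Background()

    // Plain call: no context reaches the handler.
    sl.Debug("Next rescan scheduled", slog.Duration("interval", 0))

    // Context-aware calls: the handler receives ctx and can pull values from it.
    sl.DebugContext(ctx, "Scanning due to timer")
    sl.InfoContext(ctx, "Folder error cleared")
}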
@@ -1324,7 +1323,7 @@ func (f *folder) emitDiskChangeEvents(fs []protocol.FileInfo, typeOfEvent events
 }
 }

-func (f *folder) handleForcedRescans() error {
+func (f *folder) handleForcedRescans(ctx context.Context) error {
 f.forcedRescanPathsMut.Lock()
 paths := make([]string, 0, len(f.forcedRescanPaths))
 for path := range f.forcedRescanPaths {
@@ -1360,7 +1359,7 @@ func (f *folder) handleForcedRescans() error {
 return err
 }

-return f.scanSubdirs(paths)
+return f.scanSubdirs(ctx, paths)
 }

 // The exists function is expected to return true for all known paths
@@ -1377,7 +1376,7 @@ func unifySubs(dirs []string, exists func(dir string) bool) []string {
 for i := 0; i < len(dirs); {
 dir, err := fs.Canonicalize(dirs[i])
 if err != nil {
-l.Debugf("Skipping %v for scan: %s", dirs[i], err)
+slog.Debug("Skipping directory for scan", slog.String("dir", dirs[i]), slogutil.Error(err))
 dirs = append(dirs[:i], dirs[i+1:]...)
 continue
 }

receive-encrypted folder implementation (file name not shown in this view):
@@ -7,6 +7,7 @@
 package model

 import (
+"context"
 "fmt"
 "slices"
 "strings"
@@ -39,8 +40,8 @@ func (f *receiveEncryptedFolder) Revert() {
 f.doInSync(f.revert)
 }

-func (f *receiveEncryptedFolder) revert() error {
+func (f *receiveEncryptedFolder) revert(ctx context.Context) error {
-f.sl.Info("Reverting unexpected items")
+f.sl.InfoContext(ctx, "Reverting unexpected items")

 f.setState(FolderScanning)
 defer f.setState(FolderIdle)
@@ -84,7 +85,7 @@ func (f *receiveEncryptedFolder) revert() error {
 batch.Append(fi)
 }

-f.revertHandleDirs(dirs)
+f.revertHandleDirs(ctx, dirs)

 if err := batch.Flush(); err != nil {
 return err
@@ -96,13 +97,13 @@ func (f *receiveEncryptedFolder) revert() error {
 return nil
 }

-func (f *receiveEncryptedFolder) revertHandleDirs(dirs []string) {
+func (f *receiveEncryptedFolder) revertHandleDirs(ctx context.Context, dirs []string) {
 if len(dirs) == 0 {
 return
 }

 scanChan := make(chan string)
-go f.pullScannerRoutine(scanChan)
+go f.pullScannerRoutine(ctx, scanChan)
 defer close(scanChan)

 slices.SortFunc(dirs, func(a, b string) int {

receive-only folder implementation (file name not shown in this view):
@@ -7,6 +7,7 @@
 package model

 import (
+"context"
 "slices"
 "strings"
 "time"
@@ -69,14 +70,14 @@ func (f *receiveOnlyFolder) Revert() {
 f.doInSync(f.revert)
 }

-func (f *receiveOnlyFolder) revert() error {
+func (f *receiveOnlyFolder) revert(ctx context.Context) error {
-f.sl.Info("Reverting folder")
+f.sl.InfoContext(ctx, "Reverting folder")

 f.setState(FolderScanning)
 defer f.setState(FolderIdle)

 scanChan := make(chan string)
-go f.pullScannerRoutine(scanChan)
+go f.pullScannerRoutine(ctx, scanChan)
 defer close(scanChan)

 delQueue := &deleteQueue{
@@ -155,7 +156,7 @@ func (f *receiveOnlyFolder) revert() error {
 // Handle any queued directories
 deleted, err := delQueue.flush()
 if err != nil {
-f.sl.Warn("Failed to revert directories", slogutil.Error(err))
+f.sl.WarnContext(ctx, "Failed to revert directories", slogutil.Error(err))
 }
 now := time.Now()
 for _, dir := range deleted {

receive-only folder tests (file name not shown in this view):
@@ -407,8 +407,8 @@ func TestRecvOnlyRemoteUndoChanges(t *testing.T) {
 must(t, m.IndexUpdate(conn, &protocol.IndexUpdate{Folder: "ro", Files: files}))

 // Ensure the pull to resolve conflicts (content identical) happened
-must(t, f.doInSync(func() error {
+must(t, f.doInSync(func(ctx context.Context) error {
-f.pull()
+f.pull(ctx)
 return nil
 }))

@@ -456,7 +456,7 @@ func TestRecvOnlyRevertOwnID(t *testing.T) {
 // Monitor the outcome
 sub := f.evLogger.Subscribe(events.LocalIndexUpdated)
 defer sub.Unsubscribe()
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(t.Context())
 defer cancel()
 go func() {
 defer cancel()
@@ -567,7 +567,7 @@ func setupKnownFiles(t *testing.T, ffs fs.Filesystem, data []byte) []protocol.Fi
 if err != nil {
 t.Fatal(err)
 }
-blocks, _ := scanner.Blocks(context.TODO(), bytes.NewReader(data), protocol.BlockSize(int64(len(data))), int64(len(data)), nil)
+blocks, _ := scanner.Blocks(t.Context(), bytes.NewReader(data), protocol.BlockSize(int64(len(data))), int64(len(data)), nil)
 knownFiles := []protocol.FileInfo{
 {
 Name: "knownDir",
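The tests replace context.Background() and context.TODO() with t.Context(), which the testing package (since Go 1.24) cancels automatically when the test finishes, so goroutines and helpers started by the test shut down with it. A tiny hypothetical example (it would live in a _test.go file):

package example

import "testing"

func TestUsesTestContext(t *testing.T) {
    ctx := t.Context() // cancelled automatically when this test ends
    select {
    case <-ctx.Done():
        t.Fatal("the test context should still be live while the test runs")
    default:
    }
    // Pass ctx to the code under test instead of context.Background().
}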

send-only folder implementation (file name not shown in this view):
@@ -7,6 +7,8 @@
 package model

 import (
+"context"
+
 "github.com/syncthing/syncthing/internal/itererr"
 "github.com/syncthing/syncthing/lib/config"
 "github.com/syncthing/syncthing/lib/events"
@@ -37,7 +39,7 @@ func (*sendOnlyFolder) PullErrors() []FileError {
 }

 // pull checks need for files that only differ by metadata (no changes on disk)
-func (f *sendOnlyFolder) pull() (bool, error) {
+func (f *sendOnlyFolder) pull(ctx context.Context) (bool, error) {
 batch := NewFileInfoBatch(func(files []protocol.FileInfo) error {
 f.updateLocalsFromPulling(files)
 return nil
@@ -92,8 +94,8 @@ func (f *sendOnlyFolder) Override() {
 f.doInSync(f.override)
 }

-func (f *sendOnlyFolder) override() error {
+func (f *sendOnlyFolder) override(ctx context.Context) error {
-f.sl.Info("Overriding global state ")
+f.sl.InfoContext(ctx, "Overriding global state ")

 f.setState(FolderScanning)
 defer f.setState(FolderIdle)

send-receive folder implementation (file name not shown in this view):
@@ -161,20 +161,20 @@ func newSendReceiveFolder(model *model, ignores *ignore.Matcher, cfg config.Fold

 // pull returns true if it manages to get all needed items from peers, i.e. get
 // the device in sync with the global state.
-func (f *sendReceiveFolder) pull() (bool, error) {
+func (f *sendReceiveFolder) pull(ctx context.Context) (bool, error) {
-l.Debugf("%v pulling", f)
+f.sl.DebugContext(ctx, "Pulling")

 scanChan := make(chan string)
-go f.pullScannerRoutine(scanChan)
+go f.pullScannerRoutine(ctx, scanChan)
 defer func() {
 close(scanChan)
 f.setState(FolderIdle)
 }()

 metricFolderPulls.WithLabelValues(f.ID).Inc()
-ctx, cancel := context.WithCancel(f.ctx)
+pullCtx, cancel := context.WithCancel(ctx)
 defer cancel()
-go addTimeUntilCancelled(ctx, metricFolderPullSeconds.WithLabelValues(f.ID))
+go addTimeUntilCancelled(pullCtx, metricFolderPullSeconds.WithLabelValues(f.ID))

 changed := 0

@@ -185,8 +185,8 @@ func (f *sendReceiveFolder) pull() (bool, error) {
 var err error
 for tries := range maxPullerIterations {
 select {
-case <-f.ctx.Done():
-return false, f.ctx.Err()
+case <-ctx.Done():
+return false, ctx.Err()
 default:
 }

@@ -194,12 +194,12 @@ func (f *sendReceiveFolder) pull() (bool, error) {
 // it to FolderSyncing during the last iteration.
 f.setState(FolderSyncPreparing)

-changed, err = f.pullerIteration(scanChan)
+changed, err = f.pullerIteration(ctx, scanChan)
 if err != nil {
 return false, err
 }

-l.Debugln(f, "changed", changed, "on try", tries+1)
+f.sl.DebugContext(ctx, "Pull iteration completed", "changed", changed, "try", tries+1)

 if changed == 0 {
 // No files were changed by the puller, so we are in
@@ -214,7 +214,7 @@ func (f *sendReceiveFolder) pull() (bool, error) {
 if pullErrNum > 0 {
 f.pullErrors = make([]FileError, 0, len(f.tempPullErrors))
 for path, err := range f.tempPullErrors {
-f.sl.Warn("Failed to sync", slogutil.FilePath(path), slogutil.Error(err))
+f.sl.WarnContext(ctx, "Failed to sync", slogutil.FilePath(path), slogutil.Error(err))
 f.pullErrors = append(f.pullErrors, FileError{
 Err: err,
 Path: path,
@@ -238,7 +238,7 @@ func (f *sendReceiveFolder) pull() (bool, error) {
 // returns the number items that should have been synced (even those that
 // might have failed). One puller iteration handles all files currently
 // flagged as needed in the folder.
-func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) (int, error) {
+func (f *sendReceiveFolder) pullerIteration(ctx context.Context, scanChan chan<- string) (int, error) {
 f.errorsMut.Lock()
 f.tempPullErrors = make(map[string]string)
 f.errorsMut.Unlock()
@@ -253,7 +253,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) (int, error)
 var doneWg sync.WaitGroup
 var updateWg sync.WaitGroup

-l.Debugln(f, "copiers:", f.Copiers, "pullerPendingKiB:", f.PullerMaxPendingKiB)
+f.sl.DebugContext(ctx, "Starting puller iteration", "copiers", f.Copiers, "pullerPendingKiB", f.PullerMaxPendingKiB)

 updateWg.Add(1)
 go func() {
@@ -266,7 +266,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) (int, error)
 copyWg.Add(1)
 go func() {
 // copierRoutine finishes when copyChan is closed
-f.copierRoutine(copyChan, pullChan, finisherChan)
+f.copierRoutine(ctx, copyChan, pullChan, finisherChan)
 copyWg.Done()
 }()
 }
@@ -274,18 +274,18 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) (int, error)
 pullWg.Add(1)
 go func() {
 // pullerRoutine finishes when pullChan is closed
-f.pullerRoutine(pullChan, finisherChan)
+f.pullerRoutine(ctx, pullChan, finisherChan)
 pullWg.Done()
 }()

 doneWg.Add(1)
 // finisherRoutine finishes when finisherChan is closed
 go func() {
-f.finisherRoutine(finisherChan, dbUpdateChan, scanChan)
+f.finisherRoutine(ctx, finisherChan, dbUpdateChan, scanChan)
 doneWg.Done()
 }()

-changed, fileDeletions, dirDeletions, err := f.processNeeded(dbUpdateChan, copyChan, scanChan)
+changed, fileDeletions, dirDeletions, err := f.processNeeded(ctx, dbUpdateChan, copyChan, scanChan)

 // Signal copy and puller routines that we are done with the in data for
 // this iteration. Wait for them to finish.
@@ -300,7 +300,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) (int, error)
 doneWg.Wait()

 if err == nil {
-f.processDeletions(fileDeletions, dirDeletions, dbUpdateChan, scanChan)
+f.processDeletions(ctx, fileDeletions, dirDeletions, dbUpdateChan, scanChan)
 }

 // Wait for db updates and scan scheduling to complete
@@ -312,7 +312,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) (int, error)
 return changed, err
 }

-func (f *sendReceiveFolder) processNeeded(dbUpdateChan chan<- dbUpdateJob, copyChan chan<- copyBlocksState, scanChan chan<- string) (int, map[string]protocol.FileInfo, []protocol.FileInfo, error) {
+func (f *sendReceiveFolder) processNeeded(ctx context.Context, dbUpdateChan chan<- dbUpdateJob, copyChan chan<- copyBlocksState, scanChan chan<- string) (int, map[string]protocol.FileInfo, []protocol.FileInfo, error) {
 changed := 0
 var dirDeletions []protocol.FileInfo
 fileDeletions := map[string]protocol.FileInfo{}
@@ -328,13 +328,13 @@ loop:
 return changed, nil, nil, err
 }
 select {
-case <-f.ctx.Done():
+case <-ctx.Done():
 break loop
 default:
 }

 if f.IgnoreDelete && file.IsDeleted() {
-l.Debugln(f, "ignore file deletion (config)", file.FileName())
+f.sl.DebugContext(ctx, "Ignoring file deletion per config", slogutil.FilePath(file.FileName()))
 continue
 }

@@ -343,7 +343,7 @@ loop:
 switch {
 case f.ignores.Match(file.Name).IsIgnored():
 file.SetIgnored()
-l.Debugln(f, "Handling ignored file", file)
+f.sl.DebugContext(ctx, "Handling ignored file", file.LogAttr())
 dbUpdateChan <- dbUpdateJob{file, dbUpdateInvalidate}

 case build.IsWindows && fs.WindowsInvalidFilename(file.Name) != nil:
@@ -409,17 +409,17 @@ loop:
 break
 }
 file.SetUnsupported()
-l.Debugln(f, "Invalidating symlink (unsupported)", file.Name)
+f.sl.DebugContext(ctx, "Invalidating unsupported symlink", slogutil.FilePath(file.Name))
 dbUpdateChan <- dbUpdateJob{file, dbUpdateInvalidate}

 case file.IsDirectory() && !file.IsSymlink():
-l.Debugln(f, "Handling directory", file.Name)
+f.sl.DebugContext(ctx, "Handling directory", slogutil.FilePath(file.Name))
 if f.checkParent(file.Name, scanChan) {
|
||||||
f.handleDir(file, dbUpdateChan, scanChan)
|
f.handleDir(file, dbUpdateChan, scanChan)
|
||||||
}
|
}
|
||||||
|
|
||||||
case file.IsSymlink():
|
case file.IsSymlink():
|
||||||
l.Debugln(f, "Handling symlink", file.Name)
|
f.sl.DebugContext(ctx, "Handling symlink", slogutil.FilePath(file.Name))
|
||||||
if f.checkParent(file.Name, scanChan) {
|
if f.checkParent(file.Name, scanChan) {
|
||||||
f.handleSymlink(file, dbUpdateChan, scanChan)
|
f.handleSymlink(file, dbUpdateChan, scanChan)
|
||||||
}
|
}
|
||||||
@@ -430,8 +430,8 @@ loop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-f.ctx.Done():
|
case <-ctx.Done():
|
||||||
return changed, nil, nil, f.ctx.Err()
|
return changed, nil, nil, ctx.Err()
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -440,8 +440,8 @@ loop:
|
|||||||
nextFile:
|
nextFile:
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-f.ctx.Done():
|
case <-ctx.Done():
|
||||||
return changed, fileDeletions, dirDeletions, f.ctx.Err()
|
return changed, fileDeletions, dirDeletions, ctx.Err()
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -481,7 +481,7 @@ nextFile:
|
|||||||
// map.
|
// map.
|
||||||
desired := fileDeletions[candidate.Name]
|
desired := fileDeletions[candidate.Name]
|
||||||
if err := f.renameFile(candidate, desired, fi, dbUpdateChan, scanChan); err != nil {
|
if err := f.renameFile(candidate, desired, fi, dbUpdateChan, scanChan); err != nil {
|
||||||
l.Debugf("rename shortcut for %s failed: %s", fi.Name, err.Error())
|
f.sl.DebugContext(ctx, "Rename shortcut failed", slogutil.FilePath(fi.Name), slogutil.Error(err))
|
||||||
// Failed to rename, try next one.
|
// Failed to rename, try next one.
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -510,7 +510,7 @@ nextFile:
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := f.handleFile(fi, copyChan); err != nil {
|
if err := f.handleFile(ctx, fi, copyChan); err != nil {
|
||||||
f.newPullError(fileName, err)
|
f.newPullError(fileName, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -528,10 +528,10 @@ func popCandidate(buckets map[string][]protocol.FileInfo, key string) (protocol.
|
|||||||
return cands[0], true
|
return cands[0], true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) processDeletions(fileDeletions map[string]protocol.FileInfo, dirDeletions []protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
|
func (f *sendReceiveFolder) processDeletions(ctx context.Context, fileDeletions map[string]protocol.FileInfo, dirDeletions []protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
|
||||||
for _, file := range fileDeletions {
|
for _, file := range fileDeletions {
|
||||||
select {
|
select {
|
||||||
case <-f.ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
@@ -542,13 +542,13 @@ func (f *sendReceiveFolder) processDeletions(fileDeletions map[string]protocol.F
|
|||||||
// Process in reverse order to delete depth first
|
// Process in reverse order to delete depth first
|
||||||
for i := range dirDeletions {
|
for i := range dirDeletions {
|
||||||
select {
|
select {
|
||||||
case <-f.ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
dir := dirDeletions[len(dirDeletions)-i-1]
|
dir := dirDeletions[len(dirDeletions)-i-1]
|
||||||
l.Debugln(f, "Deleting dir", dir.Name)
|
f.sl.DebugContext(ctx, "Deleting directory", slogutil.FilePath(dir.Name))
|
||||||
f.deleteDir(dir, dbUpdateChan, scanChan)
|
f.deleteDir(dir, dbUpdateChan, scanChan)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -708,10 +708,10 @@ func (f *sendReceiveFolder) checkParent(file string, scanChan chan<- string) boo
|
|||||||
// Encrypted files have made-up filenames with two synthetic parent
|
// Encrypted files have made-up filenames with two synthetic parent
|
||||||
// directories which don't have any meaning. Create those if necessary.
|
// directories which don't have any meaning. Create those if necessary.
|
||||||
if _, err := f.mtimefs.Lstat(parent); !fs.IsNotExist(err) {
|
if _, err := f.mtimefs.Lstat(parent); !fs.IsNotExist(err) {
|
||||||
l.Debugf("%v parent not missing %v", f, file)
|
f.sl.Debug("Parent directory exists", slogutil.FilePath(file))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
l.Debugf("%v creating parent directory of %v", f, file)
|
f.sl.Debug("Creating parent directory", slogutil.FilePath(file))
|
||||||
if err := f.mtimefs.MkdirAll(parent, 0o755); err != nil {
|
if err := f.mtimefs.MkdirAll(parent, 0o755); err != nil {
|
||||||
f.newPullError(file, fmt.Errorf("creating parent dir: %w", err))
|
f.newPullError(file, fmt.Errorf("creating parent dir: %w", err))
|
||||||
return false
|
return false
|
||||||
@@ -880,7 +880,7 @@ func (f *sendReceiveFolder) deleteFileWithCurrent(file, cur protocol.FileInfo, h
|
|||||||
// care not declare another err.
|
// care not declare another err.
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
l.Debugln(f, "Deleting file or symlink", file.Name)
|
f.sl.Debug("Deleting file or symlink", slogutil.FilePath(file.Name))
|
||||||
|
|
||||||
f.evLogger.Log(events.ItemStarted, map[string]string{
|
f.evLogger.Log(events.ItemStarted, map[string]string{
|
||||||
"folder": f.folderID,
|
"folder": f.folderID,
|
||||||
@@ -1001,7 +1001,7 @@ func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, db
|
|||||||
})
|
})
|
||||||
}()
|
}()
|
||||||
|
|
||||||
l.Debugln(f, "taking rename shortcut", source.Name, "->", target.Name)
|
f.sl.Debug("Taking rename shortcut", "from", source.Name, "to", target.Name)
|
||||||
|
|
||||||
// Check that source is compatible with what we have in the DB
|
// Check that source is compatible with what we have in the DB
|
||||||
if err = f.checkToBeDeleted(source, cur, true, scanChan); err != nil {
|
if err = f.checkToBeDeleted(source, cur, true, scanChan); err != nil {
|
||||||
@@ -1130,7 +1130,7 @@ func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, db
|
|||||||
|
|
||||||
// handleFile queues the copies and pulls as necessary for a single new or
|
// handleFile queues the copies and pulls as necessary for a single new or
|
||||||
// changed file.
|
// changed file.
|
||||||
func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState) error {
|
func (f *sendReceiveFolder) handleFile(ctx context.Context, file protocol.FileInfo, copyChan chan<- copyBlocksState) error {
|
||||||
curFile, hasCurFile, err := f.model.sdb.GetDeviceFile(f.folderID, protocol.LocalDeviceID, file.Name)
|
curFile, hasCurFile, err := f.model.sdb.GetDeviceFile(f.folderID, protocol.LocalDeviceID, file.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1146,7 +1146,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
|
|||||||
reused := make([]int, 0, len(file.Blocks))
|
reused := make([]int, 0, len(file.Blocks))
|
||||||
|
|
||||||
if f.Type != config.FolderTypeReceiveEncrypted {
|
if f.Type != config.FolderTypeReceiveEncrypted {
|
||||||
blocks, reused = f.reuseBlocks(blocks, reused, file, tempName)
|
blocks, reused = f.reuseBlocks(ctx, blocks, reused, file, tempName)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The sharedpullerstate will know which flags to use when opening the
|
// The sharedpullerstate will know which flags to use when opening the
|
||||||
@@ -1170,7 +1170,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
|
|||||||
|
|
||||||
s := newSharedPullerState(file, f.mtimefs, f.folderID, tempName, blocks, reused, f.IgnorePerms || file.NoPermissions, hasCurFile, curFile, !f.DisableSparseFiles, !f.DisableFsync)
|
s := newSharedPullerState(file, f.mtimefs, f.folderID, tempName, blocks, reused, f.IgnorePerms || file.NoPermissions, hasCurFile, curFile, !f.DisableSparseFiles, !f.DisableFsync)
|
||||||
|
|
||||||
l.Debugf("%v need file %s; copy %d, reused %v", f, file.Name, len(blocks), len(reused))
|
f.sl.DebugContext(ctx, "Handling file", slogutil.FilePath(file.Name), "blocksToCopy", len(blocks), "reused", len(reused))
|
||||||
|
|
||||||
cs := copyBlocksState{
|
cs := copyBlocksState{
|
||||||
sharedPullerState: s,
|
sharedPullerState: s,
|
||||||
@@ -1181,15 +1181,15 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) reuseBlocks(blocks []protocol.BlockInfo, reused []int, file protocol.FileInfo, tempName string) ([]protocol.BlockInfo, []int) {
|
func (f *sendReceiveFolder) reuseBlocks(ctx context.Context, blocks []protocol.BlockInfo, reused []int, file protocol.FileInfo, tempName string) ([]protocol.BlockInfo, []int) {
|
||||||
// Check for an old temporary file which might have some blocks we could
|
// Check for an old temporary file which might have some blocks we could
|
||||||
// reuse.
|
// reuse.
|
||||||
tempBlocks, err := scanner.HashFile(f.ctx, f.ID, f.mtimefs, tempName, file.BlockSize(), nil)
|
tempBlocks, err := scanner.HashFile(ctx, f.ID, f.mtimefs, tempName, file.BlockSize(), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
var caseErr *fs.CaseConflictError
|
var caseErr *fs.CaseConflictError
|
||||||
if errors.As(err, &caseErr) {
|
if errors.As(err, &caseErr) {
|
||||||
if rerr := f.mtimefs.Rename(caseErr.Real, tempName); rerr == nil {
|
if rerr := f.mtimefs.Rename(caseErr.Real, tempName); rerr == nil {
|
||||||
tempBlocks, err = scanner.HashFile(f.ctx, f.ID, f.mtimefs, tempName, file.BlockSize(), nil)
|
tempBlocks, err = scanner.HashFile(ctx, f.ID, f.mtimefs, tempName, file.BlockSize(), nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1262,7 +1262,7 @@ func populateOffsets(blocks []protocol.BlockInfo) {
|
|||||||
// shortcutFile sets file metadata, when that's the only thing that has
|
// shortcutFile sets file metadata, when that's the only thing that has
|
||||||
// changed.
|
// changed.
|
||||||
func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob) {
|
func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob) {
|
||||||
l.Debugln(f, "taking shortcut on", file.Name)
|
f.sl.Debug("Taking metadata shortcut", slogutil.FilePath(file.Name))
|
||||||
|
|
||||||
f.evLogger.Log(events.ItemStarted, map[string]string{
|
f.evLogger.Log(events.ItemStarted, map[string]string{
|
||||||
"folder": f.folderID,
|
"folder": f.folderID,
|
||||||
@@ -1330,7 +1330,7 @@ func (f *sendReceiveFolder) shortcutFile(file protocol.FileInfo, dbUpdateChan ch
|
|||||||
|
|
||||||
// copierRoutine reads copierStates until the in channel closes and performs
|
// copierRoutine reads copierStates until the in channel closes and performs
|
||||||
// the relevant copies when possible, or passes it to the puller routine.
|
// the relevant copies when possible, or passes it to the puller routine.
|
||||||
func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
|
func (f *sendReceiveFolder) copierRoutine(ctx context.Context, in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
|
||||||
otherFolderFilesystems := make(map[string]fs.Filesystem)
|
otherFolderFilesystems := make(map[string]fs.Filesystem)
|
||||||
for folder, cfg := range f.model.cfg.Folders() {
|
for folder, cfg := range f.model.cfg.Folders() {
|
||||||
if folder == f.ID {
|
if folder == f.ID {
|
||||||
@@ -1349,8 +1349,8 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
|
|||||||
blocks:
|
blocks:
|
||||||
for _, block := range state.blocks {
|
for _, block := range state.blocks {
|
||||||
select {
|
select {
|
||||||
case <-f.ctx.Done():
|
case <-ctx.Done():
|
||||||
state.fail(fmt.Errorf("folder stopped: %w", f.ctx.Err()))
|
state.fail(fmt.Errorf("folder stopped: %w", ctx.Err()))
|
||||||
break blocks
|
break blocks
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
@@ -1368,7 +1368,7 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.copyBlock(block, state, otherFolderFilesystems) {
|
if f.copyBlock(ctx, block, state, otherFolderFilesystems) {
|
||||||
state.copyDone(block)
|
state.copyDone(block)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -1397,13 +1397,13 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Returns true when the block was successfully copied.
|
// Returns true when the block was successfully copied.
|
||||||
func (f *sendReceiveFolder) copyBlock(block protocol.BlockInfo, state copyBlocksState, otherFolderFilesystems map[string]fs.Filesystem) bool {
|
func (f *sendReceiveFolder) copyBlock(ctx context.Context, block protocol.BlockInfo, state copyBlocksState, otherFolderFilesystems map[string]fs.Filesystem) bool {
|
||||||
buf := protocol.BufferPool.Get(block.Size)
|
buf := protocol.BufferPool.Get(block.Size)
|
||||||
defer protocol.BufferPool.Put(buf)
|
defer protocol.BufferPool.Put(buf)
|
||||||
|
|
||||||
// Hope that it's usually in the same folder, so start with that
|
// Hope that it's usually in the same folder, so start with that
|
||||||
// one. Also possibly more efficient copy (same filesystem).
|
// one. Also possibly more efficient copy (same filesystem).
|
||||||
if f.copyBlockFromFolder(f.ID, block, state, f.mtimefs, buf) {
|
if f.copyBlockFromFolder(ctx, f.ID, block, state, f.mtimefs, buf) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if state.failed() != nil {
|
if state.failed() != nil {
|
||||||
@@ -1411,7 +1411,7 @@ func (f *sendReceiveFolder) copyBlock(block protocol.BlockInfo, state copyBlocks
|
|||||||
}
|
}
|
||||||
|
|
||||||
for folderID, ffs := range otherFolderFilesystems {
|
for folderID, ffs := range otherFolderFilesystems {
|
||||||
if f.copyBlockFromFolder(folderID, block, state, ffs, buf) {
|
if f.copyBlockFromFolder(ctx, folderID, block, state, ffs, buf) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if state.failed() != nil {
|
if state.failed() != nil {
|
||||||
@@ -1424,17 +1424,17 @@ func (f *sendReceiveFolder) copyBlock(block protocol.BlockInfo, state copyBlocks
|
|||||||
|
|
||||||
// Returns true when the block was successfully copied.
|
// Returns true when the block was successfully copied.
|
||||||
// The passed buffer must be large enough to accommodate the block.
|
// The passed buffer must be large enough to accommodate the block.
|
||||||
func (f *sendReceiveFolder) copyBlockFromFolder(folderID string, block protocol.BlockInfo, state copyBlocksState, ffs fs.Filesystem, buf []byte) bool {
|
func (f *sendReceiveFolder) copyBlockFromFolder(ctx context.Context, folderID string, block protocol.BlockInfo, state copyBlocksState, ffs fs.Filesystem, buf []byte) bool {
|
||||||
for e, err := range itererr.Zip(f.model.sdb.AllLocalBlocksWithHash(folderID, block.Hash)) {
|
for e, err := range itererr.Zip(f.model.sdb.AllLocalBlocksWithHash(folderID, block.Hash)) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// We just ignore this and continue pulling instead (though
|
// We just ignore this and continue pulling instead (though
|
||||||
// there's a good chance that will fail too, if the DB is
|
// there's a good chance that will fail too, if the DB is
|
||||||
// unhealthy).
|
// unhealthy).
|
||||||
l.Debugf("Failed to get information from DB about block %v in copier (folderID %v, file %v): %v", block.Hash, f.folderID, state.file.Name, err)
|
f.sl.DebugContext(ctx, "Failed to get block information from database", "blockHash", block.Hash, slogutil.FilePath(state.file.Name), slogutil.Error(err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if !f.copyBlockFromFile(e.FileName, e.Offset, state, ffs, block, buf) {
|
if !f.copyBlockFromFile(ctx, e.FileName, e.Offset, state, ffs, block, buf) {
|
||||||
if state.failed() != nil {
|
if state.failed() != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -1454,17 +1454,17 @@ func (f *sendReceiveFolder) copyBlockFromFolder(folderID string, block protocol.
|
|||||||
|
|
||||||
// Returns true when the block was successfully copied.
|
// Returns true when the block was successfully copied.
|
||||||
// The passed buffer must be large enough to accommodate the block.
|
// The passed buffer must be large enough to accommodate the block.
|
||||||
func (f *sendReceiveFolder) copyBlockFromFile(srcName string, srcOffset int64, state copyBlocksState, ffs fs.Filesystem, block protocol.BlockInfo, buf []byte) bool {
|
func (f *sendReceiveFolder) copyBlockFromFile(ctx context.Context, srcName string, srcOffset int64, state copyBlocksState, ffs fs.Filesystem, block protocol.BlockInfo, buf []byte) bool {
|
||||||
fd, err := ffs.Open(srcName)
|
fd, err := ffs.Open(srcName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugf("Failed to open file %v trying to copy block %v (folderID %v): %v", srcName, block.Hash, f.folderID, err)
|
f.sl.DebugContext(ctx, "Failed to open source file for block copy", slogutil.FilePath(srcName), "blockHash", block.Hash, slogutil.Error(err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
defer fd.Close()
|
defer fd.Close()
|
||||||
|
|
||||||
_, err = fd.ReadAt(buf, srcOffset)
|
_, err = fd.ReadAt(buf, srcOffset)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugf("Failed to read block from file %v in copier (folderID: %v, hash: %v): %v", srcName, f.folderID, block.Hash, err)
|
f.sl.DebugContext(ctx, "Failed to read block from file", slogutil.FilePath(srcName), "blockHash", block.Hash, slogutil.Error(err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1473,7 +1473,7 @@ func (f *sendReceiveFolder) copyBlockFromFile(srcName string, srcOffset int64, s
|
|||||||
// trust. (The other side can and will verify.)
|
// trust. (The other side can and will verify.)
|
||||||
if f.Type != config.FolderTypeReceiveEncrypted {
|
if f.Type != config.FolderTypeReceiveEncrypted {
|
||||||
if err := f.verifyBuffer(buf, block); err != nil {
|
if err := f.verifyBuffer(buf, block); err != nil {
|
||||||
l.Debugf("Failed to verify buffer in copier (folderID: %v): %v", f.folderID, err)
|
f.sl.DebugContext(ctx, "Failed to verify block buffer", slogutil.Error(err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1485,13 +1485,13 @@ func (f *sendReceiveFolder) copyBlockFromFile(srcName string, srcOffset int64, s
|
|||||||
}
|
}
|
||||||
|
|
||||||
if f.CopyRangeMethod != config.CopyRangeMethodStandard {
|
if f.CopyRangeMethod != config.CopyRangeMethodStandard {
|
||||||
err = f.withLimiter(func() error {
|
err = f.withLimiter(ctx, func() error {
|
||||||
dstFd.mut.Lock()
|
dstFd.mut.Lock()
|
||||||
defer dstFd.mut.Unlock()
|
defer dstFd.mut.Unlock()
|
||||||
return fs.CopyRange(f.CopyRangeMethod.ToFS(), fd, dstFd.fd, srcOffset, block.Offset, int64(block.Size))
|
return fs.CopyRange(f.CopyRangeMethod.ToFS(), fd, dstFd.fd, srcOffset, block.Offset, int64(block.Size))
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
err = f.limitedWriteAt(dstFd, buf, block.Offset)
|
err = f.limitedWriteAt(ctx, dstFd, buf, block.Offset)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
state.fail(fmt.Errorf("dst write: %w", err))
|
state.fail(fmt.Errorf("dst write: %w", err))
|
||||||
@@ -1513,7 +1513,7 @@ func (*sendReceiveFolder) verifyBuffer(buf []byte, block protocol.BlockInfo) err
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
|
func (f *sendReceiveFolder) pullerRoutine(ctx context.Context, in <-chan pullBlockState, out chan<- *sharedPullerState) {
|
||||||
requestLimiter := semaphore.New(f.PullerMaxPendingKiB * 1024)
|
requestLimiter := semaphore.New(f.PullerMaxPendingKiB * 1024)
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
@@ -1531,7 +1531,7 @@ func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *
|
|||||||
|
|
||||||
bytes := state.block.Size
|
bytes := state.block.Size
|
||||||
|
|
||||||
if err := requestLimiter.TakeWithContext(f.ctx, bytes); err != nil {
|
if err := requestLimiter.TakeWithContext(ctx, bytes); err != nil {
|
||||||
state.fail(err)
|
state.fail(err)
|
||||||
out <- state.sharedPullerState
|
out <- state.sharedPullerState
|
||||||
continue
|
continue
|
||||||
@@ -1543,13 +1543,13 @@ func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *
|
|||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
defer requestLimiter.Give(bytes)
|
defer requestLimiter.Give(bytes)
|
||||||
|
|
||||||
f.pullBlock(state, out)
|
f.pullBlock(ctx, state, out)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) pullBlock(state pullBlockState, out chan<- *sharedPullerState) {
|
func (f *sendReceiveFolder) pullBlock(ctx context.Context, state pullBlockState, out chan<- *sharedPullerState) {
|
||||||
// Get an fd to the temporary file. Technically we don't need it until
|
// Get an fd to the temporary file. Technically we don't need it until
|
||||||
// after fetching the block, but if we run into an error here there is
|
// after fetching the block, but if we run into an error here there is
|
||||||
// no point in issuing the request to the network.
|
// no point in issuing the request to the network.
|
||||||
@@ -1572,8 +1572,8 @@ func (f *sendReceiveFolder) pullBlock(state pullBlockState, out chan<- *sharedPu
|
|||||||
loop:
|
loop:
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-f.ctx.Done():
|
case <-ctx.Done():
|
||||||
state.fail(fmt.Errorf("folder stopped: %w", f.ctx.Err()))
|
state.fail(fmt.Errorf("folder stopped: %w", ctx.Err()))
|
||||||
break loop
|
break loop
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
@@ -1600,10 +1600,10 @@ loop:
|
|||||||
activity.using(selected)
|
activity.using(selected)
|
||||||
var buf []byte
|
var buf []byte
|
||||||
blockNo := int(state.block.Offset / int64(state.file.BlockSize()))
|
blockNo := int(state.block.Offset / int64(state.file.BlockSize()))
|
||||||
buf, lastError = f.model.RequestGlobal(f.ctx, selected.ID, f.folderID, state.file.Name, blockNo, state.block.Offset, state.block.Size, state.block.Hash, selected.FromTemporary)
|
buf, lastError = f.model.RequestGlobal(ctx, selected.ID, f.folderID, state.file.Name, blockNo, state.block.Offset, state.block.Size, state.block.Hash, selected.FromTemporary)
|
||||||
activity.done(selected)
|
activity.done(selected)
|
||||||
if lastError != nil {
|
if lastError != nil {
|
||||||
l.Debugln("request:", f.folderID, state.file.Name, state.block.Offset, state.block.Size, selected.ID.Short(), "returned error:", lastError)
|
f.sl.DebugContext(ctx, "Block request returned error", slogutil.FilePath(state.file.Name), "offset", state.block.Offset, "size", state.block.Size, "device", selected.ID.Short(), slogutil.Error(lastError))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1617,12 +1617,12 @@ loop:
|
|||||||
lastError = f.verifyBuffer(buf, state.block)
|
lastError = f.verifyBuffer(buf, state.block)
|
||||||
}
|
}
|
||||||
if lastError != nil {
|
if lastError != nil {
|
||||||
l.Debugln("request:", f.folderID, state.file.Name, state.block.Offset, state.block.Size, "hash mismatch")
|
f.sl.DebugContext(ctx, "Block hash mismatch", slogutil.FilePath(state.file.Name), "offset", state.block.Offset, "size", state.block.Size)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save the block data we got from the cluster
|
// Save the block data we got from the cluster
|
||||||
err = f.limitedWriteAt(fd, buf, state.block.Offset)
|
err = f.limitedWriteAt(ctx, fd, buf, state.block.Offset)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
state.fail(fmt.Errorf("save: %w", err))
|
state.fail(fmt.Errorf("save: %w", err))
|
||||||
} else {
|
} else {
|
||||||
@@ -1687,10 +1687,10 @@ func (f *sendReceiveFolder) performFinish(file, curFile protocol.FileInfo, hasCu
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
|
func (f *sendReceiveFolder) finisherRoutine(ctx context.Context, in <-chan *sharedPullerState, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
|
||||||
for state := range in {
|
for state := range in {
|
||||||
if closed, err := state.finalClose(); closed {
|
if closed, err := state.finalClose(); closed {
|
||||||
l.Debugln(f, "closing", state.file.Name)
|
f.sl.DebugContext(ctx, "Closing temp file", slogutil.FilePath(state.file.Name))
|
||||||
|
|
||||||
f.queue.Done(state.file.Name)
|
f.queue.Done(state.file.Name)
|
||||||
|
|
||||||
@@ -1701,7 +1701,7 @@ func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState, dbUpda
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
f.newPullError(state.file.Name, fmt.Errorf("finishing: %w", err))
|
f.newPullError(state.file.Name, fmt.Errorf("finishing: %w", err))
|
||||||
} else {
|
} else {
|
||||||
slog.Info("Synced file", f.LogAttr(), state.file.LogAttr(), slog.Group("blocks", slog.Int("local", state.reused+state.copyTotal), slog.Int("download", state.pullTotal)))
|
slog.InfoContext(ctx, "Synced file", f.LogAttr(), state.file.LogAttr(), slog.Group("blocks", slog.Int("local", state.reused+state.copyTotal), slog.Int("download", state.pullTotal)))
|
||||||
|
|
||||||
minBlocksPerBlock := state.file.BlockSize() / protocol.MinBlockSize
|
minBlocksPerBlock := state.file.BlockSize() / protocol.MinBlockSize
|
||||||
blockStatsMut.Lock()
|
blockStatsMut.Lock()
|
||||||
@@ -1756,11 +1756,11 @@ func (f *sendReceiveFolder) dbUpdaterRoutine(dbUpdateChan <-chan dbUpdateJob) {
|
|||||||
if !f.DisableFsync {
|
if !f.DisableFsync {
|
||||||
fd, err := f.mtimefs.Open(dir)
|
fd, err := f.mtimefs.Open(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugf("fsync %q failed: %v", dir, err)
|
f.sl.Debug("Fsync failed", slogutil.FilePath(dir), slogutil.Error(err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := fd.Sync(); err != nil {
|
if err := fd.Sync(); err != nil {
|
||||||
l.Debugf("fsync %q failed: %v", dir, err)
|
f.sl.Debug("Fsync failed", slogutil.FilePath(dir), slogutil.Error(err))
|
||||||
}
|
}
|
||||||
fd.Close()
|
fd.Close()
|
||||||
}
|
}
|
||||||
@@ -1832,7 +1832,7 @@ loop:
|
|||||||
|
|
||||||
// pullScannerRoutine aggregates paths to be scanned after pulling. The scan is
|
// pullScannerRoutine aggregates paths to be scanned after pulling. The scan is
|
||||||
// scheduled once when scanChan is closed (scanning can not happen during pulling).
|
// scheduled once when scanChan is closed (scanning can not happen during pulling).
|
||||||
func (f *sendReceiveFolder) pullScannerRoutine(scanChan <-chan string) {
|
func (f *sendReceiveFolder) pullScannerRoutine(ctx context.Context, scanChan <-chan string) {
|
||||||
toBeScanned := make(map[string]struct{})
|
toBeScanned := make(map[string]struct{})
|
||||||
|
|
||||||
for path := range scanChan {
|
for path := range scanChan {
|
||||||
@@ -1842,7 +1842,7 @@ func (f *sendReceiveFolder) pullScannerRoutine(scanChan <-chan string) {
|
|||||||
if len(toBeScanned) != 0 {
|
if len(toBeScanned) != 0 {
|
||||||
scanList := make([]string, 0, len(toBeScanned))
|
scanList := make([]string, 0, len(toBeScanned))
|
||||||
for path := range toBeScanned {
|
for path := range toBeScanned {
|
||||||
l.Debugln(f, "scheduling scan after pulling for", path)
|
slog.DebugContext(ctx, "Scheduling scan after pulling", slogutil.FilePath(path))
|
||||||
scanList = append(scanList, path)
|
scanList = append(scanList, path)
|
||||||
}
|
}
|
||||||
f.Scan(scanList)
|
f.Scan(scanList)
|
||||||
@@ -1883,7 +1883,7 @@ func (f *sendReceiveFolder) moveForConflict(name, lastModBy string, scanChan cha
|
|||||||
})
|
})
|
||||||
for _, match := range matches[f.MaxConflicts:] {
|
for _, match := range matches[f.MaxConflicts:] {
|
||||||
if gerr := f.mtimefs.Remove(match); gerr != nil {
|
if gerr := f.mtimefs.Remove(match); gerr != nil {
|
||||||
l.Debugln(f, "removing extra conflict", gerr)
|
f.sl.Debug("Failed to remove extra conflict copy", slogutil.Error(gerr))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1895,7 +1895,7 @@ func (f *sendReceiveFolder) moveForConflict(name, lastModBy string, scanChan cha
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) newPullError(path string, err error) {
|
func (f *sendReceiveFolder) newPullError(path string, err error) {
|
||||||
if errors.Is(err, f.ctx.Err()) {
|
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
||||||
// Error because the folder stopped - no point logging/tracking
|
// Error because the folder stopped - no point logging/tracking
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -1916,7 +1916,7 @@ func (f *sendReceiveFolder) newPullError(path string, err error) {
|
|||||||
errStr := fmt.Sprintf("syncing: %s", err)
|
errStr := fmt.Sprintf("syncing: %s", err)
|
||||||
f.tempPullErrors[path] = errStr
|
f.tempPullErrors[path] = errStr
|
||||||
|
|
||||||
l.Debugf("%v new error for %v: %v", f, path, err)
|
f.sl.Debug("New pull error", slogutil.FilePath(path), slogutil.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
// deleteItemOnDisk deletes the file represented by old that is about to be replaced by new.
|
// deleteItemOnDisk deletes the file represented by old that is about to be replaced by new.
|
||||||
@@ -2116,7 +2116,7 @@ func (f *sendReceiveFolder) scanIfItemChanged(name string, stat fs.FileInfo, ite
|
|||||||
// I.e. non-nil error status means "Do not delete!" or "is already deleted".
|
// I.e. non-nil error status means "Do not delete!" or "is already deleted".
|
||||||
func (f *sendReceiveFolder) checkToBeDeleted(file, cur protocol.FileInfo, hasCur bool, scanChan chan<- string) error {
|
func (f *sendReceiveFolder) checkToBeDeleted(file, cur protocol.FileInfo, hasCur bool, scanChan chan<- string) error {
|
||||||
if err := osutil.TraversesSymlink(f.mtimefs, filepath.Dir(file.Name)); err != nil {
|
if err := osutil.TraversesSymlink(f.mtimefs, filepath.Dir(file.Name)); err != nil {
|
||||||
l.Debugln(f, "not deleting item behind symlink on disk, but update db", file.Name)
|
f.sl.Debug("Not deleting item behind symlink on disk, but updating database", slogutil.FilePath(file.Name))
|
||||||
return fs.ErrNotExist
|
return fs.ErrNotExist
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2130,7 +2130,7 @@ func (f *sendReceiveFolder) checkToBeDeleted(file, cur protocol.FileInfo, hasCur
|
|||||||
scanChan <- file.Name
|
scanChan <- file.Name
|
||||||
return errModified
|
return errModified
|
||||||
}
|
}
|
||||||
l.Debugln(f, "not deleting item we don't have, but update db", file.Name)
|
f.sl.Debug("Not deleting item we don't have, but updating database", slogutil.FilePath(file.Name))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2144,7 +2144,7 @@ func (f *sendReceiveFolder) setPlatformData(file *protocol.FileInfo, name string
|
|||||||
if f.SyncXattrs {
|
if f.SyncXattrs {
|
||||||
// Set extended attributes.
|
// Set extended attributes.
|
||||||
if err := f.mtimefs.SetXattr(name, file.Platform.Xattrs(), f.XattrFilter); errors.Is(err, fs.ErrXattrsNotSupported) {
|
if err := f.mtimefs.SetXattr(name, file.Platform.Xattrs(), f.XattrFilter); errors.Is(err, fs.ErrXattrsNotSupported) {
|
||||||
l.Debugf("Cannot set xattrs on %q: %v", file.Name, err)
|
f.sl.Debug("Cannot set xattrs (not supported)", slogutil.FilePath(file.Name), slogutil.Error(err))
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -2185,15 +2185,15 @@ func (f *sendReceiveFolder) inWritableDir(fn func(string) error, path string) er
|
|||||||
return inWritableDir(fn, f.mtimefs, path, f.IgnorePerms)
|
return inWritableDir(fn, f.mtimefs, path, f.IgnorePerms)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) limitedWriteAt(fd io.WriterAt, data []byte, offset int64) error {
|
func (f *sendReceiveFolder) limitedWriteAt(ctx context.Context, fd io.WriterAt, data []byte, offset int64) error {
|
||||||
return f.withLimiter(func() error {
|
return f.withLimiter(ctx, func() error {
|
||||||
_, err := fd.WriteAt(data, offset)
|
_, err := fd.WriteAt(data, offset)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *sendReceiveFolder) withLimiter(fn func() error) error {
|
func (f *sendReceiveFolder) withLimiter(ctx context.Context, fn func() error) error {
|
||||||
if err := f.writeLimiter.TakeWithContext(f.ctx, 1); err != nil {
|
if err := f.writeLimiter.TakeWithContext(ctx, 1); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer f.writeLimiter.Give(1)
|
defer f.writeLimiter.Give(1)
|
||||||
@@ -2235,7 +2235,7 @@ func existingConflicts(name string, fs fs.Filesystem) []string {
|
|||||||
ext := filepath.Ext(name)
|
ext := filepath.Ext(name)
|
||||||
matches, err := fs.Glob(name[:len(name)-len(ext)] + ".sync-conflict-????????-??????*" + ext)
|
matches, err := fs.Glob(name[:len(name)-len(ext)] + ".sync-conflict-????????-??????*" + ext)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugln("globbing for conflicts", err)
|
slog.Debug("Globbing for conflicts failed", slogutil.Error(err))
|
||||||
}
|
}
|
||||||
return matches
|
return matches
|
||||||
}
|
}
|
||||||
@@ -65,8 +65,6 @@ func prepareTmpFile(to fs.Filesystem) (string, error) {
|
|||||||
return tmpName, nil
|
return tmpName, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var folders = []string{"default"}
|
|
||||||
|
|
||||||
var diffTestData = []struct {
|
var diffTestData = []struct {
|
||||||
a string
|
a string
|
||||||
b string
|
b string
|
||||||
@@ -112,8 +110,8 @@ func createEmptyFileInfo(t *testing.T, name string, fs fs.Filesystem) protocol.F
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Sets up a folder and model, but makes sure the services aren't actually running.
|
// Sets up a folder and model, but makes sure the services aren't actually running.
|
||||||
func setupSendReceiveFolder(t testing.TB, files ...protocol.FileInfo) (*testModel, *sendReceiveFolder, context.CancelFunc) {
|
func setupSendReceiveFolder(t testing.TB, files ...protocol.FileInfo) (*testModel, *sendReceiveFolder) {
|
||||||
w, fcfg, wCancel := newDefaultCfgWrapper()
|
w, fcfg := newDefaultCfgWrapper(t)
|
||||||
// Initialise model and stop immediately.
|
// Initialise model and stop immediately.
|
||||||
model := setupModel(t, w)
|
model := setupModel(t, w)
|
||||||
model.cancel()
|
model.cancel()
|
||||||
@@ -121,14 +119,13 @@ func setupSendReceiveFolder(t testing.TB, files ...protocol.FileInfo) (*testMode
|
|||||||
r, _ := model.folderRunners.Get(fcfg.ID)
|
r, _ := model.folderRunners.Get(fcfg.ID)
|
||||||
f := r.(*sendReceiveFolder)
|
f := r.(*sendReceiveFolder)
|
||||||
f.tempPullErrors = make(map[string]string)
|
f.tempPullErrors = make(map[string]string)
|
||||||
f.ctx = context.Background()
|
|
||||||
|
|
||||||
// Update index
|
// Update index
|
||||||
if files != nil {
|
if files != nil {
|
||||||
f.updateLocalsFromScanning(files)
|
f.updateLocalsFromScanning(files)
|
||||||
}
|
}
|
||||||
|
|
||||||
return model, f, wCancel
|
return model, f
|
||||||
}
|
}
|
||||||
|
|
||||||
// Layout of the files: (indexes from the above array)
|
// Layout of the files: (indexes from the above array)
|
||||||
@@ -146,12 +143,11 @@ func TestHandleFile(t *testing.T) {
|
|||||||
requiredFile := existingFile
|
requiredFile := existingFile
|
||||||
requiredFile.Blocks = blocks[1:]
|
requiredFile.Blocks = blocks[1:]
|
||||||
|
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t, existingFile)
|
_, f := setupSendReceiveFolder(t, existingFile)
|
||||||
defer wcfgCancel()
|
|
||||||
|
|
||||||
copyChan := make(chan copyBlocksState, 1)
|
copyChan := make(chan copyBlocksState, 1)
|
||||||
|
|
||||||
f.handleFile(requiredFile, copyChan)
|
f.handleFile(t.Context(), requiredFile, copyChan)
|
||||||
|
|
||||||
// Receive the results
|
// Receive the results
|
||||||
toCopy := <-copyChan
|
toCopy := <-copyChan
|
||||||
@@ -188,8 +184,7 @@ func TestHandleFileWithTemp(t *testing.T) {
|
|||||||
requiredFile := existingFile
|
requiredFile := existingFile
|
||||||
requiredFile.Blocks = blocks[1:]
|
requiredFile.Blocks = blocks[1:]
|
||||||
|
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t, existingFile)
|
_, f := setupSendReceiveFolder(t, existingFile)
|
||||||
defer wcfgCancel()
|
|
||||||
|
|
||||||
if _, err := prepareTmpFile(f.Filesystem()); err != nil {
|
if _, err := prepareTmpFile(f.Filesystem()); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -197,7 +192,7 @@ func TestHandleFileWithTemp(t *testing.T) {
|
|||||||
|
|
||||||
copyChan := make(chan copyBlocksState, 1)
|
copyChan := make(chan copyBlocksState, 1)
|
||||||
|
|
||||||
f.handleFile(requiredFile, copyChan)
|
f.handleFile(t.Context(), requiredFile, copyChan)
|
||||||
|
|
||||||
// Receive the results
|
// Receive the results
|
||||||
toCopy := <-copyChan
|
toCopy := <-copyChan
|
||||||
@@ -238,8 +233,7 @@ func TestCopierFinder(t *testing.T) {
|
|||||||
requiredFile.Blocks = blocks[1:]
|
requiredFile.Blocks = blocks[1:]
|
||||||
requiredFile.Name = "file2"
|
requiredFile.Name = "file2"
|
||||||
|
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t, existingFile)
|
_, f := setupSendReceiveFolder(t, existingFile)
|
||||||
defer wcfgCancel()
|
|
||||||
|
|
||||||
if _, err := prepareTmpFile(f.Filesystem()); err != nil {
|
if _, err := prepareTmpFile(f.Filesystem()); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@@ -250,10 +244,10 @@ func TestCopierFinder(t *testing.T) {
|
|||||||
finisherChan := make(chan *sharedPullerState, 1)
|
finisherChan := make(chan *sharedPullerState, 1)
|
||||||
|
|
||||||
// Run a single fetcher routine
|
// Run a single fetcher routine
|
||||||
go f.copierRoutine(copyChan, pullChan, finisherChan)
|
go f.copierRoutine(t.Context(), copyChan, pullChan, finisherChan)
|
||||||
defer close(copyChan)
|
defer close(copyChan)
|
||||||
|
|
||||||
f.handleFile(requiredFile, copyChan)
|
f.handleFile(t.Context(), requiredFile, copyChan)
|
||||||
|
|
||||||
timeout := time.After(10 * time.Second)
|
timeout := time.After(10 * time.Second)
|
||||||
pulls := make([]pullBlockState, 4)
|
pulls := make([]pullBlockState, 4)
|
||||||
@@ -302,7 +296,7 @@ func TestCopierFinder(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify that the fetched blocks have actually been written to the temp file
|
// Verify that the fetched blocks have actually been written to the temp file
|
||||||
blks, err := scanner.HashFile(context.TODO(), f.ID, f.Filesystem(), tempFile, protocol.MinBlockSize, nil)
|
blks, err := scanner.HashFile(t.Context(), f.ID, f.Filesystem(), tempFile, protocol.MinBlockSize, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Log(err)
|
t.Log(err)
|
||||||
}
|
}
|
||||||
@@ -319,8 +313,7 @@ func TestCopierCleanup(t *testing.T) {
|
|||||||
// Create a file
|
// Create a file
|
||||||
file := setupFile("test", []int{0})
|
file := setupFile("test", []int{0})
|
||||||
file.Size = 1
|
file.Size = 1
|
||||||
m, f, wcfgCancel := setupSendReceiveFolder(t, file)
|
m, f := setupSendReceiveFolder(t, file)
|
||||||
defer wcfgCancel()
|
|
||||||
|
|
||||||
file.Blocks = []protocol.BlockInfo{blocks[1]}
|
file.Blocks = []protocol.BlockInfo{blocks[1]}
|
||||||
file.Version = file.Version.Update(myID.Short())
|
file.Version = file.Version.Update(myID.Short())
|
||||||
@@ -352,8 +345,7 @@ func TestCopierCleanup(t *testing.T) {
|
|||||||
func TestDeregisterOnFailInCopy(t *testing.T) {
|
func TestDeregisterOnFailInCopy(t *testing.T) {
|
||||||
file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
|
file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
|
||||||
|
|
||||||
m, f, wcfgCancel := setupSendReceiveFolder(t)
|
m, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
|
|
||||||
// Set up our evet subscription early
|
// Set up our evet subscription early
|
||||||
s := m.evLogger.Subscribe(events.ItemFinished)
|
s := m.evLogger.Subscribe(events.ItemFinished)
|
||||||
@@ -371,8 +363,8 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
|
|||||||
finisherChan := make(chan *sharedPullerState)
|
finisherChan := make(chan *sharedPullerState)
|
||||||
dbUpdateChan := make(chan dbUpdateJob, 1)
|
dbUpdateChan := make(chan dbUpdateJob, 1)
|
||||||
|
|
||||||
copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
|
copyChan, copyWg := startCopier(t.Context(), f, pullChan, finisherBufferChan)
|
||||||
go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string))
|
go f.finisherRoutine(t.Context(), finisherChan, dbUpdateChan, make(chan string))
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
close(copyChan)
|
close(copyChan)
|
||||||
@@ -382,7 +374,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
|
|||||||
close(finisherChan)
|
close(finisherChan)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
f.handleFile(file, copyChan)
|
f.handleFile(t.Context(), file, copyChan)
|
||||||
|
|
||||||
// Receive a block at puller, to indicate that at least a single copier
|
// Receive a block at puller, to indicate that at least a single copier
|
||||||
// loop has been performed.
|
// loop has been performed.
|
||||||
@@ -451,8 +443,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
|
|||||||
func TestDeregisterOnFailInPull(t *testing.T) {
|
func TestDeregisterOnFailInPull(t *testing.T) {
|
||||||
file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
|
file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
|
||||||
|
|
||||||
m, f, wcfgCancel := setupSendReceiveFolder(t)
|
m, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
|
|
||||||
// Set up our evet subscription early
|
// Set up our evet subscription early
|
||||||
s := m.evLogger.Subscribe(events.ItemFinished)
|
s := m.evLogger.Subscribe(events.ItemFinished)
|
||||||
@@ -470,14 +461,14 @@ func TestDeregisterOnFailInPull(t *testing.T) {
|
|||||||
finisherChan := make(chan *sharedPullerState)
|
finisherChan := make(chan *sharedPullerState)
|
||||||
dbUpdateChan := make(chan dbUpdateJob, 1)
|
dbUpdateChan := make(chan dbUpdateJob, 1)
|
||||||
|
|
||||||
copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
|
copyChan, copyWg := startCopier(t.Context(), f, pullChan, finisherBufferChan)
|
||||||
var pullWg sync.WaitGroup
|
var pullWg sync.WaitGroup
|
||||||
pullWg.Add(1)
|
pullWg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
f.pullerRoutine(pullChan, finisherBufferChan)
|
f.pullerRoutine(t.Context(), pullChan, finisherBufferChan)
|
||||||
pullWg.Done()
|
pullWg.Done()
|
||||||
}()
|
}()
|
||||||
go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string))
|
go f.finisherRoutine(t.Context(), finisherChan, dbUpdateChan, make(chan string))
|
||||||
defer func() {
|
defer func() {
|
||||||
// Unblock copier and puller
|
// Unblock copier and puller
|
||||||
go func() {
|
go func() {
|
||||||
@@ -492,7 +483,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
|
|||||||
close(finisherChan)
|
close(finisherChan)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
f.handleFile(file, copyChan)
|
f.handleFile(t.Context(), file, copyChan)
|
||||||
|
|
||||||
// Receive at finisher, we should error out as puller has nowhere to pull
|
// Receive at finisher, we should error out as puller has nowhere to pull
|
||||||
// from.
|
// from.
|
||||||
@@ -553,8 +544,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIssue3164(t *testing.T) {
|
func TestIssue3164(t *testing.T) {
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t)
|
_, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
ffs := f.Filesystem()
|
ffs := f.Filesystem()
|
||||||
|
|
||||||
ignDir := filepath.Join("issue3164", "oktodelete")
|
ignDir := filepath.Join("issue3164", "oktodelete")
|
||||||
@@ -566,7 +556,7 @@ func TestIssue3164(t *testing.T) {
|
|||||||
Name: "issue3164",
|
Name: "issue3164",
|
||||||
}
|
}
|
||||||
|
|
||||||
must(t, f.scanSubdirs(nil))
|
must(t, f.scanSubdirs(t.Context(), nil))
|
||||||
|
|
||||||
matcher := ignore.New(ffs)
|
matcher := ignore.New(ffs)
|
||||||
must(t, matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""))
|
must(t, matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""))
|
||||||
@@ -583,8 +573,8 @@ func TestIssue3164(t *testing.T) {
|
|||||||
|
|
||||||
func TestDiff(t *testing.T) {
|
func TestDiff(t *testing.T) {
|
||||||
for i, test := range diffTestData {
|
for i, test := range diffTestData {
|
||||||
a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil)
|
a, _ := scanner.Blocks(t.Context(), bytes.NewBufferString(test.a), test.s, -1, nil)
|
||||||
b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil)
|
b, _ := scanner.Blocks(t.Context(), bytes.NewBufferString(test.b), test.s, -1, nil)
|
||||||
_, d := blockDiff(a, b)
|
_, d := blockDiff(a, b)
|
||||||
if len(d) != len(test.d) {
|
if len(d) != len(test.d) {
|
||||||
t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
|
t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
|
||||||
@@ -604,9 +594,9 @@ func TestDiff(t *testing.T) {
|
|||||||
func BenchmarkDiff(b *testing.B) {
|
func BenchmarkDiff(b *testing.B) {
|
||||||
testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
|
testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
|
||||||
for _, test := range diffTestData {
|
for _, test := range diffTestData {
|
||||||
a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil)
|
aBlocks, _ := scanner.Blocks(b.Context(), bytes.NewBufferString(test.a), test.s, -1, nil)
|
||||||
b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil)
|
bBlocks, _ := scanner.Blocks(b.Context(), bytes.NewBufferString(test.b), test.s, -1, nil)
|
||||||
testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{a, b})
|
testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{aBlocks, bBlocks})
|
||||||
}
|
}
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
@@ -643,8 +633,7 @@ func TestDiffEmpty(t *testing.T) {
|
|||||||
// option is true and the permissions do not match between the file on disk and
|
// option is true and the permissions do not match between the file on disk and
|
||||||
// in the db.
|
// in the db.
|
||||||
func TestDeleteIgnorePerms(t *testing.T) {
|
func TestDeleteIgnorePerms(t *testing.T) {
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t)
|
_, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
ffs := f.Filesystem()
|
ffs := f.Filesystem()
|
||||||
f.IgnorePerms = true
|
f.IgnorePerms = true
|
||||||
|
|
||||||
@@ -682,7 +671,7 @@ func TestCopyOwner(t *testing.T) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// This test hung on a regression, taking a long time to fail - speed that up.
|
// This test hung on a regression, taking a long time to fail - speed that up.
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
ctx, cancel := context.WithTimeout(t.Context(), 2*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
go func() {
|
go func() {
|
||||||
<-ctx.Done()
|
<-ctx.Done()
|
||||||
@@ -695,8 +684,7 @@ func TestCopyOwner(t *testing.T) {
|
|||||||
// Set up a folder with the CopyParentOwner bit and backed by a fake
|
// Set up a folder with the CopyParentOwner bit and backed by a fake
|
||||||
// filesystem.
|
// filesystem.
|
||||||
|
|
||||||
m, f, wcfgCancel := setupSendReceiveFolder(t)
|
m, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
f.folder.FolderConfiguration = newFolderConfiguration(m.cfg, f.ID, f.Label, config.FilesystemTypeFake, "/TestCopyOwner")
|
f.folder.FolderConfiguration = newFolderConfiguration(m.cfg, f.ID, f.Label, config.FilesystemTypeFake, "/TestCopyOwner")
|
||||||
f.folder.FolderConfiguration.CopyOwnershipFromParent = true
|
f.folder.FolderConfiguration.CopyOwnershipFromParent = true
|
||||||
|
|
||||||
@@ -748,15 +736,15 @@ func TestCopyOwner(t *testing.T) {
|
|||||||
// comes the finisher is done.
|
// comes the finisher is done.
|
||||||
|
|
||||||
finisherChan := make(chan *sharedPullerState)
|
finisherChan := make(chan *sharedPullerState)
|
||||||
copierChan, copyWg := startCopier(f, nil, finisherChan)
|
copierChan, copyWg := startCopier(t.Context(), f, nil, finisherChan)
|
||||||
go f.finisherRoutine(finisherChan, dbUpdateChan, nil)
|
go f.finisherRoutine(t.Context(), finisherChan, dbUpdateChan, nil)
|
||||||
defer func() {
|
defer func() {
|
||||||
close(copierChan)
|
close(copierChan)
|
||||||
copyWg.Wait()
|
copyWg.Wait()
|
||||||
close(finisherChan)
|
close(finisherChan)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
f.handleFile(file, copierChan)
|
f.handleFile(t.Context(), file, copierChan)
|
||||||
<-dbUpdateChan
|
<-dbUpdateChan
|
||||||
|
|
||||||
info, err = f.mtimefs.Lstat("foo/bar/baz")
|
info, err = f.mtimefs.Lstat("foo/bar/baz")
|
||||||
@@ -794,8 +782,7 @@ func TestCopyOwner(t *testing.T) {
|
|||||||
// TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file
|
// TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file
|
||||||
// is replaced with a directory and versions are conflicting
|
// is replaced with a directory and versions are conflicting
|
||||||
func TestSRConflictReplaceFileByDir(t *testing.T) {
|
func TestSRConflictReplaceFileByDir(t *testing.T) {
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t)
|
_, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
ffs := f.Filesystem()
|
ffs := f.Filesystem()
|
||||||
|
|
||||||
name := "foo"
|
name := "foo"
|
||||||
@@ -826,8 +813,7 @@ func TestSRConflictReplaceFileByDir(t *testing.T) {
|
|||||||
// TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file
|
// TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file
|
||||||
// is replaced with a link and versions are conflicting
|
// is replaced with a link and versions are conflicting
|
||||||
func TestSRConflictReplaceFileByLink(t *testing.T) {
|
func TestSRConflictReplaceFileByLink(t *testing.T) {
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t)
|
_, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
ffs := f.Filesystem()
|
ffs := f.Filesystem()
|
||||||
|
|
||||||
name := "foo"
|
name := "foo"
|
||||||
@@ -859,8 +845,7 @@ func TestSRConflictReplaceFileByLink(t *testing.T) {
|
|||||||
// TestDeleteBehindSymlink checks that we don't delete or schedule a scan
|
// TestDeleteBehindSymlink checks that we don't delete or schedule a scan
|
||||||
// when trying to delete a file behind a symlink.
|
// when trying to delete a file behind a symlink.
|
||||||
func TestDeleteBehindSymlink(t *testing.T) {
|
func TestDeleteBehindSymlink(t *testing.T) {
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t)
|
_, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
ffs := f.Filesystem()
|
ffs := f.Filesystem()
|
||||||
|
|
||||||
link := "link"
|
link := "link"
|
||||||
@@ -898,16 +883,14 @@ func TestDeleteBehindSymlink(t *testing.T) {
|
|||||||
|
|
||||||
// Reproduces https://github.com/syncthing/syncthing/issues/6559
|
// Reproduces https://github.com/syncthing/syncthing/issues/6559
|
||||||
func TestPullCtxCancel(t *testing.T) {
|
func TestPullCtxCancel(t *testing.T) {
|
||||||
_, f, wcfgCancel := setupSendReceiveFolder(t)
|
_, f := setupSendReceiveFolder(t)
|
||||||
defer wcfgCancel()
|
|
||||||
|
|
||||||
pullChan := make(chan pullBlockState)
|
pullChan := make(chan pullBlockState)
|
||||||
finisherChan := make(chan *sharedPullerState)
|
finisherChan := make(chan *sharedPullerState)
|
||||||
|
|
||||||
var cancel context.CancelFunc
|
ctx, cancel := context.WithCancel(t.Context())
|
||||||
f.ctx, cancel = context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
go f.pullerRoutine(pullChan, finisherChan)
|
go f.pullerRoutine(ctx, pullChan, finisherChan)
|
||||||
defer close(pullChan)
|
defer close(pullChan)
|
||||||
|
|
||||||
emptyState := func() pullBlockState {
|
emptyState := func() pullBlockState {
|
||||||
@@ -940,8 +923,7 @@ func TestPullCtxCancel(t *testing.T) {
 }
 
 func TestPullDeleteUnscannedDir(t *testing.T) {
-	_, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	_, f := setupSendReceiveFolder(t)
 	ffs := f.Filesystem()
 
 	dir := "foobar"
@@ -969,14 +951,13 @@ func TestPullDeleteUnscannedDir(t *testing.T) {
 }
 
 func TestPullCaseOnlyPerformFinish(t *testing.T) {
-	m, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	m, f := setupSendReceiveFolder(t)
 	ffs := f.Filesystem()
 
 	name := "foo"
 	contents := []byte("contents")
 	writeFile(t, ffs, name, contents)
-	must(t, f.scanSubdirs(nil))
+	must(t, f.scanSubdirs(t.Context(), nil))
 
 	var cur protocol.FileInfo
 	hasCur := false
@@ -1032,8 +1013,7 @@ func TestPullCaseOnlySymlink(t *testing.T) {
 }
 
 func testPullCaseOnlyDirOrSymlink(t *testing.T, dir bool) {
-	m, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	m, f := setupSendReceiveFolder(t)
 	ffs := f.Filesystem()
 
 	name := "foo"
@@ -1043,7 +1023,7 @@ func testPullCaseOnlyDirOrSymlink(t *testing.T, dir bool) {
 		must(t, ffs.CreateSymlink("target", name))
 	}
 
-	must(t, f.scanSubdirs(nil))
+	must(t, f.scanSubdirs(t.Context(), nil))
 	var cur protocol.FileInfo
 	hasCur := false
 	it, errFn := m.LocalFiles(f.ID, protocol.LocalDeviceID)
@@ -1089,8 +1069,7 @@ func testPullCaseOnlyDirOrSymlink(t *testing.T, dir bool) {
 }
 
 func TestPullTempFileCaseConflict(t *testing.T) {
-	_, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	_, f := setupSendReceiveFolder(t)
 
 	copyChan := make(chan copyBlocksState, 1)
 
@@ -1106,7 +1085,7 @@ func TestPullTempFileCaseConflict(t *testing.T) {
 		fd.Close()
 	}
 
-	f.handleFile(file, copyChan)
+	f.handleFile(t.Context(), file, copyChan)
 
 	cs := <-copyChan
 	if _, err := cs.tempFile(); err != nil {
@@ -1117,8 +1096,7 @@ func TestPullTempFileCaseConflict(t *testing.T) {
 }
 
 func TestPullCaseOnlyRename(t *testing.T) {
-	m, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	m, f := setupSendReceiveFolder(t)
 
 	// tempNameConfl := fs.TempName(confl)
 
@@ -1132,7 +1110,7 @@ func TestPullCaseOnlyRename(t *testing.T) {
 		fd.Close()
 	}
 
-	must(t, f.scanSubdirs(nil))
+	must(t, f.scanSubdirs(t.Context(), nil))
 
 	cur, ok := m.testCurrentFolderFile(f.ID, name)
 	if !ok {
@@ -1158,8 +1136,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
 		t.Skip()
 	}
 
-	m, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	m, f := setupSendReceiveFolder(t)
 	conn := addFakeConn(m, device1, f.ID)
 
 	name := "foo"
@@ -1172,7 +1149,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
 		fd.Close()
 	}
 
-	must(t, f.scanSubdirs(nil))
+	must(t, f.scanSubdirs(t.Context(), nil))
 
 	file, ok := m.testCurrentFolderFile(f.ID, name)
 	if !ok {
@@ -1182,7 +1159,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
 
 	scanChan := make(chan string)
 
-	changed, err := f.pullerIteration(scanChan)
+	changed, err := f.pullerIteration(t.Context(), scanChan)
 	must(t, err)
 	if changed != 1 {
 		t.Error("Expected one change in pull, got", changed)
@@ -1200,8 +1177,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
 }
 
 func TestPullDeleteCaseConflict(t *testing.T) {
-	_, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	_, f := setupSendReceiveFolder(t)
 
 	name := "foo"
 	fi := protocol.FileInfo{Name: "Foo"}
@@ -1232,8 +1208,7 @@ func TestPullDeleteCaseConflict(t *testing.T) {
 }
 
 func TestPullDeleteIgnoreChildDir(t *testing.T) {
-	_, f, wcfgCancel := setupSendReceiveFolder(t)
-	defer wcfgCancel()
+	_, f := setupSendReceiveFolder(t)
 
 	parent := "parent"
 	del := "ignored"
@@ -1268,12 +1243,12 @@ func cleanupSharedPullerState(s *sharedPullerState) {
 	s.writer.mut.Unlock()
 }
 
-func startCopier(f *sendReceiveFolder, pullChan chan<- pullBlockState, finisherChan chan<- *sharedPullerState) (chan copyBlocksState, *sync.WaitGroup) {
+func startCopier(ctx context.Context, f *sendReceiveFolder, pullChan chan<- pullBlockState, finisherChan chan<- *sharedPullerState) (chan copyBlocksState, *sync.WaitGroup) {
 	copyChan := make(chan copyBlocksState)
 	wg := new(sync.WaitGroup)
 	wg.Add(1)
 	go func() {
-		f.copierRoutine(copyChan, pullChan, finisherChan)
+		f.copierRoutine(ctx, copyChan, pullChan, finisherChan)
 		wg.Done()
 	}()
 	return copyChan, wg
@@ -77,9 +77,8 @@ func addFolderDevicesToClusterConfig(cc *protocol.ClusterConfig, remote protocol
 }
 
 func TestRequest(t *testing.T) {
-	wrapper, fcfg, cancel := newDefaultCfgWrapper()
+	wrapper, fcfg := newDefaultCfgWrapper(t)
 	ffs := fcfg.Filesystem()
-	defer cancel()
 	m := setupModel(t, wrapper)
 	defer cleanupModel(m)
 
@@ -164,8 +163,7 @@ func BenchmarkIndex_100(b *testing.B) {
 }
 
 func benchmarkIndex(b *testing.B, nfiles int) {
-	m, _, fcfg, wcfgCancel := setupModelWithConnection(b)
-	defer wcfgCancel()
+	m, _, fcfg := setupModelWithConnection(b)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	files := genFiles(nfiles)
@@ -191,8 +189,7 @@ func BenchmarkIndexUpdate_10000_1(b *testing.B) {
 }
 
 func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
-	m, _, fcfg, wcfgCancel := setupModelWithConnection(b)
-	defer wcfgCancel()
+	m, _, fcfg := setupModelWithConnection(b)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	files := genFiles(nfiles)
@@ -223,7 +220,7 @@ func BenchmarkRequestOut(b *testing.B) {
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		data, err := m.RequestGlobal(context.Background(), device1, "default", files[i%n].Name, 0, 0, 32, nil, false)
+		data, err := m.RequestGlobal(b.Context(), device1, "default", files[i%n].Name, 0, 0, 32, nil, false)
 		if err != nil {
 			b.Error(err)
 		}
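The benchmark hunk above makes the same substitution for benchmarks: b.Context() replaces context.Background(), so the outstanding request is abandoned if the benchmark stops. A tiny hedged sketch of that shape, with requestOnce standing in for whatever context-aware call is being measured (it is not a syncthing function):

package example

import (
	"context"
	"testing"
)

// requestOnce is a placeholder for real work that respects cancellation.
func requestOnce(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func BenchmarkRequestSketch(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// b.Context() is cancelled when the benchmark ends.
		if err := requestOnce(b.Context()); err != nil {
			b.Error(err)
		}
	}
}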
@@ -1777,8 +1774,7 @@ func TestRWScanRecovery(t *testing.T) {
 }
 
 func TestGlobalDirectoryTree(t *testing.T) {
-	m, conn, fcfg, wCancel := setupModelWithConnection(t)
-	defer wCancel()
+	m, conn, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	var seq int64
@@ -2084,8 +2080,7 @@ func BenchmarkTree_100_10(b *testing.B) {
 }
 
 func benchmarkTree(b *testing.B, n1, n2 int) {
-	m, _, fcfg, wcfgCancel := setupModelWithConnection(b)
-	defer wcfgCancel()
+	m, _, fcfg := setupModelWithConnection(b)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	m.ScanFolder(fcfg.ID)
@@ -2342,8 +2337,7 @@ func TestIssue3829(t *testing.T) {
 
 // TestIssue4573 tests that contents of an unavailable dir aren't marked deleted
 func TestIssue4573(t *testing.T) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	testFs := fcfg.Filesystem()
 	defer os.RemoveAll(testFs.URI())
 
@@ -2372,8 +2366,7 @@ func TestIssue4573(t *testing.T) {
 // TestInternalScan checks whether various fs operations are correctly represented
 // in the db after scanning.
 func TestInternalScan(t *testing.T) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	testFs := fcfg.Filesystem()
 	defer os.RemoveAll(testFs.URI())
 
@@ -2472,8 +2465,7 @@ func TestCustomMarkerName(t *testing.T) {
 }
 
 func TestRemoveDirWithContent(t *testing.T) {
-	m, conn, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, conn, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
 
@@ -2533,8 +2525,7 @@ func TestRemoveDirWithContent(t *testing.T) {
 }
 
 func TestIssue4475(t *testing.T) {
-	m, conn, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, conn, fcfg := setupModelWithConnection(t)
 	defer cleanupModel(m)
 	testFs := fcfg.Filesystem()
 
@@ -2870,8 +2861,7 @@ func TestIssue4903(t *testing.T) {
 func TestIssue5002(t *testing.T) {
 	// recheckFile should not panic when given an index equal to the number of blocks
 
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	ffs := fcfg.Filesystem()
 
 	fd, err := ffs.Create("foo")
@@ -2899,8 +2889,7 @@ func TestIssue5002(t *testing.T) {
 }
 
 func TestParentOfUnignored(t *testing.T) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	ffs := fcfg.Filesystem()
 
 	must(t, ffs.Mkdir("bar", 0o755))
@@ -2979,15 +2968,13 @@ func TestFolderRestartZombies(t *testing.T) {
 }
 
 func TestRequestLimit(t *testing.T) {
-	wrapper, fcfg, cancel := newDefaultCfgWrapper()
+	wrapper, fcfg := newDefaultCfgWrapper(t)
 	ffs := fcfg.Filesystem()
 
 	file := "tmpfile"
 	fd, err := ffs.Create(file)
 	must(t, err)
 	fd.Close()
-
-	defer cancel()
 	waiter, err := wrapper.Modify(func(cfg *config.Configuration) {
 		_, i, _ := cfg.Device(device1)
 		cfg.Devices[i].MaxRequestKiB = 1
@@ -3037,8 +3024,7 @@ func TestConnCloseOnRestart(t *testing.T) {
 		protocol.CloseTimeout = oldCloseTimeout
 	}()
 
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	m := setupModel(t, w)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
@@ -3079,8 +3065,7 @@ func TestConnCloseOnRestart(t *testing.T) {
 }
 
 func TestModTimeWindow(t *testing.T) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	tfs := modtimeTruncatingFS{
 		trunc: 0,
 		Filesystem: fcfg.Filesystem(),
@@ -3141,8 +3126,7 @@ func TestModTimeWindow(t *testing.T) {
 }
 
 func TestDevicePause(t *testing.T) {
-	m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, _, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	sub := m.evLogger.Subscribe(events.DevicePaused)
@@ -3171,8 +3155,7 @@ func TestDevicePause(t *testing.T) {
 }
 
 func TestDeviceWasSeen(t *testing.T) {
-	m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, _, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	m.deviceWasSeen(device1)
@@ -3216,8 +3199,7 @@ func TestNewLimitedRequestResponse(t *testing.T) {
 }
 
 func TestSummaryPausedNoError(t *testing.T) {
-	wcfg, fcfg, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, fcfg := newDefaultCfgWrapper(t)
 	pauseFolder(t, wcfg, fcfg.ID, true)
 	m := setupModel(t, wcfg)
 	defer cleanupModel(m)
@@ -3229,14 +3211,15 @@ func TestSummaryPausedNoError(t *testing.T) {
 }
 
 func TestFolderAPIErrors(t *testing.T) {
-	wcfg, fcfg, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, fcfg := newDefaultCfgWrapper(t)
 	pauseFolder(t, wcfg, fcfg.ID, true)
 	m := setupModel(t, wcfg)
 	defer cleanupModel(m)
 
 	methods := []func(folder string) error{
-		m.ScanFolder,
+		func(folder string) error {
+			return m.ScanFolder(folder)
+		},
 		func(folder string) error {
 			return m.ScanFolderSubdirs(folder, nil)
 		},
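In the TestFolderAPIErrors hunk, m.ScanFolder is no longer stored directly in the []func(folder string) error slice but is wrapped in a closure, which suggests its signature no longer matches that element type exactly after this refactor. As a hedged, generic illustration (the Scanner type and its variadic signature below are invented, not the real model API), a method whose signature has widened can still be adapted to the narrower function type with a small wrapper:

package example

type Scanner struct{}

// A widened (here variadic) signature: the method value has type
// func(string, ...string) error, which is not assignable to func(string) error.
func (s *Scanner) ScanFolder(folder string, _ ...string) error { return nil }

func methods(s *Scanner) []func(folder string) error {
	return []func(folder string) error{
		func(folder string) error {
			return s.ScanFolder(folder) // the closure adapts the signature
		},
	}
}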
@@ -3261,8 +3244,7 @@ func TestFolderAPIErrors(t *testing.T) {
 }
 
 func TestRenameSequenceOrder(t *testing.T) {
-	wcfg, fcfg, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, fcfg := newDefaultCfgWrapper(t)
 	m := setupModel(t, wcfg)
 	defer cleanupModel(m)
 
@@ -3324,8 +3306,7 @@ func TestRenameSequenceOrder(t *testing.T) {
 }
 
 func TestRenameSameFile(t *testing.T) {
-	wcfg, fcfg, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, fcfg := newDefaultCfgWrapper(t)
 	m := setupModel(t, wcfg)
 	defer cleanupModel(m)
 
@@ -3368,8 +3349,7 @@ func TestRenameSameFile(t *testing.T) {
 }
 
 func TestBlockListMap(t *testing.T) {
-	wcfg, fcfg, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, fcfg := newDefaultCfgWrapper(t)
 	m := setupModel(t, wcfg)
 	defer cleanupModel(m)
 
@@ -3437,8 +3417,7 @@ func TestBlockListMap(t *testing.T) {
 }
 
 func TestScanRenameCaseOnly(t *testing.T) {
-	wcfg, fcfg, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, fcfg := newDefaultCfgWrapper(t)
 	m := setupModel(t, wcfg)
 	defer cleanupModel(m)
 
@@ -3558,9 +3537,8 @@ func TestAddFolderCompletion(t *testing.T) {
 }
 
 func TestScanDeletedROChangedOnSR(t *testing.T) {
-	m, conn, fcfg, wCancel := setupModelWithConnection(t)
+	m, conn, fcfg := setupModelWithConnection(t)
 	ffs := fcfg.Filesystem()
-	defer wCancel()
 	defer cleanupModelAndRemoveDir(m, ffs.URI())
 	fcfg.Type = config.FolderTypeReceiveOnly
 	setFolder(t, m.cfg, fcfg)
@@ -3600,8 +3578,7 @@ func TestScanDeletedROChangedOnSR(t *testing.T) {
 
 func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSecond bool, pre func(config.Wrapper), fn func(config.Wrapper)) {
 	t.Helper()
-	wcfg, _, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, _ := newDefaultCfgWrapper(t)
 	m := setupModel(t, wcfg)
 	defer cleanupModel(m)
 
@@ -3663,8 +3640,7 @@ func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSec
 // That then causes these files to be considered as needed, while they are not.
 // https://github.com/syncthing/syncthing/issues/6961
 func TestIssue6961(t *testing.T) {
-	wcfg, fcfg, wcfgCancel := newDefaultCfgWrapper()
-	defer wcfgCancel()
+	wcfg, fcfg := newDefaultCfgWrapper(t)
 	tfs := fcfg.Filesystem()
 	waiter, err := wcfg.Modify(func(cfg *config.Configuration) {
 		cfg.SetDevice(newDeviceConfiguration(cfg.Defaults.Device, device2, "device2"))
@@ -3728,8 +3704,7 @@ func TestIssue6961(t *testing.T) {
 }
 
 func TestCompletionEmptyGlobal(t *testing.T) {
-	m, conn, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, conn, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 	files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID.Short()), Sequence: 1}}
 	m.sdb.Update(fcfg.ID, protocol.LocalDeviceID, files)
@@ -3743,8 +3718,7 @@ func TestCompletionEmptyGlobal(t *testing.T) {
 }
 
 func TestNeedMetaAfterIndexReset(t *testing.T) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	addDevice2(t, w, fcfg)
 	m := setupModel(t, w)
 	defer cleanupModelAndRemoveDir(m, fcfg.Path)
@@ -3786,8 +3760,7 @@ func TestCcCheckEncryption(t *testing.T) {
 		t.Skip("skipping on short testing - generating encryption tokens is slow")
 	}
 
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	m := setupModel(t, w)
 	m.cancel()
 	defer cleanupModel(m)
@@ -3927,8 +3900,7 @@ func TestCcCheckEncryption(t *testing.T) {
 
 func TestCCFolderNotRunning(t *testing.T) {
 	// Create the folder, but don't start it.
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	tfs := fcfg.Filesystem()
 	m := newModel(t, w, myID, nil)
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
@@ -3955,8 +3927,7 @@ func TestCCFolderNotRunning(t *testing.T) {
 }
 
 func TestPendingFolder(t *testing.T) {
-	w, _, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, _ := newDefaultCfgWrapper(t)
 	m := setupModel(t, w)
 	defer cleanupModel(m)
 
@@ -4035,11 +4006,10 @@ func TestDeletedNotLocallyChangedReceiveEncrypted(t *testing.T) {
 }
 
 func deletedNotLocallyChanged(t *testing.T, ft config.FolderType) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
+	w, fcfg := newDefaultCfgWrapper(t)
 	tfs := fcfg.Filesystem()
 	fcfg.Type = ft
 	setFolder(t, w, fcfg)
-	defer wCancel()
 	m := setupModel(t, w)
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
 
@@ -30,8 +30,7 @@ func TestRequestSimple(t *testing.T) {
 	// Verify that the model performs a request and creates a file based on
 	// an incoming index update.
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
 
@@ -78,8 +77,7 @@ func TestSymlinkTraversalRead(t *testing.T) {
 		return
 	}
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	// We listen for incoming index updates and trigger when we see one for
@@ -121,8 +119,7 @@ func TestSymlinkTraversalWrite(t *testing.T) {
 		return
 	}
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	// We listen for incoming index updates and trigger when we see one for
@@ -180,8 +177,7 @@ func TestSymlinkTraversalWrite(t *testing.T) {
 func TestRequestCreateTmpSymlink(t *testing.T) {
 	// Test that an update for a temporary file is invalidated
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	// We listen for incoming index updates and trigger when we see one for
@@ -356,8 +352,7 @@ func pullInvalidIgnored(t *testing.T, ft config.FolderType) {
 }
 
 func TestIssue4841(t *testing.T) {
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	received := make(chan []protocol.FileInfo)
@@ -405,8 +400,7 @@ func TestIssue4841(t *testing.T) {
 }
 
 func TestRescanIfHaveInvalidContent(t *testing.T) {
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
 
@@ -467,8 +461,7 @@ func TestRescanIfHaveInvalidContent(t *testing.T) {
 func TestParentDeletion(t *testing.T) {
 	t.Skip("flaky")
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	testFs := fcfg.Filesystem()
 	defer cleanupModelAndRemoveDir(m, testFs.URI())
 
@@ -546,8 +539,7 @@ func TestRequestSymlinkWindows(t *testing.T) {
 		t.Skip("windows specific test")
 	}
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
 
 	received := make(chan []protocol.FileInfo)
@@ -623,8 +615,7 @@ func equalContents(fs fs.Filesystem, path string, contents []byte) error {
 func TestRequestRemoteRenameChanged(t *testing.T) {
 	t.Skip("flaky")
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModel(m)
 
@@ -756,8 +747,7 @@ func TestRequestRemoteRenameChanged(t *testing.T) {
 }
 
 func TestRequestRemoteRenameConflict(t *testing.T) {
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModel(m)
 
@@ -846,8 +836,7 @@ func TestRequestRemoteRenameConflict(t *testing.T) {
 }
 
 func TestRequestDeleteChanged(t *testing.T) {
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
 
@@ -925,8 +914,7 @@ func TestRequestDeleteChanged(t *testing.T) {
 }
 
 func TestNeedFolderFiles(t *testing.T) {
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	defer cleanupModel(m)
 
 	sub := m.evLogger.Subscribe(events.RemoteIndexUpdated)
@@ -970,8 +958,7 @@ func TestNeedFolderFiles(t *testing.T) {
 // propagated upon un-ignoring.
 // https://github.com/syncthing/syncthing/issues/6038
 func TestIgnoreDeleteUnignore(t *testing.T) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	m := setupModel(t, w)
 	fss := fcfg.Filesystem()
 	defer cleanupModel(m)
@@ -1065,8 +1052,7 @@ func TestIgnoreDeleteUnignore(t *testing.T) {
 // TestRequestLastFileProgress checks that the last pulled file (here only) is registered
 // as in progress.
 func TestRequestLastFileProgress(t *testing.T) {
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
 
@@ -1100,8 +1086,7 @@ func TestRequestIndexSenderPause(t *testing.T) {
 	done := make(chan struct{})
 	defer close(done)
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	tfs := fcfg.Filesystem()
 	defer cleanupModelAndRemoveDir(m, tfs.URI())
 
@@ -1213,8 +1198,7 @@ func TestRequestIndexSenderPause(t *testing.T) {
 }
 
 func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	tfs := fcfg.Filesystem()
 	dir1 := "foo"
 	dir2 := "bar"
@@ -1280,8 +1264,7 @@ func TestRequestReceiveEncrypted(t *testing.T) {
 		t.Skip("skipping on short testing - scrypt is too slow")
 	}
 
-	w, fcfg, wCancel := newDefaultCfgWrapper()
-	defer wCancel()
+	w, fcfg := newDefaultCfgWrapper(t)
 	tfs := fcfg.Filesystem()
 	fcfg.Type = config.FolderTypeReceiveEncrypted
 	setFolder(t, w, fcfg)
@@ -1375,8 +1358,7 @@ func TestRequestGlobalInvalidToValid(t *testing.T) {
 	done := make(chan struct{})
 	defer close(done)
 
-	m, fc, fcfg, wcfgCancel := setupModelWithConnection(t)
-	defer wcfgCancel()
+	m, fc, fcfg := setupModelWithConnection(t)
 	fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2})
 	waiter, err := m.cfg.Modify(func(cfg *config.Configuration) {
 		cfg.SetDevice(newDeviceConfiguration(cfg.Defaults.Device, device2, "device2"))
@@ -85,19 +85,24 @@ func init() {
 }
 
 func newConfigWrapper(cfg config.Configuration) (config.Wrapper, context.CancelFunc) {
-	wrapper := config.Wrap("", cfg, myID, events.NoopLogger)
-	ctx, cancel := context.WithCancel(context.Background())
-	go wrapper.Serve(ctx)
-	return wrapper, cancel
+	return newConfigWrapperFromContext(context.Background(), cfg)
 }
 
-func newDefaultCfgWrapper() (config.Wrapper, config.FolderConfiguration, context.CancelFunc) {
-	w, cancel := newConfigWrapper(defaultCfgWrapper.RawCopy())
+func newDefaultCfgWrapper(t testing.TB) (config.Wrapper, config.FolderConfiguration) {
+	w, cancel := newConfigWrapperFromContext(t.Context(), defaultCfgWrapper.RawCopy())
+	t.Cleanup(cancel)
 	fcfg := newFolderConfig()
 	_, _ = w.Modify(func(cfg *config.Configuration) {
 		cfg.SetFolder(fcfg)
 	})
-	return w, fcfg, cancel
+	return w, fcfg
+}
+
+func newConfigWrapperFromContext(ctx context.Context, cfg config.Configuration) (config.Wrapper, context.CancelFunc) {
+	wrapper := config.Wrap("", cfg, myID, events.NoopLogger)
+	ctx, cancel := context.WithCancel(ctx)
+	go wrapper.Serve(ctx)
+	return wrapper, cancel
 }
 
 func newFolderConfig() config.FolderConfiguration {
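The helper rewrite above is what lets every call site drop its cancel return value: newDefaultCfgWrapper now takes the testing.TB, derives its context from t.Context(), and registers the cancel function with t.Cleanup instead of returning it. A minimal sketch of that helper shape, using placeholder names rather than the real syncthing helpers:

package example

import (
	"context"
	"testing"
)

type service struct{ ctx context.Context }

// newService owns its own lifecycle: the cleanup runs when the test (or
// subtest) finishes, so callers no longer receive or defer a CancelFunc.
func newService(t testing.TB) *service {
	ctx, cancel := context.WithCancel(t.Context())
	t.Cleanup(cancel)
	return &service{ctx: ctx}
}

func TestService(t *testing.T) {
	svc := newService(t) // one fewer return value for the caller to manage
	_ = svc
}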
@@ -108,11 +113,11 @@ func newFolderConfig() config.FolderConfiguration {
 	return cfg
 }
 
-func setupModelWithConnection(t testing.TB) (*testModel, *fakeConnection, config.FolderConfiguration, context.CancelFunc) {
+func setupModelWithConnection(t testing.TB) (*testModel, *fakeConnection, config.FolderConfiguration) {
 	t.Helper()
-	w, fcfg, cancel := newDefaultCfgWrapper()
+	w, fcfg := newDefaultCfgWrapper(t)
 	m, fc := setupModelWithConnectionFromWrapper(t, w)
-	return m, fc, fcfg, cancel
+	return m, fc, fcfg
 }
 
 func setupModelWithConnectionFromWrapper(t testing.TB, w config.Wrapper) (*testModel, *fakeConnection) {
@@ -157,7 +162,7 @@ func newModel(t testing.TB, cfg config.Wrapper, id protocol.DeviceID, protectedF
 		mdb.Close()
 	})
 	m := NewModel(cfg, id, mdb, protectedFiles, evLogger, protocol.NewKeyGenerator()).(*model)
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancel(t.Context())
 	go evLogger.Serve(ctx)
 	return &testModel{
 		model: m,