mirror of
https://github.com/syncthing/syncthing.git
synced 2026-01-05 04:19:10 -05:00
Compare commits
57 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5f40879a75 | ||
|
|
0b65a616ba | ||
|
|
6b4fe5c063 | ||
|
|
3065b127b5 | ||
|
|
74ea9c5f67 | ||
|
|
46536509d7 | ||
|
|
607bcc0b0e | ||
|
|
1950efb790 | ||
|
|
1b77ab2b52 | ||
|
|
4f367e4376 | ||
|
|
98418c9b5c | ||
|
|
3e4a90a2ba | ||
|
|
fac4dec840 | ||
|
|
79bf1f1056 | ||
|
|
9ef17322be | ||
|
|
c80e0bfc28 | ||
|
|
28d5c84599 | ||
|
|
b033c36b31 | ||
|
|
bfc9478965 | ||
|
|
d9cb7e2739 | ||
|
|
1f8e6c55f6 | ||
|
|
1eea076f5c | ||
|
|
94beed5c10 | ||
|
|
ed6bfc5417 | ||
|
|
04ff890263 | ||
|
|
9c0825c0d9 | ||
|
|
f78133b8e9 | ||
|
|
b784f5b9e3 | ||
|
|
1c089a4d11 | ||
|
|
baa38eea7a | ||
|
|
c3b5eba205 | ||
|
|
cf75329067 | ||
|
|
8343db6766 | ||
|
|
8f344d0915 | ||
|
|
77dd874383 | ||
|
|
5b34c31cb3 | ||
|
|
668979605b | ||
|
|
5ffa012410 | ||
|
|
5c54d879a1 | ||
|
|
1c5af3a4bd | ||
|
|
651ee2ce74 | ||
|
|
5c9dc4c883 | ||
|
|
a1c5b44c74 | ||
|
|
de9489585f | ||
|
|
438f687591 | ||
|
|
7a8cc5fc99 | ||
|
|
f05ccd775a | ||
|
|
e5cc55ce09 | ||
|
|
299b9d8883 | ||
|
|
074097a8e7 | ||
|
|
ee445e35a0 | ||
|
|
3ad049184e | ||
|
|
974551375e | ||
|
|
531ceb2b0f | ||
|
|
ba4462a70b | ||
|
|
8419c05794 | ||
|
|
c84f60f949 |
5
AUTHORS
5
AUTHORS
@@ -124,9 +124,11 @@ Johan Andersson <j@i19.se>
|
||||
Johan Vromans (sciurius) <jvromans@squirrel.nl>
|
||||
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
|
||||
Jonas Thelemann <e-mail@jonas-thelemann.de>
|
||||
Jonathan <artback@protonmail.com>
|
||||
Jonathan Cross <jcross@gmail.com>
|
||||
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
|
||||
Jörg Thalheim <Mic92@users.noreply.github.com>
|
||||
Jędrzej Kula <kula.jedrek@gmail.com>
|
||||
Kalle Laine <pahakalle@protonmail.com>
|
||||
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
|
||||
Keith Turner <kturner@apache.org>
|
||||
@@ -179,6 +181,7 @@ Nicolas Braud-Santoni <nicolas@braud-santoni.eu>
|
||||
Nicolas Perraut <n.perraut@gmail.com>
|
||||
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
|
||||
Nils Jakobi (thunderstorm99) <jakobi.nils@gmail.com>
|
||||
NinoM4ster <ninom4ster@gmail.com>
|
||||
Nitroretro <43112364+Nitroretro@users.noreply.github.com>
|
||||
NoLooseEnds <jon.koslung@gmail.com>
|
||||
Oliver Freyermuth <o.freyermuth@googlemail.com>
|
||||
@@ -212,6 +215,7 @@ Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
|
||||
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
|
||||
Scott Klupfel (kluppy) <kluppy@going2blue.com>
|
||||
Sergey Mishin (ralder) <ralder@yandex.ru>
|
||||
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com>
|
||||
Simon Frei (imsodin) <freisim93@gmail.com>
|
||||
Simon Mwepu <simonmwepu@gmail.com>
|
||||
Sly_tom_cat <slytomcat@mail.ru>
|
||||
@@ -238,6 +242,7 @@ Vladimir Rusinov <vrusinov@google.com>
|
||||
wangguoliang <liangcszzu@163.com>
|
||||
William A. Kennington III (wkennington) <william@wkennington.com>
|
||||
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
|
||||
xarx00 <xarx00@users.noreply.github.com>
|
||||
Xavier O. (damajor) <damajor@gmail.com>
|
||||
xjtdy888 (xjtdy888) <xjtdy888@163.com>
|
||||
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
|
||||
@@ -15,12 +15,15 @@ EXPOSE 8384 22000 21027/udp
|
||||
|
||||
VOLUME ["/var/syncthing"]
|
||||
|
||||
RUN apk add --no-cache ca-certificates su-exec
|
||||
RUN apk add --no-cache ca-certificates su-exec tzdata
|
||||
|
||||
COPY --from=builder /src/syncthing /bin/syncthing
|
||||
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
|
||||
|
||||
ENV PUID=1000 PGID=1000 HOME=/var/syncthing
|
||||
|
||||
HEALTHCHECK --interval=1m --timeout=10s \
|
||||
CMD nc -z 127.0.0.1 8384 || exit 1
|
||||
|
||||
ENV STGUIADDRESS=0.0.0.0:8384
|
||||
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]
|
||||
|
||||
77
build.go
77
build.go
@@ -49,6 +49,7 @@ var (
|
||||
debugBinary bool
|
||||
coverage bool
|
||||
timeout = "120s"
|
||||
numVersions = 5
|
||||
)
|
||||
|
||||
type target struct {
|
||||
@@ -333,6 +334,20 @@ func runCommand(cmd string, target target) {
|
||||
case "version":
|
||||
fmt.Println(getVersion())
|
||||
|
||||
case "changelog":
|
||||
vers, err := currentAndLatestVersions(numVersions)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, ver := range vers {
|
||||
underline := strings.Repeat("=", len(ver))
|
||||
msg, err := tagMessage(ver)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%s\n%s\n\n%s\n\n", ver, underline, msg)
|
||||
}
|
||||
|
||||
default:
|
||||
log.Fatalf("Unknown command %q", cmd)
|
||||
}
|
||||
@@ -351,6 +366,7 @@ func parseFlags() {
|
||||
flag.StringVar(&cc, "cc", os.Getenv("CC"), "Set CC environment variable for `go build`")
|
||||
flag.BoolVar(&debugBinary, "debug-binary", debugBinary, "Create unoptimized binary to use with delve, set -gcflags='-N -l' and omit -ldflags")
|
||||
flag.BoolVar(&coverage, "coverage", coverage, "Write coverage profile of tests to coverage.txt")
|
||||
flag.IntVar(&numVersions, "num-versions", numVersions, "Number of versions for changelog command")
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
@@ -691,7 +707,7 @@ func rebuildAssets() {
|
||||
}
|
||||
|
||||
func lazyRebuildAssets() {
|
||||
if shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") || shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/auto/gui") {
|
||||
if shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") || shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui") {
|
||||
rebuildAssets()
|
||||
}
|
||||
}
|
||||
@@ -1240,3 +1256,62 @@ func protobufVersion() string {
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func currentAndLatestVersions(n int) ([]string, error) {
|
||||
bs, err := runError("git", "tag", "--sort", "taggerdate")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lines := strings.Split(string(bs), "\n")
|
||||
reverseStrings(lines)
|
||||
|
||||
// The one at the head is the latest version. We always keep that one.
|
||||
// Then we filter out remaining ones with dashes (pre-releases etc).
|
||||
|
||||
latest := lines[:1]
|
||||
nonPres := filterStrings(lines[1:], func(s string) bool { return !strings.Contains(s, "-") })
|
||||
vers := append(latest, nonPres...)
|
||||
return vers[:n], nil
|
||||
}
|
||||
|
||||
func reverseStrings(ss []string) {
|
||||
for i := 0; i < len(ss)/2; i++ {
|
||||
ss[i], ss[len(ss)-1-i] = ss[len(ss)-1-i], ss[i]
|
||||
}
|
||||
}
|
||||
|
||||
func filterStrings(ss []string, op func(string) bool) []string {
|
||||
n := ss[:0]
|
||||
for _, s := range ss {
|
||||
if op(s) {
|
||||
n = append(n, s)
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func tagMessage(tag string) (string, error) {
|
||||
hash, err := runError("git", "rev-parse", tag)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
obj, err := runError("git", "cat-file", "-p", string(hash))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return trimTagMessage(string(obj), tag), nil
|
||||
}
|
||||
|
||||
func trimTagMessage(msg, tag string) string {
|
||||
firstBlank := strings.Index(msg, "\n\n")
|
||||
if firstBlank > 0 {
|
||||
msg = msg[firstBlank+2:]
|
||||
}
|
||||
msg = strings.TrimPrefix(msg, tag)
|
||||
beginSig := strings.Index(msg, "-----BEGIN PGP")
|
||||
if beginSig > 0 {
|
||||
msg = msg[:beginSig]
|
||||
}
|
||||
return strings.TrimSpace(msg)
|
||||
}
|
||||
|
||||
20
build.ps1
Normal file
20
build.ps1
Normal file
@@ -0,0 +1,20 @@
|
||||
function build {
|
||||
go run build.go @args
|
||||
}
|
||||
|
||||
$cmd, $rest = $args
|
||||
switch ($cmd) {
|
||||
"test" {
|
||||
$env:LOGGER_DISCARD=1
|
||||
build test
|
||||
}
|
||||
|
||||
"bench" {
|
||||
$env:LOGGER_DISCARD=1
|
||||
build bench
|
||||
}
|
||||
|
||||
default {
|
||||
build @rest
|
||||
}
|
||||
}
|
||||
73
build.sh
73
build.sh
@@ -2,8 +2,6 @@
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
STTRACE=${STTRACE:-}
|
||||
|
||||
script() {
|
||||
name="$1"
|
||||
shift
|
||||
@@ -15,88 +13,23 @@ build() {
|
||||
}
|
||||
|
||||
case "${1:-default}" in
|
||||
default)
|
||||
build
|
||||
;;
|
||||
|
||||
clean)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
tar)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
assets)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
xdr)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
translate)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
deb)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
setup)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
test)
|
||||
LOGGER_DISCARD=1 build test
|
||||
;;
|
||||
|
||||
bench)
|
||||
LOGGER_DISCARD=1 build bench | script benchfilter
|
||||
LOGGER_DISCARD=1 build bench
|
||||
;;
|
||||
|
||||
prerelease)
|
||||
go run script/authors.go
|
||||
script authors
|
||||
build transifex
|
||||
pushd man ; ./refresh.sh ; popd
|
||||
git add -A gui man AUTHORS
|
||||
git commit -m 'gui, man, authors: Update docs, translations, and contributors'
|
||||
;;
|
||||
|
||||
noupgrade)
|
||||
build -no-upgrade tar
|
||||
;;
|
||||
|
||||
all)
|
||||
platforms=(
|
||||
darwin-amd64 dragonfly-amd64 freebsd-amd64 linux-amd64 netbsd-amd64 openbsd-amd64 solaris-amd64 windows-amd64
|
||||
freebsd-386 linux-386 netbsd-386 openbsd-386 windows-386
|
||||
linux-arm linux-arm64 linux-ppc64 linux-ppc64le
|
||||
)
|
||||
|
||||
for plat in "${platforms[@]}"; do
|
||||
echo Building "$plat"
|
||||
|
||||
goos="${plat%-*}"
|
||||
goarch="${plat#*-}"
|
||||
dist="tar"
|
||||
|
||||
if [[ $goos == "windows" ]]; then
|
||||
dist="zip"
|
||||
fi
|
||||
|
||||
build -goos "$goos" -goarch "$goarch" "$dist"
|
||||
echo
|
||||
done
|
||||
;;
|
||||
|
||||
test-xunit)
|
||||
|
||||
(GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -v -race ./lib/... ./cmd/... || true) > tests.out
|
||||
go2xunit -output tests.xml -fail < tests.out
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unknown build command $1"
|
||||
build "$@"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -148,22 +148,26 @@ func parseReport(path string, report []byte) (*raven.Packet, error) {
|
||||
if version.commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
|
||||
}
|
||||
for _, tag := range version.extra {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
|
||||
}
|
||||
|
||||
return pkt, nil
|
||||
}
|
||||
|
||||
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC
|
||||
var longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)`)
|
||||
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
|
||||
var longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
|
||||
|
||||
type version struct {
|
||||
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
|
||||
tag string // "v1.1.4-rc.1"
|
||||
commit string // "6aaae618", blank when absent
|
||||
codename string // "Erbium Earthworm"
|
||||
runtime string // "go1.12.5"
|
||||
goos string // "darwin"
|
||||
goarch string // "amd64"
|
||||
builder string // "jb@kvin.kastelo.net"
|
||||
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
|
||||
tag string // "v1.1.4-rc.1"
|
||||
commit string // "6aaae618", blank when absent
|
||||
codename string // "Erbium Earthworm"
|
||||
runtime string // "go1.12.5"
|
||||
goos string // "darwin"
|
||||
goarch string // "amd64"
|
||||
builder string // "jb@kvin.kastelo.net"
|
||||
extra []string // "foo", "bar"
|
||||
}
|
||||
|
||||
func (v version) environment() string {
|
||||
@@ -193,6 +197,7 @@ func parseVersion(line string) (version, error) {
|
||||
goarch: m[5],
|
||||
builder: m[6],
|
||||
}
|
||||
|
||||
parts := strings.Split(v.version, "+")
|
||||
v.tag = parts[0]
|
||||
if len(parts) > 1 {
|
||||
@@ -202,5 +207,13 @@ func parseVersion(line string) (version, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(m) >= 8 && m[7] != "" {
|
||||
tags := strings.Split(m[7], ",")
|
||||
for i := range tags {
|
||||
tags[i] = strings.TrimSpace(tags[i])
|
||||
}
|
||||
v.extra = tags
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
@@ -30,16 +30,30 @@ func TestParseVersion(t *testing.T) {
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
},
|
||||
},
|
||||
{
|
||||
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
|
||||
parsed: version{
|
||||
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
|
||||
tag: "v1.1.4-rc.1",
|
||||
commit: "6aaae618",
|
||||
codename: "Erbium Earthworm",
|
||||
runtime: "go1.12.5",
|
||||
goos: "darwin",
|
||||
goarch: "amd64",
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
extra: []string{"foo", "bar"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
v, err := parseVersion(tc.longVersion)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
|
||||
continue
|
||||
}
|
||||
if v != tc.parsed {
|
||||
t.Error(v)
|
||||
if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
|
||||
t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,10 +52,10 @@ func dump(ldb backend.Backend) {
|
||||
fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
fmt.Printf("[dstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
fmt.Printf("[dstat] K:%x V:%x\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
fmt.Printf("[fstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
fmt.Printf("[fstat] K:%x V:%x\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeVirtualMtime:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
@@ -67,46 +67,59 @@ func dump(ldb backend.Backend) {
|
||||
fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, real, virt)
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
key := binary.BigEndian.Uint32(key[1:])
|
||||
fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
key := binary.BigEndian.Uint32(key[1:])
|
||||
val := it.Value()
|
||||
if len(val) == 0 {
|
||||
fmt.Printf("[deviceidx] K:%d V:<nil>\n", key)
|
||||
} else {
|
||||
dev := protocol.DeviceIDFromBytes(val)
|
||||
fmt.Printf("[deviceidx] K:%d V:%s\n", key, dev)
|
||||
device := "<nil>"
|
||||
if len(val) > 0 {
|
||||
dev, err := protocol.DeviceIDFromBytes(val)
|
||||
if err != nil {
|
||||
device = fmt.Sprintf("<invalid %d bytes>", len(val))
|
||||
} else {
|
||||
device = dev.String()
|
||||
}
|
||||
}
|
||||
fmt.Printf("[deviceidx] K:%d V:%s\n", key, device)
|
||||
|
||||
case db.KeyTypeIndexID:
|
||||
device := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
folder := binary.BigEndian.Uint32(it.Key()[5:])
|
||||
device := binary.BigEndian.Uint32(key[1:])
|
||||
folder := binary.BigEndian.Uint32(key[5:])
|
||||
fmt.Printf("[indexid] D:%d F:%d I:%x\n", device, folder, it.Value())
|
||||
|
||||
case db.KeyTypeFolderMeta:
|
||||
folder := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
fmt.Printf("[foldermeta] F:%d V:%x\n", folder, it.Value())
|
||||
|
||||
case db.KeyTypeMiscData:
|
||||
fmt.Printf("[miscdata] K:%q V:%q\n", it.Key()[1:], it.Value())
|
||||
fmt.Printf("[miscdata] K:%q V:%q\n", key[1:], it.Value())
|
||||
|
||||
case db.KeyTypeSequence:
|
||||
folder := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
seq := binary.BigEndian.Uint64(it.Key()[5:])
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
seq := binary.BigEndian.Uint64(key[5:])
|
||||
fmt.Printf("[sequence] F:%d S:%d V:%q\n", folder, seq, it.Value())
|
||||
|
||||
case db.KeyTypeNeed:
|
||||
folder := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
file := string(it.Key()[5:])
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
file := string(key[5:])
|
||||
fmt.Printf("[need] F:%d V:%q\n", folder, file)
|
||||
|
||||
case db.KeyTypeBlockList:
|
||||
fmt.Printf("[blocklist] H:%x\n", it.Key()[1:])
|
||||
fmt.Printf("[blocklist] H:%x\n", key[1:])
|
||||
|
||||
case db.KeyTypeBlockListMap:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[5:37]
|
||||
fileName := string(key[37:])
|
||||
fmt.Printf("[blocklistmap] F:%d H:%x N:%s\n", folder, hash, fileName)
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
fmt.Printf("[version] H:%x\n", key[1:])
|
||||
|
||||
default:
|
||||
fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
|
||||
fmt.Printf("[??? %d]\n %x\n %x\n", key[0], key, it.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,7 +43,9 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
sequences := make(map[sequenceKey]string)
|
||||
needs := make(map[globalKey]struct{})
|
||||
blocklists := make(map[string]struct{})
|
||||
versions := make(map[string]protocol.Vector)
|
||||
usedBlocklists := make(map[string]struct{})
|
||||
usedVersions := make(map[string]struct{})
|
||||
var localDeviceKey uint32
|
||||
success = true
|
||||
|
||||
@@ -106,6 +108,16 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
case db.KeyTypeBlockList:
|
||||
hash := string(key[1:])
|
||||
blocklists[hash] = struct{}{}
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
hash := string(key[1:])
|
||||
var v protocol.Vector
|
||||
if err := v.Unmarshal(it.Value()); err != nil {
|
||||
fmt.Println("Unable to unmarshal Vector:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
versions[hash] = v
|
||||
}
|
||||
}
|
||||
|
||||
@@ -157,6 +169,23 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
usedBlocklists[key] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if fi.VersionHash != nil {
|
||||
key := string(fi.VersionHash)
|
||||
if _, ok := versions[key]; !ok {
|
||||
fmt.Printf("Missing version vector for file %q, version hash %x\n", fi.Name, fi.VersionHash)
|
||||
success = false
|
||||
} else {
|
||||
usedVersions[key] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
_, ok := globals[globalKey{fk.folder, fk.name}]
|
||||
if !ok {
|
||||
fmt.Printf("Missing global for file %q\n", fi.Name)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate the ranges of missing sequence entries, print them
|
||||
@@ -190,10 +219,10 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
fmt.Printf("Unknown folder ID %d for VersionList %q\n", gk.folder, gk.name)
|
||||
success = false
|
||||
}
|
||||
for i, fv := range vl.Versions {
|
||||
dev, ok := deviceToIDs[string(fv.Device)]
|
||||
checkGlobal := func(i int, device []byte, version protocol.Vector, invalid, deleted bool) {
|
||||
dev, ok := deviceToIDs[string(device)]
|
||||
if !ok {
|
||||
fmt.Printf("VersionList %q, folder %q refers to unknown device %q\n", gk.name, folder, fv.Device)
|
||||
fmt.Printf("VersionList %q, folder %q refers to unknown device %q\n", gk.name, folder, device)
|
||||
success = false
|
||||
}
|
||||
fi, ok := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
@@ -201,14 +230,31 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d refers to unknown FileInfo\n", gk.name, folder, i)
|
||||
success = false
|
||||
}
|
||||
if !fi.Version.Equal(fv.Version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, fv.Version, fi.Version)
|
||||
|
||||
fiv := fi.Version
|
||||
if fi.VersionHash != nil {
|
||||
fiv = versions[string(fi.VersionHash)]
|
||||
}
|
||||
if !fiv.Equal(version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, version, fi.Version)
|
||||
success = false
|
||||
}
|
||||
if fi.IsInvalid() != fv.Invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, fv.Invalid, fi.IsInvalid())
|
||||
if fi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, fi.IsInvalid())
|
||||
success = false
|
||||
}
|
||||
if fi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, fi.IsDeleted())
|
||||
success = false
|
||||
}
|
||||
}
|
||||
for i, fv := range vl.RawVersions {
|
||||
for _, device := range fv.Devices {
|
||||
checkGlobal(i, device, fv.Version, false, fv.Deleted)
|
||||
}
|
||||
for _, device := range fv.InvalidDevices {
|
||||
checkGlobal(i, device, fv.Version, true, fv.Deleted)
|
||||
}
|
||||
}
|
||||
|
||||
// If we need this file we should have a need entry for it. False
|
||||
@@ -217,7 +263,9 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
if needsLocally(vl) {
|
||||
_, ok := needs[gk]
|
||||
if !ok {
|
||||
dev := deviceToIDs[string(vl.Versions[0].Device)]
|
||||
fv, _ := vl.GetGlobal()
|
||||
devB, _ := fv.FirstDevice()
|
||||
dev := deviceToIDs[string(devB)]
|
||||
fi := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
if !fi.IsDeleted() && !fi.IsIgnored() {
|
||||
fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
|
||||
@@ -277,20 +325,18 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
if d := len(blocklists) - len(usedBlocklists); d > 0 {
|
||||
fmt.Printf("%d block list entries out of %d needs GC\n", d, len(blocklists))
|
||||
}
|
||||
if d := len(versions) - len(usedVersions); d > 0 {
|
||||
fmt.Printf("%d version entries out of %d needs GC\n", d, len(versions))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func needsLocally(vl db.VersionList) bool {
|
||||
var lv *protocol.Vector
|
||||
for _, fv := range vl.Versions {
|
||||
if bytes.Equal(fv.Device, protocol.LocalDeviceID[:]) {
|
||||
lv = &fv.Version
|
||||
break
|
||||
}
|
||||
}
|
||||
if lv == nil {
|
||||
fv, ok := vl.Get(protocol.LocalDeviceID[:])
|
||||
if !ok {
|
||||
return true // proviosinally, it looks like we need the file
|
||||
}
|
||||
return !lv.GreaterEqual(vl.Versions[0].Version)
|
||||
gfv, _ := vl.GetGlobal() // Can't not have a global if we got something above
|
||||
return !fv.Version.GreaterEqual(gfv.Version)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,13 @@ func main() {
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db")
|
||||
}
|
||||
|
||||
ldb, err := backend.OpenLevelDBRO(path)
|
||||
var ldb backend.Backend
|
||||
var err error
|
||||
if looksLikeBadger(path) {
|
||||
ldb, err = backend.OpenBadger(path)
|
||||
} else {
|
||||
ldb, err = backend.OpenLevelDBRO(path)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -50,3 +56,8 @@ func main() {
|
||||
fmt.Println("Unknown mode")
|
||||
}
|
||||
}
|
||||
|
||||
func looksLikeBadger(path string) bool {
|
||||
_, err := os.Stat(filepath.Join(path, "KEYREGISTRY"))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
@@ -51,7 +51,9 @@
|
||||
<div>
|
||||
<div ng-show="relays !== undefined" class="ng-hide">
|
||||
<p>
|
||||
Currently {{ relays.length }} relays online ({{ totals.goMaxProcs }} cores in total).
|
||||
The relays listed on this page are not managed or vetted by the Syncthing project.
|
||||
Each relay is the responsibility of the relay operator.
|
||||
Currently {{ relays.length }} relays online.
|
||||
</p>
|
||||
</div>
|
||||
<div id="map"></div> <!-- Can't hide the map, otherwise it freaks out -->
|
||||
|
||||
@@ -268,17 +268,13 @@ func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
path = "index.html"
|
||||
}
|
||||
|
||||
content, ok := auto.Assets()[path]
|
||||
as, ok := auto.Assets()[path]
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
assets.Serve(w, r, assets.Asset{
|
||||
ContentGz: content,
|
||||
Filename: path,
|
||||
Modified: time.Unix(auto.Generated, 0).UTC(),
|
||||
})
|
||||
assets.Serve(w, r, as)
|
||||
}
|
||||
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -149,7 +149,15 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
protocol.WriteMessage(conn, protocol.ResponseSuccess)
|
||||
|
||||
case protocol.ConnectRequest:
|
||||
requestedPeer := syncthingprotocol.DeviceIDFromBytes(msg.ID)
|
||||
requestedPeer, err := syncthingprotocol.DeviceIDFromBytes(msg.ID)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(id, "is looking for an invalid peer ID")
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseNotFound)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
outboxesMut.RLock()
|
||||
peerOutbox, ok := outboxes[requestedPeer]
|
||||
outboxesMut.RUnlock()
|
||||
|
||||
@@ -527,7 +527,10 @@ func (e errNoUpgrade) Error() string {
|
||||
}
|
||||
|
||||
func checkUpgrade() (upgrade.Release, error) {
|
||||
cfg, _ := loadOrDefaultConfig(protocol.EmptyDeviceID, events.NoopLogger)
|
||||
cfg, err := loadOrDefaultConfig(protocol.EmptyDeviceID, events.NoopLogger)
|
||||
if err != nil {
|
||||
return upgrade.Release{}, err
|
||||
}
|
||||
opts := cfg.Options()
|
||||
release, err := upgrade.LatestRelease(opts.ReleasesURL, build.Version, opts.UpgradeToPreReleases)
|
||||
if err != nil {
|
||||
|
||||
@@ -7,6 +7,7 @@ After=network.target
|
||||
User=%i
|
||||
ExecStart=/usr/bin/syncthing -no-browser -no-restart -logflags=0
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
SuccessExitStatus=3 4
|
||||
RestartForceExitStatus=3 4
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ Documentation=man:syncthing(1)
|
||||
[Service]
|
||||
ExecStart=/usr/bin/syncthing -no-browser -no-restart -logflags=0
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
SuccessExitStatus=3 4
|
||||
RestartForceExitStatus=3 4
|
||||
|
||||
|
||||
5
go.mod
5
go.mod
@@ -10,6 +10,7 @@ require (
|
||||
github.com/certifi/gocertifi v0.0.0-20190905060710-a5e0173ced67 // indirect
|
||||
github.com/chmduquesne/rollinghash v0.0.0-20180912150627-a60f8e7142b5
|
||||
github.com/d4l3k/messagediff v1.2.1
|
||||
github.com/dgraph-io/badger/v2 v2.0.3
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
|
||||
github.com/getsentry/raven-go v0.2.0
|
||||
github.com/go-ldap/ldap/v3 v3.1.10
|
||||
@@ -17,14 +18,13 @@ require (
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/gogo/protobuf v1.3.1
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
|
||||
github.com/golang/protobuf v1.4.0 // indirect
|
||||
github.com/greatroar/blobloom v0.2.1
|
||||
github.com/jackpal/gateway v1.0.6
|
||||
github.com/jackpal/go-nat-pmp v1.0.2
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
||||
github.com/kr/pretty v0.2.0 // indirect
|
||||
github.com/lib/pq v1.2.0
|
||||
github.com/lucas-clemente/quic-go v0.15.7
|
||||
github.com/lucas-clemente/quic-go v0.16.0
|
||||
github.com/maruel/panicparse v1.3.0
|
||||
github.com/mattn/go-isatty v0.0.11
|
||||
github.com/minio/sha256-simd v0.1.1
|
||||
@@ -45,7 +45,6 @@ require (
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae
|
||||
golang.org/x/text v0.3.2
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
)
|
||||
|
||||
go 1.13
|
||||
|
||||
44
go.sum
44
go.sum
@@ -12,6 +12,9 @@ github.com/AudriusButkevicius/pfilter v0.0.0-20190627213056-c55ef6137fc6/go.mod
|
||||
github.com/AudriusButkevicius/recli v0.0.5 h1:xUa55PvWTHBm17T6RvjElRO3y5tALpdceH86vhzQ5wg=
|
||||
github.com/AudriusButkevicius/recli v0.0.5/go.mod h1:Q2E26yc6RvWWEz/TJ/goUp6yXvipYdJI096hpoaqsNs=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
|
||||
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
|
||||
@@ -25,6 +28,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
@@ -42,6 +46,8 @@ github.com/ccding/go-stun v0.0.0-20180726100737-be486d185f3d h1:As4937T5NVbJ/DmZ
|
||||
github.com/ccding/go-stun v0.0.0-20180726100737-be486d185f3d/go.mod h1:3FK1bMar37f7jqVY7q/63k3OMX1c47pGCufzt3X0sYE=
|
||||
github.com/certifi/gocertifi v0.0.0-20190905060710-a5e0173ced67 h1:8k9FLYBLKT+9v2HQJ/a95ZemmTx+/ltJcAiRhVushG8=
|
||||
github.com/certifi/gocertifi v0.0.0-20190905060710-a5e0173ced67/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
|
||||
@@ -49,7 +55,12 @@ github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wX
|
||||
github.com/chmduquesne/rollinghash v0.0.0-20180912150627-a60f8e7142b5 h1:Wg96Dh0MLTanEaPO0OkGtUIaa2jOnShAIOVUIzRHUxo=
|
||||
github.com/chmduquesne/rollinghash v0.0.0-20180912150627-a60f8e7142b5/go.mod h1:Uc2I36RRfTAf7Dge82bi3RU0OQUmXT9iweIcPqvr8A0=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U=
|
||||
@@ -57,6 +68,14 @@ github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkE
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg=
|
||||
github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI=
|
||||
github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
|
||||
github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU=
|
||||
github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI=
|
||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
|
||||
@@ -132,8 +151,10 @@ github.com/greatroar/blobloom v0.2.1 h1:Ie7+kTQFhcvfFHhzOYSJA2XE5sp8c9iB5iapu2tI
|
||||
github.com/greatroar/blobloom v0.2.1/go.mod h1:we9vO6GNYMmsNvCWINtZnQbcGEHUT6hGBAznNHd6RlE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jackpal/gateway v1.0.6 h1:/MJORKvJEwNVldtGVJC2p2cwCnsSoLn3hl3zxmZT7tk=
|
||||
github.com/jackpal/gateway v1.0.6/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
@@ -162,7 +183,10 @@ github.com/lucas-clemente/quic-go v0.15.6 h1:2l7g4fjjjwqmM3NirLiBtszMaR7aDTSsiiX
|
||||
github.com/lucas-clemente/quic-go v0.15.6/go.mod h1:Myi1OyS0FOjL3not4BxT7KN29bRkcMUV5JVVFLKtDp8=
|
||||
github.com/lucas-clemente/quic-go v0.15.7 h1:Pu7To5/G9JoP1mwlrcIvfV8ByPBlCzif3MCl8+1W83I=
|
||||
github.com/lucas-clemente/quic-go v0.15.7/go.mod h1:Myi1OyS0FOjL3not4BxT7KN29bRkcMUV5JVVFLKtDp8=
|
||||
github.com/lucas-clemente/quic-go v0.16.0 h1:jJw36wfzGJhmOhAOaOC2lS36WgeqXQszH47A7spo1LI=
|
||||
github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI=
|
||||
github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ=
|
||||
@@ -183,6 +207,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyex
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
@@ -204,6 +230,7 @@ github.com/oschwald/geoip2-golang v1.4.0 h1:5RlrjCgRyIGDz/mBmPfnAF4h8k0IAcRv9Pvr
|
||||
github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng=
|
||||
github.com/oschwald/maxminddb-golang v1.6.0 h1:KAJSjdHQ8Kv45nFIbtoLGrGWqHFajOIm7skTyz/+Dls=
|
||||
github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
|
||||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -276,6 +303,14 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
@@ -293,6 +328,7 @@ github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJ
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/thejerf/suture v3.0.2+incompatible h1:GtMydYcnK4zBJ0KL6Lx9vLzl6Oozb65wh252FTBxrvM=
|
||||
github.com/thejerf/suture v3.0.2+incompatible/go.mod h1:ibKwrVj+Uzf3XZdAiNWUouPaAbSoemxOHLmJmwheEMc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
|
||||
@@ -301,11 +337,13 @@ github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 h1:okhMind4q9H1OxF44gNegWkiP4H/gsTFLalHFa4OOUI=
|
||||
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0/go.mod h1:TTbGUfE+cXXceWtbTHq6lqcTvYPBKLNejBEbnUsQJtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
@@ -330,6 +368,7 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -349,6 +388,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
|
||||
@@ -356,6 +396,7 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
|
||||
@@ -406,11 +447,14 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
|
||||
@@ -31,12 +31,12 @@
|
||||
"Are you sure you want to remove device {%name%}?": "Σίγουρα επιθυμείτε να αφαιρέσετε τη συσκευή {{name}};",
|
||||
"Are you sure you want to remove folder {%label%}?": "Σίγουρα επιθυμείτε να αφαιρέσετε τον φάκελο {{label}};",
|
||||
"Are you sure you want to restore {%count%} files?": "Σίγουρα επιθυμείτε να επαναφέρετε {{count}} αρχεία;",
|
||||
"Are you sure you want to upgrade?": "Are you sure you want to upgrade?",
|
||||
"Are you sure you want to upgrade?": "Σίγουρα επιθυμείτε να αναβαθμίσετε;",
|
||||
"Auto Accept": "Αυτόματη αποδοχή",
|
||||
"Automatic Crash Reporting": "Automatic Crash Reporting",
|
||||
"Automatic Crash Reporting": "Αυτόματη αναφορά σφαλμάτων",
|
||||
"Automatic upgrade now offers the choice between stable releases and release candidates.": "Για τις αυτόματες αναβαθμίσεις μπορείτε πλέον να επιλέξετε μεταξύ σταθερών εκδόσεων και υποψήφιων εκδόσεων.",
|
||||
"Automatic upgrades": "Αυτόματη αναβάθμιση",
|
||||
"Automatic upgrades are always enabled for candidate releases.": "Automatic upgrades are always enabled for candidate releases.",
|
||||
"Automatic upgrades are always enabled for candidate releases.": "Η αυτόματη αναβάθμιση είναι πάντα ενεργοποιημένη στις υποψήφιες εκδόσεις.",
|
||||
"Automatically create or share folders that this device advertises at the default path.": "Αυτόματη δημιουργία ή κοινή χρήση φακέλων τους οποίους ανακοινώνει αυτή η συσκευή στην προκαθορισμένη διαδρομή.",
|
||||
"Available debug logging facilities:": "Διαθέσιμες επιλογές μηνυμάτων αποσφαλμάτωσης:",
|
||||
"Be careful!": "Με προσοχή!",
|
||||
@@ -50,7 +50,7 @@
|
||||
"Comment, when used at the start of a line": "Σχόλιο, όταν χρησιμοποιείται στην αρχή μιας γραμμής",
|
||||
"Compression": "Συμπίεση",
|
||||
"Configured": "Βάσει ρύθμισης",
|
||||
"Connected (Unused)": "Connected (Unused)",
|
||||
"Connected (Unused)": "Συνδεδεμένη (εκτός χρήσης)",
|
||||
"Connection Error": "Σφάλμα σύνδεσης",
|
||||
"Connection Type": "Τύπος Σύνδεσης",
|
||||
"Connections": "Συνδέσεις",
|
||||
@@ -59,15 +59,15 @@
|
||||
"Copied from original": "Έχει αντιγραφεί από το πρωτότυπο",
|
||||
"Copyright © 2014-2016 the following Contributors:": "Copyright © 2014-2016 για τους παρακάτω συνεισφέροντες:",
|
||||
"Copyright © 2014-2017 the following Contributors:": "Copyright © 2014-2017 για τους παρακάτω συνεισφέροντες:",
|
||||
"Copyright © 2014-2019 the following Contributors:": "Copyright © 2014-2019 the following Contributors:",
|
||||
"Copyright © 2014-2019 the following Contributors:": "Copyright © 2014-2019 για τους παρακάτω συνεισφέροντες:",
|
||||
"Creating ignore patterns, overwriting an existing file at {%path%}.": "Δημιουργία προτύπων αγνόησης, αντικατάσταση του υπάρχοντος αρχείου στο {{path}}.",
|
||||
"Currently Shared With Devices": "Currently Shared With Devices",
|
||||
"Currently Shared With Devices": "Διαμοιράζεται με αυτές τις συσκευές",
|
||||
"Danger!": "Προσοχή!",
|
||||
"Debugging Facilities": "Εργαλεία αποσφαλμάτωσης",
|
||||
"Default Folder Path": "Προκαθορισμένη διαδρομή φακέλων",
|
||||
"Deleted": "Διαγραμμένα",
|
||||
"Deselect All": "Deselect All",
|
||||
"Deselect devices to stop sharing this folder with.": "Deselect devices to stop sharing this folder with.",
|
||||
"Deselect All": "Αποεπιλογή όλων",
|
||||
"Deselect devices to stop sharing this folder with.": "Αποεπιλέξτε συσκευές για να σταματήσει ο διαμοιρασμός του φακέλου με αυτές.",
|
||||
"Device": "Συσκευή",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Η συσκευή \"{{name}}\" ({{device}} στη διεύθυνση {{address}}) επιθυμεί να συνδεθεί. Προσθήκη της νέας συσκευής;",
|
||||
"Device ID": "Ταυτότητα συσκευής",
|
||||
@@ -76,14 +76,14 @@
|
||||
"Device rate limits": "Όρια ταχύτητας συσκευών",
|
||||
"Device that last modified the item": "Συσκευή από την οποία πραγματοποιήθηκε η τελευταία τροποποίηση του στοιχείου",
|
||||
"Devices": "Συσκευές",
|
||||
"Disable Crash Reporting": "Disable Crash Reporting",
|
||||
"Disable Crash Reporting": "Απενεργοποίηση αναφοράς σφαλμάτων",
|
||||
"Disabled": "Απενεργοποιημένη",
|
||||
"Disabled periodic scanning and disabled watching for changes": "Έχουν απενεργοποιηθεί η τακτική σάρωση και η επιτήρηση αλλαγών",
|
||||
"Disabled periodic scanning and enabled watching for changes": "Έχει απενεργοποιηθεί η τακτική σάρωση και ενεργοποιηθεί η επιτήρηση αλλαγών",
|
||||
"Disabled periodic scanning and failed setting up watching for changes, retrying every 1m:": "Έχει απενεργοποιηθεί η τακτική σάρωση και απέτυχε η ενεργοποίηση επιτήρησης αλλαγών. Γίνεται νέα προσπάθεια κάθε 1m:",
|
||||
"Discard": "Discard",
|
||||
"Discard": "Απόρριψη",
|
||||
"Disconnected": "Αποσυνδεδεμένη",
|
||||
"Disconnected (Unused)": "Disconnected (Unused)",
|
||||
"Disconnected (Unused)": "Αποσυνδεδεμένη (εκτός χρήσης)",
|
||||
"Discovered": "Βάσει ανεύρεσης",
|
||||
"Discovery": "Ανεύρεση συσκευών",
|
||||
"Discovery Failures": "Αποτυχίες ανεύρεσης συσκευών",
|
||||
@@ -99,16 +99,16 @@
|
||||
"Edit Folder": "Επεξεργασία φακέλου",
|
||||
"Editing": "Επεξεργασία σε εξέλιξη",
|
||||
"Editing {%path%}.": "Επεξεργασία του {{path}}.",
|
||||
"Enable Crash Reporting": "Enable Crash Reporting",
|
||||
"Enable Crash Reporting": "Ενεργοποίηση αναφοράς σφαλμάτων",
|
||||
"Enable NAT traversal": "Ενεργοποίηση διάσχισης NAT",
|
||||
"Enable Relaying": "Ενεργοποίηση αναμετάδοσης",
|
||||
"Enabled": "Ενεργοποιημένη",
|
||||
"Enter a non-negative number (e.g., \"2.35\") and select a unit. Percentages are as part of the total disk size.": "Εισάγετε έναν μη αρνητικό αριθμό (π.χ. «2.35») και επιλέξτε μια μονάδα μέτρησης. Τα ποσοστά ισχύουν ως προς το συνολικό μέγεθος του δίσκου.",
|
||||
"Enter a non-privileged port number (1024 - 65535).": "Εισάγετε τον αριθμό μιας μη δεσμευμένης θύρας (1024 - 65535).",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Εισάγετε τις διευθύνσεις χωρισμένες με κόμμα (\"tcp://ip:port\", \"tcp://host:port\") ή γράψτε \"dynamic\" για την αυτόματη ανεύρεση τους.",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Εισάγετε τις διευθύνσεις χωρισμένες με κόμμα (\"tcp://ip:θύρα\", \"tcp://όνομα:θύρα\") ή γράψτε \"dynamic\" για την αυτόματη ανεύρεση τους.",
|
||||
"Enter ignore patterns, one per line.": "Δώσε τα πρότυπα που θα αγνοηθούν, ένα σε κάθε γραμμή.",
|
||||
"Enter up to three octal digits.": "Enter up to three octal digits.",
|
||||
"Enter up to three octal digits.": "Εισάγετε έως τρία οκταδικά ψηφία.",
|
||||
"Error": "Σφάλμα",
|
||||
"External File Versioning": "Εξωτερική τήρηση εκδόσεων",
|
||||
"Failed Items": "Αρχεία που απέτυχαν",
|
||||
@@ -150,13 +150,13 @@
|
||||
"Global State": "Καθολική κατάσταση",
|
||||
"Help": "Βοήθεια",
|
||||
"Home page": "Αρχική σελίδα",
|
||||
"However, your current settings indicate you might not want it enabled. We have disabled automatic crash reporting for you.": "However, your current settings indicate you might not want it enabled. We have disabled automatic crash reporting for you.",
|
||||
"However, your current settings indicate you might not want it enabled. We have disabled automatic crash reporting for you.": "Ωστόσο, σύμφωνα με τις τρέχουσες ρυθμίσεις σας, μάλλον δεν επιθυμείτε αυτή τη λειτουργία. Οι αναφορές σφαλμάτων απενεργοποιήθηκαν.",
|
||||
"Ignore": "Αγνόησε",
|
||||
"Ignore Patterns": "Πρότυπο για αγνόηση",
|
||||
"Ignore Permissions": "Αγνόησε τα δικαιώματα",
|
||||
"Ignored Devices": "Ignored Devices",
|
||||
"Ignored Folders": "Ignored Folders",
|
||||
"Ignored at": "Ignored at",
|
||||
"Ignored Devices": "Αγνοηθείσες συσκευές",
|
||||
"Ignored Folders": "Αγνοηθέντες φάκελοι",
|
||||
"Ignored at": "Αγνοήθηκε στην",
|
||||
"Incoming Rate Limit (KiB/s)": "Περιορισμός ταχύτητας λήψης (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Με μια εσφαλμένη ρύθμιση μπορεί να προκληθεί ζημιά στα περιεχόμενα των φακέλων και το Syncthing να σταματήσει να λειτουργεί.",
|
||||
"Introduced By": "Προτάθηκε από",
|
||||
@@ -171,19 +171,19 @@
|
||||
"Later": "Αργότερα",
|
||||
"Latest Change": "Τελευταία αλλαγή",
|
||||
"Learn more": "Μάθετε περισσότερα",
|
||||
"Limit": "Limit",
|
||||
"Limit": "Όριο",
|
||||
"Listeners": "Ακροατές",
|
||||
"Loading data...": "Φόρτωση δεδομένων...",
|
||||
"Loading...": "Φόρτωση...",
|
||||
"Local Additions": "Local Additions",
|
||||
"Local Additions": "Τοπικές προσθήκες",
|
||||
"Local Discovery": "Τοπική ανεύρεση",
|
||||
"Local State": "Τοπική κατάσταση",
|
||||
"Local State (Total)": "Τοπική κατάσταση (συνολικά)",
|
||||
"Locally Changed Items": "Locally Changed Items",
|
||||
"Locally Changed Items": "Τοπικές αλλαγές",
|
||||
"Log": "Αρχείο καταγραφής",
|
||||
"Log tailing paused. Click here to continue.": "Η αυτόματη κύλιση έχει διακοπεί. Πατήστε εδώ για να συνεχιστεί.",
|
||||
"Log tailing paused. Scroll to bottom continue.": "Η αυτόματη ακολούθηση του αρχείου καταγραφής είναι σε παύση. Κυλίστε στο τέλος της οθόνης για να συνεχίσετε.",
|
||||
"Log tailing paused. Scroll to the bottom to continue.": "Log tailing paused. Scroll to the bottom to continue.",
|
||||
"Log tailing paused. Scroll to the bottom to continue.": "Η αυτόματη παρακολούθηση του αρχείου καταγραφής είναι σε παύση. Κυλίστε στο τέλος της οθόνης για να συνεχίσετε.",
|
||||
"Logs": "Αρχεία καταγραφής",
|
||||
"Major Upgrade": "Σημαντική αναβάθμιση",
|
||||
"Mass actions": "Μαζικές ενέργειες",
|
||||
@@ -222,8 +222,8 @@
|
||||
"Pause": "Παύση",
|
||||
"Pause All": "Παύση όλων",
|
||||
"Paused": "Σε παύση",
|
||||
"Paused (Unused)": "Paused (Unused)",
|
||||
"Pending changes": "Pending changes",
|
||||
"Paused (Unused)": "Σε παύση (εκτός χρήσης)",
|
||||
"Pending changes": "Εκκρεμείς αλλαγές",
|
||||
"Periodic scanning at given interval and disabled watching for changes": "Τακτική σάρωση ανά καθορισμένο διάστημα και απενεργοποίηση επιτήρησης αλλαγών",
|
||||
"Periodic scanning at given interval and enabled watching for changes": "Τακτική σάρωση ανά καθορισμένο διάστημα και ενεργοποίηση επιτήρησης αλλαγών",
|
||||
"Periodic scanning at given interval and failed setting up watching for changes, retrying every 1m:": "Τακτική σάρωση ανά καθορισμένο διάστημα και αποτυχία ενεργοποίησης επιτήρησης αλλαγών. Γίνεται νέα προσπάθεια κάθε 1m:",
|
||||
@@ -233,7 +233,7 @@
|
||||
"Please wait": "Παρακαλώ περιμένετε",
|
||||
"Prefix indicating that the file can be deleted if preventing directory removal": "Πρόθεμα που δείχνει ότι το αρχείο θα μπορεί να διαγραφεί αν εμποδίζει τη διαγραφή καταλόγου",
|
||||
"Prefix indicating that the pattern should be matched without case sensitivity": "Πρόθεμα που δείχνει ότι η αντιστοίχιση προτύπου θα γίνεται χωρίς διάκριση πεζών και κεφαλαίων χαρακτήρων",
|
||||
"Preparing to Sync": "Preparing to Sync",
|
||||
"Preparing to Sync": "Προετοιμασία συγχρονισμού",
|
||||
"Preview": "Προεπισκόπηση",
|
||||
"Preview Usage Report": "Προεπισκόπηση αναφοράς χρήσης",
|
||||
"Quick guide to supported patterns": "Σύντομη βοήθεια σχετικά με τα πρότυπα αναζήτησης που υποστηρίζονται",
|
||||
@@ -268,9 +268,9 @@
|
||||
"Scanning": "Έλεγχος για αλλαγές",
|
||||
"See external versioner help for supported templated command line parameters.": "Ανατρέξτε στην τεκμηρίωση της εξωτερικής τήρησης εκδόσεων για πληροφορίες σχετικά με τις υποστηριζόμενες παραμέτρους της γραμμής εντολών.",
|
||||
"See external versioning help for supported templated command line parameters.": "Ανατρέξτε στην τεκμηρίωση της εξωτερικής τήρησης εκδόσεων για πληροφορίες σχετικά με τις υποστηριζόμενες παραμέτρους της γραμμής εντολών.",
|
||||
"Select All": "Select All",
|
||||
"Select All": "Επιλογή όλων",
|
||||
"Select a version": "Επιλογή έκδοσης",
|
||||
"Select additional devices to share this folder with.": "Select additional devices to share this folder with.",
|
||||
"Select additional devices to share this folder with.": "Επιλέξτε επιπλέον συσκευές με τις οποίες θα διαμοιράζεται αυτός ο φάκελος.",
|
||||
"Select latest version": "Επιλογή τελευταίας έκδοσης",
|
||||
"Select oldest version": "Επιλογή παλαιότερης έκδοσης",
|
||||
"Select the devices to share this folder with.": "Διάλεξε τις συσκευές προς τις οποίες θα διαμοιράζεται αυτός ο φάκελος.",
|
||||
@@ -306,19 +306,19 @@
|
||||
"Statistics": "Στατιστικά",
|
||||
"Stopped": "Απενεργοποιημένο",
|
||||
"Support": "Υποστήριξη",
|
||||
"Support Bundle": "Support Bundle",
|
||||
"Support Bundle": "Πακέτο υποστήριξης",
|
||||
"Sync Protocol Listen Addresses": "Διευθύνσεις για το πρωτόκολλο συγχρονισμού",
|
||||
"Syncing": "Συγχρονίζω",
|
||||
"Syncthing has been shut down.": "Το Syncthing έχει απενεργοποιηθεί.",
|
||||
"Syncthing includes the following software or portions thereof:": "Το Syncthing περιλαμβάνει τα παρακάτω λογισμικά ή μέρη αυτών:",
|
||||
"Syncthing is Free and Open Source Software licensed as MPL v2.0.": "Syncthing is Free and Open Source Software licensed as MPL v2.0.",
|
||||
"Syncthing is Free and Open Source Software licensed as MPL v2.0.": "Το Syncthing είναι ελεύθερο λογισμικό και ανοικτού κώδικα, με άδεια MPL v2.0.",
|
||||
"Syncthing is restarting.": "Το Syncthing επανεκκινείται.",
|
||||
"Syncthing is upgrading.": "Το Syncthing αναβαθμίζεται.",
|
||||
"Syncthing now supports automatically reporting crashes to the developers. This feature is enabled by default.": "Syncthing now supports automatically reporting crashes to the developers. This feature is enabled by default.",
|
||||
"Syncthing now supports automatically reporting crashes to the developers. This feature is enabled by default.": "Το Syncthing επιτρέπει την αυτόματη υποβολή αναφορών σφαλμάτων στους προγραμματιστές. Η προεπιλεγμένη ρύθμιση είναι να αποστέλλονται οι αναφορές.",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Το Syncthing φαίνεται πως είναι απενεργοποιημένο ή υπάρχει πρόβλημα στη σύνδεσή σου στο διαδίκτυο. Προσπαθώ πάλι…",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Το Syncthing φαίνεται να αντιμετωπίζει ένα πρόβλημα με την επεξεργασία του αιτήματός σου. Παρακαλούμε, αν το πρόβλημα συνεχίζει, ανανέωσε την σελίδα ή επανεκκίνησε το Syncthing.",
|
||||
"Take me back": "Take me back",
|
||||
"The GUI address is overridden by startup options. Changes here will not take effect while the override is in place.": "The GUI address is overridden by startup options. Changes here will not take effect while the override is in place.",
|
||||
"Take me back": "Επιστροφή",
|
||||
"The GUI address is overridden by startup options. Changes here will not take effect while the override is in place.": "Η διεύθυνση του GUI έχει τροποποιηθεί μέσω παραμέτρων εκκίνησης. Οι αλλαγές εδώ δεν θα ισχύσουν όσο είναι ενεργές αυτές οι παράμετροι.",
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "Η διεπαφή διαχείρισης του Syncthing είναι ρυθμισμένη να επιτρέπει την πρόσβαση χωρίς κωδικό.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "Τα στατιστικά που έχουν συλλεγεί είναι δημόσια διαθέσιμα στη παρακάτω διεύθυνση.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Οι ρυθμίσεις έχουν αποθηκευτεί αλλά δεν έχουν ενεργοποιηθεί. Πρέπει να επανεκκινήσεις το Syncthing για να ισχύσουν οι νέες ρυθμίσεις.",
|
||||
@@ -332,7 +332,7 @@
|
||||
"The folder path cannot be blank.": "Το μονοπάτι του φακέλου δεν μπορεί να είναι κενό.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Θα χρησιμοποιούνται τα εξής διαστήματα: Την πρώτη ώρα θα τηρείται μια έκδοση κάθε 30 δευτερόλεπτα. Την πρώτη ημέρα, μια έκδοση κάθε μια ώρα. Τις πρώτες 30 ημέρες, μία έκδοση κάθε ημέρα. Από εκεί και έπειτα μέχρι τη μέγιστη ηλικία, θα τηρείται μια έκδοση κάθε εβδομάδα.",
|
||||
"The following items could not be synchronized.": "Δεν ήταν δυνατόν να συγχρονιστούν τα παρακάτω αρχεία.",
|
||||
"The following items were changed locally.": "The following items were changed locally.",
|
||||
"The following items were changed locally.": "Τα παρακάτω στοιχεία τροποποιήθηκαν τοπικά.",
|
||||
"The maximum age must be a number and cannot be blank.": "Η μέγιστη ηλικία πρέπει να είναι αριθμός και σίγουρα όχι κενό.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Η μέγιστη ηλικία παλιότερων εκδόσεων (σε ημέρες, αν δώσεις 0 οι παλιότερες εκδόσεις θα διατηρούνται για πάντα).",
|
||||
"The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).": "Το ποσοστό του ελάχιστου διαθέσιμου αποθηκευτικόυ χώρου πρέπει να είναι έναν μη-αρνητικός αριθμός μεταξύ του 0 και του 100 (συμπεριλαμβανομένων)",
|
||||
@@ -343,7 +343,7 @@
|
||||
"The path cannot be blank.": "Το μονοπάτι δεν μπορεί να είναι κενό.",
|
||||
"The rate limit must be a non-negative number (0: no limit)": "Το όριο ταχύτητας πρέπει να είναι ένας μη-αρνητικός αριθμός (0: χωρίς όριο)",
|
||||
"The rescan interval must be a non-negative number of seconds.": "Ο χρόνος επανελέγχου για αλλαγές είναι σε δευτερόλεπτα (δηλ. θετικός αριθμός).",
|
||||
"There are no devices to share this folder with.": "There are no devices to share this folder with.",
|
||||
"There are no devices to share this folder with.": "Δεν υπάρχουν συσκευές με τις οποίες διαμοιράζεται αυτός ο φάκελος.",
|
||||
"They are retried automatically and will be synced when the error is resolved.": "Όταν επιλυθεί το σφάλμα θα κατεβούν και θα συχρονιστούν αυτόματα.",
|
||||
"This Device": "Αυτή η συσκευή",
|
||||
"This can easily give hackers access to read and change any files on your computer.": "Αυτό μπορεί εύκολα να δώσει πρόσβαση ανάγνωσης και επεξεργασίας αρχείων του υπολογιστή σας σε χάκερς.",
|
||||
@@ -353,14 +353,14 @@
|
||||
"Time the item was last modified": "Ώρα τελευταίας τροποποίησης του στοιχείου",
|
||||
"Trash Can File Versioning": "Τήρηση εκδόσεων κάδου ανακύκλωσης",
|
||||
"Type": "Τύπος",
|
||||
"UNIX Permissions": "UNIX Permissions",
|
||||
"UNIX Permissions": "Άδειες αρχείων UNIX",
|
||||
"Unavailable": "Μη διαθέσιμο",
|
||||
"Unavailable/Disabled by administrator or maintainer": "Μη διαθέσιμο/απενεργοποιημένο από τον διαχειριστή ή υπεύθυνο διανομής",
|
||||
"Undecided (will prompt)": "Μη καθορισμένη (θα γίνει ερώτηση)",
|
||||
"Unignore": "Unignore",
|
||||
"Unignore": "Αναίρεση αγνόησης",
|
||||
"Unknown": "Άγνωστο",
|
||||
"Unshared": "Δε μοιράζεται",
|
||||
"Unshared Devices": "Unshared Devices",
|
||||
"Unshared Devices": "Συσκευές χωρίς διαμοιρασμό",
|
||||
"Unused": "Δε χρησιμοποιείται",
|
||||
"Up to Date": "Ενημερωμένη",
|
||||
"Updated": "Ενημερωμένο",
|
||||
@@ -371,16 +371,16 @@
|
||||
"Uptime": "Χρόνος απρόσκοπτης λειτουργίας",
|
||||
"Usage reporting is always enabled for candidate releases.": "Η αποστολή αναφορών χρήσης είναι πάντα ενεργοποιημένη στις υποψήφιες εκδόσεις.",
|
||||
"Use HTTPS for GUI": "Χρήση HTTPS για τη διεπαφή",
|
||||
"Use notifications from the filesystem to detect changed items.": "Use notifications from the filesystem to detect changed items.",
|
||||
"Variable Size Blocks": "Variable Size Blocks",
|
||||
"Variable size blocks (also \"large blocks\") are more efficient for large files.": "Variable size blocks (also \"large blocks\") are more efficient for large files.",
|
||||
"Use notifications from the filesystem to detect changed items.": "Χρήση ειδοποιήσεων από το σύστημα αρχείων για την ανίχνευση αλλαγών.",
|
||||
"Variable Size Blocks": "Τμήματα μεταβλητού μεγέθους",
|
||||
"Variable size blocks (also \"large blocks\") are more efficient for large files.": "Τα τμήματα μεταβλητού μεγέθους (ή «μεγάλα τμήματα») αποδίδουν καλύτερα στα μεγάλα αρχεία.",
|
||||
"Version": "Έκδοση",
|
||||
"Versions": "Εκδόσεις",
|
||||
"Versions Path": "Φάκελος τήρησης εκδόσεων",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Οι παλιές εκδόσεις θα σβήνονται αυτόματα όταν ξεπεράσουν τη μέγιστη ηλικία ή όταν ξεπεραστεί ο μέγιστος αριθμός αρχείων ανά περίοδο.",
|
||||
"Waiting to Scan": "Waiting to Scan",
|
||||
"Waiting to Sync": "Waiting to Sync",
|
||||
"Waiting to scan": "Waiting to scan",
|
||||
"Waiting to Scan": "Αναμονή σάρωσης",
|
||||
"Waiting to Sync": "Αναμονή συγχρονισμού",
|
||||
"Waiting to scan": "Αναμονή σάρωσης",
|
||||
"Warning, this path is a parent directory of an existing folder \"{%otherFolder%}\".": "Προσοχή, αυτό το μονοπάτι είναι γονικός φάκελος ενός υπάρχοντος φακέλου \"{{otherFolder}}\".",
|
||||
"Warning, this path is a parent directory of an existing folder \"{%otherFolderLabel%}\" ({%otherFolder%}).": "Προσοχή, αυτό το μονοπάτι είναι γονικός φάκελος ενός υπάρχοντος φακέλου \"{{otherFolderLabel}}\" ({{otherFolder}}).",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Προσοχή, αυτό το μονοπάτι είναι υποφάκελος του υπάρχοντος φακέλου \"{{otherFolder}}\".",
|
||||
@@ -388,16 +388,16 @@
|
||||
"Warning: If you are using an external watcher like {%syncthingInotify%}, you should make sure it is deactivated.": "Προσοχή: αν χρησιμοποιείτε ένα εξωτερικό εργαλείο επιτήρησης, όπως το {{syncthingInotify}}, σιγουρευτείτε ότι έχει απενεργοποιηθεί.",
|
||||
"Watch for Changes": "Επιτήρηση αλλαγών",
|
||||
"Watching for Changes": "Εκτελείται η επιτήρηση αλλαγών",
|
||||
"Watching for changes discovers most changes without periodic scanning.": "Watching for changes discovers most changes without periodic scanning.",
|
||||
"Watching for changes discovers most changes without periodic scanning.": "Με την επιτήρηση αλλαγών ανιχνεύονται οι περισσότερες αλλαγές χωρίς τακτικές σαρώσεις.",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Θυμήσου πως όταν προσθέτεις μια νέα συσκευή, ετούτη η συσκευή θα πρέπει να προστεθεί και στην άλλη πλευρά.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Όταν προσθέτεις έναν νέο φάκελο, θυμήσου πως η ταυτότητα ενός φακέλου χρησιμοποιείται για να να συσχετίσει φακέλους μεταξύ συσκευών. Η ταυτότητα του φακέλου θα πρέπει να είναι η ίδια σε όλες τις συσκευές και έχουν σημασία τα πεζά ή κεφαλαία γράμματα.",
|
||||
"Yes": "Ναι",
|
||||
"You can also select one of these nearby devices:": "Μπορείτε επίσης να επιλέξετε μια από αυτές τις γειτονικές συσκευές:",
|
||||
"You can change your choice at any time in the Settings dialog.": "Μπορείτε να αλλάξετε τη ρύθμιση αυτή ανά πάσα στιγμή στο παράθυρο «Ρυθμίσεις».",
|
||||
"You can read more about the two release channels at the link below.": "Μπορείτε να διαβάσετε περισσότερα για τα δύο κανάλια εκδόσεων στον παρακάτω σύνδεσμο.",
|
||||
"You have no ignored devices.": "You have no ignored devices.",
|
||||
"You have no ignored folders.": "You have no ignored folders.",
|
||||
"You have unsaved changes. Do you really want to discard them?": "You have unsaved changes. Do you really want to discard them?",
|
||||
"You have no ignored devices.": "Δεν έχετε αγνοηθείσες συσκευές.",
|
||||
"You have no ignored folders.": "Δεν έχετε αγνοηθέντες φακέλους.",
|
||||
"You have unsaved changes. Do you really want to discard them?": "Έχετε μη αποθηκευμένες αλλαγές. Σίγουρα επιθυμείτε να τις απορρίψετε;",
|
||||
"You must keep at least one version.": "Πρέπει να τηρήσεις τουλάχιστον μια έκδοση.",
|
||||
"days": "Μέρες",
|
||||
"directories": "κατάλογοι",
|
||||
|
||||
@@ -81,6 +81,7 @@
|
||||
"Disabled periodic scanning and disabled watching for changes": "Disabled periodic scanning and disabled watching for changes",
|
||||
"Disabled periodic scanning and enabled watching for changes": "Disabled periodic scanning and enabled watching for changes",
|
||||
"Disabled periodic scanning and failed setting up watching for changes, retrying every 1m:": "Disabled periodic scanning and failed setting up watching for changes, retrying every 1m:",
|
||||
"Disables comparing and syncing file permissions. Useful on systems with nonexistent or custom permissions (e.g. FAT, exFAT, Synology, Android).": "Disables comparing and syncing file permissions. Useful on systems with nonexistent or custom permissions (e.g. FAT, exFAT, Synology, Android).",
|
||||
"Discard": "Discard",
|
||||
"Disconnected": "Disconnected",
|
||||
"Disconnected (Unused)": "Disconnected (Unused)",
|
||||
@@ -139,6 +140,7 @@
|
||||
"GUI": "GUI",
|
||||
"GUI Authentication Password": "GUI Authentication Password",
|
||||
"GUI Authentication User": "GUI Authentication User",
|
||||
"GUI Authentication: Set User and Password": "GUI Authentication: Set User and Password",
|
||||
"GUI Listen Address": "GUI Listen Address",
|
||||
"GUI Listen Addresses": "GUI Listen Addresses",
|
||||
"GUI Theme": "GUI Theme",
|
||||
@@ -151,6 +153,7 @@
|
||||
"Help": "Help",
|
||||
"Home page": "Home page",
|
||||
"However, your current settings indicate you might not want it enabled. We have disabled automatic crash reporting for you.": "However, your current settings indicate you might not want it enabled. We have disabled automatic crash reporting for you.",
|
||||
"If you want to prevent other users on this computer from accessing Syncthing and through it your files, consider setting up authentication.": "If you want to prevent other users on this computer from accessing Syncthing and through it your files, consider setting up authentication.",
|
||||
"Ignore": "Ignore",
|
||||
"Ignore Patterns": "Ignore Patterns",
|
||||
"Ignore Permissions": "Ignore Permissions",
|
||||
@@ -372,6 +375,7 @@
|
||||
"Usage reporting is always enabled for candidate releases.": "Usage reporting is always enabled for candidate releases.",
|
||||
"Use HTTPS for GUI": "Use HTTPS for GUI",
|
||||
"Use notifications from the filesystem to detect changed items.": "Use notifications from the filesystem to detect changed items.",
|
||||
"Username/Password has not been set for the GUI authentication. Please consider setting it up.": "Username/Password has not been set for the GUI authentication. Please consider setting it up.",
|
||||
"Variable Size Blocks": "Variable Size Blocks",
|
||||
"Variable size blocks (also \"large blocks\") are more efficient for large files.": "Variable size blocks (also \"large blocks\") are more efficient for large files.",
|
||||
"Version": "Version",
|
||||
|
||||
@@ -119,9 +119,9 @@
|
||||
"File Versioning": "Préservation des fichiers",
|
||||
"File permission bits are ignored when looking for changes. Use on FAT file systems.": "Les bits de permission de fichier sont ignorés lors de la recherche de changements. Utilisé sur les systèmes de fichiers FAT.",
|
||||
"Files are moved to .stversions directory when replaced or deleted by Syncthing.": "Les fichiers sont déplacés dans le sous-répertoire .stversions quand ils sont remplacés ou supprimés par Syncthing. Leurs chemins d'accès relatifs y sont recréés si besoin.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Les fichiers sont déplacés dans le sous-répertoire .stversions quand ils sont remplacés ou supprimés par Syncthing. Leurs chemins d'accès relatifs y sont recréés si besoin.",
|
||||
"Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "Quand ils sont remplacés ou supprimés par Syncthing, les fichiers sont déplacés et horodatés vers le sous-répertoire .stversions dans une arborescence relative identique à celle de l'original.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Quand ils sont remplacés ou supprimés par Syncthing, les fichiers sont déplacés et horodatés vers le sous-répertoire .stversions dans une arborescence relative identique à celle de l'original.",
|
||||
"Files are moved to .stversions folder when replaced or deleted by Syncthing.": "Quand Syncthing supprime et/ou remplace (renomme) ou déplace des fichiers, les originaux sont déplacés dans le sous-répertoire (caché) .stversions du partage en y reproduisant leur chemin d'accès relatif d'origine.",
|
||||
"Files are moved to date stamped versions in a .stversions directory when replaced or deleted by Syncthing.": "Quand Syncthing supprime et/ou remplace (renomme) ou déplace des fichiers, les originaux sont déplacés et horodatés dans le sous-répertoire (caché) .stversions du partage en y reproduisant leur chemin d'accès relatif d'origine.",
|
||||
"Files are moved to date stamped versions in a .stversions folder when replaced or deleted by Syncthing.": "Quand Syncthing supprime et/ou remplace (renomme) ou déplace des fichiers, les originaux sont déplacés et horodatés dans le sous-répertoire (caché) .stversions du partage en y reproduisant leur chemin d'accès relatif d'origine.",
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Les fichiers sont protégés des changements réalisés sur les autres appareils, mais les changements réalisés sur celui-ci seront transférés aux autres.",
|
||||
"Files are synchronized from the cluster, but any changes made locally will not be sent to other devices.": "Les fichiers sont synchronisés à partir des autres participants, mais les modifications apportées localement ne leur seront pas envoyées.",
|
||||
"Filesystem Notifications": "Notifications du système de fichiers",
|
||||
@@ -162,7 +162,7 @@
|
||||
"Introduced By": "Introduit par",
|
||||
"Introducer": "Appareil introducteur",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inverser la condition donnée (i.e. ne pas exclure)",
|
||||
"Keep Versions": "Combien de versions conserver",
|
||||
"Keep Versions": "Nombre de versions à conserver",
|
||||
"LDAP": "LDAP",
|
||||
"Largest First": "Les plus volumineux en premier",
|
||||
"Last File Received": "Dernier changement",
|
||||
@@ -217,8 +217,8 @@
|
||||
"Path": "Chemin",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Chemin vers le répertoire à partager dans l'appareil local. Il sera créé s'il n'existe pas. Vous pouvez entrer un chemin absolu (p.ex \"/home/moi/Sync/Exemple\") ou relatif à celui du programme (p.ex \"..\\Partages\\Exemple\" - utile pour installation portable). Le caractère tilde (~, ou ~+Espace sous Windows XP+Azerty) peut être utilisé comme raccourci vers",
|
||||
"Path where new auto accepted folders will be created, as well as the default suggested path when adding new folders via the UI. Tilde character (~) expands to {%tilde%}.": "Chemin dans lequel les partages acceptés automatiquement seront créés, ainsi que chemin suggéré lors de l'enregistrement des nouveaux partages via cette interface graphique. Le caractère tilde (~) est un raccourci pour {{tilde}}.",
|
||||
"Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "Chemin où les versions doivent être conservées (laisser vide pour le chemin par défaut de .stversions dans le répertoire partagé).\nChemin relatif ou absolu (recommandé), mais dans un répertoire non synchronisé (par masque ou hors du chemin du partage).\nSur la même partition ou système de fichiers (recommandé).",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Chemin où les versions doivent être conservées (laisser vide pour le chemin par défaut de .stversions dans le répertoire partagé).\nChemin relatif ou absolu (recommandé), mais dans un répertoire non synchronisé (par masque ou hors du chemin du partage).\nSur la même partition ou système de fichiers (recommandé).",
|
||||
"Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "Chemin où les versions seront conservées (laisser vide pour le chemin par défaut de .stversions (caché) dans le partage).\nChemin relatif ou absolu (recommandé), mais dans un répertoire non synchronisé (par masque ou hors du chemin du partage).\nSur la même partition ou système de fichiers (recommandé).",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Chemin où les versions seront conservées (laisser vide pour le chemin par défaut de .stversions (caché) dans le répertoire partagé).\nChemin relatif ou absolu (recommandé), mais dans un répertoire non synchronisé (par masque ou hors du chemin du partage).\nSur la même partition ou système de fichiers (recommandé).",
|
||||
"Pause": "Pause",
|
||||
"Pause All": "Tout suspendre",
|
||||
"Paused": "En pause",
|
||||
@@ -257,7 +257,7 @@
|
||||
"Restart Needed": "Redémarrage nécessaire",
|
||||
"Restarting": "Redémarrage en cours",
|
||||
"Restore": "Restaurer",
|
||||
"Restore Versions": "Restaurer par versions",
|
||||
"Restore Versions": "Restaurer des versions",
|
||||
"Resume": "Reprise",
|
||||
"Resume All": "Tout libérer",
|
||||
"Reused": "Réutilisé",
|
||||
@@ -266,8 +266,8 @@
|
||||
"Save": "Enregistrer",
|
||||
"Scan Time Remaining": "Temps d'analyse restant",
|
||||
"Scanning": "Analyse",
|
||||
"See external versioner help for supported templated command line parameters.": "Voir l'aide sur la préservation externe des fichiers pour les paramètres supportés en lignes de commande dans les modèles.",
|
||||
"See external versioning help for supported templated command line parameters.": "Consulter l'aide à la gestion externe des versions pour voir les paramètres de ligne de commande supportés.",
|
||||
"See external versioner help for supported templated command line parameters.": "Voir l'aide du système de préservation externe des fichiers pour les paramètres supportés de modèles de lignes de commande.",
|
||||
"See external versioning help for supported templated command line parameters.": "Consulter l'aide de la gestion externe des versions pour les paramètres supportés de modèles de lignes de commande.",
|
||||
"Select All": "Tout sélectionner",
|
||||
"Select a version": "Choisissez une version",
|
||||
"Select additional devices to share this folder with.": "Sélectionnez des appareils membres supplémentaires pour ce partage.",
|
||||
@@ -324,21 +324,21 @@
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configuration a été enregistrée mais pas activée. Syncthing doit redémarrer afin d'activer la nouvelle configuration.",
|
||||
"The device ID cannot be blank.": "L'ID de l'appareil ne peut être vide.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "L'ID d'appareil à saisir ici se trouve dans le menu \"Actions > Afficher mon ID\" de l'appareil distant. Espaces et tirets sont optionnels (ignorés).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Le rapport d'utilisation chiffré est envoyé quotidiennement. Il sert à répertorier les plates-formes utilisées, la taille des partages et les versions de l'application. Si le jeu de données rapportées devait être changé, il vous serait demandé de valider de nouveau son envoi via ce message. Vous pouvez revenir sur votre décision via Actions/Configuration, et agir sur la fréquence d'envoi via Actions/Avancé/Options (urInitialDelayS).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Le rapport d'utilisation chiffré est envoyé quotidiennement. Il sert à répertorier les plates-formes utilisées, la taille des partages et les versions de l'application. Si le jeu de données rapportées devait être changé, il vous serait demandé de valider de nouveau son envoi via ce message. Vous pouvez revenir sur votre décision via Actions/Configuration, et agir sur la fréquence d'envoi via Actions/Avancé/Options (Ur Initial Delay (seconds)).",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID de l'appareil inséré ne semble pas valide. Il devrait ressembler à une chaîne de 52 ou 56 caractères comprenant des lettres, des chiffres et potentiellement des espaces et des traits d'union.",
|
||||
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Le premier paramètre de ligne de commande est le chemin du répertoire partagé, et le second est le chemin relatif dans le répertoire.",
|
||||
"The folder ID cannot be blank.": "L'ID du partage ne peut être vide.",
|
||||
"The folder ID must be unique.": "L'ID du partage doit être unique.",
|
||||
"The folder path cannot be blank.": "Le chemin vers le répertoire ne peut pas être vide.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Les seuils de durée suivants définissent le nombre maximum de versions pour chaque fichier : pendant la première heure une version peut être conservée toutes les 30 secondes. Jusqu'à un jour, jusqu'à une version par heure - des versions de la première heure sont alors progressivement effacées pour n'en garder qu'une par heure. Jusqu'à 30 jours, jusqu'à une version par jour - des versions horaires du premier jour sont alors progressivement effacées pour n'en garder qu'une par jour. Au-delà, jusqu'à la limite d'âge, jusqu'à une version est conservée par semaine - des versions journalières du premier mois sont alors progressivement effacées pour n'en garder qu'une par semaine.",
|
||||
"The following intervals are used: for the first hour a version is kept every 30 seconds, for the first day a version is kept every hour, for the first 30 days a version is kept every day, until the maximum age a version is kept every week.": "Les seuils de durée suivants définissent le nombre maximum de versions pour chaque fichier : pendant la première heure une version est conservée toutes les 30 secondes. Le premier jour, une version par heure - des versions de la première heure sont alors progressivement effacées pour finir par n'en garder que la dernière. Pour les 30 jours passés, une version par jour - des versions horaires du premier jour sont alors progressivement effacées pour n'en garder qu'une par jour. Au-delà et jusqu'à la limite d'âge, une version est conservée par semaine - des versions journalières du premier mois sont alors progressivement effacées pour n'en garder qu'une.",
|
||||
"The following items could not be synchronized.": "Les fichiers suivants n'ont pas pu être synchronisés.",
|
||||
"The following items were changed locally.": "Les éléments suivants ont été modifiés localement.",
|
||||
"The maximum age must be a number and cannot be blank.": "L'âge maximum doit être un nombre et ne peut être vide.",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Durée maximum de conservation d'une version (en jours, 0 pour conserver les versions indéfiniment)",
|
||||
"The maximum time to keep a version (in days, set to 0 to keep versions forever).": "Durée maximale de conservation d'une version (en jours, 0 pour conservation éternelle)",
|
||||
"The minimum free disk space percentage must be a non-negative number between 0 and 100 (inclusive).": "Le pourcentage d'espace disque libre doit être un nombre positif compris entre 0 et 100 (inclus).",
|
||||
"The number of days must be a number and cannot be blank.": "Le nombre de jours doit être numérique et ne peut pas être vide.",
|
||||
"The number of days to keep files in the trash can. Zero means forever.": "Nombre de jours de conservation des fichiers dans la poubelle. 0 signifie \"indéfiniment\".",
|
||||
"The number of old versions to keep, per file.": "Le nombre maximum d'anciennes versions à garder indéfiniment, par fichier.",
|
||||
"The number of old versions to keep, per file.": "Nombre maximal d'anciennes versions à conserver indéfiniment, par fichier.",
|
||||
"The number of versions must be a number and cannot be blank.": "Le nombre de versions doit être numérique, et ne peut pas être vide.",
|
||||
"The path cannot be blank.": "Le chemin ne peut pas être vide.",
|
||||
"The rate limit must be a non-negative number (0: no limit)": "La limite de débit ne doit pas être négative (0 = pas de limite)",
|
||||
@@ -377,7 +377,7 @@
|
||||
"Version": "Version",
|
||||
"Versions": "Restauration...",
|
||||
"Versions Path": "Emplacement des versions",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Les plus anciennes versions seront supprimées automatiquement quand elles dépassent la durée maximum de conservation ou si leur nombre (par fichier) est supérieur à la limite prédéfinie pour l'intervalle.",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Les versions seront supprimées automatiquement quand elles dépassent la durée maximum de conservation ou si leur nombre (par fichier) est supérieur à la limite prédéfinie pour l'intervalle.",
|
||||
"Waiting to Scan": "En attente d'analyse",
|
||||
"Waiting to Sync": "En attente de synchronisation",
|
||||
"Waiting to scan": "En attente d'analyse",
|
||||
|
||||
@@ -353,7 +353,7 @@
|
||||
"Time the item was last modified": "Time the item was last modified",
|
||||
"Trash Can File Versioning": "ゴミ箱によるバージョン管理",
|
||||
"Type": "タイプ",
|
||||
"UNIX Permissions": "UNIX Permissions",
|
||||
"UNIX Permissions": "UNIX パーミッション",
|
||||
"Unavailable": "Unavailable",
|
||||
"Unavailable/Disabled by administrator or maintainer": "Unavailable/Disabled by administrator or maintainer",
|
||||
"Undecided (will prompt)": "未決定(再確認する)",
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"Are you sure you want to remove folder {%label%}?": "Weet u zeker dat u map {{label}} wilt verwijderen?",
|
||||
"Are you sure you want to restore {%count%} files?": "Weet u zeker dat u {{count}} bestanden wilt herstellen?",
|
||||
"Are you sure you want to upgrade?": "Weet u zeker dat u wilt bijwerken?",
|
||||
"Auto Accept": "Automatisch aanvaarden",
|
||||
"Auto Accept": "Instagenieten",
|
||||
"Automatic Crash Reporting": "Automatische crashrapportage",
|
||||
"Automatic upgrade now offers the choice between stable releases and release candidates.": "Automatisch bijwerken biedt nu de keuze tussen stabiele releases en release canditates.",
|
||||
"Automatic upgrades": "Automatische upgrades",
|
||||
@@ -44,7 +44,7 @@
|
||||
"CPU Utilization": "CPU-gebruik",
|
||||
"Changelog": "Wijzigingenlogboek",
|
||||
"Clean out after": "Opruimen na",
|
||||
"Click to see discovery failures": "Klikken om ontdekkingsproblemen weer te geven",
|
||||
"Click to see discovery failures": "Klikken om detectieproblemen weer te geven",
|
||||
"Close": "Sluiten",
|
||||
"Command": "Opdracht",
|
||||
"Comment, when used at the start of a line": "Opmerking, wanneer gebruikt aan het begin van een regel",
|
||||
@@ -84,9 +84,9 @@
|
||||
"Discard": "Verwerpen",
|
||||
"Disconnected": "Niet verbonden",
|
||||
"Disconnected (Unused)": "Niet verbonden (niet gebruikt)",
|
||||
"Discovered": "Ontdekt",
|
||||
"Discovery": "Ontdekking",
|
||||
"Discovery Failures": "Ontdekkingsproblemen",
|
||||
"Discovered": "Gedetecteerd",
|
||||
"Discovery": "Netwerkdetectie",
|
||||
"Discovery Failures": "Detectiefouten",
|
||||
"Do not restore": "Niet herstellen",
|
||||
"Do not restore all": "Niet alles herstellen",
|
||||
"Do you want to enable watching for changes for all your folders?": "Wilt u het opvolgen van wijzigingen voor al uw mappen inschakelen?",
|
||||
@@ -105,8 +105,8 @@
|
||||
"Enabled": "Ingeschakeld",
|
||||
"Enter a non-negative number (e.g., \"2.35\") and select a unit. Percentages are as part of the total disk size.": "Voer een positief getal in (bijv. \"2.35\") en selecteer een eenheid. Percentages zijn als deel van de totale schijfgrootte.",
|
||||
"Enter a non-privileged port number (1024 - 65535).": "Voer een niet-gereserveerd poortnummer in (1024 - 65535).",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Voer door komma's gescheiden (\"tcp://ip:port\", \"tcp://host:port\") adressen in of \"dynamic\" om automatische ontdekking van het adres uit te voeren.",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Voer door komma's gescheiden (\"tcp://ip:port\", \"tcp://host:port\") adressen in of \"dynamic\" om automatische ontdekking van het adres uit te voeren.",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Voer door komma's gescheiden (\"tcp://ip:port\", \"tcp://host:port\") adressen in of \"dynamic\" om automatische detectie van het adres uit te voeren.",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Voer door komma's gescheiden (\"tcp://ip:port\", \"tcp://host:port\") adressen in of \"dynamic\" om automatische detectie van het adres uit te voeren.",
|
||||
"Enter ignore patterns, one per line.": "Negeerpatronen invoeren, één per regel.",
|
||||
"Enter up to three octal digits.": "Voer tot drie octale cijfers in.",
|
||||
"Error": "Fout",
|
||||
@@ -145,8 +145,8 @@
|
||||
"General": "Algemeen",
|
||||
"Generate": "Genereren",
|
||||
"Global Changes": "Algemene wijzigingen",
|
||||
"Global Discovery": "Globale ontdekking",
|
||||
"Global Discovery Servers": "Globale ontdekkingsservers",
|
||||
"Global Discovery": "Globale detectie",
|
||||
"Global Discovery Servers": "Globale detectieservers",
|
||||
"Global State": "Globale status",
|
||||
"Help": "Help",
|
||||
"Home page": "Startpagina",
|
||||
@@ -176,7 +176,7 @@
|
||||
"Loading data...": "Gegevens laden...",
|
||||
"Loading...": "Laden...",
|
||||
"Local Additions": "Lokale toevoegingen",
|
||||
"Local Discovery": "Lokale ontdekking",
|
||||
"Local Discovery": "Lokale detectie",
|
||||
"Local State": "Lokale status",
|
||||
"Local State (Total)": "Lokale status (totaal)",
|
||||
"Locally Changed Items": "Lokaal gewijzigde items",
|
||||
@@ -215,8 +215,8 @@
|
||||
"Outgoing Rate Limit (KiB/s)": "Begrenzing uploadsnelheid (KiB/s)",
|
||||
"Override Changes": "Wijzigingen overschrijven",
|
||||
"Path": "Pad",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pad naar de map op de lokale computer. Zal aangemaakt worden als het niet bestaat. De tilde (~) kan gebruikt worden als snelkoppeling voor",
|
||||
"Path where new auto accepted folders will be created, as well as the default suggested path when adding new folders via the UI. Tilde character (~) expands to {%tilde%}.": "Pad waar nieuwe automatisch aanvaarde mappen aangemaakt zullen worden, evenals het standaard voorgestelde pad bij toevoegen van nieuwe mappen via de gebruikersinterface. Een tilde (~) zet uit tot {{tilde}}.",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Pad naar de map op de lokale computer. Zal opgemapt worden als het niet bestaat. De tilde (~) kan gebruikt worden als snelkoppeling voor",
|
||||
"Path where new auto accepted folders will be created, as well as the default suggested path when adding new folders via the UI. Tilde character (~) expands to {%tilde%}.": "Pad waar instavellen afgemapt zullen worden, evenals het standaard voorgestelde pad bij toevoegen van nieuwe mappen via de gebruikersinterface. Een tilde (~) zet uit tot {{tilde}}.",
|
||||
"Path where versions should be stored (leave empty for the default .stversions directory in the shared folder).": "Pad waar versies opgeslagen moeten worden (leeg laten voor de standaard .stversion-map in de gedeelde map).",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Pad waar versies opgeslagen moeten worden (leeg laten voor de standaard .stversion-map in de map).",
|
||||
"Pause": "Pauzeren",
|
||||
@@ -317,7 +317,7 @@
|
||||
"Syncthing now supports automatically reporting crashes to the developers. This feature is enabled by default.": "Syncthing ondersteunt nu automatisch rapporteren van crashes naar de ontwikkelaars. De functie is standaard ingeschakeld.",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing lijkt gestopt te zijn, of er is een probleem met uw internetverbinding. Opnieuw proberen...",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing lijkt een probleem te ondervinden met het verwerken van uw verzoek. Vernieuw de pagina of start Syncthing opnieuw als het probleem zich blijft voordoen. ",
|
||||
"Take me back": "Neem me terug",
|
||||
"Take me back": "Twijfelen",
|
||||
"The GUI address is overridden by startup options. Changes here will not take effect while the override is in place.": "Het GUI-adres wordt overschreven door opstart-opties. Wijzigingen hier zullen geen effect hebben terwijl de overschrijving van kracht is.",
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "De beheerdersinterface van Syncthing is ingesteld om externe toegang zonder wachtwoord toe te staan.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "De verzamelde statistieken zijn publiek beschikbaar op de onderstaande URL.",
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
<p translate>Copyright © 2014-2019 the following Contributors:</p>
|
||||
<div class="row">
|
||||
<div class="col-md-12" id="contributor-list">
|
||||
Jakob Borg, Audrius Butkevicius, Simon Frei, Alexander Graf, Alexandre Viau, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Evgeny Kuznetsov, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Wulf Weich, dependabot-preview[bot], greatroar, Aaron Bieber, Adam Piggott, Adel Qalieh, Alan Pope, Alberto Donato, Alessandro G., Alex Xu, Aman Gupta, Andrew Dunham, Andrew Rabert, Andrey D, André Colomb, Anjan Momi, Antoine Lamielle, Aranjedeath, Arkadiusz Tymiński, Arthur Axel fREW Schmidt, Artur Zubilewicz, Aurélien Rainone, BAHADIR YILMAZ, Bart De Vries, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benedikt Heine, Benedikt Morbach, Benno Fünfstück, Benny Ng, Boqin Qin, Boris Rybalkin, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Chris Tonkinson, Colin Kennedy, Cromefire_, Cyprien Devillez, Dale Visser, Dan, Daniel Bergmann, Daniel Martí, Darshil Chanpura, David Rimmer, Denis A., Dennis Wilson, Dmitry Saveliev, Domenic Horner, Dominik Heidler, Elias Jarlebring, Elliot Huffman, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Graham Miln, Han Boetes, Harrison Jones, Heiko Zuerker, Hugo Locurcio, Iain Barnett, Ian Johnson, Ilya Brin, Iskander Sharipov, Jaakko Hannikainen, Jacek Szafarkiewicz, Jacob, Jake Peterson, James Patterson, Jaroslav Malec, Jaya Chithra, Jens Diemer, Jerry Jacobs, Jochen Voss, Johan Andersson, Johan Vromans, John Rinehart, Jonas Thelemann, Jonathan Cross, Jose Manuel Delicado, Jörg Thalheim, Kalle Laine, Karol Różycki, Keith Turner, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin Bushiri, Kevin White, Jr., Kurt Fitzner, Laurent Arnoud, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Lukas Lihotzki, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, 
Marcus Legendre, Mario Majila, Mark Pulford, Mateusz Naściszewski, Mateusz Ż, Matic Potočnik, Matt Burke, Matt Robenolt, Matteo Ruina, Maurizio Tomasi, Max Schulze, MaximAL, Maxime Thirouin, Michael Jephcote, Michael Rienstra, Michael Tilli, Mike Boone, MikeLund, MikolajTwarog, Mingxuan Lin, Nicholas Rishel, Nico Stapelbroek, Nicolas Braud-Santoni, Nicolas Perraut, Niels Peter Roest, Nils Jakobi, Nitroretro, NoLooseEnds, Oliver Freyermuth, Otiel, Oyebanji Jacob Mayowa, Pablo, Pascal Jungblut, Paul Brit, Pawel Palenica, Paweł Rozlach, Peter Badida, Peter Dave Hello, Peter Hoeg, Peter Marquardt, Phil Davis, Phill Luby, Pier Paolo Ramon, Piotr Bejda, Pramodh KP, Richard Hartmann, Robert Carosi, Robin Schoonover, Roman Zaynetdinov, Ross Smith II, Ruslan Yevdokymov, Sacheendra Talluri, Scott Klupfel, Simon Mwepu, Sly_tom_cat, Stefan Kuntz, Suhas Gundimeda, Taylor Khan, Thomas Hipp, Tim Abell, Tim Howes, Tobias Nygren, Tobias Tom, Tom Jakubowski, Tomasz Wilczyński, Tommy Thorn, Tully Robinson, Tyler Brazier, Tyler Kropp, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, Vladimir Rusinov, William A. Kennington III, Xavier O., Yannic A., andresvia, andyleap, boomsquared, chenrui, chucic, dependabot[bot], derekriemer, desbma, georgespatton, ghjklw, janost, jaseg, jelle van der Waa, klemens, marco-m, mv1005, otbutz, perewa, rubenbe, wangguoliang, xjtdy888, 佛跳墙
|
||||
Jakob Borg, Audrius Butkevicius, Simon Frei, Alexander Graf, Alexandre Viau, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Evgeny Kuznetsov, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Nate Morrison, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Wulf Weich, dependabot-preview[bot], greatroar, Aaron Bieber, Adam Piggott, Adel Qalieh, Alan Pope, Alberto Donato, Alessandro G., Alex Xu, Aman Gupta, Andrew Dunham, Andrew Rabert, Andrey D, André Colomb, Anjan Momi, Antoine Lamielle, Aranjedeath, Arkadiusz Tymiński, Arthur Axel fREW Schmidt, Artur Zubilewicz, Aurélien Rainone, BAHADIR YILMAZ, Bart De Vries, Ben Curthoys, Ben Shepherd, Ben Sidhom, Benedikt Heine, Benedikt Morbach, Benno Fünfstück, Benny Ng, Boqin Qin, Boris Rybalkin, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Chris Tonkinson, Colin Kennedy, Cromefire_, Cyprien Devillez, Dale Visser, Dan, Daniel Bergmann, Daniel Martí, Darshil Chanpura, David Rimmer, Denis A., Dennis Wilson, Dmitry Saveliev, Domenic Horner, Dominik Heidler, Elias Jarlebring, Elliot Huffman, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Graham Miln, Han Boetes, Harrison Jones, Heiko Zuerker, Hugo Locurcio, Iain Barnett, Ian Johnson, Ilya Brin, Iskander Sharipov, Jaakko Hannikainen, Jacek Szafarkiewicz, Jacob, Jake Peterson, James Patterson, Jaroslav Malec, Jaya Chithra, Jens Diemer, Jerry Jacobs, Jochen Voss, Johan Andersson, Johan Vromans, John Rinehart, Jonas Thelemann, Jonathan, Jonathan Cross, Jose Manuel Delicado, Jörg Thalheim, Jędrzej Kula, Kalle Laine, Karol Różycki, Keith Turner, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Kevin Bushiri, Kevin White, Jr., Kurt Fitzner, Laurent Arnoud, Laurent Etiemble, Leo Arias, Liu Siyuan, Lord Landon Agahnim, Lukas Lihotzki, Majed Abdulaziz, Marc Laporte, Marc 
Pujol, Marcin Dziadus, Marcus Legendre, Mario Majila, Mark Pulford, Mateusz Naściszewski, Mateusz Ż, Matic Potočnik, Matt Burke, Matt Robenolt, Matteo Ruina, Maurizio Tomasi, Max Schulze, MaximAL, Maxime Thirouin, Michael Jephcote, Michael Rienstra, Michael Tilli, Mike Boone, MikeLund, MikolajTwarog, Mingxuan Lin, Nicholas Rishel, Nico Stapelbroek, Nicolas Braud-Santoni, Nicolas Perraut, Niels Peter Roest, Nils Jakobi, NinoM4ster, Nitroretro, NoLooseEnds, Oliver Freyermuth, Otiel, Oyebanji Jacob Mayowa, Pablo, Pascal Jungblut, Paul Brit, Pawel Palenica, Paweł Rozlach, Peter Badida, Peter Dave Hello, Peter Hoeg, Peter Marquardt, Phil Davis, Phill Luby, Pier Paolo Ramon, Piotr Bejda, Pramodh KP, Richard Hartmann, Robert Carosi, Robin Schoonover, Roman Zaynetdinov, Ross Smith II, Ruslan Yevdokymov, Sacheendra Talluri, Scott Klupfel, Shaarad Dalvi, Simon Mwepu, Sly_tom_cat, Stefan Kuntz, Suhas Gundimeda, Taylor Khan, Thomas Hipp, Tim Abell, Tim Howes, Tobias Nygren, Tobias Tom, Tom Jakubowski, Tomasz Wilczyński, Tommy Thorn, Tully Robinson, Tyler Brazier, Tyler Kropp, Unrud, Veeti Paananen, Victor Buinsky, Vil Brekin, Vladimir Rusinov, William A. Kennington III, Xavier O., Yannic A., andresvia, andyleap, boomsquared, chenrui, chucic, dependabot[bot], derekriemer, desbma, georgespatton, ghjklw, janost, jaseg, jelle van der Waa, klemens, marco-m, mv1005, otbutz, perewa, rubenbe, wangguoliang, xarx00, xjtdy888, 佛跳墙
|
||||
</div>
|
||||
</div>
|
||||
<hr />
|
||||
|
||||
@@ -116,3 +116,28 @@
|
||||
</div>
|
||||
</div>
|
||||
</notification>
|
||||
|
||||
<notification id="authenticationUserAndPassword">
|
||||
<div class="panel panel-success">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title"><span class="fas fa-bolt"></span> <span translate>GUI Authentication: Set User and Password</span></h3>
|
||||
</div>
|
||||
<div class="panel-body">
|
||||
<p>
|
||||
<span translate>Username/Password has not been set for the GUI authentication. Please consider setting it up.</span>
|
||||
</p>
|
||||
<p>
|
||||
<span translate>If you want to prevent other users on this computer from accessing Syncthing and through it your files, consider setting up authentication.</span>
|
||||
</p>
|
||||
</div>
|
||||
<div class="panel-footer">
|
||||
<button type="button" class="btn btn-sm btn-default pull-right" ng-click="showSettings()">
|
||||
<span class="fas fa-cog"></span> <span translate>Settings</span>
|
||||
</button>
|
||||
<button type="button" class="btn btn-sm btn-default pull-left" ng-click="dismissNotification('authenticationUserAndPassword')">
|
||||
<span class="fa fa-check-circle"></span> <span translate>OK</span>
|
||||
</button>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
</div>
|
||||
</notification>
|
||||
@@ -59,6 +59,7 @@ angular.module('syncthing.core')
|
||||
} catch (exception) { }
|
||||
|
||||
$scope.folderDefaults = {
|
||||
devices: [],
|
||||
sharedDevices: {},
|
||||
selectedDevices: {},
|
||||
unrelatedDevices: {},
|
||||
@@ -448,6 +449,10 @@ angular.module('syncthing.core')
|
||||
&& (!guiCfg.user || !guiCfg.password)
|
||||
&& guiCfg.authMode !== 'ldap'
|
||||
&& !guiCfg.insecureAdminAccess;
|
||||
|
||||
if (guiCfg.user && guiCfg.password) {
|
||||
$scope.dismissNotification('authenticationUserAndPassword');
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1829,15 +1834,22 @@ angular.module('syncthing.core')
|
||||
$scope.saveFolder = function () {
|
||||
$('#editFolder').modal('hide');
|
||||
var folderCfg = angular.copy($scope.currentFolder);
|
||||
folderCfg.devices = [];
|
||||
folderCfg.selectedDevices[$scope.myID] = true;
|
||||
var newDevices = [];
|
||||
folderCfg.devices.forEach(function (dev) {
|
||||
if (folderCfg.selectedDevices[dev.deviceID] === true) {
|
||||
newDevices.push(dev);
|
||||
delete folderCfg.selectedDevices[dev.deviceID];
|
||||
};
|
||||
});
|
||||
for (var deviceID in folderCfg.selectedDevices) {
|
||||
if (folderCfg.selectedDevices[deviceID] === true) {
|
||||
folderCfg.devices.push({
|
||||
newDevices.push({
|
||||
deviceID: deviceID
|
||||
});
|
||||
}
|
||||
}
|
||||
folderCfg.devices = newDevices;
|
||||
delete folderCfg.sharedDevices;
|
||||
delete folderCfg.selectedDevices;
|
||||
delete folderCfg.unrelatedDevices;
|
||||
|
||||
@@ -234,7 +234,9 @@
|
||||
<div class="col-md-6 form-group">
|
||||
<label translate>Permissions</label><br />
|
||||
<input type="checkbox" ng-model="currentFolder.ignorePerms" /> <span translate>Ignore</span>
|
||||
<p translate class="help-block">File permission bits are ignored when looking for changes. Use on FAT file systems.</p>
|
||||
<p translate class="help-block">
|
||||
Disables comparing and syncing file permissions. Useful on systems with nonexistent or custom permissions (e.g. FAT, exFAT, Synology, Android).
|
||||
</p>
|
||||
<p class="col-xs-12 help-block" ng-show="folderEditor.minDiskFree.$invalid">
|
||||
<span translate>Enter a non-negative number (e.g., "2.35") and select a unit. Percentages are as part of the total disk size.</span>
|
||||
</p>
|
||||
|
||||
@@ -1267,7 +1267,7 @@ func (s *service) getEventSub(mask events.EventType) events.BufferedSubscription
|
||||
|
||||
func (s *service) getSystemUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
if s.noUpgrade {
|
||||
http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), 500)
|
||||
http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
opts := s.cfg.Options()
|
||||
@@ -1635,7 +1635,7 @@ func (f jsonFileInfoTrunc) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
func fileIntfJSONMap(f db.FileIntf) map[string]interface{} {
|
||||
func fileIntfJSONMap(f protocol.FileIntf) map[string]interface{} {
|
||||
out := map[string]interface{}{
|
||||
"name": f.FileName(),
|
||||
"type": f.FileType().String(),
|
||||
|
||||
@@ -24,7 +24,7 @@ const themePrefix = "theme-assets/"
|
||||
|
||||
type staticsServer struct {
|
||||
assetDir string
|
||||
assets map[string]string
|
||||
assets map[string]assets.Asset
|
||||
availableThemes []string
|
||||
|
||||
mut sync.RWMutex
|
||||
@@ -118,7 +118,7 @@ func (s *staticsServer) serveAsset(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Check for a compiled in asset for the current theme.
|
||||
bs, ok := s.assets[theme+"/"+file]
|
||||
as, ok := s.assets[theme+"/"+file]
|
||||
if !ok {
|
||||
// Check for an overridden default asset.
|
||||
if s.assetDir != "" {
|
||||
@@ -134,18 +134,15 @@ func (s *staticsServer) serveAsset(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Check for a compiled in default asset.
|
||||
bs, ok = s.assets[config.DefaultTheme+"/"+file]
|
||||
as, ok = s.assets[config.DefaultTheme+"/"+file]
|
||||
if !ok {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
assets.Serve(w, r, assets.Asset{
|
||||
ContentGz: bs,
|
||||
Filename: file,
|
||||
Modified: modificationTime,
|
||||
})
|
||||
as.Modified = modificationTime
|
||||
assets.Serve(w, r, as)
|
||||
}
|
||||
|
||||
func (s *staticsServer) serveThemes(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/d4l3k/messagediff"
|
||||
"github.com/syncthing/syncthing/lib/assets"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
@@ -152,19 +153,25 @@ func TestAssetsDir(t *testing.T) {
|
||||
gw := gzip.NewWriter(buf)
|
||||
gw.Write([]byte("default"))
|
||||
gw.Close()
|
||||
def := buf.String()
|
||||
def := assets.Asset{
|
||||
Content: buf.String(),
|
||||
Gzipped: true,
|
||||
}
|
||||
|
||||
buf = new(bytes.Buffer)
|
||||
gw = gzip.NewWriter(buf)
|
||||
gw.Write([]byte("foo"))
|
||||
gw.Close()
|
||||
foo := buf.String()
|
||||
foo := assets.Asset{
|
||||
Content: buf.String(),
|
||||
Gzipped: true,
|
||||
}
|
||||
|
||||
e := &staticsServer{
|
||||
theme: "foo",
|
||||
mut: sync.NewRWMutex(),
|
||||
assetDir: "testdata",
|
||||
assets: map[string]string{
|
||||
assets: map[string]assets.Asset{
|
||||
"foo/a": foo, // overridden in foo/a
|
||||
"foo/b": foo,
|
||||
"default/a": def, // overridden in default/a (but foo/a takes precedence)
|
||||
|
||||
@@ -22,9 +22,12 @@ func TestAssets(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatal("No index.html in compiled in assets")
|
||||
}
|
||||
if !idx.Gzipped {
|
||||
t.Fatal("default/index.html should be compressed")
|
||||
}
|
||||
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(strings.NewReader(idx))
|
||||
gr, _ = gzip.NewReader(strings.NewReader(idx.Content))
|
||||
html, _ := ioutil.ReadAll(gr)
|
||||
|
||||
if !bytes.Contains(html, []byte("<html")) {
|
||||
|
||||
@@ -102,6 +102,10 @@ func (c *mockedConfig) SetFolder(fld config.FolderConfiguration) (config.Waiter,
|
||||
return noopWaiter{}, nil
|
||||
}
|
||||
|
||||
func (c *mockedConfig) SetFolders(folders []config.FolderConfiguration) (config.Waiter, error) {
|
||||
return noopWaiter{}, nil
|
||||
}
|
||||
|
||||
func (c *mockedConfig) Device(id protocol.DeviceID) (config.DeviceConfiguration, bool) {
|
||||
return config.DeviceConfiguration{}, false
|
||||
}
|
||||
|
||||
@@ -22,11 +22,13 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Asset is the type of arguments to Serve.
|
||||
// An Asset is an embedded file to be served over HTTP.
|
||||
type Asset struct {
|
||||
ContentGz string // gzipped contents of asset.
|
||||
Filename string // Original filename, determines Content-Type.
|
||||
Modified time.Time // Determines ETag and Last-Modified.
|
||||
Content string // Contents of asset, possibly gzipped.
|
||||
Gzipped bool
|
||||
Length int // Length of (decompressed) Content.
|
||||
Filename string // Original filename, determines Content-Type.
|
||||
Modified time.Time // Determines ETag and Last-Modified.
|
||||
}
|
||||
|
||||
// Serve writes a gzipped asset to w.
|
||||
@@ -53,14 +55,19 @@ func Serve(w http.ResponseWriter, r *http.Request, asset Asset) {
|
||||
return
|
||||
}
|
||||
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
switch {
|
||||
case !asset.Gzipped:
|
||||
header.Set("Content-Length", strconv.Itoa(len(asset.Content)))
|
||||
io.WriteString(w, asset.Content)
|
||||
case strings.Contains(r.Header.Get("Accept-Encoding"), "gzip"):
|
||||
header.Set("Content-Encoding", "gzip")
|
||||
header.Set("Content-Length", strconv.Itoa(len(asset.ContentGz)))
|
||||
io.WriteString(w, asset.ContentGz)
|
||||
} else {
|
||||
header.Set("Content-Length", strconv.Itoa(len(asset.Content)))
|
||||
io.WriteString(w, asset.Content)
|
||||
default:
|
||||
header.Set("Content-Length", strconv.Itoa(asset.Length))
|
||||
// gunzip for browsers that don't want gzip.
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(strings.NewReader(asset.ContentGz))
|
||||
gr, _ = gzip.NewReader(strings.NewReader(asset.Content))
|
||||
io.Copy(w, gr)
|
||||
gr.Close()
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -38,15 +39,23 @@ func decompress(p []byte) (out []byte) {
|
||||
return out
|
||||
}
|
||||
|
||||
func TestServe(t *testing.T) {
|
||||
indexHTML := `<html>Hello, world!</html>`
|
||||
indexGz := compress(indexHTML)
|
||||
func TestServe(t *testing.T) { testServe(t, false) }
|
||||
func TestServeGzip(t *testing.T) { testServe(t, true) }
|
||||
|
||||
func testServe(t *testing.T, gzip bool) {
|
||||
const indexHTML = `<html>Hello, world!</html>`
|
||||
content := indexHTML
|
||||
if gzip {
|
||||
content = compress(indexHTML)
|
||||
}
|
||||
|
||||
handler := func(w http.ResponseWriter, r *http.Request) {
|
||||
Serve(w, r, Asset{
|
||||
ContentGz: indexGz,
|
||||
Filename: r.URL.Path[1:],
|
||||
Modified: time.Unix(0, 0),
|
||||
Content: content,
|
||||
Gzipped: gzip,
|
||||
Length: len(indexHTML),
|
||||
Filename: r.URL.Path[1:],
|
||||
Modified: time.Unix(0, 0),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -73,7 +82,17 @@ func TestServe(t *testing.T) {
|
||||
}
|
||||
|
||||
body, _ := ioutil.ReadAll(res.Body)
|
||||
if acceptGzip {
|
||||
|
||||
// Content-Length is the number of bytes in the encoded (compressed) body
|
||||
// (https://stackoverflow.com/a/3819303).
|
||||
n, err := strconv.Atoi(res.Header.Get("Content-Length"))
|
||||
if err != nil {
|
||||
t.Errorf("malformed Content-Length %q", res.Header.Get("Content-Length"))
|
||||
} else if n != len(body) {
|
||||
t.Errorf("wrong Content-Length %d, should be %d", n, len(body))
|
||||
}
|
||||
|
||||
if gzip && acceptGzip {
|
||||
body = decompress(body)
|
||||
}
|
||||
if string(body) != indexHTML {
|
||||
|
||||
@@ -9,6 +9,7 @@ package build
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
@@ -37,6 +38,14 @@ var (
|
||||
Tags []string
|
||||
|
||||
allowedVersionExp = regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z0-9]+)*(\.\d+)*(\+\d+-g[0-9a-f]+)?(-[^\s]+)?$`)
|
||||
|
||||
envTags = []string{
|
||||
"STGUIASSETS",
|
||||
"STHASHING",
|
||||
"STNORESTART",
|
||||
"STNOUPGRADE",
|
||||
"USE_BADGER",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -79,6 +88,11 @@ func LongVersionFor(program string) string {
|
||||
// This string and date format is essentially part of our external API. Never change it.
|
||||
date := Date.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
v := fmt.Sprintf(`%s %s "%s" (%s %s-%s) %s@%s %s`, program, Version, Codename, runtime.Version(), runtime.GOOS, runtime.GOARCH, User, Host, date)
|
||||
for _, envVar := range envTags {
|
||||
if os.Getenv(envVar) != "" {
|
||||
Tags = append(Tags, strings.ToLower(envVar))
|
||||
}
|
||||
}
|
||||
if len(Tags) > 0 {
|
||||
v = fmt.Sprintf("%s [%s]", v, strings.Join(Tags, ", "))
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ import (
|
||||
|
||||
const (
|
||||
OldestHandledVersion = 10
|
||||
CurrentVersion = 30
|
||||
CurrentVersion = 31
|
||||
MaxRescanIntervalS = 365 * 24 * 60 * 60
|
||||
)
|
||||
|
||||
@@ -103,6 +103,8 @@ func New(myID protocol.DeviceID) Configuration {
|
||||
cfg.Version = CurrentVersion
|
||||
cfg.OriginalVersion = CurrentVersion
|
||||
|
||||
cfg.Options.UnackedNotificationIDs = []string{"authenticationUserAndPassword"}
|
||||
|
||||
util.SetDefaults(&cfg)
|
||||
util.SetDefaults(&cfg.Options)
|
||||
util.SetDefaults(&cfg.GUI)
|
||||
@@ -133,8 +135,9 @@ func NewWithFreePorts(myID protocol.DeviceID) (Configuration, error) {
|
||||
cfg.Options.RawListenAddresses = []string{"default"}
|
||||
} else {
|
||||
cfg.Options.RawListenAddresses = []string{
|
||||
fmt.Sprintf("tcp://%s", net.JoinHostPort("0.0.0.0", strconv.Itoa(port))),
|
||||
util.Address("tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(port))),
|
||||
"dynamic+https://relays.syncthing.net/endpoint",
|
||||
util.Address("quic", net.JoinHostPort("0.0.0.0", strconv.Itoa(port))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -418,6 +421,13 @@ nextPendingDevice:
|
||||
}
|
||||
if cfg.Options.UnackedNotificationIDs == nil {
|
||||
cfg.Options.UnackedNotificationIDs = []string{}
|
||||
} else if cfg.GUI.User != "" && cfg.GUI.Password != "" {
|
||||
for i, key := range cfg.Options.UnackedNotificationIDs {
|
||||
if key == "authenticationUserAndPassword" {
|
||||
cfg.Options.UnackedNotificationIDs = append(cfg.Options.UnackedNotificationIDs[:i], cfg.Options.UnackedNotificationIDs[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -67,7 +67,7 @@ func TestDefaultValues(t *testing.T) {
|
||||
AlwaysLocalNets: []string{},
|
||||
OverwriteRemoteDevNames: false,
|
||||
TempIndexMinBlocks: 10,
|
||||
UnackedNotificationIDs: []string{},
|
||||
UnackedNotificationIDs: []string{"authenticationUserAndPassword"},
|
||||
DefaultFolderPath: "~",
|
||||
SetLowPriority: true,
|
||||
CRURL: "https://crash.syncthing.net/newcrash",
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
// update the config version. The order of migrations doesn't matter here,
|
||||
// put the newest on top for readability.
|
||||
var migrations = migrationSet{
|
||||
{31, migrateToConfigV31},
|
||||
{30, migrateToConfigV30},
|
||||
{29, migrateToConfigV29},
|
||||
{28, migrateToConfigV28},
|
||||
@@ -85,6 +86,11 @@ func (m migration) apply(cfg *Configuration) {
|
||||
cfg.Version = m.targetVersion
|
||||
}
|
||||
|
||||
func migrateToConfigV31(cfg *Configuration) {
|
||||
// Show a notification about setting User and Password
|
||||
cfg.Options.UnackedNotificationIDs = append(cfg.Options.UnackedNotificationIDs, "authenticationUserAndPassword")
|
||||
}
|
||||
|
||||
func migrateToConfigV30(cfg *Configuration) {
|
||||
// The "max concurrent scans" option is now spelled "max folder concurrency"
|
||||
// to be more general.
|
||||
|
||||
2
lib/config/testdata/overridenvalues.xml
vendored
2
lib/config/testdata/overridenvalues.xml
vendored
@@ -1,4 +1,4 @@
|
||||
<configuration version="29">
|
||||
<configuration version="31">
|
||||
<options>
|
||||
<listenAddress>tcp://:23000</listenAddress>
|
||||
<allowDelete>false</allowDelete>
|
||||
|
||||
@@ -73,6 +73,7 @@ type Wrapper interface {
|
||||
Folders() map[string]FolderConfiguration
|
||||
FolderList() []FolderConfiguration
|
||||
SetFolder(fld FolderConfiguration) (Waiter, error)
|
||||
SetFolders(folders []FolderConfiguration) (Waiter, error)
|
||||
|
||||
Device(id protocol.DeviceID) (DeviceConfiguration, bool)
|
||||
Devices() map[protocol.DeviceID]DeviceConfiguration
|
||||
@@ -96,7 +97,6 @@ type wrapper struct {
|
||||
|
||||
waiter Waiter // Latest ongoing config change
|
||||
deviceMap map[protocol.DeviceID]DeviceConfiguration
|
||||
folderMap map[string]FolderConfiguration
|
||||
subs []Committer
|
||||
mut sync.Mutex
|
||||
|
||||
@@ -196,7 +196,6 @@ func (w *wrapper) replaceLocked(to Configuration) (Waiter, error) {
|
||||
|
||||
w.cfg = to
|
||||
w.deviceMap = nil
|
||||
w.folderMap = nil
|
||||
|
||||
w.waiter = w.notifyListeners(from.Copy(), to.Copy())
|
||||
|
||||
@@ -288,13 +287,11 @@ func (w *wrapper) RemoveDevice(id protocol.DeviceID) (Waiter, error) {
|
||||
func (w *wrapper) Folders() map[string]FolderConfiguration {
|
||||
w.mut.Lock()
|
||||
defer w.mut.Unlock()
|
||||
if w.folderMap == nil {
|
||||
w.folderMap = make(map[string]FolderConfiguration, len(w.cfg.Folders))
|
||||
for _, fld := range w.cfg.Folders {
|
||||
w.folderMap[fld.ID] = fld.Copy()
|
||||
}
|
||||
folderMap := make(map[string]FolderConfiguration, len(w.cfg.Folders))
|
||||
for _, fld := range w.cfg.Folders {
|
||||
folderMap[fld.ID] = fld.Copy()
|
||||
}
|
||||
return w.folderMap
|
||||
return folderMap
|
||||
}
|
||||
|
||||
// FolderList returns a slice of folders.
|
||||
@@ -307,19 +304,30 @@ func (w *wrapper) FolderList() []FolderConfiguration {
|
||||
// SetFolder adds a new folder to the configuration, or overwrites an existing
|
||||
// folder with the same ID.
|
||||
func (w *wrapper) SetFolder(fld FolderConfiguration) (Waiter, error) {
|
||||
return w.SetFolders([]FolderConfiguration{fld})
|
||||
}
|
||||
|
||||
// SetFolders adds new folders to the configuration, or overwrites existing
|
||||
// folders with the same ID.
|
||||
func (w *wrapper) SetFolders(folders []FolderConfiguration) (Waiter, error) {
|
||||
w.mut.Lock()
|
||||
defer w.mut.Unlock()
|
||||
|
||||
newCfg := w.cfg.Copy()
|
||||
|
||||
for i := range newCfg.Folders {
|
||||
if newCfg.Folders[i].ID == fld.ID {
|
||||
newCfg.Folders[i] = fld
|
||||
return w.replaceLocked(newCfg)
|
||||
inds := make(map[string]int, len(w.cfg.Folders))
|
||||
for i, folder := range newCfg.Folders {
|
||||
inds[folder.ID] = i
|
||||
}
|
||||
filtered := folders[:0]
|
||||
for _, folder := range folders {
|
||||
if i, ok := inds[folder.ID]; ok {
|
||||
newCfg.Folders[i] = folder
|
||||
} else {
|
||||
filtered = append(filtered, folder)
|
||||
}
|
||||
}
|
||||
|
||||
newCfg.Folders = append(newCfg.Folders, fld)
|
||||
newCfg.Folders = append(newCfg.Folders, filtered...)
|
||||
|
||||
return w.replaceLocked(newCfg)
|
||||
}
|
||||
|
||||
@@ -123,7 +123,6 @@ type service struct {
|
||||
tlsDefaultCommonName string
|
||||
limiter *limiter
|
||||
natService *nat.Service
|
||||
natServiceToken *suture.ServiceToken
|
||||
evLogger events.Logger
|
||||
|
||||
listenersMut sync.RWMutex
|
||||
@@ -188,6 +187,7 @@ func NewService(cfg config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *t
|
||||
service.Add(util.AsService(service.connect, fmt.Sprintf("%s/connect", service)))
|
||||
service.Add(util.AsService(service.handle, fmt.Sprintf("%s/handle", service)))
|
||||
service.Add(service.listenerSupervisor)
|
||||
service.Add(service.natService)
|
||||
|
||||
return service
|
||||
}
|
||||
@@ -601,14 +601,25 @@ func (s *service) CommitConfiguration(from, to config.Configuration) bool {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := s.listeners[addr]; ok {
|
||||
seen[addr] = struct{}{}
|
||||
uri, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
l.Warnf("Skipping malformed listener URL %q: %v", addr, err)
|
||||
continue
|
||||
}
|
||||
|
||||
uri, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
l.Infof("Parsing listener address %s: %v", addr, err)
|
||||
// Make sure we always have the canonical representation of the URL.
|
||||
// This is for consistency as we use it as a map key, but also to
|
||||
// avoid misunderstandings. We do not just use the canonicalized
|
||||
// version, because an URL that looks very similar to a human might
|
||||
// mean something entirely different to the computer (e.g.,
|
||||
// tcp:/127.0.0.1:22000 in fact being equivalent to tcp://:22000).
|
||||
if canonical := uri.String(); canonical != addr {
|
||||
l.Warnf("Skipping malformed listener URL %q (not canonical)", addr)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := s.listeners[addr]; ok {
|
||||
seen[addr] = struct{}{}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -641,16 +652,6 @@ func (s *service) CommitConfiguration(from, to config.Configuration) bool {
|
||||
}
|
||||
s.listenersMut.Unlock()
|
||||
|
||||
if to.Options.NATEnabled && s.natServiceToken == nil {
|
||||
l.Debugln("Starting NAT service")
|
||||
token := s.Add(s.natService)
|
||||
s.natServiceToken = &token
|
||||
} else if !to.Options.NATEnabled && s.natServiceToken != nil {
|
||||
l.Debugln("Stopping NAT service")
|
||||
s.Remove(*s.natServiceToken)
|
||||
s.natServiceToken = nil
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,12 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
)
|
||||
|
||||
// The Reader interface specifies the read-only operations available on the
|
||||
@@ -118,10 +123,24 @@ const (
|
||||
)
|
||||
|
||||
func Open(path string, tuning Tuning) (Backend, error) {
|
||||
if os.Getenv("USE_BADGER") != "" {
|
||||
l.Warnln("Using experimental badger db")
|
||||
if err := maybeCopyDatabase(path, strings.Replace(path, locations.BadgerDir, locations.LevelDBDir, 1), OpenBadger, OpenLevelDBRO); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return OpenBadger(path)
|
||||
}
|
||||
|
||||
if err := maybeCopyDatabase(path, strings.Replace(path, locations.LevelDBDir, locations.BadgerDir, 1), OpenLevelDBAuto, OpenBadger); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return OpenLevelDB(path, tuning)
|
||||
}
|
||||
|
||||
func OpenMemory() Backend {
|
||||
if os.Getenv("USE_BADGER") != "" {
|
||||
return OpenBadgerMemory()
|
||||
}
|
||||
return OpenLevelDBMemory()
|
||||
}
|
||||
|
||||
@@ -202,3 +221,70 @@ func (cg *closeWaitGroup) CloseWait() {
|
||||
cg.closeMut.Unlock()
|
||||
cg.WaitGroup.Wait()
|
||||
}
|
||||
|
||||
type opener func(path string) (Backend, error)
|
||||
|
||||
// maybeCopyDatabase copies the database if the destination doesn't exist
|
||||
// but the source does.
|
||||
func maybeCopyDatabase(toPath, fromPath string, toOpen, fromOpen opener) error {
|
||||
if _, err := os.Lstat(toPath); !os.IsNotExist(err) {
|
||||
// Destination database exists (or is otherwise unavailable), do not
|
||||
// attempt to overwrite it.
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := os.Lstat(fromPath); err != nil {
|
||||
// Source database is not available, so nothing to copy
|
||||
return nil
|
||||
}
|
||||
|
||||
fromDB, err := fromOpen(fromPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fromDB.Close()
|
||||
|
||||
toDB, err := toOpen(toPath)
|
||||
if err != nil {
|
||||
// That's odd, but it will be handled & reported in the usual path
|
||||
// so we can ignore it here.
|
||||
return err
|
||||
}
|
||||
defer toDB.Close()
|
||||
|
||||
l.Infoln("Copying database for format conversion...")
|
||||
if err := copyBackend(toDB, fromDB); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Move the old database out of the way to mark it as migrated.
|
||||
fromDB.Close()
|
||||
_ = os.Rename(fromPath, fromPath+".migrated."+time.Now().Format("20060102150405"))
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyBackend(to, from Backend) error {
|
||||
srcIt, err := from.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer srcIt.Release()
|
||||
|
||||
dstTx, err := to.NewWriteTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dstTx.Release()
|
||||
|
||||
for srcIt.Next() {
|
||||
if err := dstTx.Put(srcIt.Key(), srcIt.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if srcIt.Error() != nil {
|
||||
return err
|
||||
}
|
||||
srcIt.Release()
|
||||
|
||||
return dstTx.Commit()
|
||||
}
|
||||
|
||||
443
lib/db/backend/badger_backend.go
Normal file
443
lib/db/backend/badger_backend.go
Normal file
@@ -0,0 +1,443 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package backend
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
badger "github.com/dgraph-io/badger/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
checkpointFlushMinSize = 128 << KiB
|
||||
maxCacheSize = 64 << MiB
|
||||
)
|
||||
|
||||
func OpenBadger(path string) (Backend, error) {
|
||||
opts := badger.DefaultOptions(path)
|
||||
opts = opts.WithMaxCacheSize(maxCacheSize).WithCompactL0OnClose(false)
|
||||
opts.Logger = nil
|
||||
return openBadger(opts)
|
||||
}
|
||||
|
||||
func OpenBadgerMemory() Backend {
|
||||
opts := badger.DefaultOptions("").WithInMemory(true)
|
||||
opts.Logger = nil
|
||||
backend, err := openBadger(opts)
|
||||
if err != nil {
|
||||
// Opening in-memory should never be able to fail, and is anyway
|
||||
// used just by tests.
|
||||
panic(err)
|
||||
}
|
||||
return backend
|
||||
}
|
||||
|
||||
func openBadger(opts badger.Options) (Backend, error) {
|
||||
// XXX: We should find good values for memory utilization in the "small"
|
||||
// and "large" cases we support for LevelDB. Some notes here:
|
||||
// https://github.com/dgraph-io/badger/tree/v2.0.3#memory-usage
|
||||
bdb, err := badger.Open(opts)
|
||||
if err != nil {
|
||||
return nil, wrapBadgerErr(err)
|
||||
}
|
||||
return &badgerBackend{
|
||||
bdb: bdb,
|
||||
closeWG: &closeWaitGroup{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// badgerBackend implements Backend on top of a badger database.
type badgerBackend struct {
	bdb     *badger.DB      // the underlying Badger database
	closeWG *closeWaitGroup // tracks in-flight operations so Close can wait for them
}
|
||||
|
||||
func (b *badgerBackend) NewReadTransaction() (ReadTransaction, error) {
|
||||
rel, err := newReleaser(b.closeWG)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return badgerSnapshot{
|
||||
txn: b.bdb.NewTransaction(false),
|
||||
rel: rel,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewWriteTransaction returns a WriteTransaction backed by two Badger
// transactions: a read-only one serving all reads and a read-write one
// accumulating the writes. Each holds its own releaser slot in the
// close wait group; on failure to acquire the second, the first is
// released again before returning.
func (b *badgerBackend) NewWriteTransaction() (WriteTransaction, error) {
	rel1, err := newReleaser(b.closeWG)
	if err != nil {
		return nil, err
	}
	rel2, err := newReleaser(b.closeWG)
	if err != nil {
		rel1.Release()
		return nil, err
	}

	// We use two transactions here to preserve the property that our
	// leveldb wrapper has, that writes in a transaction are completely
	// invisible until it's committed, even inside that same transaction.
	rtxn := b.bdb.NewTransaction(false)
	wtxn := b.bdb.NewTransaction(true)
	return &badgerTransaction{
		badgerSnapshot: badgerSnapshot{
			txn: rtxn,
			rel: rel1,
		},
		txn: wtxn,
		bdb: b.bdb,
		rel: rel2,
	}, nil
}
|
||||
|
||||
func (b *badgerBackend) Close() error {
|
||||
b.closeWG.CloseWait()
|
||||
return wrapBadgerErr(b.bdb.Close())
|
||||
}
|
||||
|
||||
func (b *badgerBackend) Get(key []byte) ([]byte, error) {
|
||||
if err := b.closeWG.Add(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer b.closeWG.Done()
|
||||
|
||||
txn := b.bdb.NewTransaction(false)
|
||||
defer txn.Discard()
|
||||
item, err := txn.Get(key)
|
||||
if err != nil {
|
||||
return nil, wrapBadgerErr(err)
|
||||
}
|
||||
val, err := item.ValueCopy(nil)
|
||||
if err != nil {
|
||||
return nil, wrapBadgerErr(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (b *badgerBackend) NewPrefixIterator(prefix []byte) (Iterator, error) {
|
||||
if err := b.closeWG.Add(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txn := b.bdb.NewTransaction(false)
|
||||
it := badgerPrefixIterator(txn, prefix)
|
||||
it.releaseFn = func() {
|
||||
defer b.closeWG.Done()
|
||||
txn.Discard()
|
||||
}
|
||||
return it, nil
|
||||
}
|
||||
|
||||
func (b *badgerBackend) NewRangeIterator(first, last []byte) (Iterator, error) {
|
||||
if err := b.closeWG.Add(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txn := b.bdb.NewTransaction(false)
|
||||
it := badgerRangeIterator(txn, first, last)
|
||||
it.releaseFn = func() {
|
||||
defer b.closeWG.Done()
|
||||
txn.Discard()
|
||||
}
|
||||
return it, nil
|
||||
}
|
||||
|
||||
func (b *badgerBackend) Put(key, val []byte) error {
|
||||
if err := b.closeWG.Add(1); err != nil {
|
||||
return err
|
||||
}
|
||||
defer b.closeWG.Done()
|
||||
|
||||
txn := b.bdb.NewTransaction(true)
|
||||
if err := txn.Set(key, val); err != nil {
|
||||
txn.Discard()
|
||||
return wrapBadgerErr(err)
|
||||
}
|
||||
return wrapBadgerErr(txn.Commit())
|
||||
}
|
||||
|
||||
func (b *badgerBackend) Delete(key []byte) error {
|
||||
if err := b.closeWG.Add(1); err != nil {
|
||||
return err
|
||||
}
|
||||
defer b.closeWG.Done()
|
||||
|
||||
txn := b.bdb.NewTransaction(true)
|
||||
if err := txn.Delete(key); err != nil {
|
||||
txn.Discard()
|
||||
return wrapBadgerErr(err)
|
||||
}
|
||||
return wrapBadgerErr(txn.Commit())
|
||||
}
|
||||
|
||||
// Compact runs Badger's value log garbage collection repeatedly until
// it reports there is nothing further to gain. Several "nothing to do"
// sentinel errors are treated as success; a warning is logged if the
// loop runs for more than an hour.
func (b *badgerBackend) Compact() error {
	if err := b.closeWG.Add(1); err != nil {
		return err
	}
	defer b.closeWG.Done()

	// This weird looking loop is as recommended in the README
	// (https://github.com/dgraph-io/badger/tree/v2.0.3#garbage-collection).
	// Basically, the RunValueLogGC will pick some promising thing to
	// garbage collect at random and return nil if it improved the
	// situation, then return ErrNoRewrite when there is nothing more to GC.
	// The 0.5 is the discard ratio, for which the method docs say they
	// "recommend setting discardRatio to 0.5, thus indicating that a file
	// be rewritten if half the space can be discarded".
	var err error
	t0 := time.Now()
	for err == nil {
		if time.Since(t0) > time.Hour {
			l.Warnln("Database compaction is taking a long time, performance may be impacted. Consider investigating and/or opening an issue if this warning repeats.")
			t0 = time.Now()
		}
		err = b.bdb.RunValueLogGC(0.5)
	}

	if errors.Is(err, badger.ErrNoRewrite) {
		// GC did nothing, because nothing needed to be done
		return nil
	}
	if errors.Is(err, badger.ErrRejected) {
		// GC was already running (could possibly happen), or the database
		// is closed (can't happen).
		return nil
	}
	if errors.Is(err, badger.ErrGCInMemoryMode) {
		// GC in in-memory mode, which is fine.
		return nil
	}
	return err
}
|
||||
|
||||
// badgerSnapshot implements backend.ReadTransaction as a read-only
// Badger transaction plus a releaser slot in the backend's close wait
// group.
type badgerSnapshot struct {
	txn *badger.Txn // read-only transaction serving all reads
	rel *releaser   // released when the snapshot is done
}
|
||||
|
||||
func (l badgerSnapshot) Get(key []byte) ([]byte, error) {
|
||||
item, err := l.txn.Get(key)
|
||||
if err != nil {
|
||||
return nil, wrapBadgerErr(err)
|
||||
}
|
||||
val, err := item.ValueCopy(nil)
|
||||
if err != nil {
|
||||
return nil, wrapBadgerErr(err)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (l badgerSnapshot) NewPrefixIterator(prefix []byte) (Iterator, error) {
|
||||
return badgerPrefixIterator(l.txn, prefix), nil
|
||||
}
|
||||
|
||||
func (l badgerSnapshot) NewRangeIterator(first, last []byte) (Iterator, error) {
|
||||
return badgerRangeIterator(l.txn, first, last), nil
|
||||
}
|
||||
|
||||
func (l badgerSnapshot) Release() {
|
||||
defer l.rel.Release()
|
||||
l.txn.Discard()
|
||||
}
|
||||
|
||||
// badgerTransaction implements backend.WriteTransaction. Reads go
// through the embedded snapshot's read-only transaction, so writes made
// in this transaction stay invisible until commit; writes accumulate in
// txn.
type badgerTransaction struct {
	badgerSnapshot
	txn  *badger.Txn // write transaction collecting Put/Delete calls
	bdb  *badger.DB  // used to start fresh write transactions on flush/retry
	rel  *releaser   // second close wait group slot, for the write side
	size int         // approximate bytes written since the last flush
}
|
||||
|
||||
func (t *badgerTransaction) Delete(key []byte) error {
|
||||
t.size += len(key)
|
||||
kc := make([]byte, len(key))
|
||||
copy(kc, key)
|
||||
return t.transactionRetried(func(txn *badger.Txn) error {
|
||||
return txn.Delete(kc)
|
||||
})
|
||||
}
|
||||
|
||||
func (t *badgerTransaction) Put(key, val []byte) error {
|
||||
t.size += len(key) + len(val)
|
||||
kc := make([]byte, len(key))
|
||||
copy(kc, key)
|
||||
vc := make([]byte, len(val))
|
||||
copy(vc, val)
|
||||
return t.transactionRetried(func(txn *badger.Txn) error {
|
||||
return txn.Set(kc, vc)
|
||||
})
|
||||
}
|
||||
|
||||
// transactionRetried performs the given operation in the current
// transaction, with commit and retry if Badger says the transaction has
// grown too large. On retry, the size counter is reset and a fresh
// write transaction is started; the operation is attempted exactly once
// more.
func (t *badgerTransaction) transactionRetried(fn func(*badger.Txn) error) error {
	if err := fn(t.txn); err == badger.ErrTxnTooBig {
		if err := t.txn.Commit(); err != nil {
			return wrapBadgerErr(err)
		}
		t.size = 0
		t.txn = t.bdb.NewTransaction(true)
		return wrapBadgerErr(fn(t.txn))
	} else if err != nil {
		return wrapBadgerErr(err)
	}
	return nil
}
|
||||
|
||||
func (t *badgerTransaction) Commit() error {
|
||||
defer t.rel.Release()
|
||||
defer t.badgerSnapshot.Release()
|
||||
return wrapBadgerErr(t.txn.Commit())
|
||||
}
|
||||
|
||||
// Checkpoint commits the accumulated writes if the transaction has
// reached the minimum flush size, after first running any preFlush
// hooks. Transactions below the threshold are left open. On a
// successful commit the size counter is reset and a fresh write
// transaction is started.
func (t *badgerTransaction) Checkpoint(preFlush ...func() error) error {
	if t.size < checkpointFlushMinSize {
		return nil
	}
	for _, hook := range preFlush {
		if err := hook(); err != nil {
			return err
		}
	}
	err := t.txn.Commit()
	if err == nil {
		t.size = 0
		t.txn = t.bdb.NewTransaction(true)
	}
	return wrapBadgerErr(err)
}
|
||||
|
||||
func (t *badgerTransaction) Release() {
|
||||
defer t.rel.Release()
|
||||
defer t.badgerSnapshot.Release()
|
||||
t.txn.Discard()
|
||||
}
|
||||
|
||||
// badgerIterator implements Iterator on top of a Badger iterator,
// restricted to a key prefix and, optionally, a [first, last] range.
type badgerIterator struct {
	it        *badger.Iterator
	prefix    []byte // only keys with this prefix are yielded
	first     []byte // range start; nil for plain prefix iteration
	last      []byte // inclusive range end; nil for plain prefix iteration
	releaseFn func() // optional cleanup hook run by Release
	didSeek   bool   // whether the initial Seek has happened
	err       error  // sticky error, set when a value copy fails
}
|
||||
|
||||
// Next advances the iterator to the next key within the configured
// prefix (and, for range iterators, not past last). The first call
// seeks to the start position instead of advancing. It returns false
// when the iteration is exhausted, out of range, or the iterator is in
// an error state.
func (i *badgerIterator) Next() bool {
	if i.err != nil {
		return false
	}
	// NOTE(review): every path through this loop body returns, so the
	// `for` never actually repeats as written.
	for {
		if !i.didSeek {
			if i.first != nil {
				// Range iterator
				i.it.Seek(i.first)
			} else {
				// Prefix iterator
				i.it.Seek(i.prefix)
			}
			i.didSeek = true
		} else {
			i.it.Next()
		}

		if !i.it.ValidForPrefix(i.prefix) {
			// Done
			return false
		}
		if i.first == nil && i.last == nil {
			// No range checks required
			return true
		}

		key := i.it.Item().Key()
		if bytes.Compare(key, i.last) > 0 {
			// Key is after range last
			return false
		}
		return true
	}
}
|
||||
|
||||
func (i *badgerIterator) Key() []byte {
|
||||
if i.err != nil {
|
||||
return nil
|
||||
}
|
||||
return i.it.Item().Key()
|
||||
}
|
||||
|
||||
func (i *badgerIterator) Value() []byte {
|
||||
if i.err != nil {
|
||||
return nil
|
||||
}
|
||||
val, err := i.it.Item().ValueCopy(nil)
|
||||
if err != nil {
|
||||
i.err = err
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func (i *badgerIterator) Error() error {
|
||||
return wrapBadgerErr(i.err)
|
||||
}
|
||||
|
||||
func (i *badgerIterator) Release() {
|
||||
i.it.Close()
|
||||
if i.releaseFn != nil {
|
||||
i.releaseFn()
|
||||
}
|
||||
}
|
||||
|
||||
// wrapBadgerErr wraps errors so that the backend package can recognize them
|
||||
func wrapBadgerErr(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if err == badger.ErrDiscardedTxn {
|
||||
return errClosed{}
|
||||
}
|
||||
if err == badger.ErrKeyNotFound {
|
||||
return errNotFound{}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func badgerPrefixIterator(txn *badger.Txn, prefix []byte) *badgerIterator {
|
||||
it := iteratorForPrefix(txn, prefix)
|
||||
return &badgerIterator{it: it, prefix: prefix}
|
||||
}
|
||||
|
||||
func badgerRangeIterator(txn *badger.Txn, first, last []byte) *badgerIterator {
|
||||
prefix := commonPrefix(first, last)
|
||||
it := iteratorForPrefix(txn, prefix)
|
||||
return &badgerIterator{it: it, prefix: prefix, first: first, last: last}
|
||||
}
|
||||
|
||||
func iteratorForPrefix(txn *badger.Txn, prefix []byte) *badger.Iterator {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.Prefix = prefix
|
||||
return txn.NewIterator(opts)
|
||||
}
|
||||
|
||||
// commonPrefix returns the longest byte sequence that both a and b
// begin with. The result is a fresh slice that does not alias either
// input.
func commonPrefix(a, b []byte) []byte {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	i := 0
	for i < n && a[i] == b[i] {
		i++
	}
	out := make([]byte, i)
	copy(out, a[:i])
	return out
}
|
||||
38
lib/db/backend/badger_test.go
Normal file
38
lib/db/backend/badger_test.go
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package backend
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestCommonPrefix(t *testing.T) {
|
||||
cases := []struct {
|
||||
a string
|
||||
b string
|
||||
common string
|
||||
}{
|
||||
{"", "", ""},
|
||||
{"a", "b", ""},
|
||||
{"aa", "ab", "a"},
|
||||
{"aa", "a", "a"},
|
||||
{"a", "aa", "a"},
|
||||
{"aabab", "ab", "a"},
|
||||
{"ab", "aabab", "a"},
|
||||
{"abac", "ababab", "aba"},
|
||||
{"ababab", "abac", "aba"},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
pref := string(commonPrefix([]byte(tc.a), []byte(tc.b)))
|
||||
if pref != tc.common {
|
||||
t.Errorf("commonPrefix(%q, %q) => %q, expected %q", tc.a, tc.b, pref, tc.common)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBadgerBackendBehavior runs the shared backend behavior test suite
// against an in-memory Badger instance.
func TestBadgerBackendBehavior(t *testing.T) {
	testBackendBehavior(t, OpenBadgerMemory)
}
|
||||
@@ -13,10 +13,17 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// Never flush transactions smaller than this, even on Checkpoint()
|
||||
dbFlushBatchMin = 1 << MiB
|
||||
// Once a transaction reaches this size, flush it unconditionally.
|
||||
dbFlushBatchMax = 128 << MiB
|
||||
// Never flush transactions smaller than this, even on Checkpoint().
|
||||
// This just needs to be just large enough to avoid flushing
|
||||
// transactions when they are super tiny, thus creating millions of tiny
|
||||
// transactions unnecessarily.
|
||||
dbFlushBatchMin = 64 << KiB
|
||||
// Once a transaction reaches this size, flush it unconditionally. This
|
||||
// should be large enough to avoid forcing a flush between Checkpoint()
|
||||
// calls in loops where we do those, so in principle just large enough
|
||||
// to hold a FileInfo plus corresponding version list and metadata
|
||||
// updates or two.
|
||||
dbFlushBatchMax = 1 << MiB
|
||||
)
|
||||
|
||||
// leveldbBackend implements Backend on top of a leveldb
|
||||
|
||||
@@ -33,7 +33,7 @@ const (
|
||||
MiB = 20
|
||||
)
|
||||
|
||||
// Open attempts to open the database at the given location, and runs
|
||||
// OpenLevelDB attempts to open the database at the given location, and runs
|
||||
// recovery on it if opening fails. Worst case, if recovery is not possible,
|
||||
// the database is erased and created from scratch.
|
||||
func OpenLevelDB(location string, tuning Tuning) (Backend, error) {
|
||||
@@ -45,7 +45,13 @@ func OpenLevelDB(location string, tuning Tuning) (Backend, error) {
|
||||
return newLeveldbBackend(ldb), nil
|
||||
}
|
||||
|
||||
// OpenRO attempts to open the database at the given location, read only.
|
||||
// OpenLevelDBAuto is OpenLevelDB with TuningAuto tuning.
|
||||
func OpenLevelDBAuto(location string) (Backend, error) {
|
||||
return OpenLevelDB(location, TuningAuto)
|
||||
}
|
||||
|
||||
// OpenLevelDBRO attempts to open the database at the given location, read
|
||||
// only.
|
||||
func OpenLevelDBRO(location string) (Backend, error) {
|
||||
opts := &opt.Options{
|
||||
OpenFilesCacheCapacity: dbMaxOpenFiles,
|
||||
|
||||
@@ -187,7 +187,7 @@ func BenchmarkNeedHalf(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
count := 0
|
||||
snap := benchS.Snapshot()
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -211,7 +211,7 @@ func BenchmarkNeedHalfRemote(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
count := 0
|
||||
snap := fset.Snapshot()
|
||||
snap.WithNeed(remoteDevice0, func(fi db.FileIntf) bool {
|
||||
snap.WithNeed(remoteDevice0, func(fi protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -232,7 +232,7 @@ func BenchmarkHave(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
count := 0
|
||||
snap := benchS.Snapshot()
|
||||
snap.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
|
||||
snap.WithHave(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -253,7 +253,7 @@ func BenchmarkGlobal(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
count := 0
|
||||
snap := benchS.Snapshot()
|
||||
snap.WithGlobal(func(fi db.FileIntf) bool {
|
||||
snap.WithGlobal(func(fi protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -274,7 +274,7 @@ func BenchmarkNeedHalfTruncated(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
count := 0
|
||||
snap := benchS.Snapshot()
|
||||
snap.WithNeedTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
|
||||
snap.WithNeedTruncated(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -295,7 +295,7 @@ func BenchmarkHaveTruncated(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
count := 0
|
||||
snap := benchS.Snapshot()
|
||||
snap.WithHaveTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
|
||||
snap.WithHaveTruncated(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -316,7 +316,7 @@ func BenchmarkGlobalTruncated(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
count := 0
|
||||
snap := benchS.Snapshot()
|
||||
snap.WithGlobalTruncated(func(fi db.FileIntf) bool {
|
||||
snap.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
|
||||
@@ -183,7 +183,9 @@ func TestUpdate0to3(t *testing.T) {
|
||||
t.Error("File prefixed by '/' was not removed during transition to schema 1")
|
||||
}
|
||||
|
||||
key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, []byte(invalid))
|
||||
var key []byte
|
||||
|
||||
key, err = db.keyer.GenerateGlobalVersionKey(nil, folder, []byte(invalid))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -201,7 +203,7 @@ func TestUpdate0to3(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer trans.Release()
|
||||
_ = trans.withHaveSequence(folder, 0, func(fi FileIntf) bool {
|
||||
_ = trans.withHaveSequence(folder, 0, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfo)
|
||||
l.Infoln(f)
|
||||
if found {
|
||||
@@ -228,12 +230,46 @@ func TestUpdate0to3(t *testing.T) {
|
||||
haveUpdate0to3[remoteDevice1][0].Name: haveUpdate0to3[remoteDevice1][0],
|
||||
haveUpdate0to3[remoteDevice0][2].Name: haveUpdate0to3[remoteDevice0][2],
|
||||
}
|
||||
|
||||
trans, err = db.newReadOnlyTransaction()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer trans.Release()
|
||||
_ = trans.withNeed(folder, protocol.LocalDeviceID[:], false, func(fi FileIntf) bool {
|
||||
|
||||
key, err = trans.keyer.GenerateNeedFileKey(nil, folder, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dbi, err := trans.NewPrefixIterator(key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer dbi.Release()
|
||||
|
||||
for dbi.Next() {
|
||||
name := trans.keyer.NameFromGlobalVersionKey(dbi.Key())
|
||||
key, err = trans.keyer.GenerateGlobalVersionKey(key, folder, name)
|
||||
bs, err := trans.Get(key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var vl VersionListDeprecated
|
||||
if err := vl.Unmarshal(bs); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
key, err = trans.keyer.GenerateDeviceFileKey(key, folder, vl.Versions[0].Device, name)
|
||||
fi, ok, err := trans.getFileTrunc(key, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ok {
|
||||
device := "<invalid>"
|
||||
if dev, err := protocol.DeviceIDFromBytes(vl.Versions[0].Device); err != nil {
|
||||
device = dev.String()
|
||||
}
|
||||
t.Fatal("surprise missing global file", string(name), device)
|
||||
}
|
||||
e, ok := need[fi.FileName()]
|
||||
if !ok {
|
||||
t.Error("Got unexpected needed file:", fi.FileName())
|
||||
@@ -243,8 +279,11 @@ func TestUpdate0to3(t *testing.T) {
|
||||
if !f.IsEquivalentOptional(e, 0, true, true, 0) {
|
||||
t.Errorf("Wrong needed file, got %v, expected %v", f, e)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
if dbi.Error() != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for n := range need {
|
||||
t.Errorf(`Missing needed file "%v"`, n)
|
||||
}
|
||||
@@ -467,7 +506,7 @@ func TestCheckGlobals(t *testing.T) {
|
||||
}
|
||||
|
||||
// Clean up global entry of the now missing file
|
||||
if err := db.checkGlobals([]byte(fs.folder), fs.meta); err != nil {
|
||||
if err := db.checkGlobals([]byte(fs.folder)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -525,7 +564,7 @@ func TestUpdateTo10(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, v := range vl.Versions {
|
||||
for _, v := range vl.RawVersions {
|
||||
if !v.Deleted {
|
||||
t.Error("Unexpected undeleted global version for a")
|
||||
}
|
||||
@@ -535,10 +574,10 @@ func TestUpdateTo10(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !vl.Versions[0].Deleted {
|
||||
if !vl.RawVersions[0].Deleted {
|
||||
t.Error("vl.Versions[0] not deleted for b")
|
||||
}
|
||||
if vl.Versions[1].Deleted {
|
||||
if vl.RawVersions[1].Deleted {
|
||||
t.Error("vl.Versions[1] deleted for b")
|
||||
}
|
||||
// c
|
||||
@@ -546,10 +585,10 @@ func TestUpdateTo10(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if vl.Versions[0].Deleted {
|
||||
if vl.RawVersions[0].Deleted {
|
||||
t.Error("vl.Versions[0] deleted for c")
|
||||
}
|
||||
if !vl.Versions[1].Deleted {
|
||||
if !vl.RawVersions[1].Deleted {
|
||||
t.Error("vl.Versions[1] not deleted for c")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,6 +65,9 @@ const (
|
||||
|
||||
// KeyTypeBlockListMap <int32 folder ID> <block list hash> <file name> = <nothing>
|
||||
KeyTypeBlockListMap = 14
|
||||
|
||||
// KeyTypeVersion <version hash> = Vector
|
||||
KeyTypeVersion = 15
|
||||
)
|
||||
|
||||
type keyer interface {
|
||||
@@ -104,6 +107,9 @@ type keyer interface {
|
||||
|
||||
// Block lists
|
||||
GenerateBlockListKey(key []byte, hash []byte) blockListKey
|
||||
|
||||
// Version vectors
|
||||
GenerateVersionKey(key []byte, hash []byte) versionKey
|
||||
}
|
||||
|
||||
// defaultKeyer implements our key scheme. It needs folder and device
|
||||
@@ -328,7 +334,20 @@ func (k defaultKeyer) GenerateBlockListKey(key []byte, hash []byte) blockListKey
|
||||
return key
|
||||
}
|
||||
|
||||
func (k blockListKey) BlocksHash() []byte {
|
||||
func (k blockListKey) Hash() []byte {
|
||||
return k[keyPrefixLen:]
|
||||
}
|
||||
|
||||
type versionKey []byte
|
||||
|
||||
func (k defaultKeyer) GenerateVersionKey(key []byte, hash []byte) versionKey {
|
||||
key = resize(key, keyPrefixLen+len(hash))
|
||||
key[0] = KeyTypeVersion
|
||||
copy(key[keyPrefixLen:], hash)
|
||||
return key
|
||||
}
|
||||
|
||||
func (k versionKey) Hash() []byte {
|
||||
return k[keyPrefixLen:]
|
||||
}
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/greatroar/blobloom"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/util"
|
||||
"github.com/thejerf/suture"
|
||||
@@ -34,6 +35,8 @@ const (
|
||||
|
||||
// Use indirection for the block list when it exceeds this many entries
|
||||
blocksIndirectionCutoff = 3
|
||||
// Use indirection for the version vector when it exceeds this many entries
|
||||
versionIndirectionCutoff = 10
|
||||
|
||||
recheckDefaultInterval = 30 * 24 * time.Hour
|
||||
)
|
||||
@@ -118,7 +121,10 @@ func (db *Lowlevel) updateRemoteFiles(folder, device []byte, fs []protocol.FileI
|
||||
defer t.close()
|
||||
|
||||
var dk, gk, keyBuf []byte
|
||||
devID := protocol.DeviceIDFromBytes(device)
|
||||
devID, err := protocol.DeviceIDFromBytes(device)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range fs {
|
||||
name := []byte(f.Name)
|
||||
dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, device, name)
|
||||
@@ -379,6 +385,8 @@ func (db *Lowlevel) dropDeviceFolder(device, folder []byte, meta *metadataTracke
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbi.Release()
|
||||
|
||||
var gk, keyBuf []byte
|
||||
for dbi.Next() {
|
||||
name := db.keyer.NameFromDeviceFileKey(dbi.Key())
|
||||
@@ -397,10 +405,10 @@ func (db *Lowlevel) dropDeviceFolder(device, folder []byte, meta *metadataTracke
|
||||
return err
|
||||
}
|
||||
}
|
||||
dbi.Release()
|
||||
if err := dbi.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
dbi.Release()
|
||||
|
||||
if bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
||||
key, err := db.keyer.GenerateBlockMapKey(nil, folder, nil, nil)
|
||||
@@ -421,7 +429,7 @@ func (db *Lowlevel) dropDeviceFolder(device, folder []byte, meta *metadataTracke
|
||||
return t.Commit()
|
||||
}
|
||||
|
||||
func (db *Lowlevel) checkGlobals(folder []byte, meta *metadataTracker) error {
|
||||
func (db *Lowlevel) checkGlobals(folder []byte) error {
|
||||
t, err := db.newReadWriteTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -439,9 +447,10 @@ func (db *Lowlevel) checkGlobals(folder []byte, meta *metadataTracker) error {
|
||||
defer dbi.Release()
|
||||
|
||||
var dk []byte
|
||||
ro := t.readOnlyTransaction
|
||||
for dbi.Next() {
|
||||
var vl VersionList
|
||||
if err := vl.Unmarshal(dbi.Value()); err != nil || len(vl.Versions) == 0 {
|
||||
if err := vl.Unmarshal(dbi.Value()); err != nil || vl.Empty() {
|
||||
if err := t.Delete(dbi.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -454,40 +463,33 @@ func (db *Lowlevel) checkGlobals(folder []byte, meta *metadataTracker) error {
|
||||
// we find those and clear them out.
|
||||
|
||||
name := db.keyer.NameFromGlobalVersionKey(dbi.Key())
|
||||
var newVL VersionList
|
||||
for i, version := range vl.Versions {
|
||||
dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, version.Device, name)
|
||||
newVL := &VersionList{}
|
||||
var changed, changedHere bool
|
||||
for _, fv := range vl.RawVersions {
|
||||
changedHere, err = checkGlobalsFilterDevices(dk, folder, name, fv.Devices, newVL, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := t.Get(dk)
|
||||
if backend.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newVL.Versions = append(newVL.Versions, version)
|
||||
changed = changed || changedHere
|
||||
|
||||
if i == 0 {
|
||||
if fi, ok, err := t.getFileTrunc(dk, true); err != nil {
|
||||
return err
|
||||
} else if ok {
|
||||
meta.addFile(protocol.GlobalDeviceID, fi)
|
||||
}
|
||||
changedHere, err = checkGlobalsFilterDevices(dk, folder, name, fv.InvalidDevices, newVL, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
changed = changed || changedHere
|
||||
}
|
||||
|
||||
if newLen := len(newVL.Versions); newLen == 0 {
|
||||
if newVL.Empty() {
|
||||
if err := t.Delete(dbi.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if newLen != len(vl.Versions) {
|
||||
if err := t.Put(dbi.Key(), mustMarshal(&newVL)); err != nil {
|
||||
} else if changed {
|
||||
if err := t.Put(dbi.Key(), mustMarshal(newVL)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
dbi.Release()
|
||||
if err := dbi.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -496,6 +498,30 @@ func (db *Lowlevel) checkGlobals(folder []byte, meta *metadataTracker) error {
|
||||
return t.Commit()
|
||||
}
|
||||
|
||||
func checkGlobalsFilterDevices(dk, folder, name []byte, devices [][]byte, vl *VersionList, t readOnlyTransaction) (bool, error) {
|
||||
var changed bool
|
||||
var err error
|
||||
for _, device := range devices {
|
||||
dk, err = t.keyer.GenerateDeviceFileKey(dk, folder, device, name)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
f, ok, err := t.getFileTrunc(dk, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !ok {
|
||||
changed = true
|
||||
continue
|
||||
}
|
||||
_, _, _, _, _, _, err = vl.update(folder, device, f, t)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return changed, nil
|
||||
}
|
||||
|
||||
func (db *Lowlevel) getIndexID(device, folder []byte) (protocol.IndexID, error) {
|
||||
key, err := db.keyer.GenerateIndexIDKey(nil, device, folder)
|
||||
if err != nil {
|
||||
@@ -630,11 +656,8 @@ func (db *Lowlevel) gcIndirect(ctx context.Context) error {
|
||||
if db.gcKeyCount > capacity {
|
||||
capacity = db.gcKeyCount
|
||||
}
|
||||
blockFilter := blobloom.NewOptimized(blobloom.Config{
|
||||
Capacity: uint64(capacity),
|
||||
FPRate: indirectGCBloomFalsePositiveRate,
|
||||
MaxBits: 8 * indirectGCBloomMaxBytes,
|
||||
})
|
||||
blockFilter := newBloomFilter(capacity)
|
||||
versionFilter := newBloomFilter(capacity)
|
||||
|
||||
// Iterate the FileInfos, unmarshal the block and version hashes and
|
||||
// add them to the filter.
|
||||
@@ -651,12 +674,15 @@ func (db *Lowlevel) gcIndirect(ctx context.Context) error {
|
||||
default:
|
||||
}
|
||||
|
||||
var bl BlocksHashOnly
|
||||
if err := bl.Unmarshal(it.Value()); err != nil {
|
||||
var hashes IndirectionHashesOnly
|
||||
if err := hashes.Unmarshal(it.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(bl.BlocksHash) > 0 {
|
||||
blockFilter.Add(bloomHash(bl.BlocksHash))
|
||||
if len(hashes.BlocksHash) > 0 {
|
||||
blockFilter.Add(bloomHash(hashes.BlocksHash))
|
||||
}
|
||||
if len(hashes.VersionHash) > 0 {
|
||||
versionFilter.Add(bloomHash(hashes.VersionHash))
|
||||
}
|
||||
}
|
||||
it.Release()
|
||||
@@ -681,7 +707,7 @@ func (db *Lowlevel) gcIndirect(ctx context.Context) error {
|
||||
}
|
||||
|
||||
key := blockListKey(it.Key())
|
||||
if blockFilter.Has(bloomHash(key.BlocksHash())) {
|
||||
if blockFilter.Has(bloomHash(key.Hash())) {
|
||||
matchedBlocks++
|
||||
continue
|
||||
}
|
||||
@@ -694,8 +720,40 @@ func (db *Lowlevel) gcIndirect(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterate over version lists, removing keys with hashes that don't match
|
||||
// the filter.
|
||||
|
||||
it, err = db.NewPrefixIterator([]byte{KeyTypeVersion})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
matchedVersions := 0
|
||||
for it.Next() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
key := versionKey(it.Key())
|
||||
if versionFilter.Has(bloomHash(key.Hash())) {
|
||||
matchedVersions++
|
||||
continue
|
||||
}
|
||||
if err := t.Delete(key); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
it.Release()
|
||||
if err := it.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remember the number of unique keys we kept until the next pass.
|
||||
db.gcKeyCount = matchedBlocks
|
||||
if matchedVersions > matchedBlocks {
|
||||
db.gcKeyCount = matchedVersions
|
||||
}
|
||||
|
||||
if err := t.Commit(); err != nil {
|
||||
return err
|
||||
@@ -704,9 +762,20 @@ func (db *Lowlevel) gcIndirect(ctx context.Context) error {
|
||||
return db.Compact()
|
||||
}
|
||||
|
||||
func newBloomFilter(capacity int) *blobloom.Filter {
|
||||
return blobloom.NewOptimized(blobloom.Config{
|
||||
Capacity: uint64(capacity),
|
||||
FPRate: indirectGCBloomFalsePositiveRate,
|
||||
MaxBits: 8 * indirectGCBloomMaxBytes,
|
||||
})
|
||||
}
|
||||
|
||||
// Hash function for the bloomfilter: first eight bytes of the SHA-256.
|
||||
// Big or little-endian makes no difference, as long as we're consistent.
|
||||
func bloomHash(key []byte) uint64 {
|
||||
if len(key) != sha256.Size {
|
||||
panic("bug: bloomHash passed something not a SHA256 hash")
|
||||
}
|
||||
return binary.BigEndian.Uint64(key)
|
||||
}
|
||||
|
||||
@@ -762,7 +831,7 @@ func (db *Lowlevel) loadMetadataTracker(folder string) *metadataTracker {
|
||||
|
||||
func (db *Lowlevel) recalcMeta(folder string) (*metadataTracker, error) {
|
||||
meta := newMetadataTracker()
|
||||
if err := db.checkGlobals([]byte(folder), meta); err != nil {
|
||||
if err := db.checkGlobals([]byte(folder)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -782,8 +851,13 @@ func (db *Lowlevel) recalcMeta(folder string) (*metadataTracker, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = t.withGlobal([]byte(folder), nil, true, func(f protocol.FileIntf) bool {
|
||||
meta.addFile(protocol.GlobalDeviceID, f)
|
||||
return true
|
||||
})
|
||||
|
||||
meta.emptyNeeded(protocol.LocalDeviceID)
|
||||
err = t.withNeed([]byte(folder), protocol.LocalDeviceID[:], true, func(f FileIntf) bool {
|
||||
err = t.withNeed([]byte(folder), protocol.LocalDeviceID[:], true, func(f protocol.FileIntf) bool {
|
||||
meta.addNeeded(protocol.LocalDeviceID, f)
|
||||
return true
|
||||
})
|
||||
@@ -792,7 +866,7 @@ func (db *Lowlevel) recalcMeta(folder string) (*metadataTracker, error) {
|
||||
}
|
||||
for _, device := range meta.devices() {
|
||||
meta.emptyNeeded(device)
|
||||
err = t.withNeed([]byte(folder), device[:], true, func(f FileIntf) bool {
|
||||
err = t.withNeed([]byte(folder), device[:], true, func(f protocol.FileIntf) bool {
|
||||
meta.addNeeded(device, f)
|
||||
return true
|
||||
})
|
||||
@@ -829,7 +903,7 @@ func (db *Lowlevel) verifyLocalSequence(curSeq int64, folder string) bool {
|
||||
panic(err)
|
||||
}
|
||||
ok := true
|
||||
if err := t.withHaveSequence([]byte(folder), curSeq+1, func(fi FileIntf) bool {
|
||||
if err := t.withHaveSequence([]byte(folder), curSeq+1, func(fi protocol.FileIntf) bool {
|
||||
ok = false // we got something, which we should not have
|
||||
return false
|
||||
}); err != nil && !backend.IsClosed(err) {
|
||||
@@ -955,6 +1029,6 @@ func (db *Lowlevel) repairSequenceGCLocked(folderStr string, meta *metadataTrack
|
||||
// unchanged checks if two files are the same and thus don't need to be updated.
|
||||
// Local flags or the invalid bit might change without the version
|
||||
// being bumped.
|
||||
func unchanged(nf, ef FileIntf) bool {
|
||||
func unchanged(nf, ef protocol.FileIntf) bool {
|
||||
return ef.FileVersion().Equal(nf.FileVersion()) && ef.IsInvalid() == nf.IsInvalid() && ef.FileLocalFlags() == nf.FileLocalFlags()
|
||||
}
|
||||
|
||||
@@ -52,7 +52,11 @@ func (m *metadataTracker) Unmarshal(bs []byte) error {
|
||||
|
||||
// Initialize the index map
|
||||
for i, c := range m.counts.Counts {
|
||||
m.indexes[metaKey{protocol.DeviceIDFromBytes(c.DeviceID), c.LocalFlags}] = i
|
||||
dev, err := protocol.DeviceIDFromBytes(c.DeviceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.indexes[metaKey{dev, c.LocalFlags}] = i
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -147,7 +151,7 @@ func (m *countsMap) allNeededCounts(dev protocol.DeviceID) Counts {
|
||||
|
||||
// addFile adds a file to the counts, adjusting the sequence number as
|
||||
// appropriate
|
||||
func (m *metadataTracker) addFile(dev protocol.DeviceID, f FileIntf) {
|
||||
func (m *metadataTracker) addFile(dev protocol.DeviceID, f protocol.FileIntf) {
|
||||
m.mut.Lock()
|
||||
defer m.mut.Unlock()
|
||||
|
||||
@@ -186,7 +190,7 @@ func (m *metadataTracker) emptyNeeded(dev protocol.DeviceID) {
|
||||
}
|
||||
|
||||
// addNeeded adds a file to the needed counts
|
||||
func (m *metadataTracker) addNeeded(dev protocol.DeviceID, f FileIntf) {
|
||||
func (m *metadataTracker) addNeeded(dev protocol.DeviceID, f protocol.FileIntf) {
|
||||
m.mut.Lock()
|
||||
defer m.mut.Unlock()
|
||||
|
||||
@@ -201,7 +205,7 @@ func (m *metadataTracker) Sequence(dev protocol.DeviceID) int64 {
|
||||
return m.countsPtr(dev, 0).Sequence
|
||||
}
|
||||
|
||||
func (m *metadataTracker) updateSeqLocked(dev protocol.DeviceID, f FileIntf) {
|
||||
func (m *metadataTracker) updateSeqLocked(dev protocol.DeviceID, f protocol.FileIntf) {
|
||||
if dev == protocol.GlobalDeviceID {
|
||||
return
|
||||
}
|
||||
@@ -210,7 +214,7 @@ func (m *metadataTracker) updateSeqLocked(dev protocol.DeviceID, f FileIntf) {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metadataTracker) addFileLocked(dev protocol.DeviceID, flag uint32, f FileIntf) {
|
||||
func (m *metadataTracker) addFileLocked(dev protocol.DeviceID, flag uint32, f protocol.FileIntf) {
|
||||
cp := m.countsPtr(dev, flag)
|
||||
|
||||
switch {
|
||||
@@ -227,7 +231,7 @@ func (m *metadataTracker) addFileLocked(dev protocol.DeviceID, flag uint32, f Fi
|
||||
}
|
||||
|
||||
// removeFile removes a file from the counts
|
||||
func (m *metadataTracker) removeFile(dev protocol.DeviceID, f FileIntf) {
|
||||
func (m *metadataTracker) removeFile(dev protocol.DeviceID, f protocol.FileIntf) {
|
||||
if f.IsInvalid() && f.FileLocalFlags() == 0 {
|
||||
// This is a remote invalid file; it does not count.
|
||||
return
|
||||
@@ -250,7 +254,7 @@ func (m *metadataTracker) removeFile(dev protocol.DeviceID, f FileIntf) {
|
||||
}
|
||||
|
||||
// removeNeeded removes a file from the needed counts
|
||||
func (m *metadataTracker) removeNeeded(dev protocol.DeviceID, f FileIntf) {
|
||||
func (m *metadataTracker) removeNeeded(dev protocol.DeviceID, f protocol.FileIntf) {
|
||||
m.mut.Lock()
|
||||
defer m.mut.Unlock()
|
||||
|
||||
@@ -259,7 +263,7 @@ func (m *metadataTracker) removeNeeded(dev protocol.DeviceID, f FileIntf) {
|
||||
m.removeFileLocked(dev, needFlag, f)
|
||||
}
|
||||
|
||||
func (m *metadataTracker) removeFileLocked(dev protocol.DeviceID, flag uint32, f FileIntf) {
|
||||
func (m *metadataTracker) removeFileLocked(dev protocol.DeviceID, flag uint32, f protocol.FileIntf) {
|
||||
cp := m.countsPtr(dev, flag)
|
||||
|
||||
switch {
|
||||
@@ -392,7 +396,10 @@ func (m *countsMap) devices() []protocol.DeviceID {
|
||||
|
||||
for _, dev := range m.counts.Counts {
|
||||
if dev.Sequence > 0 {
|
||||
id := protocol.DeviceIDFromBytes(dev.DeviceID)
|
||||
id, err := protocol.DeviceIDFromBytes(dev.DeviceID)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if id == protocol.GlobalDeviceID || id == protocol.LocalDeviceID {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -7,7 +7,10 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
@@ -23,11 +26,14 @@ import (
|
||||
// 7: v0.14.53
|
||||
// 8-9: v1.4.0
|
||||
// 10-11: v1.6.0
|
||||
// 12-13: v1.7.0
|
||||
const (
|
||||
dbVersion = 11
|
||||
dbMinSyncthingVersion = "v1.6.0"
|
||||
dbVersion = 13
|
||||
dbMinSyncthingVersion = "v1.7.0"
|
||||
)
|
||||
|
||||
var errFolderMissing = errors.New("folder present in global list but missing in keyer index")
|
||||
|
||||
type databaseDowngradeError struct {
|
||||
minSyncthingVersion string
|
||||
}
|
||||
@@ -88,13 +94,14 @@ func (db *schemaUpdater) updateSchema() error {
|
||||
{9, db.updateSchemaTo9},
|
||||
{10, db.updateSchemaTo10},
|
||||
{11, db.updateSchemaTo11},
|
||||
{13, db.updateSchemaTo13},
|
||||
}
|
||||
|
||||
for _, m := range migrations {
|
||||
if prevVersion < m.schemaVersion {
|
||||
l.Infof("Migrating database to schema version %d...", m.schemaVersion)
|
||||
if err := m.migration(int(prevVersion)); err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed migrating to version %v: %w", m.schemaVersion, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -126,8 +133,8 @@ func (db *schemaUpdater) updateSchema0to1(_ int) error {
|
||||
symlinkConv := 0
|
||||
changedFolders := make(map[string]struct{})
|
||||
ignAdded := 0
|
||||
meta := newMetadataTracker() // dummy metadata tracker
|
||||
var gk, buf []byte
|
||||
var gk []byte
|
||||
ro := t.readOnlyTransaction
|
||||
|
||||
for dbi.Next() {
|
||||
folder, ok := db.keyer.FolderFromDeviceFileKey(dbi.Key())
|
||||
@@ -153,17 +160,27 @@ func (db *schemaUpdater) updateSchema0to1(_ int) error {
|
||||
if _, ok := changedFolders[string(folder)]; !ok {
|
||||
changedFolders[string(folder)] = struct{}{}
|
||||
}
|
||||
if err := t.Delete(dbi.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Purposely pass nil file name to remove from global list,
|
||||
// but don't touch meta and needs
|
||||
buf, err = t.removeFromGlobal(gk, buf, folder, device, nil, nil)
|
||||
if err != nil && err != errEntryFromGlobalMissing {
|
||||
fl, err := getGlobalVersionsByKeyBefore11(gk, ro)
|
||||
if backend.IsNotFound(err) {
|
||||
// Shouldn't happen, but not critical.
|
||||
continue
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := t.Delete(dbi.Key()); err != nil {
|
||||
_, _ = fl.pop(device)
|
||||
if len(fl.Versions) == 0 {
|
||||
err = t.Delete(gk)
|
||||
} else {
|
||||
err = t.Put(gk, mustMarshal(&fl))
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
@@ -197,25 +214,52 @@ func (db *schemaUpdater) updateSchema0to1(_ int) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if buf, ok, err = t.updateGlobal(gk, buf, folder, device, f, meta); err != nil {
|
||||
|
||||
fl, err := getGlobalVersionsByKeyBefore11(gk, ro)
|
||||
if err != nil && !backend.IsNotFound(err) {
|
||||
return err
|
||||
} else if ok {
|
||||
if _, ok = changedFolders[string(folder)]; !ok {
|
||||
changedFolders[string(folder)] = struct{}{}
|
||||
}
|
||||
i := 0
|
||||
i = sort.Search(len(fl.Versions), func(j int) bool {
|
||||
return fl.Versions[j].Invalid
|
||||
})
|
||||
for ; i < len(fl.Versions); i++ {
|
||||
ordering := fl.Versions[i].Version.Compare(f.Version)
|
||||
shouldInsert := ordering == protocol.Equal
|
||||
if !shouldInsert {
|
||||
shouldInsert, err = shouldInsertBefore(ordering, folder, fl.Versions[i].Device, true, f, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ignAdded++
|
||||
if shouldInsert {
|
||||
nv := FileVersionDeprecated{
|
||||
Device: device,
|
||||
Version: f.Version,
|
||||
Invalid: true,
|
||||
}
|
||||
fl.insertAt(i, nv)
|
||||
if err := t.Put(gk, mustMarshal(&fl)); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := changedFolders[string(folder)]; !ok {
|
||||
changedFolders[string(folder)] = struct{}{}
|
||||
}
|
||||
ignAdded++
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
if err := t.Checkpoint(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for folder := range changedFolders {
|
||||
if err := db.dropFolderMeta([]byte(folder)); err != nil {
|
||||
return err
|
||||
}
|
||||
dbi.Release()
|
||||
if err != dbi.Error() {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.Commit()
|
||||
}
|
||||
|
||||
@@ -233,7 +277,7 @@ func (db *schemaUpdater) updateSchema1to2(_ int) error {
|
||||
for _, folderStr := range db.ListFolders() {
|
||||
folder := []byte(folderStr)
|
||||
var putErr error
|
||||
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool {
|
||||
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f protocol.FileIntf) bool {
|
||||
sk, putErr = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo())
|
||||
if putErr != nil {
|
||||
return false
|
||||
@@ -268,7 +312,7 @@ func (db *schemaUpdater) updateSchema2to3(_ int) error {
|
||||
for _, folderStr := range db.ListFolders() {
|
||||
folder := []byte(folderStr)
|
||||
var putErr error
|
||||
err := t.withGlobal(folder, nil, true, func(f FileIntf) bool {
|
||||
err := withGlobalBefore11(folder, true, func(f protocol.FileIntf) bool {
|
||||
name := []byte(f.FileName())
|
||||
dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name)
|
||||
if putErr != nil {
|
||||
@@ -283,12 +327,12 @@ func (db *schemaUpdater) updateSchema2to3(_ int) error {
|
||||
if ok {
|
||||
v = haveFile.FileVersion()
|
||||
}
|
||||
fv := FileVersion{
|
||||
fv := FileVersionDeprecated{
|
||||
Version: f.FileVersion(),
|
||||
Invalid: f.IsInvalid(),
|
||||
Deleted: f.IsDeleted(),
|
||||
}
|
||||
if !need(fv, ok, v) {
|
||||
if !needDeprecated(fv, ok, v) {
|
||||
return true
|
||||
}
|
||||
nk, putErr = t.keyer.GenerateNeedFileKey(nk, folder, []byte(f.FileName()))
|
||||
@@ -297,7 +341,7 @@ func (db *schemaUpdater) updateSchema2to3(_ int) error {
|
||||
}
|
||||
putErr = t.Put(nk, nil)
|
||||
return putErr == nil
|
||||
})
|
||||
}, t.readOnlyTransaction)
|
||||
if putErr != nil {
|
||||
return putErr
|
||||
}
|
||||
@@ -353,7 +397,7 @@ func (db *schemaUpdater) updateSchema5to6(_ int) error {
|
||||
for _, folderStr := range db.ListFolders() {
|
||||
folder := []byte(folderStr)
|
||||
var iterErr error
|
||||
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f FileIntf) bool {
|
||||
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f protocol.FileIntf) bool {
|
||||
if !f.IsInvalid() {
|
||||
return true
|
||||
}
|
||||
@@ -398,7 +442,7 @@ func (db *schemaUpdater) updateSchema6to7(_ int) error {
|
||||
for _, folderStr := range db.ListFolders() {
|
||||
folder := []byte(folderStr)
|
||||
var delErr error
|
||||
err := t.withNeedLocal(folder, false, func(f FileIntf) bool {
|
||||
err := withNeedLocalBefore11(folder, false, func(f protocol.FileIntf) bool {
|
||||
name := []byte(f.FileName())
|
||||
gk, delErr = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
|
||||
if delErr != nil {
|
||||
@@ -415,20 +459,20 @@ func (db *schemaUpdater) updateSchema6to7(_ int) error {
|
||||
delErr = t.Delete(key)
|
||||
return delErr == nil
|
||||
}
|
||||
var fl VersionList
|
||||
var fl VersionListDeprecated
|
||||
err = fl.Unmarshal(svl)
|
||||
if err != nil {
|
||||
// This can't happen, but it's ignored everywhere else too,
|
||||
// so lets not act on it.
|
||||
return true
|
||||
}
|
||||
globalFV := FileVersion{
|
||||
globalFV := FileVersionDeprecated{
|
||||
Version: f.FileVersion(),
|
||||
Invalid: f.IsInvalid(),
|
||||
Deleted: f.IsDeleted(),
|
||||
}
|
||||
|
||||
if localFV, haveLocalFV := fl.Get(protocol.LocalDeviceID[:]); !need(globalFV, haveLocalFV, localFV.Version) {
|
||||
if localFV, haveLocalFV := fl.Get(protocol.LocalDeviceID[:]); !needDeprecated(globalFV, haveLocalFV, localFV.Version) {
|
||||
key, err := t.keyer.GenerateNeedFileKey(nk, folder, name)
|
||||
if err != nil {
|
||||
delErr = err
|
||||
@@ -437,7 +481,7 @@ func (db *schemaUpdater) updateSchema6to7(_ int) error {
|
||||
delErr = t.Delete(key)
|
||||
}
|
||||
return delErr == nil
|
||||
})
|
||||
}, t.readOnlyTransaction)
|
||||
if delErr != nil {
|
||||
return delErr
|
||||
}
|
||||
@@ -453,7 +497,6 @@ func (db *schemaUpdater) updateSchema6to7(_ int) error {
|
||||
|
||||
func (db *schemaUpdater) updateSchemaTo9(prev int) error {
|
||||
// Loads and rewrites all files with blocks, to deduplicate block lists.
|
||||
// Checks for missing or incorrect sequence entries and rewrites those.
|
||||
|
||||
t, err := db.newReadWriteTransaction()
|
||||
if err != nil {
|
||||
@@ -461,10 +504,21 @@ func (db *schemaUpdater) updateSchemaTo9(prev int) error {
|
||||
}
|
||||
defer t.close()
|
||||
|
||||
if err := db.rewriteFiles(t); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
db.recordTime(indirectGCTimeKey)
|
||||
|
||||
return t.Commit()
|
||||
}
|
||||
|
||||
func (db *schemaUpdater) rewriteFiles(t readWriteTransaction) error {
|
||||
it, err := t.NewPrefixIterator([]byte{KeyTypeDevice})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer it.Release()
|
||||
for it.Next() {
|
||||
intf, err := t.unmarshalTrunc(it.Value(), false)
|
||||
if backend.IsNotFound(err) {
|
||||
@@ -491,16 +545,12 @@ func (db *schemaUpdater) updateSchemaTo9(prev int) error {
|
||||
}
|
||||
}
|
||||
it.Release()
|
||||
if err := it.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
db.recordTime(indirectGCTimeKey)
|
||||
|
||||
return t.Commit()
|
||||
return it.Error()
|
||||
}
|
||||
|
||||
func (db *schemaUpdater) updateSchemaTo10(_ int) error {
|
||||
// Rewrites global lists to include a Deleted flag.
|
||||
|
||||
t, err := db.newReadWriteTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -524,7 +574,7 @@ func (db *schemaUpdater) updateSchemaTo10(_ int) error {
|
||||
defer dbi.Release()
|
||||
|
||||
for dbi.Next() {
|
||||
var vl VersionList
|
||||
var vl VersionListDeprecated
|
||||
if err := vl.Unmarshal(dbi.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -583,7 +633,7 @@ func (db *schemaUpdater) updateSchemaTo11(_ int) error {
|
||||
for _, folderStr := range db.ListFolders() {
|
||||
folder := []byte(folderStr)
|
||||
var putErr error
|
||||
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(fi FileIntf) bool {
|
||||
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(FileInfoTruncated)
|
||||
if f.IsDirectory() || f.IsDeleted() || f.IsSymlink() || f.IsInvalid() || f.BlocksHash == nil {
|
||||
return true
|
||||
@@ -610,3 +660,291 @@ func (db *schemaUpdater) updateSchemaTo11(_ int) error {
|
||||
}
|
||||
return t.Commit()
|
||||
}
|
||||
|
||||
func (db *schemaUpdater) updateSchemaTo13(prev int) error {
|
||||
// Loads and rewrites all files, to deduplicate version vectors.
|
||||
|
||||
t, err := db.newReadWriteTransaction()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer t.close()
|
||||
|
||||
if prev < 12 {
|
||||
if err := db.rewriteFiles(t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := db.rewriteGlobals(t); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.Commit()
|
||||
}
|
||||
|
||||
func (db *schemaUpdater) rewriteGlobals(t readWriteTransaction) error {
|
||||
it, err := t.NewPrefixIterator([]byte{KeyTypeGlobal})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer it.Release()
|
||||
for it.Next() {
|
||||
var vl VersionListDeprecated
|
||||
if err := vl.Unmarshal(it.Value()); err != nil {
|
||||
// If we crashed during an earlier migration, some version
|
||||
// lists might already be in the new format: Skip those.
|
||||
var nvl VersionList
|
||||
if nerr := nvl.Unmarshal(it.Value()); nerr == nil {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(vl.Versions) == 0 {
|
||||
if err := t.Delete(it.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newVl, err := convertVersionList(vl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := t.Put(it.Key(), mustMarshal(&newVl)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := t.Checkpoint(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
it.Release()
|
||||
return it.Error()
|
||||
}
|
||||
|
||||
func convertVersionList(vl VersionListDeprecated) (VersionList, error) {
|
||||
var newVl VersionList
|
||||
var newPos, oldPos int
|
||||
var lastVersion protocol.Vector
|
||||
|
||||
for _, fv := range vl.Versions {
|
||||
if fv.Invalid {
|
||||
break
|
||||
}
|
||||
oldPos++
|
||||
if len(newVl.RawVersions) > 0 && lastVersion.Equal(fv.Version) {
|
||||
newVl.RawVersions[newPos].Devices = append(newVl.RawVersions[newPos].Devices, fv.Device)
|
||||
continue
|
||||
}
|
||||
newPos = len(newVl.RawVersions)
|
||||
newVl.RawVersions = append(newVl.RawVersions, newFileVersion(fv.Device, fv.Version, false, fv.Deleted))
|
||||
lastVersion = fv.Version
|
||||
}
|
||||
|
||||
if oldPos == len(vl.Versions) {
|
||||
return newVl, nil
|
||||
}
|
||||
|
||||
if len(newVl.RawVersions) == 0 {
|
||||
fv := vl.Versions[oldPos]
|
||||
newVl.RawVersions = []FileVersion{newFileVersion(fv.Device, fv.Version, true, fv.Deleted)}
|
||||
oldPos++
|
||||
}
|
||||
newPos = 0
|
||||
outer:
|
||||
for _, fv := range vl.Versions[oldPos:] {
|
||||
for _, nfv := range newVl.RawVersions[newPos:] {
|
||||
switch nfv.Version.Compare(fv.Version) {
|
||||
case protocol.Equal:
|
||||
newVl.RawVersions[newPos].InvalidDevices = append(newVl.RawVersions[newPos].InvalidDevices, fv.Device)
|
||||
lastVersion = fv.Version
|
||||
continue outer
|
||||
case protocol.Lesser:
|
||||
newVl.insertAt(newPos, newFileVersion(fv.Device, fv.Version, true, fv.Deleted))
|
||||
lastVersion = fv.Version
|
||||
continue outer
|
||||
case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
|
||||
// The version is invalid, i.e. it looses anyway,
|
||||
// no need to check/get the conflicting file.
|
||||
}
|
||||
newPos++
|
||||
}
|
||||
// Couldn't insert into any existing versions
|
||||
newVl.RawVersions = append(newVl.RawVersions, newFileVersion(fv.Device, fv.Version, true, fv.Deleted))
|
||||
lastVersion = fv.Version
|
||||
newPos++
|
||||
}
|
||||
|
||||
return newVl, nil
|
||||
}
|
||||
|
||||
func getGlobalVersionsByKeyBefore11(key []byte, t readOnlyTransaction) (VersionListDeprecated, error) {
|
||||
bs, err := t.Get(key)
|
||||
if err != nil {
|
||||
return VersionListDeprecated{}, err
|
||||
}
|
||||
|
||||
var vl VersionListDeprecated
|
||||
if err := vl.Unmarshal(bs); err != nil {
|
||||
return VersionListDeprecated{}, err
|
||||
}
|
||||
|
||||
return vl, nil
|
||||
}
|
||||
|
||||
func withGlobalBefore11(folder []byte, truncate bool, fn Iterator, t readOnlyTransaction) error {
|
||||
key, err := t.keyer.GenerateGlobalVersionKey(nil, folder, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dbi, err := t.NewPrefixIterator(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbi.Release()
|
||||
|
||||
var dk []byte
|
||||
for dbi.Next() {
|
||||
name := t.keyer.NameFromGlobalVersionKey(dbi.Key())
|
||||
|
||||
var vl VersionListDeprecated
|
||||
if err := vl.Unmarshal(dbi.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dk, err = t.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, ok, err := t.getFileTrunc(dk, truncate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if !fn(f) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dbi.Error()
|
||||
}
|
||||
|
||||
func withNeedLocalBefore11(folder []byte, truncate bool, fn Iterator, t readOnlyTransaction) error {
|
||||
key, err := t.keyer.GenerateNeedFileKey(nil, folder, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dbi, err := t.NewPrefixIterator(key.WithoutName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dbi.Release()
|
||||
|
||||
var keyBuf []byte
|
||||
var f protocol.FileIntf
|
||||
var ok bool
|
||||
for dbi.Next() {
|
||||
keyBuf, f, ok, err = getGlobalBefore11(keyBuf, folder, t.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if !fn(f) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return dbi.Error()
|
||||
}
|
||||
|
||||
func getGlobalBefore11(keyBuf, folder, file []byte, truncate bool, t readOnlyTransaction) ([]byte, protocol.FileIntf, bool, error) {
|
||||
keyBuf, err := t.keyer.GenerateGlobalVersionKey(keyBuf, folder, file)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
bs, err := t.Get(keyBuf)
|
||||
if backend.IsNotFound(err) {
|
||||
return keyBuf, nil, false, nil
|
||||
} else if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
var vl VersionListDeprecated
|
||||
if err := vl.Unmarshal(bs); err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
if len(vl.Versions) == 0 {
|
||||
return nil, nil, false, nil
|
||||
}
|
||||
keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, vl.Versions[0].Device, file)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
fi, ok, err := t.getFileTrunc(keyBuf, truncate)
|
||||
if err != nil || !ok {
|
||||
return keyBuf, nil, false, err
|
||||
}
|
||||
return keyBuf, fi, true, nil
|
||||
}
|
||||
|
||||
func (vl *VersionListDeprecated) String() string {
|
||||
var b bytes.Buffer
|
||||
var id protocol.DeviceID
|
||||
b.WriteString("{")
|
||||
for i, v := range vl.Versions {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
copy(id[:], v.Device)
|
||||
fmt.Fprintf(&b, "{%v, %v}", v.Version, id)
|
||||
}
|
||||
b.WriteString("}")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (vl *VersionListDeprecated) pop(device []byte) (FileVersionDeprecated, int) {
|
||||
for i, v := range vl.Versions {
|
||||
if bytes.Equal(v.Device, device) {
|
||||
vl.Versions = append(vl.Versions[:i], vl.Versions[i+1:]...)
|
||||
return v, i
|
||||
}
|
||||
}
|
||||
return FileVersionDeprecated{}, -1
|
||||
}
|
||||
|
||||
func (vl *VersionListDeprecated) Get(device []byte) (FileVersionDeprecated, bool) {
|
||||
for _, v := range vl.Versions {
|
||||
if bytes.Equal(v.Device, device) {
|
||||
return v, true
|
||||
}
|
||||
}
|
||||
|
||||
return FileVersionDeprecated{}, false
|
||||
}
|
||||
|
||||
func (vl *VersionListDeprecated) insertAt(i int, v FileVersionDeprecated) {
|
||||
vl.Versions = append(vl.Versions, FileVersionDeprecated{})
|
||||
copy(vl.Versions[i+1:], vl.Versions[i:])
|
||||
vl.Versions[i] = v
|
||||
}
|
||||
|
||||
func needDeprecated(global FileVersionDeprecated, haveLocal bool, localVersion protocol.Vector) bool {
|
||||
// We never need an invalid file.
|
||||
if global.Invalid {
|
||||
return false
|
||||
}
|
||||
// We don't need a deleted file if we don't have it.
|
||||
if global.Deleted && !haveLocal {
|
||||
return false
|
||||
}
|
||||
// We don't need the global file if we already have the same version.
|
||||
if haveLocal && localVersion.GreaterEqual(global.Version) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -13,8 +13,6 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
@@ -31,35 +29,10 @@ type FileSet struct {
|
||||
updateMutex sync.Mutex // protects database updates and the corresponding metadata changes
|
||||
}
|
||||
|
||||
// FileIntf is the set of methods implemented by both protocol.FileInfo and
|
||||
// FileInfoTruncated.
|
||||
type FileIntf interface {
|
||||
FileSize() int64
|
||||
FileName() string
|
||||
FileLocalFlags() uint32
|
||||
IsDeleted() bool
|
||||
IsInvalid() bool
|
||||
IsIgnored() bool
|
||||
IsUnsupported() bool
|
||||
MustRescan() bool
|
||||
IsReceiveOnlyChanged() bool
|
||||
IsDirectory() bool
|
||||
IsSymlink() bool
|
||||
ShouldConflict() bool
|
||||
HasPermissionBits() bool
|
||||
SequenceNo() int64
|
||||
BlockSize() int
|
||||
FileVersion() protocol.Vector
|
||||
FileType() protocol.FileInfoType
|
||||
FilePermissions() uint32
|
||||
FileModifiedBy() protocol.ShortID
|
||||
ModTime() time.Time
|
||||
}
|
||||
|
||||
// The Iterator is called with either a protocol.FileInfo or a
|
||||
// FileInfoTruncated (depending on the method) and returns true to
|
||||
// continue iteration, false to stop.
|
||||
type Iterator func(f FileIntf) bool
|
||||
type Iterator func(f protocol.FileIntf) bool
|
||||
|
||||
func NewFileSet(folder string, fs fs.Filesystem, db *Lowlevel) *FileSet {
|
||||
return &FileSet{
|
||||
@@ -335,7 +308,7 @@ func (s *Snapshot) LocalChangedFiles(page, perpage int) []FileInfoTruncated {
|
||||
skip := (page - 1) * perpage
|
||||
get := perpage
|
||||
|
||||
s.WithHaveTruncated(protocol.LocalDeviceID, func(f FileIntf) bool {
|
||||
s.WithHaveTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
|
||||
if !f.IsReceiveOnlyChanged() {
|
||||
return true
|
||||
}
|
||||
@@ -359,7 +332,7 @@ func (s *Snapshot) RemoteNeedFolderFiles(device protocol.DeviceID, page, perpage
|
||||
files := make([]FileInfoTruncated, 0, perpage)
|
||||
skip := (page - 1) * perpage
|
||||
get := perpage
|
||||
s.WithNeedTruncated(device, func(f FileIntf) bool {
|
||||
s.WithNeedTruncated(device, func(f protocol.FileIntf) bool {
|
||||
if skip > 0 {
|
||||
skip--
|
||||
return true
|
||||
@@ -497,7 +470,7 @@ func normalizeFilenamesAndDropDuplicates(fs []protocol.FileInfo) []protocol.File
|
||||
}
|
||||
|
||||
func nativeFileIterator(fn Iterator) Iterator {
|
||||
return func(fi FileIntf) bool {
|
||||
return func(fi protocol.FileIntf) bool {
|
||||
switch f := fi.(type) {
|
||||
case protocol.FileInfo:
|
||||
f.Name = osutil.NativeFilename(f.Name)
|
||||
|
||||
@@ -48,7 +48,7 @@ func globalList(s *db.FileSet) []protocol.FileInfo {
|
||||
var fs []protocol.FileInfo
|
||||
snap := s.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithGlobal(func(fi db.FileIntf) bool {
|
||||
snap.WithGlobal(func(fi protocol.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfo)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
@@ -59,7 +59,7 @@ func globalListPrefixed(s *db.FileSet, prefix string) []db.FileInfoTruncated {
|
||||
var fs []db.FileInfoTruncated
|
||||
snap := s.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
|
||||
snap.WithPrefixedGlobalTruncated(prefix, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(db.FileInfoTruncated)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
@@ -71,7 +71,7 @@ func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
|
||||
var fs []protocol.FileInfo
|
||||
snap := s.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithHave(n, func(fi db.FileIntf) bool {
|
||||
snap.WithHave(n, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfo)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
@@ -83,7 +83,7 @@ func haveListPrefixed(s *db.FileSet, n protocol.DeviceID, prefix string) []db.Fi
|
||||
var fs []db.FileInfoTruncated
|
||||
snap := s.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithPrefixedHaveTruncated(n, prefix, func(fi db.FileIntf) bool {
|
||||
snap.WithPrefixedHaveTruncated(n, prefix, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(db.FileInfoTruncated)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
@@ -95,7 +95,7 @@ func needList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
|
||||
var fs []protocol.FileInfo
|
||||
snap := s.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithNeed(n, func(fi db.FileIntf) bool {
|
||||
snap.WithNeed(n, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfo)
|
||||
fs = append(fs, f)
|
||||
return true
|
||||
@@ -998,7 +998,7 @@ func TestWithHaveSequence(t *testing.T) {
|
||||
i := 2
|
||||
snap := s.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithHaveSequence(int64(i), func(fi db.FileIntf) bool {
|
||||
snap.WithHaveSequence(int64(i), func(fi protocol.FileIntf) bool {
|
||||
if f := fi.(protocol.FileInfo); !f.IsEquivalent(localHave[i-1], 0) {
|
||||
t.Fatalf("Got %v\nExpected %v", f, localHave[i-1])
|
||||
}
|
||||
@@ -1049,7 +1049,7 @@ loop:
|
||||
default:
|
||||
}
|
||||
snap := s.Snapshot()
|
||||
snap.WithHaveSequence(prevSeq+1, func(fi db.FileIntf) bool {
|
||||
snap.WithHaveSequence(prevSeq+1, func(fi protocol.FileIntf) bool {
|
||||
if fi.SequenceNo() < prevSeq+1 {
|
||||
t.Fatal("Skipped ", prevSeq+1, fi.SequenceNo())
|
||||
}
|
||||
@@ -1527,8 +1527,8 @@ func TestSequenceIndex(t *testing.T) {
|
||||
|
||||
// Start a routine to walk the sequence index and inspect the result.
|
||||
|
||||
seen := make(map[string]db.FileIntf)
|
||||
latest := make([]db.FileIntf, 0, len(local))
|
||||
seen := make(map[string]protocol.FileIntf)
|
||||
latest := make([]protocol.FileIntf, 0, len(local))
|
||||
var seq int64
|
||||
t0 := time.Now()
|
||||
|
||||
@@ -1539,7 +1539,7 @@ func TestSequenceIndex(t *testing.T) {
|
||||
// update has happened since our last iteration.
|
||||
latest = latest[:0]
|
||||
snap := s.Snapshot()
|
||||
snap.WithHaveSequence(seq+1, func(f db.FileIntf) bool {
|
||||
snap.WithHaveSequence(seq+1, func(f protocol.FileIntf) bool {
|
||||
seen[f.FileName()] = f
|
||||
latest = append(latest, f)
|
||||
seq = f.SequenceNo()
|
||||
@@ -1644,7 +1644,7 @@ func TestUpdateWithOneFileTwice(t *testing.T) {
|
||||
snap := s.Snapshot()
|
||||
defer snap.Release()
|
||||
count := 0
|
||||
snap.WithHaveSequence(0, func(f db.FileIntf) bool {
|
||||
snap.WithHaveSequence(0, func(f protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
|
||||
@@ -12,7 +12,6 @@ package db
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
@@ -196,98 +195,317 @@ func (vl VersionList) String() string {
|
||||
var b bytes.Buffer
|
||||
var id protocol.DeviceID
|
||||
b.WriteString("{")
|
||||
for i, v := range vl.Versions {
|
||||
for i, v := range vl.RawVersions {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
copy(id[:], v.Device)
|
||||
fmt.Fprintf(&b, "{%v, %v}", v.Version, id)
|
||||
fmt.Fprintf(&b, "{%v, {", v.Version)
|
||||
for j, dev := range v.Devices {
|
||||
if j > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
copy(id[:], dev)
|
||||
fmt.Fprint(&b, id.Short())
|
||||
}
|
||||
b.WriteString("}, {")
|
||||
for j, dev := range v.InvalidDevices {
|
||||
if j > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
copy(id[:], dev)
|
||||
fmt.Fprint(&b, id.Short())
|
||||
}
|
||||
fmt.Fprint(&b, "}}")
|
||||
}
|
||||
b.WriteString("}")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// update brings the VersionList up to date with file. It returns the updated
|
||||
// VersionList, a potentially removed old FileVersion and its index, as well as
|
||||
// the index where the new FileVersion was inserted.
|
||||
func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (_ VersionList, removedFV FileVersion, removedAt int, insertedAt int, err error) {
|
||||
vl, removedFV, removedAt = vl.pop(device)
|
||||
|
||||
nv := FileVersion{
|
||||
Device: device,
|
||||
Version: file.Version,
|
||||
Invalid: file.IsInvalid(),
|
||||
Deleted: file.IsDeleted(),
|
||||
}
|
||||
i := 0
|
||||
if nv.Invalid {
|
||||
i = sort.Search(len(vl.Versions), func(j int) bool {
|
||||
return vl.Versions[j].Invalid
|
||||
})
|
||||
}
|
||||
for ; i < len(vl.Versions); i++ {
|
||||
switch vl.Versions[i].Version.Compare(file.Version) {
|
||||
case protocol.Equal:
|
||||
fallthrough
|
||||
|
||||
case protocol.Lesser:
|
||||
// The version at this point in the list is equal to or lesser
|
||||
// ("older") than us. We insert ourselves in front of it.
|
||||
vl = vl.insertAt(i, nv)
|
||||
return vl, removedFV, removedAt, i, nil
|
||||
|
||||
case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
|
||||
// The version at this point is in conflict with us. We must pull
|
||||
// the actual file metadata to determine who wins. If we win, we
|
||||
// insert ourselves in front of the loser here. (The "Lesser" and
|
||||
// "Greater" in the condition above is just based on the device
|
||||
// IDs in the version vector, which is not the only thing we use
|
||||
// to determine the winner.)
|
||||
//
|
||||
// A surprise missing file entry here is counted as a win for us.
|
||||
if of, ok, err := t.getFile(folder, vl.Versions[i].Device, []byte(file.Name)); err != nil {
|
||||
return vl, removedFV, removedAt, i, err
|
||||
} else if !ok || file.WinsConflict(of) {
|
||||
vl = vl.insertAt(i, nv)
|
||||
return vl, removedFV, removedAt, i, nil
|
||||
}
|
||||
}
|
||||
// VersionList, a device that has the global/newest version, a device that previously
|
||||
// had the global/newest version, a boolean indicating if the global version has
|
||||
// changed and if any error occurred (only possible in db interaction).
|
||||
func (vl *VersionList) update(folder, device []byte, file protocol.FileIntf, t readOnlyTransaction) (FileVersion, FileVersion, FileVersion, bool, bool, bool, error) {
|
||||
if len(vl.RawVersions) == 0 {
|
||||
nv := newFileVersion(device, file.FileVersion(), file.IsInvalid(), file.IsDeleted())
|
||||
vl.RawVersions = append(vl.RawVersions, nv)
|
||||
return nv, FileVersion{}, FileVersion{}, false, false, true, nil
|
||||
}
|
||||
|
||||
// We didn't find a position for an insert above, so append to the end.
|
||||
vl.Versions = append(vl.Versions, nv)
|
||||
// Get the current global (before updating)
|
||||
oldFV, haveOldGlobal := vl.GetGlobal()
|
||||
|
||||
return vl, removedFV, removedAt, len(vl.Versions) - 1, nil
|
||||
// Remove ourselves first
|
||||
removedFV, haveRemoved, _, err := vl.pop(folder, device, []byte(file.FileName()), t)
|
||||
if err == nil {
|
||||
// Find position and insert the file
|
||||
err = vl.insert(folder, device, file, t)
|
||||
}
|
||||
if err != nil {
|
||||
return FileVersion{}, FileVersion{}, FileVersion{}, false, false, false, err
|
||||
}
|
||||
|
||||
newFV, _ := vl.GetGlobal() // We just inserted something above, can't be empty
|
||||
|
||||
if !haveOldGlobal {
|
||||
return newFV, FileVersion{}, removedFV, false, haveRemoved, true, nil
|
||||
}
|
||||
|
||||
globalChanged := true
|
||||
if oldFV.IsInvalid() == newFV.IsInvalid() && oldFV.Version.Equal(newFV.Version) {
|
||||
globalChanged = false
|
||||
}
|
||||
|
||||
return newFV, oldFV, removedFV, true, haveRemoved, globalChanged, nil
|
||||
}
|
||||
|
||||
func (vl VersionList) insertAt(i int, v FileVersion) VersionList {
|
||||
vl.Versions = append(vl.Versions, FileVersion{})
|
||||
copy(vl.Versions[i+1:], vl.Versions[i:])
|
||||
vl.Versions[i] = v
|
||||
return vl
|
||||
func (vl *VersionList) insert(folder, device []byte, file protocol.FileIntf, t readOnlyTransaction) error {
|
||||
var added bool
|
||||
var err error
|
||||
i := 0
|
||||
for ; i < len(vl.RawVersions); i++ {
|
||||
// Insert our new version
|
||||
added, err = vl.checkInsertAt(i, folder, device, file, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if added {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == len(vl.RawVersions) {
|
||||
// Append to the end
|
||||
vl.RawVersions = append(vl.RawVersions, newFileVersion(device, file.FileVersion(), file.IsInvalid(), file.IsDeleted()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vl *VersionList) insertAt(i int, v FileVersion) {
|
||||
vl.RawVersions = append(vl.RawVersions, FileVersion{})
|
||||
copy(vl.RawVersions[i+1:], vl.RawVersions[i:])
|
||||
vl.RawVersions[i] = v
|
||||
}
|
||||
|
||||
// pop returns the VersionList without the entry for the given device, as well
|
||||
// as the removed FileVersion and the position, where that FileVersion was.
|
||||
// If there is no FileVersion for the given device, the position is -1.
|
||||
func (vl VersionList) pop(device []byte) (VersionList, FileVersion, int) {
|
||||
for i, v := range vl.Versions {
|
||||
if bytes.Equal(v.Device, device) {
|
||||
vl.Versions = append(vl.Versions[:i], vl.Versions[i+1:]...)
|
||||
return vl, v, i
|
||||
}
|
||||
// as the removed FileVersion, whether it was found/removed at all and whether
|
||||
// the global changed in the process.
|
||||
func (vl *VersionList) pop(folder, device, name []byte, t readOnlyTransaction) (FileVersion, bool, bool, error) {
|
||||
invDevice, i, j, ok := vl.findDevice(device)
|
||||
if !ok {
|
||||
return FileVersion{}, false, false, nil
|
||||
}
|
||||
return vl, FileVersion{}, -1
|
||||
globalPos := vl.findGlobal()
|
||||
|
||||
if vl.RawVersions[i].deviceCount() == 1 {
|
||||
fv := vl.RawVersions[i]
|
||||
vl.popVersionAt(i)
|
||||
return fv, true, globalPos == i, nil
|
||||
}
|
||||
|
||||
if invDevice {
|
||||
vl.RawVersions[i].InvalidDevices = popDeviceAt(vl.RawVersions[i].InvalidDevices, j)
|
||||
} else {
|
||||
vl.RawVersions[i].Devices = popDeviceAt(vl.RawVersions[i].Devices, j)
|
||||
}
|
||||
// If the last valid device of the previous global was removed above,
|
||||
// the next entry is now the global entry (unless all entries are invalid).
|
||||
if len(vl.RawVersions[i].Devices) == 0 && globalPos == i {
|
||||
return vl.RawVersions[i], true, globalPos == vl.findGlobal(), nil
|
||||
}
|
||||
return vl.RawVersions[i], true, false, nil
|
||||
}
|
||||
|
||||
func (vl VersionList) Get(device []byte) (FileVersion, bool) {
|
||||
for _, v := range vl.Versions {
|
||||
if bytes.Equal(v.Device, device) {
|
||||
return v, true
|
||||
// Get returns a FileVersion that contains the given device and whether it has
|
||||
// been found at all.
|
||||
func (vl *VersionList) Get(device []byte) (FileVersion, bool) {
|
||||
_, i, _, ok := vl.findDevice(device)
|
||||
if !ok {
|
||||
return FileVersion{}, false
|
||||
}
|
||||
return vl.RawVersions[i], true
|
||||
}
|
||||
|
||||
// GetGlobal returns the current global FileVersion. The returned FileVersion
|
||||
// may be invalid, if all FileVersions are invalid. Returns false only if
|
||||
// VersionList is empty.
|
||||
func (vl *VersionList) GetGlobal() (FileVersion, bool) {
|
||||
i := vl.findGlobal()
|
||||
if i == -1 {
|
||||
return FileVersion{}, false
|
||||
}
|
||||
return vl.RawVersions[i], true
|
||||
}
|
||||
|
||||
func (vl *VersionList) Empty() bool {
|
||||
return len(vl.RawVersions) == 0
|
||||
}
|
||||
|
||||
// findGlobal returns the first version that isn't invalid, or if all versions are
|
||||
// invalid just the first version (i.e. 0) or -1, if there's no versions at all.
|
||||
func (vl *VersionList) findGlobal() int {
|
||||
for i, fv := range vl.RawVersions {
|
||||
if !fv.IsInvalid() {
|
||||
return i
|
||||
}
|
||||
}
|
||||
if len(vl.RawVersions) == 0 {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
return FileVersion{}, false
|
||||
// findDevices returns whether the device is in InvalidVersions or Versions and
|
||||
// in InvalidDevices or Devices (true for invalid), the positions in the version
|
||||
// and device slices and whether it has been found at all.
|
||||
func (vl *VersionList) findDevice(device []byte) (bool, int, int, bool) {
|
||||
for i, v := range vl.RawVersions {
|
||||
if j := deviceIndex(v.Devices, device); j != -1 {
|
||||
return false, i, j, true
|
||||
}
|
||||
if j := deviceIndex(v.InvalidDevices, device); j != -1 {
|
||||
return true, i, j, true
|
||||
}
|
||||
}
|
||||
return false, -1, -1, false
|
||||
}
|
||||
|
||||
func (vl *VersionList) popVersion(version protocol.Vector) (FileVersion, bool) {
|
||||
i := vl.versionIndex(version)
|
||||
if i == -1 {
|
||||
return FileVersion{}, false
|
||||
}
|
||||
fv := vl.RawVersions[i]
|
||||
vl.popVersionAt(i)
|
||||
return fv, true
|
||||
}
|
||||
|
||||
func (vl *VersionList) versionIndex(version protocol.Vector) int {
|
||||
for i, v := range vl.RawVersions {
|
||||
if version.Equal(v.Version) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (vl *VersionList) popVersionAt(i int) {
|
||||
vl.RawVersions = append(vl.RawVersions[:i], vl.RawVersions[i+1:]...)
|
||||
}
|
||||
|
||||
// checkInsertAt determines if the given device and associated file should be
|
||||
// inserted into the FileVersion at position i or into a new FileVersion at
|
||||
// position i.
|
||||
func (vl *VersionList) checkInsertAt(i int, folder, device []byte, file protocol.FileIntf, t readOnlyTransaction) (bool, error) {
|
||||
ordering := vl.RawVersions[i].Version.Compare(file.FileVersion())
|
||||
if ordering == protocol.Equal {
|
||||
if !file.IsInvalid() {
|
||||
vl.RawVersions[i].Devices = append(vl.RawVersions[i].Devices, device)
|
||||
} else {
|
||||
vl.RawVersions[i].InvalidDevices = append(vl.RawVersions[i].InvalidDevices, device)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
existingDevice, _ := vl.RawVersions[i].FirstDevice()
|
||||
insert, err := shouldInsertBefore(ordering, folder, existingDevice, vl.RawVersions[i].IsInvalid(), file, t)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if insert {
|
||||
vl.insertAt(i, newFileVersion(device, file.FileVersion(), file.IsInvalid(), file.IsDeleted()))
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// shouldInsertBefore determines whether the file comes before an existing
|
||||
// entry, given the version ordering (existing compared to new one), existing
|
||||
// device and if the existing version is invalid.
|
||||
func shouldInsertBefore(ordering protocol.Ordering, folder, existingDevice []byte, existingInvalid bool, file protocol.FileIntf, t readOnlyTransaction) (bool, error) {
|
||||
switch ordering {
|
||||
case protocol.Lesser:
|
||||
// The version at this point in the list is lesser
|
||||
// ("older") than us. We insert ourselves in front of it.
|
||||
return true, nil
|
||||
|
||||
case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
|
||||
// The version in conflict with us.
|
||||
// Check if we can shortcut due to one being invalid.
|
||||
if existingInvalid != file.IsInvalid() {
|
||||
return existingInvalid, nil
|
||||
}
|
||||
// We must pull the actual file metadata to determine who wins.
|
||||
// If we win, we insert ourselves in front of the loser here.
|
||||
// (The "Lesser" and "Greater" in the condition above is just
|
||||
// based on the device IDs in the version vector, which is not
|
||||
// the only thing we use to determine the winner.)
|
||||
of, ok, err := t.getFile(folder, existingDevice, []byte(file.FileName()))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// A surprise missing file entry here is counted as a win for us.
|
||||
if !ok {
|
||||
return true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if protocol.WinsConflict(file, of) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func deviceIndex(devices [][]byte, device []byte) int {
|
||||
for i, dev := range devices {
|
||||
if bytes.Equal(device, dev) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func popDeviceAt(devices [][]byte, i int) [][]byte {
|
||||
return append(devices[:i], devices[i+1:]...)
|
||||
}
|
||||
|
||||
func popDevice(devices [][]byte, device []byte) ([][]byte, bool) {
|
||||
i := deviceIndex(devices, device)
|
||||
if i == -1 {
|
||||
return devices, false
|
||||
}
|
||||
return popDeviceAt(devices, i), true
|
||||
}
|
||||
|
||||
func newFileVersion(device []byte, version protocol.Vector, invalid, deleted bool) FileVersion {
|
||||
fv := FileVersion{
|
||||
Version: version,
|
||||
Deleted: deleted,
|
||||
}
|
||||
if invalid {
|
||||
fv.InvalidDevices = [][]byte{device}
|
||||
} else {
|
||||
fv.Devices = [][]byte{device}
|
||||
}
|
||||
return fv
|
||||
}
|
||||
|
||||
func (fv FileVersion) FirstDevice() ([]byte, bool) {
|
||||
if len(fv.Devices) != 0 {
|
||||
return fv.Devices[0], true
|
||||
}
|
||||
if len(fv.InvalidDevices) != 0 {
|
||||
return fv.InvalidDevices[0], true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (fv FileVersion) IsInvalid() bool {
|
||||
return len(fv.Devices) == 0
|
||||
}
|
||||
|
||||
func (fv FileVersion) deviceCount() int {
|
||||
return len(fv.Devices) + len(fv.InvalidDevices)
|
||||
}
|
||||
|
||||
type fileList []protocol.FileInfo
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -13,15 +13,15 @@ option (gogoproto.goproto_unrecognized_all) = false;
|
||||
option (gogoproto.goproto_sizecache_all) = false;
|
||||
|
||||
message FileVersion {
|
||||
protocol.Vector version = 1 [(gogoproto.nullable) = false];
|
||||
bytes device = 2;
|
||||
bool invalid = 3;
|
||||
bool deleted = 4;
|
||||
protocol.Vector version = 1 [(gogoproto.nullable) = false];
|
||||
bool deleted = 2;
|
||||
repeated bytes devices = 3;
|
||||
repeated bytes invalid_devices = 4;
|
||||
}
|
||||
|
||||
message VersionList {
|
||||
option (gogoproto.goproto_stringer) = false;
|
||||
repeated FileVersion versions = 1 [(gogoproto.nullable) = false];
|
||||
repeated FileVersion versions = 1 [(gogoproto.customname) = "RawVersions", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// Must be the same as FileInfo but without the blocks field
|
||||
@@ -42,7 +42,8 @@ message FileInfoTruncated {
|
||||
int32 block_size = 13 [(gogoproto.customname) = "RawBlockSize"];
|
||||
|
||||
// see bep.proto
|
||||
uint32 local_flags = 1000;
|
||||
uint32 local_flags = 1000;
|
||||
bytes version_hash = 1001;
|
||||
|
||||
bool deleted = 6;
|
||||
bool invalid = 7 [(gogoproto.customname) = "RawInvalid"];
|
||||
@@ -54,9 +55,11 @@ message BlockList {
|
||||
repeated protocol.BlockInfo Blocks = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// BlocksHashOnly is used to only unmarshal the block list hash from a FileInfo
|
||||
message BlocksHashOnly {
|
||||
bytes blocks_hash = 18;
|
||||
// IndirectionHashesOnly is used to only unmarshal the indirection hashes
|
||||
// from a FileInfo
|
||||
message IndirectionHashesOnly {
|
||||
bytes blocks_hash = 18;
|
||||
bytes version_hash = 1001;
|
||||
}
|
||||
|
||||
// For each folder and device we keep one of these to track the current
|
||||
@@ -76,3 +79,15 @@ message CountsSet {
|
||||
repeated Counts counts = 1 [(gogoproto.nullable) = false];
|
||||
int64 created = 2; // unix nanos
|
||||
}
|
||||
|
||||
message FileVersionDeprecated {
|
||||
protocol.Vector version = 1 [(gogoproto.nullable) = false];
|
||||
bytes device = 2;
|
||||
bool invalid = 3;
|
||||
bool deleted = 4;
|
||||
}
|
||||
|
||||
message VersionListDeprecated {
|
||||
option (gogoproto.goproto_stringer) = false;
|
||||
repeated FileVersionDeprecated versions = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
@@ -15,7 +15,11 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var errEntryFromGlobalMissing = errors.New("device present in global list but missing as device/fileinfo entry")
|
||||
var (
|
||||
errEntryFromGlobalMissing = errors.New("device present in global list but missing as device/fileinfo entry")
|
||||
errEmptyGlobal = errors.New("no versions in global list")
|
||||
errEmptyFileVersion = errors.New("no devices in global file version")
|
||||
)
|
||||
|
||||
// A readOnlyTransaction represents a database snapshot.
|
||||
type readOnlyTransaction struct {
|
||||
@@ -54,7 +58,7 @@ func (t readOnlyTransaction) getFileByKey(key []byte) (protocol.FileInfo, bool,
|
||||
return f.(protocol.FileInfo), true, nil
|
||||
}
|
||||
|
||||
func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (FileIntf, bool, error) {
|
||||
func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (protocol.FileIntf, bool, error) {
|
||||
bs, err := t.Get(key)
|
||||
if backend.IsNotFound(err) {
|
||||
return nil, false, nil
|
||||
@@ -72,13 +76,16 @@ func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (FileIntf, boo
|
||||
return f, true, nil
|
||||
}
|
||||
|
||||
func (t readOnlyTransaction) unmarshalTrunc(bs []byte, trunc bool) (FileIntf, error) {
|
||||
func (t readOnlyTransaction) unmarshalTrunc(bs []byte, trunc bool) (protocol.FileIntf, error) {
|
||||
if trunc {
|
||||
var tf FileInfoTruncated
|
||||
err := tf.Unmarshal(bs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := t.fillTruncated(&tf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tf, nil
|
||||
}
|
||||
|
||||
@@ -92,7 +99,8 @@ func (t readOnlyTransaction) unmarshalTrunc(bs []byte, trunc bool) (FileIntf, er
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
// fillFileInfo follows the (possible) indirection of blocks and fills it out.
|
||||
// fillFileInfo follows the (possible) indirection of blocks and version
|
||||
// vector and fills it out.
|
||||
func (t readOnlyTransaction) fillFileInfo(fi *protocol.FileInfo) error {
|
||||
var key []byte
|
||||
|
||||
@@ -110,6 +118,41 @@ func (t readOnlyTransaction) fillFileInfo(fi *protocol.FileInfo) error {
|
||||
fi.Blocks = bl.Blocks
|
||||
}
|
||||
|
||||
if len(fi.VersionHash) != 0 {
|
||||
key = t.keyer.GenerateVersionKey(key, fi.VersionHash)
|
||||
bs, err := t.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var v protocol.Vector
|
||||
if err := v.Unmarshal(bs); err != nil {
|
||||
return err
|
||||
}
|
||||
fi.Version = v
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fillTruncated follows the (possible) indirection of version vector and
|
||||
// fills it.
|
||||
func (t readOnlyTransaction) fillTruncated(fi *FileInfoTruncated) error {
|
||||
var key []byte
|
||||
|
||||
if len(fi.VersionHash) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key = t.keyer.GenerateVersionKey(key, fi.VersionHash)
|
||||
bs, err := t.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var v protocol.Vector
|
||||
if err := v.Unmarshal(bs); err != nil {
|
||||
return err
|
||||
}
|
||||
fi.Version = v
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -136,26 +179,44 @@ func (t readOnlyTransaction) getGlobalVersionsByKey(key []byte) (VersionList, er
|
||||
return vl, nil
|
||||
}
|
||||
|
||||
func (t readOnlyTransaction) getGlobal(keyBuf, folder, file []byte, truncate bool) ([]byte, FileIntf, bool, error) {
|
||||
func (t readOnlyTransaction) getGlobal(keyBuf, folder, file []byte, truncate bool) ([]byte, protocol.FileIntf, bool, error) {
|
||||
vl, err := t.getGlobalVersions(keyBuf, folder, file)
|
||||
if backend.IsNotFound(err) {
|
||||
return keyBuf, nil, false, nil
|
||||
} else if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
if len(vl.Versions) == 0 {
|
||||
return nil, nil, false, nil
|
||||
}
|
||||
var fi protocol.FileIntf
|
||||
keyBuf, fi, _, err = t.getGlobalFromVersionList(keyBuf, folder, file, truncate, vl)
|
||||
return keyBuf, fi, true, err
|
||||
}
|
||||
|
||||
keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, vl.Versions[0].Device, file)
|
||||
func (t readOnlyTransaction) getGlobalFromVersionList(keyBuf, folder, file []byte, truncate bool, vl VersionList) ([]byte, protocol.FileIntf, FileVersion, error) {
|
||||
fv, ok := vl.GetGlobal()
|
||||
if !ok {
|
||||
return keyBuf, nil, FileVersion{}, errEmptyGlobal
|
||||
}
|
||||
keyBuf, fi, err := t.getGlobalFromFileVersion(keyBuf, folder, file, truncate, fv)
|
||||
return keyBuf, fi, fv, err
|
||||
}
|
||||
|
||||
func (t readOnlyTransaction) getGlobalFromFileVersion(keyBuf, folder, file []byte, truncate bool, fv FileVersion) ([]byte, protocol.FileIntf, error) {
|
||||
dev, ok := fv.FirstDevice()
|
||||
if !ok {
|
||||
return keyBuf, nil, errEmptyFileVersion
|
||||
}
|
||||
keyBuf, err := t.keyer.GenerateDeviceFileKey(keyBuf, folder, dev, file)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
return keyBuf, nil, err
|
||||
}
|
||||
fi, ok, err := t.getFileTrunc(keyBuf, truncate)
|
||||
if err != nil || !ok {
|
||||
return keyBuf, nil, false, err
|
||||
if err != nil {
|
||||
return keyBuf, nil, err
|
||||
}
|
||||
return keyBuf, fi, true, nil
|
||||
if !ok {
|
||||
return keyBuf, nil, errEntryFromGlobalMissing
|
||||
}
|
||||
return keyBuf, fi, nil
|
||||
}
|
||||
|
||||
func (t *readOnlyTransaction) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) error {
|
||||
@@ -281,19 +342,12 @@ func (t *readOnlyTransaction) withGlobal(folder, prefix []byte, truncate bool, f
|
||||
return err
|
||||
}
|
||||
|
||||
dk, err = t.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name)
|
||||
var f protocol.FileIntf
|
||||
dk, f, _, err = t.getGlobalFromVersionList(dk, folder, name, truncate, vl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, ok, err := t.getFileTrunc(dk, truncate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if !fn(f) {
|
||||
return nil
|
||||
}
|
||||
@@ -354,16 +408,17 @@ func (t *readOnlyTransaction) availability(folder, file []byte) ([]protocol.Devi
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var devices []protocol.DeviceID
|
||||
for _, v := range vl.Versions {
|
||||
if !v.Version.Equal(vl.Versions[0].Version) {
|
||||
break
|
||||
fv, ok := vl.GetGlobal()
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
devices := make([]protocol.DeviceID, len(fv.Devices))
|
||||
for i, dev := range fv.Devices {
|
||||
n, err := protocol.DeviceIDFromBytes(dev)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if v.Invalid {
|
||||
continue
|
||||
}
|
||||
n := protocol.DeviceIDFromBytes(v.Device)
|
||||
devices = append(devices, n)
|
||||
devices[i] = n
|
||||
}
|
||||
|
||||
return devices, nil
|
||||
@@ -385,32 +440,38 @@ func (t *readOnlyTransaction) withNeed(folder, device []byte, truncate bool, fn
|
||||
defer dbi.Release()
|
||||
|
||||
var dk []byte
|
||||
devID := protocol.DeviceIDFromBytes(device)
|
||||
devID, err := protocol.DeviceIDFromBytes(device)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for dbi.Next() {
|
||||
var vl VersionList
|
||||
if err := vl.Unmarshal(dbi.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
globalFV := vl.Versions[0]
|
||||
globalFV, ok := vl.GetGlobal()
|
||||
if !ok {
|
||||
return errEmptyGlobal
|
||||
}
|
||||
haveFV, have := vl.Get(device)
|
||||
|
||||
if !need(globalFV, have, haveFV.Version) {
|
||||
continue
|
||||
}
|
||||
|
||||
name := t.keyer.NameFromGlobalVersionKey(dbi.Key())
|
||||
dk, err = t.keyer.GenerateDeviceFileKey(dk, folder, globalFV.Device, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gf, ok, err := t.getFileTrunc(dk, truncate)
|
||||
var gf protocol.FileIntf
|
||||
dk, gf, err = t.getGlobalFromFileVersion(dk, folder, name, truncate, globalFV)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
globalDev, ok := globalFV.FirstDevice()
|
||||
if !ok {
|
||||
return errEntryFromGlobalMissing
|
||||
return errEmptyFileVersion
|
||||
}
|
||||
l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, devID, name, have, haveFV.Invalid, haveFV.Version, globalFV.Version, globalFV.Device)
|
||||
l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, devID, name, have, haveFV.IsInvalid(), haveFV.Version, gf.FileVersion(), globalDev)
|
||||
if !fn(gf) {
|
||||
return dbi.Error()
|
||||
}
|
||||
@@ -430,7 +491,7 @@ func (t *readOnlyTransaction) withNeedLocal(folder []byte, truncate bool, fn Ite
|
||||
defer dbi.Release()
|
||||
|
||||
var keyBuf []byte
|
||||
var f FileIntf
|
||||
var f protocol.FileIntf
|
||||
var ok bool
|
||||
for dbi.Next() {
|
||||
keyBuf, f, ok, err = t.getGlobal(keyBuf, folder, t.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate)
|
||||
@@ -470,7 +531,9 @@ func (db *Lowlevel) newReadWriteTransaction() (readWriteTransaction, error) {
|
||||
}
|
||||
|
||||
func (t readWriteTransaction) Commit() error {
|
||||
t.readOnlyTransaction.close()
|
||||
// The readOnlyTransaction must close after commit, because they may be
|
||||
// backed by the same actual lower level transaction.
|
||||
defer t.readOnlyTransaction.close()
|
||||
return t.WriteTransaction.Commit()
|
||||
}
|
||||
|
||||
@@ -510,6 +573,24 @@ func (t readWriteTransaction) putFile(fkey []byte, fi protocol.FileInfo, truncat
|
||||
fi.Blocks = nil
|
||||
}
|
||||
|
||||
// Indirect the version vector if it's large enough.
|
||||
if len(fi.Version.Counters) > versionIndirectionCutoff {
|
||||
fi.VersionHash = protocol.VectorHash(fi.Version)
|
||||
bkey = t.keyer.GenerateVersionKey(bkey, fi.VersionHash)
|
||||
if _, err := t.Get(bkey); backend.IsNotFound(err) {
|
||||
// Marshal the version vector and save it
|
||||
versionBs := mustMarshal(&fi.Version)
|
||||
if err := t.Put(bkey, versionBs); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fi.Version = protocol.Vector{}
|
||||
} else {
|
||||
fi.VersionHash = nil
|
||||
}
|
||||
|
||||
fiBs := mustMarshal(&fi)
|
||||
return t.Put(fkey, fiBs)
|
||||
}
|
||||
@@ -518,7 +599,10 @@ func (t readWriteTransaction) putFile(fkey []byte, fi protocol.FileInfo, truncat
|
||||
// file. If the device is already present in the list, the version is updated.
|
||||
// If the file does not have an entry in the global list, it is created.
|
||||
func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, file protocol.FileInfo, meta *metadataTracker) ([]byte, bool, error) {
|
||||
deviceID := protocol.DeviceIDFromBytes(device)
|
||||
deviceID, err := protocol.DeviceIDFromBytes(device)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
l.Debugf("update global; folder=%q device=%v file=%q version=%v invalid=%v", folder, deviceID, file.Name, file.Version, file.IsInvalid())
|
||||
|
||||
@@ -527,7 +611,7 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
fl, removedFV, removedAt, insertedAt, err := fl.update(folder, device, file, t.readOnlyTransaction)
|
||||
globalFV, oldGlobalFV, removedFV, haveOldGlobal, haveRemoved, globalChanged, err := fl.update(folder, device, file, t.readOnlyTransaction)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
@@ -542,26 +626,7 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
|
||||
// Only load those from db if actually needed
|
||||
|
||||
var gotGlobal, gotOldGlobal bool
|
||||
var global, oldGlobal FileIntf
|
||||
|
||||
globalFV := fl.Versions[0]
|
||||
var oldGlobalFV FileVersion
|
||||
haveOldGlobal := false
|
||||
|
||||
globalUnaffected := removedAt != 0 && insertedAt != 0
|
||||
if globalUnaffected {
|
||||
oldGlobalFV = globalFV
|
||||
haveOldGlobal = true
|
||||
} else {
|
||||
if removedAt == 0 {
|
||||
oldGlobalFV = removedFV
|
||||
haveOldGlobal = true
|
||||
} else if len(fl.Versions) > 1 {
|
||||
// The previous newest version is now at index 1
|
||||
oldGlobalFV = fl.Versions[1]
|
||||
haveOldGlobal = true
|
||||
}
|
||||
}
|
||||
var global, oldGlobal protocol.FileIntf
|
||||
|
||||
// Check the need of the device that was updated
|
||||
// Must happen before updating global meta: If this is the first
|
||||
@@ -569,16 +634,14 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
|
||||
|
||||
needBefore := false
|
||||
if haveOldGlobal {
|
||||
needBefore = need(oldGlobalFV, removedAt >= 0, removedFV.Version)
|
||||
needBefore = need(oldGlobalFV, haveRemoved, removedFV.Version)
|
||||
}
|
||||
needNow := need(globalFV, true, fl.Versions[insertedAt].Version)
|
||||
needNow := need(globalFV, true, file.Version)
|
||||
if needBefore {
|
||||
if !gotOldGlobal {
|
||||
if oldGlobal, err = t.updateGlobalGetOldGlobal(keyBuf, folder, name, oldGlobalFV); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotOldGlobal = true
|
||||
if keyBuf, oldGlobal, err = t.getGlobalFromFileVersion(keyBuf, folder, name, true, oldGlobalFV); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotOldGlobal = true
|
||||
meta.removeNeeded(deviceID, oldGlobal)
|
||||
if !needNow && bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
||||
if keyBuf, err = t.updateLocalNeed(keyBuf, folder, name, false); err != nil {
|
||||
@@ -587,12 +650,10 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
|
||||
}
|
||||
}
|
||||
if needNow {
|
||||
if !gotGlobal {
|
||||
if global, err = t.updateGlobalGetGlobal(keyBuf, folder, name, file, insertedAt, fl); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotGlobal = true
|
||||
if keyBuf, global, err = t.updateGlobalGetGlobal(keyBuf, folder, name, file, globalFV); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotGlobal = true
|
||||
meta.addNeeded(deviceID, global)
|
||||
if !needBefore && bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
||||
if keyBuf, err = t.updateLocalNeed(keyBuf, folder, name, true); err != nil {
|
||||
@@ -602,40 +663,34 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
|
||||
}
|
||||
|
||||
// Update global size counter if necessary
|
||||
// Necessary here means the first item in the global list was changed,
|
||||
// even if both new and old are invalid, due to potential change in
|
||||
// LocalFlags.
|
||||
|
||||
if !globalUnaffected {
|
||||
if global, err = t.updateGlobalGetGlobal(keyBuf, folder, name, file, insertedAt, fl); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotGlobal = true
|
||||
if haveOldGlobal {
|
||||
if oldGlobal, err = t.updateGlobalGetOldGlobal(keyBuf, folder, name, oldGlobalFV); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotOldGlobal = true
|
||||
// Remove the old global from the global size counter
|
||||
meta.removeFile(protocol.GlobalDeviceID, oldGlobal)
|
||||
}
|
||||
|
||||
// Add the new global to the global size counter
|
||||
meta.addFile(protocol.GlobalDeviceID, global)
|
||||
}
|
||||
|
||||
if globalUnaffected {
|
||||
if !globalChanged {
|
||||
// Neither the global state nor the needs of any devices, except
|
||||
// the one updated, changed.
|
||||
return keyBuf, true, nil
|
||||
}
|
||||
|
||||
// If global changed, but both the new and old are invalid, noone needed
|
||||
// the file before and now -> nothing to do.
|
||||
if global.IsInvalid() && (!haveOldGlobal || oldGlobal.IsInvalid()) {
|
||||
return keyBuf, true, nil
|
||||
// Remove the old global from the global size counter
|
||||
if haveOldGlobal {
|
||||
if !gotOldGlobal {
|
||||
if keyBuf, oldGlobal, err = t.getGlobalFromFileVersion(keyBuf, folder, name, true, oldGlobalFV); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotOldGlobal = true
|
||||
}
|
||||
// Remove the old global from the global size counter
|
||||
meta.removeFile(protocol.GlobalDeviceID, oldGlobal)
|
||||
}
|
||||
|
||||
// Add the new global to the global size counter
|
||||
if !gotGlobal {
|
||||
if keyBuf, global, err = t.updateGlobalGetGlobal(keyBuf, folder, name, file, globalFV); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
gotGlobal = true
|
||||
}
|
||||
meta.addFile(protocol.GlobalDeviceID, global)
|
||||
|
||||
// check for local (if not already done before)
|
||||
if !bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
||||
localFV, haveLocal := fl.Get(protocol.LocalDeviceID[:])
|
||||
@@ -679,40 +734,12 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
|
||||
return keyBuf, true, nil
|
||||
}
|
||||
|
||||
func (t readWriteTransaction) updateGlobalGetGlobal(keyBuf, folder, name []byte, file protocol.FileInfo, insertedAt int, fl VersionList) (FileIntf, error) {
|
||||
if insertedAt == 0 {
|
||||
func (t readWriteTransaction) updateGlobalGetGlobal(keyBuf, folder, name []byte, file protocol.FileInfo, fv FileVersion) ([]byte, protocol.FileIntf, error) {
|
||||
if fv.Version.Equal(file.Version) {
|
||||
// Inserted a new newest version
|
||||
return file, nil
|
||||
return keyBuf, file, nil
|
||||
}
|
||||
var err error
|
||||
keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
global, ok, err := t.getFileTrunc(keyBuf, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
return nil, errEntryFromGlobalMissing
|
||||
}
|
||||
return global, nil
|
||||
}
|
||||
|
||||
func (t readWriteTransaction) updateGlobalGetOldGlobal(keyBuf, folder, name []byte, oldGlobalFV FileVersion) (FileIntf, error) {
|
||||
var err error
|
||||
keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, oldGlobalFV.Device, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
oldGlobal, ok, err := t.getFileTrunc(keyBuf, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
return nil, errEntryFromGlobalMissing
|
||||
}
|
||||
return oldGlobal, nil
|
||||
return t.getGlobalFromFileVersion(keyBuf, folder, name, true, fv)
|
||||
}
|
||||
|
||||
func (t readWriteTransaction) updateLocalNeed(keyBuf, folder, name []byte, add bool) ([]byte, error) {
|
||||
@@ -733,7 +760,7 @@ func (t readWriteTransaction) updateLocalNeed(keyBuf, folder, name []byte, add b
|
||||
|
||||
func need(global FileVersion, haveLocal bool, localVersion protocol.Vector) bool {
|
||||
// We never need an invalid file.
|
||||
if global.Invalid {
|
||||
if global.IsInvalid() {
|
||||
return false
|
||||
}
|
||||
// We don't need a deleted file if we don't have it.
|
||||
@@ -750,8 +777,11 @@ func need(global FileVersion, haveLocal bool, localVersion protocol.Vector) bool
|
||||
// removeFromGlobal removes the device from the global version list for the
|
||||
// given file. If the version list is empty after this, the file entry is
|
||||
// removed entirely.
|
||||
func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte, file []byte, meta *metadataTracker) ([]byte, error) {
|
||||
deviceID := protocol.DeviceIDFromBytes(device)
|
||||
func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device, file []byte, meta *metadataTracker) ([]byte, error) {
|
||||
deviceID, err := protocol.DeviceIDFromBytes(device)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.Debugf("remove from global; folder=%q device=%v file=%q", folder, deviceID, file)
|
||||
|
||||
@@ -764,17 +794,48 @@ func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fl, removedFV, removedAt := fl.pop(device)
|
||||
if removedAt == -1 {
|
||||
oldGlobalFV, haveOldGlobal := fl.GetGlobal()
|
||||
|
||||
if !haveOldGlobal {
|
||||
// Shouldn't ever happen, but doesn't hurt to handle.
|
||||
return keyBuf, t.Delete(gk)
|
||||
}
|
||||
|
||||
removedFV, haveRemoved, globalChanged, err := fl.pop(folder, device, file, t.readOnlyTransaction)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !haveRemoved {
|
||||
// There is no version for the given device
|
||||
return keyBuf, nil
|
||||
}
|
||||
|
||||
if removedAt != 0 {
|
||||
var global protocol.FileIntf
|
||||
var gotGlobal, ok bool
|
||||
|
||||
globalFV, ok := fl.GetGlobal()
|
||||
// Add potential needs of the removed device
|
||||
if ok && !globalFV.IsInvalid() && need(globalFV, false, protocol.Vector{}) && !need(oldGlobalFV, haveRemoved, removedFV.Version) {
|
||||
keyBuf, global, _, err = t.getGlobalFromVersionList(keyBuf, folder, file, true, fl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
gotGlobal = true
|
||||
meta.addNeeded(deviceID, global)
|
||||
if bytes.Equal(protocol.LocalDeviceID[:], device) {
|
||||
if keyBuf, err = t.updateLocalNeed(keyBuf, folder, file, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Global hasn't changed, abort early
|
||||
if !globalChanged {
|
||||
l.Debugf("new global after remove: %v", fl)
|
||||
if err := t.Put(gk, mustMarshal(&fl)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return keyBuf, nil
|
||||
}
|
||||
|
||||
keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, device, file)
|
||||
@@ -789,6 +850,7 @@ func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte
|
||||
}
|
||||
meta.removeFile(protocol.GlobalDeviceID, f)
|
||||
|
||||
// Remove potential device needs
|
||||
if fv, have := fl.Get(protocol.LocalDeviceID[:]); need(removedFV, have, fv.Version) {
|
||||
meta.removeNeeded(protocol.LocalDeviceID, f)
|
||||
if keyBuf, err = t.updateLocalNeed(keyBuf, folder, file, false); err != nil {
|
||||
@@ -796,7 +858,7 @@ func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte
|
||||
}
|
||||
}
|
||||
for _, dev := range meta.devices() {
|
||||
if bytes.Equal(dev[:], device) {
|
||||
if bytes.Equal(dev[:], device) { // Was the previous global
|
||||
continue
|
||||
}
|
||||
if fv, have := fl.Get(dev[:]); need(removedFV, have, fv.Version) {
|
||||
@@ -804,42 +866,24 @@ func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte
|
||||
}
|
||||
}
|
||||
|
||||
if len(fl.Versions) == 0 {
|
||||
// Nothing left, i.e. nothing to add to the global counter below.
|
||||
if fl.Empty() {
|
||||
if err := t.Delete(gk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return keyBuf, nil
|
||||
}
|
||||
|
||||
globalFV := fl.Versions[0]
|
||||
keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, globalFV.Device, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
global, ok, err := t.getFileTrunc(keyBuf, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
return nil, errEntryFromGlobalMissing
|
||||
// Add to global
|
||||
if !gotGlobal {
|
||||
keyBuf, global, _, err = t.getGlobalFromVersionList(keyBuf, folder, file, true, fl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
meta.addFile(protocol.GlobalDeviceID, global)
|
||||
|
||||
if !globalFV.Invalid {
|
||||
if fv, have := fl.Get(protocol.LocalDeviceID[:]); need(globalFV, have, fv.Version) {
|
||||
meta.addNeeded(deviceID, global)
|
||||
if keyBuf, err = t.updateLocalNeed(keyBuf, folder, file, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
for _, dev := range meta.devices() {
|
||||
if fv, have := fl.Get(dev[:]); need(globalFV, have, fv.Version) {
|
||||
meta.addNeeded(deviceID, global)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
l.Debugf("new global after remove: %v", fl)
|
||||
l.Debugf(`new global for "%s" after remove: %v`, file, fl)
|
||||
if err := t.Put(gk, mustMarshal(&fl)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -297,7 +297,7 @@ func (f *BasicFilesystem) SameFile(fi1, fi2 FileInfo) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return os.SameFile(f1.FileInfo, f2.FileInfo)
|
||||
return os.SameFile(f1.osFileInfo(), f2.osFileInfo())
|
||||
}
|
||||
|
||||
// basicFile implements the fs.File interface on top of an os.File
|
||||
|
||||
@@ -8,7 +8,10 @@
|
||||
|
||||
package fs
|
||||
|
||||
import "syscall"
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func (e basicFileInfo) Mode() FileMode {
|
||||
return FileMode(e.FileInfo.Mode())
|
||||
@@ -27,3 +30,9 @@ func (e basicFileInfo) Group() int {
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// fileStat converts e to os.FileInfo that is suitable
|
||||
// to be passed to os.SameFile. Non-trivial on Windows.
|
||||
func (e *basicFileInfo) osFileInfo() os.FileInfo {
|
||||
return e.FileInfo
|
||||
}
|
||||
|
||||
@@ -56,3 +56,13 @@ func (e basicFileInfo) Owner() int {
|
||||
func (e basicFileInfo) Group() int {
|
||||
return -1
|
||||
}
|
||||
|
||||
// osFileInfo converts e to os.FileInfo that is suitable
|
||||
// to be passed to os.SameFile.
|
||||
func (e *basicFileInfo) osFileInfo() os.FileInfo {
|
||||
fi := e.FileInfo
|
||||
if fi, ok := fi.(*dirJunctFileInfo); ok {
|
||||
return fi.FileInfo
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
@@ -577,3 +577,15 @@ func TestBasicWalkSkipSymlink(t *testing.T) {
|
||||
defer os.RemoveAll(dir)
|
||||
testWalkSkipSymlink(t, FilesystemTypeBasic, dir)
|
||||
}
|
||||
|
||||
func TestWalkTraverseDirJunct(t *testing.T) {
|
||||
_, dir := setup(t)
|
||||
defer os.RemoveAll(dir)
|
||||
testWalkTraverseDirJunct(t, FilesystemTypeBasic, dir)
|
||||
}
|
||||
|
||||
func TestWalkInfiniteRecursion(t *testing.T) {
|
||||
_, dir := setup(t)
|
||||
defer os.RemoveAll(dir)
|
||||
testWalkInfiniteRecursion(t, FilesystemTypeBasic, dir)
|
||||
}
|
||||
|
||||
@@ -171,6 +171,11 @@ func TestWatchWinRoot(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -179,6 +184,7 @@ func TestWatchWinRoot(t *testing.T) {
|
||||
cancel()
|
||||
}()
|
||||
fs.watchLoop(ctx, ".", roots, backendChan, outChan, errChan, fakeMatcher{})
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// filepath.Dir as watch has a /... suffix
|
||||
@@ -214,12 +220,19 @@ func expectErrorForPath(t *testing.T, path string) {
|
||||
errChan := make(chan error)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// testFs is Filesystem, but we need BasicFilesystem here
|
||||
fs := newBasicFilesystem(testDirAbs)
|
||||
|
||||
go fs.watchLoop(ctx, ".", []string{testDirAbs}, backendChan, outChan, errChan, fakeMatcher{})
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
fs.watchLoop(ctx, ".", []string{testDirAbs}, backendChan, outChan, errChan, fakeMatcher{})
|
||||
close(done)
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
backendChan <- fakeEventInfo(path)
|
||||
|
||||
@@ -244,7 +257,15 @@ func TestWatchSubpath(t *testing.T) {
|
||||
fs := newBasicFilesystem(testDirAbs)
|
||||
|
||||
abs, _ := fs.rooted("sub")
|
||||
go fs.watchLoop(ctx, "sub", []string{testDirAbs}, backendChan, outChan, errChan, fakeMatcher{})
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
fs.watchLoop(ctx, "sub", []string{testDirAbs}, backendChan, outChan, errChan, fakeMatcher{})
|
||||
close(done)
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
backendChan <- fakeEventInfo(filepath.Join(abs, "file"))
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build !linux,!android
|
||||
// +build !linux,!android,!windows
|
||||
|
||||
package fs
|
||||
|
||||
|
||||
82
lib/fs/lstat_windows.go
Normal file
82
lib/fs/lstat_windows.go
Normal file
@@ -0,0 +1,82 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func isDirectoryJunction(path string) (bool, error) {
|
||||
namep, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("syscall.UTF16PtrFromString failed with: %s", err)
|
||||
}
|
||||
attrs := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS | syscall.FILE_FLAG_OPEN_REPARSE_POINT)
|
||||
h, err := syscall.CreateFile(namep, 0, 0, nil, syscall.OPEN_EXISTING, attrs, 0)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("syscall.CreateFile failed with: %s", err)
|
||||
}
|
||||
defer syscall.CloseHandle(h)
|
||||
|
||||
//https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_attribute_tag_info
|
||||
const fileAttributeTagInfo = 9
|
||||
type FILE_ATTRIBUTE_TAG_INFO struct {
|
||||
FileAttributes uint32
|
||||
ReparseTag uint32
|
||||
}
|
||||
|
||||
var ti FILE_ATTRIBUTE_TAG_INFO
|
||||
err = windows.GetFileInformationByHandleEx(windows.Handle(h), fileAttributeTagInfo, (*byte)(unsafe.Pointer(&ti)), uint32(unsafe.Sizeof(ti)))
|
||||
if err != nil {
|
||||
if errno, ok := err.(syscall.Errno); ok && errno == windows.ERROR_INVALID_PARAMETER {
|
||||
// It appears calling GetFileInformationByHandleEx with
|
||||
// FILE_ATTRIBUTE_TAG_INFO fails on FAT file system with
|
||||
// ERROR_INVALID_PARAMETER. Clear ti.ReparseTag in that
|
||||
// instance to indicate no symlinks are possible.
|
||||
ti.ReparseTag = 0
|
||||
} else {
|
||||
return false, fmt.Errorf("windows.GetFileInformationByHandleEx failed with: %s", err)
|
||||
}
|
||||
}
|
||||
return ti.ReparseTag == windows.IO_REPARSE_TAG_MOUNT_POINT, nil
|
||||
}
|
||||
|
||||
type dirJunctFileInfo struct {
|
||||
os.FileInfo
|
||||
}
|
||||
|
||||
func (fi *dirJunctFileInfo) Mode() os.FileMode {
|
||||
// Simulate a directory and not a symlink; also set the execute
|
||||
// bits so the directory can be traversed Unix-side.
|
||||
return fi.FileInfo.Mode() ^ os.ModeSymlink | os.ModeDir | 0111
|
||||
}
|
||||
|
||||
func (fi *dirJunctFileInfo) IsDir() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func underlyingLstat(name string) (os.FileInfo, error) {
|
||||
var fi, err = os.Lstat(name)
|
||||
|
||||
// NTFS directory junctions are treated as ordinary directories,
|
||||
// see https://forum.syncthing.net/t/option-to-follow-directory-junctions-symbolic-links/14750
|
||||
if err == nil && fi.Mode()&os.ModeSymlink != 0 {
|
||||
var isJunct bool
|
||||
isJunct, err = isDirectoryJunction(name)
|
||||
if err == nil && isJunct {
|
||||
return &dirJunctFileInfo{fi}, nil
|
||||
}
|
||||
}
|
||||
return fi, err
|
||||
}
|
||||
@@ -10,7 +10,37 @@
|
||||
|
||||
package fs
|
||||
|
||||
import "path/filepath"
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type ancestorDirList struct {
|
||||
list []FileInfo
|
||||
fs Filesystem
|
||||
}
|
||||
|
||||
func (ancestors *ancestorDirList) Push(info FileInfo) {
|
||||
l.Debugf("ancestorDirList: Push '%s'", info.Name())
|
||||
ancestors.list = append(ancestors.list, info)
|
||||
}
|
||||
|
||||
func (ancestors *ancestorDirList) Pop() FileInfo {
|
||||
aLen := len(ancestors.list)
|
||||
info := ancestors.list[aLen-1]
|
||||
l.Debugf("ancestorDirList: Pop '%s'", info.Name())
|
||||
ancestors.list = ancestors.list[:aLen-1]
|
||||
return info
|
||||
}
|
||||
|
||||
func (ancestors *ancestorDirList) Contains(info FileInfo) bool {
|
||||
l.Debugf("ancestorDirList: Contains '%s'", info.Name())
|
||||
for _, ancestor := range ancestors.list {
|
||||
if ancestors.fs.SameFile(info, ancestor) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WalkFunc is the type of the function called for each file or directory
|
||||
// visited by Walk. The path argument contains the argument to Walk as a
|
||||
@@ -37,7 +67,8 @@ func NewWalkFilesystem(next Filesystem) Filesystem {
|
||||
}
|
||||
|
||||
// walk recursively descends path, calling walkFn.
|
||||
func (f *walkFilesystem) walk(path string, info FileInfo, walkFn WalkFunc) error {
|
||||
func (f *walkFilesystem) walk(path string, info FileInfo, walkFn WalkFunc, ancestors *ancestorDirList) error {
|
||||
l.Debugf("walk: path=%s", path)
|
||||
path, err := Canonicalize(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -55,6 +86,14 @@ func (f *walkFilesystem) walk(path string, info FileInfo, walkFn WalkFunc) error
|
||||
return nil
|
||||
}
|
||||
|
||||
if !ancestors.Contains(info) {
|
||||
ancestors.Push(info)
|
||||
defer ancestors.Pop()
|
||||
} else {
|
||||
l.Warnf("Infinite filesystem recursion detected on path '%s', not walking further down", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
names, err := f.DirNames(path)
|
||||
if err != nil {
|
||||
return walkFn(path, info, err)
|
||||
@@ -68,7 +107,7 @@ func (f *walkFilesystem) walk(path string, info FileInfo, walkFn WalkFunc) error
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = f.walk(filename, fileInfo, walkFn)
|
||||
err = f.walk(filename, fileInfo, walkFn, ancestors)
|
||||
if err != nil {
|
||||
if !fileInfo.IsDir() || err != SkipDir {
|
||||
return err
|
||||
@@ -90,5 +129,6 @@ func (f *walkFilesystem) Walk(root string, walkFn WalkFunc) error {
|
||||
if err != nil {
|
||||
return walkFn(root, nil, err)
|
||||
}
|
||||
return f.walk(root, info, walkFn)
|
||||
ancestors := &ancestorDirList{fs: f.Filesystem}
|
||||
return f.walk(root, info, walkFn, ancestors)
|
||||
}
|
||||
|
||||
@@ -7,13 +7,16 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
osexec "os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testWalkSkipSymlink(t *testing.T, fsType FilesystemType, uri string) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Symlinks on windows")
|
||||
t.Skip("Symlinks skipping is not tested on windows")
|
||||
}
|
||||
|
||||
fs := NewFilesystem(fsType, uri)
|
||||
@@ -39,3 +42,83 @@ func testWalkSkipSymlink(t *testing.T, fsType FilesystemType, uri string) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func createDirJunct(target string, name string) error {
|
||||
output, err := osexec.Command("cmd", "/c", "mklink", "/J", name, target).CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to run mklink %v %v: %v %q", name, target, err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func testWalkTraverseDirJunct(t *testing.T, fsType FilesystemType, uri string) {
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("Directory junctions are available and tested on windows only")
|
||||
}
|
||||
|
||||
fs := NewFilesystem(fsType, uri)
|
||||
|
||||
if err := fs.MkdirAll("target/foo", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := fs.Mkdir("towalk", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := createDirJunct(filepath.Join(uri, "target"), filepath.Join(uri, "towalk/dirjunct")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
traversed := false
|
||||
if err := fs.Walk("towalk", func(path string, info FileInfo, err error) error {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if info.Name() == "foo" {
|
||||
traversed = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !traversed {
|
||||
t.Fatal("Directory junction was not traversed")
|
||||
}
|
||||
}
|
||||
|
||||
func testWalkInfiniteRecursion(t *testing.T, fsType FilesystemType, uri string) {
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("Infinite recursion detection is tested on windows only")
|
||||
}
|
||||
|
||||
fs := NewFilesystem(fsType, uri)
|
||||
|
||||
if err := fs.MkdirAll("target/foo", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := fs.Mkdir("towalk", 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := createDirJunct(filepath.Join(uri, "target"), filepath.Join(uri, "towalk/dirjunct")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := createDirJunct(filepath.Join(uri, "towalk"), filepath.Join(uri, "target/foo/recurse")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dirjunctCnt := 0
|
||||
fooCnt := 0
|
||||
if err := fs.Walk("towalk", func(path string, info FileInfo, err error) error {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if info.Name() == "dirjunct" {
|
||||
dirjunctCnt++
|
||||
} else if info.Name() == "foo" {
|
||||
fooCnt++
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dirjunctCnt != 2 || fooCnt != 1 {
|
||||
t.Fatal("Infinite recursion not detected correctly")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,12 +44,20 @@ const (
|
||||
DataBaseDir BaseDirEnum = "data"
|
||||
// User's home directory, *not* -home flag
|
||||
UserHomeBaseDir BaseDirEnum = "userHome"
|
||||
|
||||
LevelDBDir = "index-v0.14.0.db"
|
||||
BadgerDir = "indexdb.badger"
|
||||
)
|
||||
|
||||
// Platform dependent directories
|
||||
var baseDirs = make(map[BaseDirEnum]string, 3)
|
||||
|
||||
func init() {
|
||||
if os.Getenv("USE_BADGER") != "" {
|
||||
// XXX: Replace the leveldb name with the badger name.
|
||||
locationTemplates[Database] = strings.Replace(locationTemplates[Database], LevelDBDir, BadgerDir, 1)
|
||||
}
|
||||
|
||||
userHome := userHomeDir()
|
||||
config := defaultConfigDir(userHome)
|
||||
baseDirs[UserHomeBaseDir] = userHome
|
||||
@@ -80,8 +88,6 @@ func GetBaseDir(baseDir BaseDirEnum) string {
|
||||
return baseDirs[baseDir]
|
||||
}
|
||||
|
||||
var databaseDirname = "index-v0.14.0.db"
|
||||
|
||||
// Use the variables from baseDirs here
|
||||
var locationTemplates = map[LocationEnum]string{
|
||||
ConfigFile: "${config}/config.xml",
|
||||
@@ -89,7 +95,7 @@ var locationTemplates = map[LocationEnum]string{
|
||||
KeyFile: "${config}/key.pem",
|
||||
HTTPSCertFile: "${config}/https-cert.pem",
|
||||
HTTPSKeyFile: "${config}/https-key.pem",
|
||||
Database: "${data}/" + databaseDirname,
|
||||
Database: "${data}/" + LevelDBDir,
|
||||
LogFile: "${data}/syncthing.log", // -logfile on Windows
|
||||
CsrfTokens: "${data}/csrftokens.txt",
|
||||
PanicLog: "${data}/panic-${timestamp}.log",
|
||||
@@ -150,7 +156,14 @@ func defaultDataDir(userHome, config string) string {
|
||||
|
||||
default:
|
||||
// If a database exists at the "normal" location, use that anyway.
|
||||
if _, err := os.Lstat(filepath.Join(config, databaseDirname)); err == nil {
|
||||
// We look for both LevelDB and Badger variants here regardless of
|
||||
// what we're currently configured to use, because we might be
|
||||
// starting up in Badger mode with only a LevelDB database present
|
||||
// (will be converted), or vice versa.
|
||||
if _, err := os.Lstat(filepath.Join(config, LevelDBDir)); err == nil {
|
||||
return config
|
||||
}
|
||||
if _, err := os.Lstat(filepath.Join(config, BadgerDir)); err == nil {
|
||||
return config
|
||||
}
|
||||
// Always use this env var, as it's explicitly set by the user
|
||||
|
||||
@@ -148,7 +148,10 @@ func (f *folder) serve(ctx context.Context) {
|
||||
f.pull()
|
||||
|
||||
case <-f.pullFailTimer.C:
|
||||
f.pull()
|
||||
if !f.pull() && f.pullPause < 60*f.pullBasePause() {
|
||||
// Back off from retrying to pull
|
||||
f.pullPause *= 2
|
||||
}
|
||||
|
||||
case <-initialCompleted:
|
||||
// Initial scan has completed, we should do a pull
|
||||
@@ -276,7 +279,7 @@ func (f *folder) getHealthErrorWithoutIgnores() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *folder) pull() bool {
|
||||
func (f *folder) pull() (success bool) {
|
||||
f.pullFailTimer.Stop()
|
||||
select {
|
||||
case <-f.pullFailTimer.C:
|
||||
@@ -290,10 +293,17 @@ func (f *folder) pull() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if success {
|
||||
// We're good, reset the pause interval.
|
||||
f.pullPause = f.pullBasePause()
|
||||
}
|
||||
}()
|
||||
|
||||
// If there is nothing to do, don't even enter sync-waiting state.
|
||||
abort := true
|
||||
snap := f.fset.Snapshot()
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
|
||||
abort = false
|
||||
return false
|
||||
})
|
||||
@@ -312,24 +322,16 @@ func (f *folder) pull() bool {
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
success := f.puller.pull()
|
||||
|
||||
basePause := f.pullBasePause()
|
||||
success = f.puller.pull()
|
||||
|
||||
if success {
|
||||
// We're good. Don't schedule another pull and reset
|
||||
// the pause interval.
|
||||
f.pullPause = basePause
|
||||
return true
|
||||
}
|
||||
|
||||
// Pulling failed, try again later.
|
||||
delay := f.pullPause + time.Since(startTime)
|
||||
l.Infof("Folder %v isn't making sync progress - retrying in %v.", f.Description(), delay)
|
||||
l.Infof("Folder %v isn't making sync progress - retrying in %v.", f.Description(), delay.Truncate(time.Second))
|
||||
f.pullFailTimer.Reset(delay)
|
||||
if f.pullPause < 60*basePause {
|
||||
f.pullPause *= 2
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -497,7 +499,7 @@ func (f *folder) scanSubdirs(subDirs []string) error {
|
||||
for _, sub := range subDirs {
|
||||
var iterError error
|
||||
|
||||
snap.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
|
||||
snap.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi protocol.FileIntf) bool {
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
return false
|
||||
@@ -632,7 +634,7 @@ func (f *folder) findRename(snap *db.Snapshot, mtimefs fs.Filesystem, file proto
|
||||
found := false
|
||||
nf := protocol.FileInfo{}
|
||||
|
||||
snap.WithBlocksHash(file.BlocksHash, func(ifi db.FileIntf) bool {
|
||||
snap.WithBlocksHash(file.BlocksHash, func(ifi protocol.FileIntf) bool {
|
||||
fi := ifi.(protocol.FileInfo)
|
||||
|
||||
select {
|
||||
|
||||
@@ -87,7 +87,7 @@ func (f *receiveOnlyFolder) revert() {
|
||||
batchSizeBytes := 0
|
||||
snap := f.fset.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithHave(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
|
||||
snap.WithHave(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
|
||||
fi := intf.(protocol.FileInfo)
|
||||
if !fi.IsReceiveOnlyChanged() {
|
||||
// We're only interested in files that have changed locally in
|
||||
|
||||
@@ -52,7 +52,7 @@ func (f *sendOnlyFolder) pull() bool {
|
||||
|
||||
snap := f.fset.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
|
||||
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
|
||||
f.updateLocalsFromPulling(batch)
|
||||
batch = batch[:0]
|
||||
@@ -110,7 +110,7 @@ func (f *sendOnlyFolder) override() {
|
||||
batchSizeBytes := 0
|
||||
snap := f.fset.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
|
||||
need := fi.(protocol.FileInfo)
|
||||
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
|
||||
f.updateLocalsFromScanning(batch)
|
||||
|
||||
@@ -61,17 +61,18 @@ type copyBlocksState struct {
|
||||
const retainBits = fs.ModeSetgid | fs.ModeSetuid | fs.ModeSticky
|
||||
|
||||
var (
|
||||
activity = newDeviceActivity()
|
||||
errNoDevice = errors.New("peers who had this file went away, or the file has changed while syncing. will retry later")
|
||||
errDirPrefix = "directory has been deleted on a remote device but "
|
||||
errDirHasToBeScanned = errors.New(errDirPrefix + "contains unexpected files, scheduling scan")
|
||||
errDirHasIgnored = errors.New(errDirPrefix + "contains ignored files (see ignore documentation for (?d) prefix)")
|
||||
errDirNotEmpty = errors.New(errDirPrefix + "is not empty; the contents are probably ignored on that remote device, but not locally")
|
||||
errNotAvailable = errors.New("no connected device has the required version of this file")
|
||||
errModified = errors.New("file modified but not rescanned; will try again later")
|
||||
errUnexpectedDirOnFileDel = errors.New("encountered directory when trying to remove file/symlink")
|
||||
errIncompatibleSymlink = errors.New("incompatible symlink entry; rescan with newer Syncthing on source")
|
||||
contextRemovingOldItem = "removing item to be replaced"
|
||||
activity = newDeviceActivity()
|
||||
errNoDevice = errors.New("peers who had this file went away, or the file has changed while syncing. will retry later")
|
||||
errDirPrefix = "directory has been deleted on a remote device but "
|
||||
errDirHasToBeScanned = errors.New(errDirPrefix + "contains changed files, scheduling scan")
|
||||
errDirHasIgnored = errors.New(errDirPrefix + "contains ignored files (see ignore documentation for (?d) prefix)")
|
||||
errDirHasReceiveOnlyChanged = errors.New(errDirPrefix + "contains locally changed files")
|
||||
errDirNotEmpty = errors.New(errDirPrefix + "is not empty; the contents are probably ignored on that remote device, but not locally")
|
||||
errNotAvailable = errors.New("no connected device has the required version of this file")
|
||||
errModified = errors.New("file modified but not rescanned; will try again later")
|
||||
errUnexpectedDirOnFileDel = errors.New("encountered directory when trying to remove file/symlink")
|
||||
errIncompatibleSymlink = errors.New("incompatible symlink entry; rescan with newer Syncthing on source")
|
||||
contextRemovingOldItem = "removing item to be replaced"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -304,7 +305,7 @@ func (f *sendReceiveFolder) processNeeded(snap *db.Snapshot, dbUpdateChan chan<-
|
||||
// Regular files to pull goes into the file queue, everything else
|
||||
// (directories, symlinks and deletes) goes into the "process directly"
|
||||
// pile.
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
|
||||
snap.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
|
||||
select {
|
||||
case <-f.ctx.Done():
|
||||
return false
|
||||
@@ -460,18 +461,13 @@ nextFile:
|
||||
// Check our list of files to be removed for a match, in which case
|
||||
// we can just do a rename instead.
|
||||
key := string(fi.BlocksHash)
|
||||
for i, candidate := range buckets[key] {
|
||||
// Remove the candidate from the bucket
|
||||
lidx := len(buckets[key]) - 1
|
||||
buckets[key][i] = buckets[key][lidx]
|
||||
buckets[key] = buckets[key][:lidx]
|
||||
|
||||
for candidate, ok := popCandidate(buckets, key); ok; candidate, ok = popCandidate(buckets, key) {
|
||||
// candidate is our current state of the file, where as the
|
||||
// desired state with the delete bit set is in the deletion
|
||||
// map.
|
||||
desired := fileDeletions[candidate.Name]
|
||||
if err := f.renameFile(candidate, desired, fi, snap, dbUpdateChan, scanChan); err != nil {
|
||||
l.Debugln("rename shortcut for %s failed: %S", fi.Name, err.Error())
|
||||
l.Debugf("rename shortcut for %s failed: %s", fi.Name, err.Error())
|
||||
// Failed to rename, try next one.
|
||||
continue
|
||||
}
|
||||
@@ -498,6 +494,16 @@ nextFile:
|
||||
return changed, fileDeletions, dirDeletions, nil
|
||||
}
|
||||
|
||||
func popCandidate(buckets map[string][]protocol.FileInfo, key string) (protocol.FileInfo, bool) {
|
||||
cands := buckets[key]
|
||||
if len(cands) == 0 {
|
||||
return protocol.FileInfo{}, false
|
||||
}
|
||||
|
||||
buckets[key] = cands[1:]
|
||||
return cands[0], true
|
||||
}
|
||||
|
||||
func (f *sendReceiveFolder) processDeletions(fileDeletions map[string]protocol.FileInfo, dirDeletions []protocol.FileInfo, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
|
||||
for _, file := range fileDeletions {
|
||||
select {
|
||||
@@ -1814,31 +1820,52 @@ func (f *sendReceiveFolder) deleteDirOnDisk(dir string, snap *db.Snapshot, scanC
|
||||
|
||||
toBeDeleted := make([]string, 0, len(files))
|
||||
|
||||
hasIgnored := false
|
||||
hasKnown := false
|
||||
hasToBeScanned := false
|
||||
var hasIgnored, hasKnown, hasToBeScanned, hasReceiveOnlyChanged bool
|
||||
|
||||
for _, dirFile := range files {
|
||||
fullDirFile := filepath.Join(dir, dirFile)
|
||||
if fs.IsTemporary(dirFile) || f.ignores.Match(fullDirFile).IsDeletable() {
|
||||
switch {
|
||||
case fs.IsTemporary(dirFile) || f.ignores.Match(fullDirFile).IsDeletable():
|
||||
toBeDeleted = append(toBeDeleted, fullDirFile)
|
||||
} else if f.ignores != nil && f.ignores.Match(fullDirFile).IsIgnored() {
|
||||
continue
|
||||
case f.ignores != nil && f.ignores.Match(fullDirFile).IsIgnored():
|
||||
hasIgnored = true
|
||||
} else if cf, ok := snap.Get(protocol.LocalDeviceID, fullDirFile); !ok || cf.IsDeleted() || cf.IsInvalid() {
|
||||
// Something appeared in the dir that we either are not aware of
|
||||
// at all, that we think should be deleted or that is invalid,
|
||||
// but not currently ignored -> schedule scan. The scanChan
|
||||
// might be nil, in which case we trust the scanning to be
|
||||
// handled later as a result of our error return.
|
||||
if scanChan != nil {
|
||||
scanChan <- fullDirFile
|
||||
}
|
||||
hasToBeScanned = true
|
||||
} else {
|
||||
// Dir contains file that is valid according to db and
|
||||
// not ignored -> something weird is going on
|
||||
hasKnown = true
|
||||
continue
|
||||
}
|
||||
cf, ok := snap.Get(protocol.LocalDeviceID, fullDirFile)
|
||||
switch {
|
||||
case !ok || cf.IsDeleted():
|
||||
// Something appeared in the dir that we either are not
|
||||
// aware of at all or that we think should be deleted
|
||||
// -> schedule scan.
|
||||
scanChan <- fullDirFile
|
||||
hasToBeScanned = true
|
||||
continue
|
||||
case ok && f.Type == config.FolderTypeReceiveOnly && cf.IsReceiveOnlyChanged():
|
||||
hasReceiveOnlyChanged = true
|
||||
continue
|
||||
}
|
||||
info, err := f.fs.Lstat(fullDirFile)
|
||||
var diskFile protocol.FileInfo
|
||||
if err == nil {
|
||||
diskFile, err = scanner.CreateFileInfo(info, fullDirFile, f.fs)
|
||||
}
|
||||
if err != nil {
|
||||
// Lets just assume the file has changed.
|
||||
scanChan <- fullDirFile
|
||||
hasToBeScanned = true
|
||||
continue
|
||||
}
|
||||
if !cf.IsEquivalentOptional(diskFile, f.ModTimeWindow(), f.IgnorePerms, true, protocol.LocalAllFlags) {
|
||||
// File on disk changed compared to what we have in db
|
||||
// -> schedule scan.
|
||||
scanChan <- fullDirFile
|
||||
hasToBeScanned = true
|
||||
continue
|
||||
}
|
||||
// Dir contains file that is valid according to db and
|
||||
// not ignored -> something weird is going on
|
||||
hasKnown = true
|
||||
}
|
||||
|
||||
if hasToBeScanned {
|
||||
@@ -1847,6 +1874,9 @@ func (f *sendReceiveFolder) deleteDirOnDisk(dir string, snap *db.Snapshot, scanC
|
||||
if hasIgnored {
|
||||
return errDirHasIgnored
|
||||
}
|
||||
if hasReceiveOnlyChanged {
|
||||
return errDirHasReceiveOnlyChanged
|
||||
}
|
||||
if hasKnown {
|
||||
return errDirNotEmpty
|
||||
}
|
||||
|
||||
@@ -818,9 +818,14 @@ func TestCopyOwner(t *testing.T) {
|
||||
// owner/group.
|
||||
|
||||
dbUpdateChan := make(chan dbUpdateJob, 1)
|
||||
scanChan := make(chan string)
|
||||
defer close(dbUpdateChan)
|
||||
f.handleDir(dir, f.fset.Snapshot(), dbUpdateChan, nil)
|
||||
<-dbUpdateChan // empty the channel for later
|
||||
f.handleDir(dir, f.fset.Snapshot(), dbUpdateChan, scanChan)
|
||||
select {
|
||||
case <-dbUpdateChan: // empty the channel for later
|
||||
case toScan := <-scanChan:
|
||||
t.Fatal("Unexpected receive on scanChan:", toScan)
|
||||
}
|
||||
|
||||
info, err := f.fs.Lstat("foo/bar")
|
||||
if err != nil {
|
||||
@@ -874,8 +879,12 @@ func TestCopyOwner(t *testing.T) {
|
||||
SymlinkTarget: "over the rainbow",
|
||||
}
|
||||
|
||||
f.handleSymlink(symlink, snap, dbUpdateChan, nil)
|
||||
<-dbUpdateChan
|
||||
f.handleSymlink(symlink, snap, dbUpdateChan, scanChan)
|
||||
select {
|
||||
case <-dbUpdateChan:
|
||||
case toScan := <-scanChan:
|
||||
t.Fatal("Unexpected receive on scanChan:", toScan)
|
||||
}
|
||||
|
||||
info, err = f.fs.Lstat("foo/bar/sym")
|
||||
if err != nil {
|
||||
|
||||
@@ -164,6 +164,7 @@ var (
|
||||
errDeviceUnknown = errors.New("unknown device")
|
||||
errDevicePaused = errors.New("device is paused")
|
||||
errDeviceIgnored = errors.New("device is ignored")
|
||||
errDeviceRemoved = errors.New("device has been removed")
|
||||
ErrFolderPaused = errors.New("folder is paused")
|
||||
errFolderNotRunning = errors.New("folder is not running")
|
||||
errFolderMissing = errors.New("no such folder")
|
||||
@@ -850,7 +851,7 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
|
||||
}
|
||||
|
||||
rest = make([]db.FileInfoTruncated, 0, perpage)
|
||||
snap.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
|
||||
snap.WithNeedTruncated(protocol.LocalDeviceID, func(f protocol.FileIntf) bool {
|
||||
if cfg.IgnoreDelete && f.IsDeleted() {
|
||||
return true
|
||||
}
|
||||
@@ -924,6 +925,7 @@ func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []prot
|
||||
// The local attributes should never be transmitted over the wire.
|
||||
// Make sure they look like they weren't.
|
||||
fs[i].LocalFlags = 0
|
||||
fs[i].VersionHash = nil
|
||||
}
|
||||
files.Update(deviceID, fs)
|
||||
|
||||
@@ -954,12 +956,27 @@ func (m *model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
|
||||
}
|
||||
|
||||
changed := false
|
||||
deviceCfg := m.cfg.Devices()[deviceID]
|
||||
deviceCfg, ok := m.cfg.Devices()[deviceID]
|
||||
if !ok {
|
||||
l.Debugln("Device disappeared from config while processing cluster-config")
|
||||
return errDeviceUnknown
|
||||
}
|
||||
|
||||
// Needs to happen outside of the fmut, as can cause CommitConfiguration
|
||||
if deviceCfg.AutoAcceptFolders {
|
||||
changedFolders := make([]config.FolderConfiguration, 0, len(cm.Folders))
|
||||
for _, folder := range cm.Folders {
|
||||
changed = m.handleAutoAccepts(deviceCfg, folder) || changed
|
||||
if fcfg, fchanged := m.handleAutoAccepts(deviceCfg, folder); fchanged {
|
||||
changedFolders = append(changedFolders, fcfg)
|
||||
}
|
||||
}
|
||||
if len(changedFolders) > 0 {
|
||||
// Need to wait for the waiter, as this calls CommitConfiguration,
|
||||
// which sets up the folder and as we return from this call,
|
||||
// ClusterConfig starts poking at m.folderFiles and other things
|
||||
// that might not exist until the config is committed.
|
||||
w, _ := m.cfg.SetFolders(changedFolders)
|
||||
w.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1240,7 +1257,7 @@ func (m *model) handleDeintroductions(introducerCfg config.DeviceConfiguration,
|
||||
|
||||
// handleAutoAccepts handles adding and sharing folders for devices that have
|
||||
// AutoAcceptFolders set to true.
|
||||
func (m *model) handleAutoAccepts(deviceCfg config.DeviceConfiguration, folder protocol.Folder) bool {
|
||||
func (m *model) handleAutoAccepts(deviceCfg config.DeviceConfiguration, folder protocol.Folder) (config.FolderConfiguration, bool) {
|
||||
if cfg, ok := m.cfg.Folder(folder.ID); !ok {
|
||||
defaultPath := m.cfg.Options().DefaultFolderPath
|
||||
defaultPathFs := fs.NewFilesystem(fs.FilesystemTypeBasic, defaultPath)
|
||||
@@ -1257,32 +1274,24 @@ func (m *model) handleAutoAccepts(deviceCfg config.DeviceConfiguration, folder p
|
||||
fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{
|
||||
DeviceID: deviceCfg.DeviceID,
|
||||
})
|
||||
// Need to wait for the waiter, as this calls CommitConfiguration,
|
||||
// which sets up the folder and as we return from this call,
|
||||
// ClusterConfig starts poking at m.folderFiles and other things
|
||||
// that might not exist until the config is committed.
|
||||
w, _ := m.cfg.SetFolder(fcfg)
|
||||
w.Wait()
|
||||
|
||||
l.Infof("Auto-accepted %s folder %s at path %s", deviceCfg.DeviceID, folder.Description(), fcfg.Path)
|
||||
return true
|
||||
return fcfg, true
|
||||
}
|
||||
l.Infof("Failed to auto-accept folder %s from %s due to path conflict", folder.Description(), deviceCfg.DeviceID)
|
||||
return false
|
||||
return config.FolderConfiguration{}, false
|
||||
} else {
|
||||
for _, device := range cfg.DeviceIDs() {
|
||||
if device == deviceCfg.DeviceID {
|
||||
// Already shared nothing todo.
|
||||
return false
|
||||
return config.FolderConfiguration{}, false
|
||||
}
|
||||
}
|
||||
cfg.Devices = append(cfg.Devices, config.FolderDeviceConfiguration{
|
||||
DeviceID: deviceCfg.DeviceID,
|
||||
})
|
||||
w, _ := m.cfg.SetFolder(cfg)
|
||||
w.Wait()
|
||||
l.Infof("Shared %s with %s due to auto-accept", folder.ID, deviceCfg.DeviceID)
|
||||
return true
|
||||
return cfg, true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1927,7 +1936,7 @@ func (s *indexSender) sendIndexTo(ctx context.Context) error {
|
||||
snap := s.fset.Snapshot()
|
||||
defer snap.Release()
|
||||
previousWasDelete := false
|
||||
snap.WithHaveSequence(s.prevSequence+1, func(fi db.FileIntf) bool {
|
||||
snap.WithHaveSequence(s.prevSequence+1, func(fi protocol.FileIntf) bool {
|
||||
// This is to make sure that renames (which is an add followed by a delete) land in the same batch.
|
||||
// Even if the batch is full, we allow a last delete to slip in, we do this by making sure that
|
||||
// the batch ends with a non-delete, or that the last item in the batch is already a delete
|
||||
@@ -1968,7 +1977,10 @@ func (s *indexSender) sendIndexTo(ctx context.Context) error {
|
||||
if f.IsReceiveOnlyChanged() {
|
||||
f.Version = protocol.Vector{}
|
||||
}
|
||||
f.LocalFlags = 0 // never sent externally
|
||||
|
||||
// never sent externally
|
||||
f.LocalFlags = 0
|
||||
f.VersionHash = nil
|
||||
|
||||
previousWasDelete = f.IsDeleted()
|
||||
|
||||
@@ -2236,7 +2248,7 @@ func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly
|
||||
|
||||
snap := files.Snapshot()
|
||||
defer snap.Release()
|
||||
snap.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
|
||||
snap.WithPrefixedGlobalTruncated(prefix, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(db.FileInfoTruncated)
|
||||
|
||||
// Don't include the prefix itself.
|
||||
@@ -2478,11 +2490,14 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
|
||||
m.evLogger.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
|
||||
}
|
||||
}
|
||||
removedDevices := make([]protocol.DeviceID, 0, len(fromDevices))
|
||||
m.fmut.Lock()
|
||||
for deviceID := range fromDevices {
|
||||
delete(m.deviceStatRefs, deviceID)
|
||||
removedDevices = append(removedDevices, deviceID)
|
||||
}
|
||||
m.fmut.Unlock()
|
||||
m.closeConns(removedDevices, errDeviceRemoved)
|
||||
|
||||
m.globalRequestLimiter.setCapacity(1024 * to.Options.MaxConcurrentIncomingRequestKiB())
|
||||
m.folderIOLimiter.setCapacity(to.Options.MaxFolderConcurrency())
|
||||
|
||||
@@ -2426,7 +2426,7 @@ func TestIssue3496(t *testing.T) {
|
||||
m.fmut.RUnlock()
|
||||
var localFiles []protocol.FileInfo
|
||||
snap := fs.Snapshot()
|
||||
snap.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool {
|
||||
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
||||
localFiles = append(localFiles, i.(protocol.FileInfo))
|
||||
return true
|
||||
})
|
||||
@@ -3556,7 +3556,7 @@ func TestRenameSequenceOrder(t *testing.T) {
|
||||
|
||||
count := 0
|
||||
snap := dbSnapshot(t, m, "default")
|
||||
snap.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool {
|
||||
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -3588,7 +3588,7 @@ func TestRenameSequenceOrder(t *testing.T) {
|
||||
var firstExpectedSequence int64
|
||||
var secondExpectedSequence int64
|
||||
failed := false
|
||||
snap.WithHaveSequence(0, func(i db.FileIntf) bool {
|
||||
snap.WithHaveSequence(0, func(i protocol.FileIntf) bool {
|
||||
t.Log(i)
|
||||
if i.FileName() == "17" {
|
||||
firstExpectedSequence = i.SequenceNo() + 1
|
||||
@@ -3621,7 +3621,7 @@ func TestRenameSameFile(t *testing.T) {
|
||||
|
||||
count := 0
|
||||
snap := dbSnapshot(t, m, "default")
|
||||
snap.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool {
|
||||
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -3644,7 +3644,7 @@ func TestRenameSameFile(t *testing.T) {
|
||||
|
||||
prevSeq := int64(0)
|
||||
seen := false
|
||||
snap.WithHaveSequence(0, func(i db.FileIntf) bool {
|
||||
snap.WithHaveSequence(0, func(i protocol.FileIntf) bool {
|
||||
if i.SequenceNo() <= prevSeq {
|
||||
t.Fatalf("non-increasing sequences: %d <= %d", i.SequenceNo(), prevSeq)
|
||||
}
|
||||
@@ -3683,7 +3683,7 @@ func TestRenameEmptyFile(t *testing.T) {
|
||||
}
|
||||
|
||||
count := 0
|
||||
snap.WithBlocksHash(empty.BlocksHash, func(_ db.FileIntf) bool {
|
||||
snap.WithBlocksHash(empty.BlocksHash, func(_ protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -3693,7 +3693,7 @@ func TestRenameEmptyFile(t *testing.T) {
|
||||
}
|
||||
|
||||
count = 0
|
||||
snap.WithBlocksHash(file.BlocksHash, func(_ db.FileIntf) bool {
|
||||
snap.WithBlocksHash(file.BlocksHash, func(_ protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -3712,7 +3712,7 @@ func TestRenameEmptyFile(t *testing.T) {
|
||||
defer snap.Release()
|
||||
|
||||
count = 0
|
||||
snap.WithBlocksHash(empty.BlocksHash, func(_ db.FileIntf) bool {
|
||||
snap.WithBlocksHash(empty.BlocksHash, func(_ protocol.FileIntf) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
@@ -3722,7 +3722,7 @@ func TestRenameEmptyFile(t *testing.T) {
|
||||
}
|
||||
|
||||
count = 0
|
||||
snap.WithBlocksHash(file.BlocksHash, func(i db.FileIntf) bool {
|
||||
snap.WithBlocksHash(file.BlocksHash, func(i protocol.FileIntf) bool {
|
||||
count++
|
||||
if i.FileName() != "new-file" {
|
||||
t.Fatalf("unexpected file name %s, expected new-file", i.FileName())
|
||||
@@ -3757,7 +3757,7 @@ func TestBlockListMap(t *testing.T) {
|
||||
}
|
||||
var paths []string
|
||||
|
||||
snap.WithBlocksHash(fi.BlocksHash, func(fi db.FileIntf) bool {
|
||||
snap.WithBlocksHash(fi.BlocksHash, func(fi protocol.FileIntf) bool {
|
||||
paths = append(paths, fi.FileName())
|
||||
return true
|
||||
})
|
||||
@@ -3790,7 +3790,7 @@ func TestBlockListMap(t *testing.T) {
|
||||
defer snap.Release()
|
||||
|
||||
paths = paths[:0]
|
||||
snap.WithBlocksHash(fi.BlocksHash, func(fi db.FileIntf) bool {
|
||||
snap.WithBlocksHash(fi.BlocksHash, func(fi protocol.FileIntf) bool {
|
||||
paths = append(paths, fi.FileName())
|
||||
return true
|
||||
})
|
||||
|
||||
@@ -720,8 +720,6 @@ func TestRequestRemoteRenameChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
var gotA, gotB, gotConfl bool
|
||||
bIntermediateVersion := protocol.Vector{}.Update(fc.id.Short()).Update(myID.Short())
|
||||
bFinalVersion := bIntermediateVersion.Copy().Update(fc.id.Short())
|
||||
done := make(chan struct{})
|
||||
fc.mut.Lock()
|
||||
fc.indexFn = func(_ context.Context, folder string, fs []protocol.FileInfo) {
|
||||
@@ -742,16 +740,12 @@ func TestRequestRemoteRenameChanged(t *testing.T) {
|
||||
if gotB {
|
||||
t.Error("Got more than one index update for", f.Name)
|
||||
}
|
||||
if f.Version.Equal(bIntermediateVersion) {
|
||||
if f.Version.Counter(fc.id.Short()) == 0 {
|
||||
// This index entry might be superseeded
|
||||
// by the final one or sent before it separately.
|
||||
break
|
||||
}
|
||||
if f.Version.Equal(bFinalVersion) {
|
||||
gotB = true
|
||||
break
|
||||
}
|
||||
t.Errorf("Got unexpected version %v for file %v in index update", f.Version, f.Name)
|
||||
gotB = true
|
||||
case strings.HasPrefix(f.Name, "b.sync-conflict-"):
|
||||
if gotConfl {
|
||||
t.Error("Got more than one index update for conflicts of", f.Name)
|
||||
|
||||
@@ -45,9 +45,36 @@ func NewService(id protocol.DeviceID, cfg config.Wrapper) *Service {
|
||||
mut: sync.NewRWMutex(),
|
||||
}
|
||||
s.Service = util.AsService(s.serve, s.String())
|
||||
cfg.Subscribe(s)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Service) VerifyConfiguration(from, to config.Configuration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
|
||||
if !from.Options.NATEnabled && to.Options.NATEnabled {
|
||||
s.mut.Lock()
|
||||
l.Debugln("Starting NAT service")
|
||||
s.timer.Reset(0)
|
||||
s.mut.Unlock()
|
||||
} else if from.Options.NATEnabled && !to.Options.NATEnabled {
|
||||
s.mut.Lock()
|
||||
l.Debugln("Stopping NAT service")
|
||||
if !s.timer.Stop() {
|
||||
<-s.timer.C
|
||||
}
|
||||
s.mut.Unlock()
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Service) Stop() {
|
||||
s.cfg.Unsubscribe(s)
|
||||
s.Service.Stop()
|
||||
}
|
||||
|
||||
func (s *Service) serve(ctx context.Context) {
|
||||
announce := stdsync.Once{}
|
||||
|
||||
|
||||
@@ -7,9 +7,13 @@
|
||||
package nat
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
@@ -56,7 +60,15 @@ func TestMappingValidGateway(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMappingClearAddresses(t *testing.T) {
|
||||
natSvc := NewService(protocol.EmptyDeviceID, nil)
|
||||
tmpFile, err := ioutil.TempFile("", "syncthing-testConfig-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
w := config.Wrap(tmpFile.Name(), config.Configuration{}, events.NoopLogger)
|
||||
defer os.RemoveAll(tmpFile.Name())
|
||||
tmpFile.Close()
|
||||
|
||||
natSvc := NewService(protocol.EmptyDeviceID, w)
|
||||
// Mock a mapped port; avoids the need to actually map a port
|
||||
ip := net.ParseIP("192.168.0.1")
|
||||
m := natSvc.NewMapping(TCP, ip, 1024)
|
||||
|
||||
@@ -506,7 +506,10 @@ type FileInfo struct {
|
||||
// host only. It is not part of the protocol, doesn't get sent or
|
||||
// received (we make sure to zero it), nonetheless we need it on our
|
||||
// struct and to be able to serialize it to/from the database.
|
||||
LocalFlags uint32 `protobuf:"varint,1000,opt,name=local_flags,json=localFlags,proto3" json:"local_flags,omitempty"`
|
||||
LocalFlags uint32 `protobuf:"varint,1000,opt,name=local_flags,json=localFlags,proto3" json:"local_flags,omitempty"`
|
||||
// The version_hash is an implementation detail and not part of the wire
|
||||
// format.
|
||||
VersionHash []byte `protobuf:"bytes,1001,opt,name=version_hash,json=versionHash,proto3" json:"version_hash,omitempty"`
|
||||
Deleted bool `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted,omitempty"`
|
||||
RawInvalid bool `protobuf:"varint,7,opt,name=invalid,proto3" json:"invalid,omitempty"`
|
||||
NoPermissions bool `protobuf:"varint,8,opt,name=no_permissions,json=noPermissions,proto3" json:"no_permissions,omitempty"`
|
||||
@@ -922,122 +925,123 @@ func init() {
|
||||
func init() { proto.RegisterFile("bep.proto", fileDescriptor_e3f59eb60afbbc6e) }
|
||||
|
||||
var fileDescriptor_e3f59eb60afbbc6e = []byte{
|
||||
// 1825 bytes of a gzipped FileDescriptorProto
|
||||
// 1842 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcf, 0x73, 0xdb, 0xc6,
|
||||
0x15, 0x26, 0x48, 0xf0, 0xd7, 0x23, 0xa5, 0x40, 0x6b, 0x5b, 0x45, 0x61, 0x9b, 0x84, 0x69, 0x3b,
|
||||
0x66, 0x34, 0xa9, 0xed, 0x26, 0x69, 0x3b, 0xed, 0xb4, 0x9d, 0xe1, 0x0f, 0x48, 0xe6, 0x54, 0x26,
|
||||
0xd5, 0x25, 0xe5, 0xd4, 0x39, 0x14, 0x03, 0x11, 0x4b, 0x0a, 0x63, 0x10, 0xcb, 0x02, 0xa0, 0x64,
|
||||
0xe6, 0x4f, 0xe0, 0xa9, 0xc7, 0x5e, 0x38, 0x93, 0x99, 0x9e, 0xfa, 0x9f, 0xf8, 0xe8, 0xf6, 0xd4,
|
||||
0xe9, 0x41, 0xd3, 0xc8, 0x97, 0x1c, 0x7b, 0xe9, 0xb5, 0xed, 0xec, 0x2e, 0x40, 0x82, 0x52, 0x9c,
|
||||
0xc9, 0xa1, 0x27, 0xec, 0xbe, 0xf7, 0xed, 0x5b, 0xec, 0xf7, 0xde, 0xfb, 0x76, 0xa1, 0x78, 0x42,
|
||||
0xa6, 0x8f, 0xa7, 0x3e, 0x0d, 0x29, 0x2a, 0xf0, 0xcf, 0x90, 0xba, 0xda, 0x7d, 0x9f, 0x4c, 0x69,
|
||||
0xf0, 0x84, 0xcf, 0x4f, 0x66, 0xa3, 0x27, 0x63, 0x3a, 0xa6, 0x7c, 0xc2, 0x47, 0x02, 0x5e, 0x9b,
|
||||
0x42, 0xf6, 0x19, 0x71, 0x5d, 0x8a, 0xaa, 0x50, 0xb2, 0xc9, 0x99, 0x33, 0x24, 0xa6, 0x67, 0x4d,
|
||||
0x88, 0x2a, 0xe9, 0x52, 0xbd, 0x88, 0x41, 0x98, 0xba, 0xd6, 0x84, 0x30, 0xc0, 0xd0, 0x75, 0x88,
|
||||
0x17, 0x0a, 0x40, 0x5a, 0x00, 0x84, 0x89, 0x03, 0x1e, 0xc2, 0x76, 0x04, 0x38, 0x23, 0x7e, 0xe0,
|
||||
0x50, 0x4f, 0xcd, 0x70, 0xcc, 0x96, 0xb0, 0xbe, 0x10, 0xc6, 0x5a, 0x00, 0xb9, 0x67, 0xc4, 0xb2,
|
||||
0x89, 0x8f, 0x3e, 0x02, 0x39, 0x9c, 0x4f, 0xc5, 0x5e, 0xdb, 0x9f, 0xdc, 0x7a, 0x1c, 0xff, 0xf9,
|
||||
0xe3, 0xe7, 0x24, 0x08, 0xac, 0x31, 0x19, 0xcc, 0xa7, 0x04, 0x73, 0x08, 0xfa, 0x35, 0x94, 0x86,
|
||||
0x74, 0x32, 0xf5, 0x49, 0xc0, 0x03, 0xa7, 0xf9, 0x8a, 0x3b, 0xd7, 0x56, 0xb4, 0xd6, 0x18, 0x9c,
|
||||
0x5c, 0x50, 0x6b, 0xc0, 0x56, 0xcb, 0x9d, 0x05, 0x21, 0xf1, 0x5b, 0xd4, 0x1b, 0x39, 0x63, 0xf4,
|
||||
0x14, 0xf2, 0x23, 0xea, 0xda, 0xc4, 0x0f, 0x54, 0x49, 0xcf, 0xd4, 0x4b, 0x9f, 0x28, 0xeb, 0x60,
|
||||
0xfb, 0xdc, 0xd1, 0x94, 0xdf, 0x5c, 0x54, 0x53, 0x38, 0x86, 0xd5, 0xfe, 0x9c, 0x86, 0x9c, 0xf0,
|
||||
0xa0, 0x5d, 0x48, 0x3b, 0xb6, 0xa0, 0xa8, 0x99, 0xbb, 0xbc, 0xa8, 0xa6, 0x3b, 0x6d, 0x9c, 0x76,
|
||||
0x6c, 0x74, 0x13, 0xb2, 0xae, 0x75, 0x42, 0xdc, 0x88, 0x1c, 0x31, 0x41, 0xb7, 0xa1, 0xe8, 0x13,
|
||||
0xcb, 0x36, 0xa9, 0xe7, 0xce, 0x39, 0x25, 0x05, 0x5c, 0x60, 0x86, 0x9e, 0xe7, 0xce, 0xd1, 0x8f,
|
||||
0x00, 0x39, 0x63, 0x8f, 0xfa, 0xc4, 0x9c, 0x12, 0x7f, 0xe2, 0xf0, 0xbf, 0x0d, 0x54, 0x99, 0xa3,
|
||||
0x76, 0x84, 0xe7, 0x68, 0xed, 0x40, 0xf7, 0x61, 0x2b, 0x82, 0xdb, 0xc4, 0x25, 0x21, 0x51, 0xb3,
|
||||
0x1c, 0x59, 0x16, 0xc6, 0x36, 0xb7, 0xa1, 0xa7, 0x70, 0xd3, 0x76, 0x02, 0xeb, 0xc4, 0x25, 0x66,
|
||||
0x48, 0x26, 0x53, 0xd3, 0xf1, 0x6c, 0xf2, 0x9a, 0x04, 0x6a, 0x8e, 0x63, 0x51, 0xe4, 0x1b, 0x90,
|
||||
0xc9, 0xb4, 0x23, 0x3c, 0x68, 0x17, 0x72, 0x53, 0x6b, 0x16, 0x10, 0x5b, 0xcd, 0x73, 0x4c, 0x34,
|
||||
0x63, 0x2c, 0x89, 0x0a, 0x08, 0x54, 0xe5, 0x2a, 0x4b, 0x6d, 0xee, 0x88, 0x59, 0x8a, 0x60, 0xb5,
|
||||
0x7f, 0xa5, 0x21, 0x27, 0x3c, 0xe8, 0xc3, 0x15, 0x4b, 0xe5, 0xe6, 0x2e, 0x43, 0xfd, 0xe3, 0xa2,
|
||||
0x5a, 0x10, 0xbe, 0x4e, 0x3b, 0xc1, 0x1a, 0x02, 0x39, 0x51, 0x51, 0x7c, 0x8c, 0xee, 0x40, 0xd1,
|
||||
0xb2, 0x6d, 0x96, 0x3d, 0x12, 0xa8, 0x19, 0x3d, 0x53, 0x2f, 0xe2, 0xb5, 0x01, 0xfd, 0x6c, 0xb3,
|
||||
0x1a, 0xe4, 0xab, 0xf5, 0xf3, 0xbe, 0x32, 0x60, 0xa9, 0x18, 0x12, 0x3f, 0xaa, 0xe0, 0x2c, 0xdf,
|
||||
0xaf, 0xc0, 0x0c, 0xbc, 0x7e, 0xef, 0x41, 0x79, 0x62, 0xbd, 0x36, 0x03, 0xf2, 0x87, 0x19, 0xf1,
|
||||
0x86, 0x84, 0xd3, 0x95, 0xc1, 0xa5, 0x89, 0xf5, 0xba, 0x1f, 0x99, 0x50, 0x05, 0xc0, 0xf1, 0x42,
|
||||
0x9f, 0xda, 0xb3, 0x21, 0xf1, 0x23, 0xae, 0x12, 0x16, 0xf4, 0x13, 0x28, 0x70, 0xb2, 0x4d, 0xc7,
|
||||
0x56, 0x0b, 0xba, 0x54, 0x97, 0x9b, 0x5a, 0x74, 0xf0, 0x3c, 0xa7, 0x9a, 0x9f, 0x3b, 0x1e, 0xe2,
|
||||
0x3c, 0xc7, 0x76, 0x6c, 0xf4, 0x4b, 0xd0, 0x82, 0x57, 0x0e, 0x4b, 0x94, 0x88, 0x14, 0x3a, 0xd4,
|
||||
0x33, 0x7d, 0x32, 0xa1, 0x67, 0x96, 0x1b, 0xa8, 0x45, 0xbe, 0x8d, 0xca, 0x10, 0x9d, 0x04, 0x00,
|
||||
0x47, 0xfe, 0x5a, 0x0f, 0xb2, 0x3c, 0x22, 0xcb, 0xa2, 0x28, 0xd6, 0xa8, 0x7b, 0xa3, 0x19, 0x7a,
|
||||
0x0c, 0xd9, 0x91, 0xe3, 0x92, 0x40, 0x4d, 0xf3, 0x1c, 0xa2, 0x44, 0xa5, 0x3b, 0x2e, 0xe9, 0x78,
|
||||
0x23, 0x1a, 0x65, 0x51, 0xc0, 0x6a, 0xc7, 0x50, 0xe2, 0x01, 0x8f, 0xa7, 0xb6, 0x15, 0x92, 0xff,
|
||||
0x5b, 0xd8, 0xff, 0xca, 0x50, 0x88, 0x3d, 0xab, 0xa4, 0x4b, 0x89, 0xa4, 0x23, 0x90, 0x03, 0xe7,
|
||||
0x4b, 0xc2, 0x7b, 0x24, 0x83, 0xf9, 0x18, 0xdd, 0x05, 0x98, 0x50, 0xdb, 0x19, 0x39, 0xc4, 0x36,
|
||||
0x03, 0x9e, 0xb2, 0x0c, 0x2e, 0xc6, 0x96, 0x3e, 0x7a, 0x0a, 0xa5, 0x95, 0xfb, 0x64, 0xae, 0x96,
|
||||
0x39, 0xe7, 0x1f, 0xc4, 0x9c, 0xf7, 0x4f, 0xa9, 0x1f, 0x76, 0xda, 0x78, 0x15, 0xa2, 0x39, 0x67,
|
||||
0x25, 0x1d, 0xcb, 0x13, 0x23, 0x76, 0xa3, 0xa4, 0x5f, 0x90, 0x61, 0x48, 0x57, 0x8d, 0x1f, 0xc1,
|
||||
0x90, 0x06, 0x85, 0x55, 0x4d, 0x00, 0xff, 0x81, 0xd5, 0x1c, 0xfd, 0x18, 0x72, 0x27, 0x2e, 0x1d,
|
||||
0xbe, 0x8a, 0xfb, 0xe3, 0xc6, 0x3a, 0x58, 0x93, 0xd9, 0x13, 0x2c, 0x44, 0x40, 0x26, 0x93, 0xc1,
|
||||
0x7c, 0xe2, 0x3a, 0xde, 0x2b, 0x33, 0xb4, 0xfc, 0x31, 0x09, 0xd5, 0x1d, 0x21, 0x93, 0x91, 0x75,
|
||||
0xc0, 0x8d, 0x4c, 0x6e, 0xc5, 0x02, 0xf3, 0xd4, 0x0a, 0x4e, 0x55, 0xc4, 0xda, 0x08, 0x83, 0x30,
|
||||
0x3d, 0xb3, 0x82, 0x53, 0xb4, 0x17, 0xa9, 0xa7, 0xd0, 0xc2, 0xdd, 0xeb, 0xec, 0x27, 0xe4, 0x53,
|
||||
0x87, 0xd2, 0x55, 0x79, 0xd9, 0xc2, 0x49, 0x13, 0xdb, 0x6e, 0x45, 0xa4, 0x17, 0xa8, 0x25, 0x5d,
|
||||
0xaa, 0x67, 0xd7, 0xbc, 0x75, 0x03, 0xf4, 0x04, 0xc4, 0xe6, 0x26, 0x4f, 0xd1, 0x16, 0xf3, 0x37,
|
||||
0x95, 0xcb, 0x8b, 0x6a, 0x19, 0x5b, 0xe7, 0xfc, 0xa8, 0x7d, 0xe7, 0x4b, 0x82, 0x8b, 0x27, 0xf1,
|
||||
0x90, 0xed, 0xe9, 0xd2, 0xa1, 0xe5, 0x9a, 0x23, 0xd7, 0x1a, 0x07, 0xea, 0x37, 0x79, 0xbe, 0x29,
|
||||
0x70, 0xdb, 0x3e, 0x33, 0x21, 0x95, 0xa9, 0x0b, 0x53, 0x2c, 0x3b, 0x92, 0xa6, 0x78, 0x8a, 0xea,
|
||||
0x90, 0x77, 0xbc, 0x33, 0xcb, 0x75, 0x22, 0x41, 0x6a, 0x6e, 0x5f, 0x5e, 0x54, 0x01, 0x5b, 0xe7,
|
||||
0x1d, 0x61, 0xc5, 0xb1, 0x9b, 0xb1, 0xe9, 0xd1, 0x0d, 0xed, 0x2c, 0xf0, 0x50, 0x5b, 0x1e, 0x4d,
|
||||
0xe8, 0xe6, 0x2f, 0xe4, 0x3f, 0x7d, 0x55, 0x4d, 0xd5, 0x3c, 0x28, 0xae, 0xb2, 0xc2, 0xaa, 0x8d,
|
||||
0x33, 0x9b, 0xe1, 0xcc, 0xf2, 0x31, 0x2b, 0x75, 0x3a, 0x1a, 0x05, 0x24, 0xe4, 0x75, 0x99, 0xc1,
|
||||
0xd1, 0x6c, 0x55, 0x99, 0x69, 0x4e, 0x8b, 0xa8, 0xcc, 0xdb, 0x50, 0x3c, 0x27, 0xd6, 0x2b, 0x91,
|
||||
0x1e, 0xc1, 0x68, 0x81, 0x19, 0x58, 0x72, 0xa2, 0xfd, 0x7e, 0x05, 0x39, 0x51, 0x52, 0xe8, 0x53,
|
||||
0x28, 0x0c, 0xe9, 0xcc, 0x0b, 0xd7, 0xf7, 0xcd, 0x4e, 0x52, 0xae, 0xb8, 0x27, 0xaa, 0x93, 0x15,
|
||||
0xb0, 0xb6, 0x0f, 0xf9, 0xc8, 0x85, 0x1e, 0xae, 0xb4, 0x54, 0x6e, 0xde, 0xba, 0x52, 0xde, 0x9b,
|
||||
0x17, 0xd0, 0x99, 0xe5, 0xce, 0xc4, 0x8f, 0xca, 0x58, 0x4c, 0x6a, 0x7f, 0x95, 0x20, 0x8f, 0x59,
|
||||
0xc5, 0x06, 0x61, 0xe2, 0xea, 0xca, 0x6e, 0x5c, 0x5d, 0xeb, 0x26, 0x4f, 0x6f, 0x34, 0x79, 0xdc,
|
||||
0xa7, 0x99, 0x44, 0x9f, 0xae, 0x59, 0x92, 0xbf, 0x95, 0xa5, 0x6c, 0x82, 0xa5, 0x98, 0xe5, 0x5c,
|
||||
0x82, 0xe5, 0x87, 0xb0, 0x3d, 0xf2, 0xe9, 0x84, 0x5f, 0x4e, 0xd4, 0xb7, 0xfc, 0x79, 0xa4, 0xa4,
|
||||
0x5b, 0xcc, 0x3a, 0x88, 0x8d, 0x9b, 0x04, 0x17, 0x36, 0x09, 0xae, 0x99, 0x50, 0xc0, 0x24, 0x98,
|
||||
0x52, 0x2f, 0x20, 0xef, 0x3d, 0x13, 0x02, 0xd9, 0xb6, 0x42, 0x8b, 0x9f, 0xa8, 0x8c, 0xf9, 0x18,
|
||||
0x3d, 0x02, 0x79, 0x48, 0x6d, 0x71, 0x9e, 0xed, 0x64, 0xbb, 0x1a, 0xbe, 0x4f, 0xfd, 0x16, 0xb5,
|
||||
0x09, 0xe6, 0x80, 0xda, 0x14, 0x94, 0x36, 0x3d, 0xf7, 0x5c, 0x6a, 0xd9, 0x47, 0x3e, 0x1d, 0xb3,
|
||||
0x1b, 0xe4, 0xbd, 0x4a, 0xd8, 0x86, 0xfc, 0x8c, 0x6b, 0x65, 0xac, 0x85, 0x0f, 0x36, 0xbb, 0xf1,
|
||||
0x6a, 0x20, 0x21, 0xac, 0xb1, 0xce, 0x44, 0x4b, 0x6b, 0xff, 0x96, 0x40, 0x7b, 0x3f, 0x1a, 0x75,
|
||||
0xa0, 0x24, 0x90, 0x66, 0xe2, 0xd1, 0x54, 0xff, 0x3e, 0x1b, 0x71, 0x21, 0x80, 0xd9, 0x6a, 0xfc,
|
||||
0xad, 0x37, 0x6e, 0x42, 0x17, 0x33, 0xdf, 0x4f, 0x17, 0x1f, 0xc1, 0x96, 0x50, 0x84, 0xf8, 0x7d,
|
||||
0x21, 0xeb, 0x99, 0x7a, 0xb6, 0x99, 0x56, 0x52, 0xb8, 0x7c, 0x22, 0xda, 0x4c, 0xbc, 0x2e, 0xee,
|
||||
0x6e, 0x48, 0x87, 0xa8, 0x8e, 0xb5, 0x50, 0xd4, 0x72, 0x20, 0x1f, 0x39, 0xde, 0xb8, 0x56, 0x85,
|
||||
0x6c, 0xcb, 0xa5, 0x3c, 0x9f, 0x39, 0x9f, 0x58, 0x01, 0xf5, 0x62, 0x9a, 0xc5, 0x6c, 0xef, 0x6f,
|
||||
0x69, 0x28, 0x25, 0x9e, 0x86, 0xe8, 0x29, 0x6c, 0xb7, 0x0e, 0x8f, 0xfb, 0x03, 0x03, 0x9b, 0xad,
|
||||
0x5e, 0x77, 0xbf, 0x73, 0xa0, 0xa4, 0xb4, 0x3b, 0x8b, 0xa5, 0xae, 0x4e, 0xd6, 0xa0, 0xcd, 0x57,
|
||||
0x5f, 0x15, 0xb2, 0x9d, 0x6e, 0xdb, 0xf8, 0x9d, 0x22, 0x69, 0x37, 0x17, 0x4b, 0x5d, 0x49, 0x00,
|
||||
0xc5, 0x15, 0xfa, 0x31, 0x94, 0x39, 0xc0, 0x3c, 0x3e, 0x6a, 0x37, 0x06, 0x86, 0x92, 0xd6, 0xb4,
|
||||
0xc5, 0x52, 0xdf, 0xbd, 0x8a, 0x8b, 0x52, 0x72, 0x1f, 0xf2, 0xd8, 0xf8, 0xed, 0xb1, 0xd1, 0x1f,
|
||||
0x28, 0x19, 0x6d, 0x77, 0xb1, 0xd4, 0x51, 0x02, 0x18, 0x77, 0xdc, 0x43, 0x28, 0x60, 0xa3, 0x7f,
|
||||
0xd4, 0xeb, 0xf6, 0x0d, 0x45, 0xd6, 0x7e, 0xb0, 0x58, 0xea, 0x37, 0x36, 0x50, 0x51, 0x11, 0xff,
|
||||
0x14, 0x76, 0xda, 0xbd, 0xcf, 0xbb, 0x87, 0xbd, 0x46, 0xdb, 0x3c, 0xc2, 0xbd, 0x03, 0x6c, 0xf4,
|
||||
0xfb, 0x4a, 0x56, 0xab, 0x2e, 0x96, 0xfa, 0xed, 0x04, 0xfe, 0x5a, 0x4d, 0xde, 0x05, 0xf9, 0xa8,
|
||||
0xd3, 0x3d, 0x50, 0x72, 0xda, 0x8d, 0xc5, 0x52, 0xff, 0x20, 0x01, 0x65, 0xa4, 0xb2, 0x13, 0xb7,
|
||||
0x0e, 0x7b, 0x7d, 0x43, 0xc9, 0x5f, 0x3b, 0x31, 0x27, 0x7b, 0xef, 0xf7, 0x80, 0xae, 0x3f, 0x9e,
|
||||
0xd1, 0x03, 0x90, 0xbb, 0xbd, 0xae, 0xa1, 0xa4, 0xc4, 0xf9, 0xaf, 0x23, 0xba, 0xd4, 0x23, 0xa8,
|
||||
0x06, 0x99, 0xc3, 0x2f, 0x3e, 0x53, 0x24, 0xed, 0x87, 0x8b, 0xa5, 0x7e, 0xeb, 0x3a, 0xe8, 0xf0,
|
||||
0x8b, 0xcf, 0xf6, 0x28, 0x94, 0x92, 0x81, 0x6b, 0x50, 0x78, 0x6e, 0x0c, 0x1a, 0xed, 0xc6, 0xa0,
|
||||
0xa1, 0xa4, 0xc4, 0x2f, 0xc5, 0xee, 0xe7, 0x24, 0xb4, 0x78, 0x8f, 0xde, 0x81, 0x6c, 0xd7, 0x78,
|
||||
0x61, 0x60, 0x45, 0xd2, 0x76, 0x16, 0x4b, 0x7d, 0x2b, 0x06, 0x74, 0xc9, 0x19, 0xf1, 0x51, 0x05,
|
||||
0x72, 0x8d, 0xc3, 0xcf, 0x1b, 0x2f, 0xfb, 0x4a, 0x5a, 0x43, 0x8b, 0xa5, 0xbe, 0x1d, 0xbb, 0x1b,
|
||||
0xee, 0xb9, 0x35, 0x0f, 0xf6, 0xfe, 0x23, 0x41, 0x39, 0x79, 0x05, 0xa2, 0x0a, 0xc8, 0xfb, 0x9d,
|
||||
0x43, 0x23, 0xde, 0x2e, 0xe9, 0x63, 0x63, 0x54, 0x87, 0x62, 0xbb, 0x83, 0x8d, 0xd6, 0xa0, 0x87,
|
||||
0x5f, 0xc6, 0x67, 0x49, 0x82, 0xda, 0x8e, 0xcf, 0xeb, 0x7f, 0x8e, 0x7e, 0x0e, 0xe5, 0xfe, 0xcb,
|
||||
0xe7, 0x87, 0x9d, 0xee, 0x6f, 0x4c, 0x1e, 0x31, 0xad, 0x3d, 0x5a, 0x2c, 0xf5, 0x7b, 0x1b, 0x60,
|
||||
0x32, 0xf5, 0xc9, 0xd0, 0x0a, 0x89, 0xdd, 0x17, 0xd7, 0x39, 0x73, 0x16, 0x24, 0xd4, 0x82, 0x9d,
|
||||
0x78, 0xe9, 0x7a, 0xb3, 0x8c, 0xf6, 0xf1, 0x62, 0xa9, 0x7f, 0xf8, 0x9d, 0xeb, 0x57, 0xbb, 0x17,
|
||||
0x24, 0xf4, 0x00, 0xf2, 0x51, 0x90, 0xb8, 0x92, 0x92, 0x4b, 0xa3, 0x05, 0x7b, 0x7f, 0x91, 0xa0,
|
||||
0xb8, 0x52, 0x33, 0x46, 0x78, 0xb7, 0x67, 0x1a, 0x18, 0xf7, 0x70, 0xcc, 0xc0, 0xca, 0xd9, 0xa5,
|
||||
0x7c, 0x88, 0xee, 0x41, 0xfe, 0xc0, 0xe8, 0x1a, 0xb8, 0xd3, 0x8a, 0x1b, 0x63, 0x05, 0x39, 0x20,
|
||||
0x1e, 0xf1, 0x9d, 0x21, 0xfa, 0x08, 0xca, 0xdd, 0x9e, 0xd9, 0x3f, 0x6e, 0x3d, 0x8b, 0x8f, 0xce,
|
||||
0xf7, 0x4f, 0x84, 0xea, 0xcf, 0x86, 0xa7, 0x9c, 0xcf, 0x3d, 0xd6, 0x43, 0x2f, 0x1a, 0x87, 0x9d,
|
||||
0xb6, 0x80, 0x66, 0x34, 0x75, 0xb1, 0xd4, 0x6f, 0xae, 0xa0, 0xd1, 0x1d, 0xce, 0xb0, 0x7b, 0x36,
|
||||
0x54, 0xbe, 0x5b, 0xb7, 0x90, 0x0e, 0xb9, 0xc6, 0xd1, 0x91, 0xd1, 0x6d, 0xc7, 0x7f, 0xbf, 0xf6,
|
||||
0x35, 0xa6, 0x53, 0xe2, 0xd9, 0x0c, 0xb1, 0xdf, 0xc3, 0x07, 0xc6, 0x20, 0xfe, 0xf9, 0x35, 0x62,
|
||||
0x9f, 0xb2, 0xb7, 0x54, 0xb3, 0xfe, 0xe6, 0xeb, 0x4a, 0xea, 0xed, 0xd7, 0x95, 0xd4, 0x9b, 0xcb,
|
||||
0x8a, 0xf4, 0xf6, 0xb2, 0x22, 0xfd, 0xf3, 0xb2, 0x92, 0xfa, 0xe6, 0xb2, 0x22, 0xfd, 0xf1, 0x5d,
|
||||
0x25, 0xf5, 0xd5, 0xbb, 0x8a, 0xf4, 0xf6, 0x5d, 0x25, 0xf5, 0xf7, 0x77, 0x95, 0xd4, 0x49, 0x8e,
|
||||
0x6b, 0xde, 0xa7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x75, 0x30, 0x0d, 0x51, 0x0f, 0x00,
|
||||
0x00,
|
||||
0xf5, 0x27, 0x48, 0xf0, 0xd7, 0x23, 0xa5, 0x40, 0x6b, 0x5b, 0x5f, 0x7c, 0x61, 0x9b, 0x84, 0x69,
|
||||
0x3b, 0x66, 0x34, 0xa9, 0xed, 0x26, 0x69, 0x3b, 0xed, 0xb4, 0x9d, 0xe1, 0x0f, 0x48, 0xe6, 0x54,
|
||||
0x26, 0xd5, 0x25, 0xe5, 0xd4, 0x39, 0x14, 0x03, 0x11, 0x4b, 0x0a, 0x63, 0x10, 0xcb, 0x02, 0xa0,
|
||||
0x64, 0xe6, 0x4f, 0xe0, 0xa9, 0xc7, 0x5e, 0x38, 0x93, 0x99, 0x9c, 0xfa, 0x9f, 0xf8, 0xe8, 0xf6,
|
||||
0xd4, 0xe9, 0x41, 0xd3, 0xc8, 0x97, 0xf4, 0xd6, 0x4b, 0xaf, 0x9d, 0xce, 0xee, 0x02, 0x24, 0x28,
|
||||
0xc5, 0x99, 0x1c, 0x7a, 0xc2, 0xee, 0x7b, 0x9f, 0x7d, 0xbb, 0xef, 0xb3, 0xef, 0x7d, 0x16, 0x50,
|
||||
0x3c, 0x21, 0xd3, 0xc7, 0x53, 0x9f, 0x86, 0x14, 0x15, 0xf8, 0x67, 0x48, 0x5d, 0xed, 0xbe, 0x4f,
|
||||
0xa6, 0x34, 0x78, 0xc2, 0xe7, 0x27, 0xb3, 0xd1, 0x93, 0x31, 0x1d, 0x53, 0x3e, 0xe1, 0x23, 0x01,
|
||||
0xaf, 0x4d, 0x21, 0xfb, 0x8c, 0xb8, 0x2e, 0x45, 0x55, 0x28, 0xd9, 0xe4, 0xcc, 0x19, 0x12, 0xd3,
|
||||
0xb3, 0x26, 0x44, 0x95, 0x74, 0xa9, 0x5e, 0xc4, 0x20, 0x4c, 0x5d, 0x6b, 0x42, 0x18, 0x60, 0xe8,
|
||||
0x3a, 0xc4, 0x0b, 0x05, 0x20, 0x2d, 0x00, 0xc2, 0xc4, 0x01, 0x0f, 0x61, 0x3b, 0x02, 0x9c, 0x11,
|
||||
0x3f, 0x70, 0xa8, 0xa7, 0x66, 0x38, 0x66, 0x4b, 0x58, 0x5f, 0x08, 0x63, 0x2d, 0x80, 0xdc, 0x33,
|
||||
0x62, 0xd9, 0xc4, 0x47, 0x1f, 0x81, 0x1c, 0xce, 0xa7, 0x62, 0xaf, 0xed, 0x4f, 0x6e, 0x3d, 0x8e,
|
||||
0x4f, 0xfe, 0xf8, 0x39, 0x09, 0x02, 0x6b, 0x4c, 0x06, 0xf3, 0x29, 0xc1, 0x1c, 0x82, 0x7e, 0x0d,
|
||||
0xa5, 0x21, 0x9d, 0x4c, 0x7d, 0x12, 0xf0, 0xc0, 0x69, 0xbe, 0xe2, 0xce, 0xb5, 0x15, 0xad, 0x35,
|
||||
0x06, 0x27, 0x17, 0xd4, 0x1a, 0xb0, 0xd5, 0x72, 0x67, 0x41, 0x48, 0xfc, 0x16, 0xf5, 0x46, 0xce,
|
||||
0x18, 0x3d, 0x85, 0xfc, 0x88, 0xba, 0x36, 0xf1, 0x03, 0x55, 0xd2, 0x33, 0xf5, 0xd2, 0x27, 0xca,
|
||||
0x3a, 0xd8, 0x3e, 0x77, 0x34, 0xe5, 0x37, 0x17, 0xd5, 0x14, 0x8e, 0x61, 0xb5, 0xaf, 0xd3, 0x90,
|
||||
0x13, 0x1e, 0xb4, 0x0b, 0x69, 0xc7, 0x16, 0x14, 0x35, 0x73, 0x97, 0x17, 0xd5, 0x74, 0xa7, 0x8d,
|
||||
0xd3, 0x8e, 0x8d, 0x6e, 0x42, 0xd6, 0xb5, 0x4e, 0x88, 0x1b, 0x91, 0x23, 0x26, 0xe8, 0x36, 0x14,
|
||||
0x7d, 0x62, 0xd9, 0x26, 0xf5, 0xdc, 0x39, 0xa7, 0xa4, 0x80, 0x0b, 0xcc, 0xd0, 0xf3, 0xdc, 0x39,
|
||||
0xfa, 0x11, 0x20, 0x67, 0xec, 0x51, 0x9f, 0x98, 0x53, 0xe2, 0x4f, 0x1c, 0x7e, 0xda, 0x40, 0x95,
|
||||
0x39, 0x6a, 0x47, 0x78, 0x8e, 0xd6, 0x0e, 0x74, 0x1f, 0xb6, 0x22, 0xb8, 0x4d, 0x5c, 0x12, 0x12,
|
||||
0x35, 0xcb, 0x91, 0x65, 0x61, 0x6c, 0x73, 0x1b, 0x7a, 0x0a, 0x37, 0x6d, 0x27, 0xb0, 0x4e, 0x5c,
|
||||
0x62, 0x86, 0x64, 0x32, 0x35, 0x1d, 0xcf, 0x26, 0xaf, 0x49, 0xa0, 0xe6, 0x38, 0x16, 0x45, 0xbe,
|
||||
0x01, 0x99, 0x4c, 0x3b, 0xc2, 0x83, 0x76, 0x21, 0x37, 0xb5, 0x66, 0x01, 0xb1, 0xd5, 0x3c, 0xc7,
|
||||
0x44, 0x33, 0xc6, 0x92, 0xa8, 0x80, 0x40, 0x55, 0xae, 0xb2, 0xd4, 0xe6, 0x8e, 0x98, 0xa5, 0x08,
|
||||
0x56, 0xfb, 0x57, 0x1a, 0x72, 0xc2, 0x83, 0x3e, 0x5c, 0xb1, 0x54, 0x6e, 0xee, 0x32, 0xd4, 0xdf,
|
||||
0x2f, 0xaa, 0x05, 0xe1, 0xeb, 0xb4, 0x13, 0xac, 0x21, 0x90, 0x13, 0x15, 0xc5, 0xc7, 0xe8, 0x0e,
|
||||
0x14, 0x2d, 0xdb, 0x66, 0xb7, 0x47, 0x02, 0x35, 0xa3, 0x67, 0xea, 0x45, 0xbc, 0x36, 0xa0, 0x9f,
|
||||
0x6d, 0x56, 0x83, 0x7c, 0xb5, 0x7e, 0xde, 0x57, 0x06, 0xec, 0x2a, 0x86, 0xc4, 0x8f, 0x2a, 0x38,
|
||||
0xcb, 0xf7, 0x2b, 0x30, 0x03, 0xaf, 0xdf, 0x7b, 0x50, 0x9e, 0x58, 0xaf, 0xcd, 0x80, 0xfc, 0x61,
|
||||
0x46, 0xbc, 0x21, 0xe1, 0x74, 0x65, 0x70, 0x69, 0x62, 0xbd, 0xee, 0x47, 0x26, 0x54, 0x01, 0x70,
|
||||
0xbc, 0xd0, 0xa7, 0xf6, 0x6c, 0x48, 0xfc, 0x88, 0xab, 0x84, 0x05, 0xfd, 0x04, 0x0a, 0x9c, 0x6c,
|
||||
0xd3, 0xb1, 0xd5, 0x82, 0x2e, 0xd5, 0xe5, 0xa6, 0x16, 0x25, 0x9e, 0xe7, 0x54, 0xf3, 0xbc, 0xe3,
|
||||
0x21, 0xce, 0x73, 0x6c, 0xc7, 0x46, 0xbf, 0x04, 0x2d, 0x78, 0xe5, 0xb0, 0x8b, 0x12, 0x91, 0x42,
|
||||
0x87, 0x7a, 0xa6, 0x4f, 0x26, 0xf4, 0xcc, 0x72, 0x03, 0xb5, 0xc8, 0xb7, 0x51, 0x19, 0xa2, 0x93,
|
||||
0x00, 0xe0, 0xc8, 0x5f, 0xeb, 0x41, 0x96, 0x47, 0x64, 0xb7, 0x28, 0x8a, 0x35, 0xea, 0xde, 0x68,
|
||||
0x86, 0x1e, 0x43, 0x76, 0xe4, 0xb8, 0x24, 0x50, 0xd3, 0xfc, 0x0e, 0x51, 0xa2, 0xd2, 0x1d, 0x97,
|
||||
0x74, 0xbc, 0x11, 0x8d, 0x6e, 0x51, 0xc0, 0x6a, 0xc7, 0x50, 0xe2, 0x01, 0x8f, 0xa7, 0xb6, 0x15,
|
||||
0x92, 0xff, 0x59, 0xd8, 0xaf, 0xb3, 0x50, 0x88, 0x3d, 0xab, 0x4b, 0x97, 0x12, 0x97, 0x8e, 0x40,
|
||||
0x0e, 0x9c, 0x2f, 0x09, 0xef, 0x91, 0x0c, 0xe6, 0x63, 0x74, 0x17, 0x60, 0x42, 0x6d, 0x67, 0xe4,
|
||||
0x10, 0xdb, 0x0c, 0xf8, 0x95, 0x65, 0x70, 0x31, 0xb6, 0xf4, 0xd1, 0x53, 0x28, 0xad, 0xdc, 0x27,
|
||||
0x73, 0xb5, 0xcc, 0x39, 0xff, 0x20, 0xe6, 0xbc, 0x7f, 0x4a, 0xfd, 0xb0, 0xd3, 0xc6, 0xab, 0x10,
|
||||
0xcd, 0x39, 0x2b, 0xe9, 0x58, 0x9e, 0x18, 0xb1, 0x1b, 0x25, 0xfd, 0x82, 0x0c, 0x43, 0xba, 0x6a,
|
||||
0xfc, 0x08, 0x86, 0x34, 0x28, 0xac, 0x6a, 0x02, 0xf8, 0x01, 0x56, 0x73, 0xf4, 0x63, 0xc8, 0x9d,
|
||||
0xb8, 0x74, 0xf8, 0x2a, 0xee, 0x8f, 0x1b, 0xeb, 0x60, 0x4d, 0x66, 0x4f, 0xb0, 0x10, 0x01, 0x99,
|
||||
0x4c, 0x06, 0xf3, 0x89, 0xeb, 0x78, 0xaf, 0xcc, 0xd0, 0xf2, 0xc7, 0x24, 0x54, 0x77, 0x84, 0x4c,
|
||||
0x46, 0xd6, 0x01, 0x37, 0x32, 0xb9, 0x15, 0x0b, 0xcc, 0x53, 0x2b, 0x38, 0x55, 0x11, 0x6b, 0x23,
|
||||
0x0c, 0xc2, 0xf4, 0xcc, 0x0a, 0x4e, 0xd1, 0x5e, 0xa4, 0x9e, 0x42, 0x0b, 0x77, 0xaf, 0xb3, 0x9f,
|
||||
0x90, 0x4f, 0x1d, 0x4a, 0x57, 0xe5, 0x65, 0x0b, 0x27, 0x4d, 0x6c, 0xbb, 0x15, 0x91, 0x5e, 0xa0,
|
||||
0x96, 0x74, 0xa9, 0x9e, 0x5d, 0xf3, 0xd6, 0x0d, 0xd0, 0x13, 0x10, 0x9b, 0x9b, 0xfc, 0x8a, 0xb6,
|
||||
0x98, 0xbf, 0xa9, 0x5c, 0x5e, 0x54, 0xcb, 0xd8, 0x3a, 0xe7, 0xa9, 0xf6, 0x9d, 0x2f, 0x09, 0x2e,
|
||||
0x9e, 0xc4, 0x43, 0xb6, 0xa7, 0x4b, 0x87, 0x96, 0x6b, 0x8e, 0x5c, 0x6b, 0x1c, 0xa8, 0xdf, 0xe6,
|
||||
0xf9, 0xa6, 0xc0, 0x6d, 0xfb, 0xcc, 0x84, 0x6a, 0x50, 0x8e, 0x38, 0x16, 0x39, 0xfe, 0x33, 0xcf,
|
||||
0x93, 0x2c, 0x45, 0x46, 0x9e, 0xa5, 0xca, 0x14, 0x88, 0xa9, 0x9a, 0x1d, 0xc9, 0x57, 0x3c, 0x45,
|
||||
0x75, 0xc8, 0x3b, 0xde, 0x99, 0xe5, 0x3a, 0x91, 0x68, 0x35, 0xb7, 0x2f, 0x2f, 0xaa, 0x80, 0xad,
|
||||
0xf3, 0x8e, 0xb0, 0xe2, 0xd8, 0xcd, 0x18, 0xf7, 0xe8, 0x86, 0xbe, 0x16, 0x78, 0xa8, 0x2d, 0x8f,
|
||||
0x26, 0xb4, 0xf5, 0x17, 0xf2, 0x9f, 0xbe, 0xaa, 0xa6, 0x6a, 0x1e, 0x14, 0x57, 0x37, 0xc7, 0x2a,
|
||||
0x92, 0x9f, 0x2c, 0xc3, 0x0f, 0xc6, 0xc7, 0xac, 0x1d, 0xe8, 0x68, 0x14, 0x90, 0x90, 0xd7, 0x6e,
|
||||
0x06, 0x47, 0xb3, 0x55, 0xf5, 0xa6, 0x39, 0x75, 0xa2, 0x7a, 0x6f, 0x43, 0xf1, 0x9c, 0x58, 0xaf,
|
||||
0x44, 0x7a, 0x82, 0xf5, 0x02, 0x33, 0xb0, 0xd4, 0xa2, 0xfd, 0x7e, 0x05, 0x39, 0x51, 0x76, 0xe8,
|
||||
0x53, 0x28, 0x0c, 0xe9, 0xcc, 0x0b, 0xd7, 0x6f, 0xd2, 0x4e, 0x52, 0xd2, 0xb8, 0x27, 0xaa, 0xa5,
|
||||
0x15, 0xb0, 0xb6, 0x0f, 0xf9, 0xc8, 0x85, 0x1e, 0xae, 0xf4, 0x56, 0x6e, 0xde, 0xba, 0xd2, 0x02,
|
||||
0x9b, 0x8f, 0xd4, 0x99, 0xe5, 0xce, 0xc4, 0x41, 0x65, 0x2c, 0x26, 0xb5, 0xbf, 0x48, 0x90, 0xc7,
|
||||
0xac, 0xaa, 0x83, 0x30, 0xf1, 0xbc, 0x65, 0x37, 0x9e, 0xb7, 0xb5, 0x10, 0xa4, 0x37, 0x84, 0x20,
|
||||
0xee, 0xe5, 0x4c, 0xa2, 0x97, 0xd7, 0x2c, 0xc9, 0xdf, 0xc9, 0x52, 0x36, 0xc1, 0x52, 0xcc, 0x72,
|
||||
0x2e, 0xc1, 0xf2, 0x43, 0xd8, 0x1e, 0xf9, 0x74, 0xc2, 0x1f, 0x30, 0xea, 0x5b, 0xfe, 0x3c, 0x52,
|
||||
0xdb, 0x2d, 0x66, 0x1d, 0xc4, 0xc6, 0x4d, 0x82, 0x0b, 0x9b, 0x04, 0xd7, 0x4c, 0x28, 0x60, 0x12,
|
||||
0x4c, 0xa9, 0x17, 0x90, 0xf7, 0xe6, 0x84, 0x40, 0xb6, 0xad, 0xd0, 0xe2, 0x19, 0x95, 0x31, 0x1f,
|
||||
0xa3, 0x47, 0x20, 0x0f, 0xa9, 0x2d, 0xf2, 0xd9, 0x4e, 0xb6, 0xb4, 0xe1, 0xfb, 0xd4, 0x6f, 0x51,
|
||||
0x9b, 0x60, 0x0e, 0xa8, 0x4d, 0x41, 0x69, 0xd3, 0x73, 0xcf, 0xa5, 0x96, 0x7d, 0xe4, 0xd3, 0x31,
|
||||
0x7b, 0x65, 0xde, 0xab, 0x96, 0x6d, 0xc8, 0xcf, 0xb8, 0x9e, 0xc6, 0x7a, 0xf9, 0x60, 0xb3, 0x63,
|
||||
0xaf, 0x06, 0x12, 0xe2, 0x1b, 0x6b, 0x51, 0xb4, 0xb4, 0xf6, 0x6f, 0x09, 0xb4, 0xf7, 0xa3, 0x51,
|
||||
0x07, 0x4a, 0x02, 0x69, 0x26, 0x7e, 0xac, 0xea, 0x3f, 0x64, 0x23, 0x2e, 0x16, 0x30, 0x5b, 0x8d,
|
||||
0xbf, 0xf3, 0x55, 0x4e, 0x68, 0x67, 0xe6, 0x87, 0x69, 0xe7, 0x23, 0xd8, 0x12, 0xaa, 0x11, 0xff,
|
||||
0x83, 0xc8, 0x7a, 0xa6, 0x9e, 0x6d, 0xa6, 0x95, 0x14, 0x2e, 0x9f, 0x88, 0x36, 0x13, 0x7f, 0x20,
|
||||
0x77, 0x37, 0xe4, 0x45, 0x54, 0xc7, 0x5a, 0x4c, 0x6a, 0x39, 0x90, 0x8f, 0x1c, 0x6f, 0x5c, 0xab,
|
||||
0x42, 0xb6, 0xe5, 0x52, 0x7e, 0x9f, 0x39, 0x9f, 0x58, 0x01, 0xf5, 0x62, 0x9a, 0xc5, 0x6c, 0xef,
|
||||
0xaf, 0x69, 0x28, 0x25, 0x7e, 0x1f, 0xd1, 0x53, 0xd8, 0x6e, 0x1d, 0x1e, 0xf7, 0x07, 0x06, 0x36,
|
||||
0x5b, 0xbd, 0xee, 0x7e, 0xe7, 0x40, 0x49, 0x69, 0x77, 0x16, 0x4b, 0x5d, 0x9d, 0xac, 0x41, 0x9b,
|
||||
0x7f, 0x86, 0x55, 0xc8, 0x76, 0xba, 0x6d, 0xe3, 0x77, 0x8a, 0xa4, 0xdd, 0x5c, 0x2c, 0x75, 0x25,
|
||||
0x01, 0x14, 0xcf, 0xec, 0xc7, 0x50, 0xe6, 0x00, 0xf3, 0xf8, 0xa8, 0xdd, 0x18, 0x18, 0x4a, 0x5a,
|
||||
0xd3, 0x16, 0x4b, 0x7d, 0xf7, 0x2a, 0x2e, 0xba, 0x92, 0xfb, 0x90, 0xc7, 0xc6, 0x6f, 0x8f, 0x8d,
|
||||
0xfe, 0x40, 0xc9, 0x68, 0xbb, 0x8b, 0xa5, 0x8e, 0x12, 0xc0, 0xb8, 0xe3, 0x1e, 0x42, 0x01, 0x1b,
|
||||
0xfd, 0xa3, 0x5e, 0xb7, 0x6f, 0x28, 0xb2, 0xf6, 0x7f, 0x8b, 0xa5, 0x7e, 0x63, 0x03, 0x15, 0x15,
|
||||
0xf1, 0x4f, 0x61, 0xa7, 0xdd, 0xfb, 0xbc, 0x7b, 0xd8, 0x6b, 0xb4, 0xcd, 0x23, 0xdc, 0x3b, 0xc0,
|
||||
0x46, 0xbf, 0xaf, 0x64, 0xb5, 0xea, 0x62, 0xa9, 0xdf, 0x4e, 0xe0, 0xaf, 0xd5, 0xe4, 0x5d, 0x90,
|
||||
0x8f, 0x3a, 0xdd, 0x03, 0x25, 0xa7, 0xdd, 0x58, 0x2c, 0xf5, 0x0f, 0x12, 0x50, 0x46, 0x2a, 0xcb,
|
||||
0xb8, 0x75, 0xd8, 0xeb, 0x1b, 0x4a, 0xfe, 0x5a, 0xc6, 0x9c, 0xec, 0xbd, 0xdf, 0x03, 0xba, 0xfe,
|
||||
0x83, 0x8d, 0x1e, 0x80, 0xdc, 0xed, 0x75, 0x0d, 0x25, 0x25, 0xf2, 0xbf, 0x8e, 0xe8, 0x52, 0x8f,
|
||||
0xa0, 0x1a, 0x64, 0x0e, 0xbf, 0xf8, 0x4c, 0x91, 0xb4, 0xff, 0x5f, 0x2c, 0xf5, 0x5b, 0xd7, 0x41,
|
||||
0x87, 0x5f, 0x7c, 0xb6, 0x47, 0xa1, 0x94, 0x0c, 0x5c, 0x83, 0xc2, 0x73, 0x63, 0xd0, 0x68, 0x37,
|
||||
0x06, 0x0d, 0x25, 0x25, 0x8e, 0x14, 0xbb, 0x9f, 0x93, 0xd0, 0xe2, 0x3d, 0x7a, 0x07, 0xb2, 0x5d,
|
||||
0xe3, 0x85, 0x81, 0x15, 0x49, 0xdb, 0x59, 0x2c, 0xf5, 0xad, 0x18, 0xd0, 0x25, 0x67, 0xc4, 0x47,
|
||||
0x15, 0xc8, 0x35, 0x0e, 0x3f, 0x6f, 0xbc, 0xec, 0x2b, 0x69, 0x0d, 0x2d, 0x96, 0xfa, 0x76, 0xec,
|
||||
0x6e, 0xb8, 0xe7, 0xd6, 0x3c, 0xd8, 0xfb, 0x8f, 0x04, 0xe5, 0xe4, 0x33, 0x89, 0x2a, 0x20, 0xef,
|
||||
0x77, 0x0e, 0x8d, 0x78, 0xbb, 0xa4, 0x8f, 0x8d, 0x51, 0x1d, 0x8a, 0xed, 0x0e, 0x36, 0x5a, 0x83,
|
||||
0x1e, 0x7e, 0x19, 0xe7, 0x92, 0x04, 0xb5, 0x1d, 0x9f, 0xd7, 0xff, 0x1c, 0xfd, 0x1c, 0xca, 0xfd,
|
||||
0x97, 0xcf, 0x0f, 0x3b, 0xdd, 0xdf, 0x98, 0x3c, 0x62, 0x5a, 0x7b, 0xb4, 0x58, 0xea, 0xf7, 0x36,
|
||||
0xc0, 0x64, 0xea, 0x93, 0xa1, 0x15, 0x12, 0xbb, 0x2f, 0x9e, 0x7c, 0xe6, 0x2c, 0x48, 0xa8, 0x05,
|
||||
0x3b, 0xf1, 0xd2, 0xf5, 0x66, 0x19, 0xed, 0xe3, 0xc5, 0x52, 0xff, 0xf0, 0x7b, 0xd7, 0xaf, 0x76,
|
||||
0x2f, 0x48, 0xe8, 0x01, 0xe4, 0xa3, 0x20, 0x71, 0x25, 0x25, 0x97, 0x46, 0x0b, 0xf6, 0xfe, 0x2c,
|
||||
0x41, 0x71, 0xa5, 0x66, 0x8c, 0xf0, 0x6e, 0xcf, 0x34, 0x30, 0xee, 0xe1, 0x98, 0x81, 0x95, 0xb3,
|
||||
0x4b, 0xf9, 0x10, 0xdd, 0x83, 0xfc, 0x81, 0xd1, 0x35, 0x70, 0xa7, 0x15, 0x37, 0xc6, 0x0a, 0x72,
|
||||
0x40, 0x3c, 0xe2, 0x3b, 0x43, 0xf4, 0x11, 0x94, 0xbb, 0x3d, 0xb3, 0x7f, 0xdc, 0x7a, 0x16, 0xa7,
|
||||
0xce, 0xf7, 0x4f, 0x84, 0xea, 0xcf, 0x86, 0xa7, 0x9c, 0xcf, 0x3d, 0xd6, 0x43, 0x2f, 0x1a, 0x87,
|
||||
0x9d, 0xb6, 0x80, 0x66, 0x34, 0x75, 0xb1, 0xd4, 0x6f, 0xae, 0xa0, 0xd1, 0x1b, 0xce, 0xb0, 0x7b,
|
||||
0x36, 0x54, 0xbe, 0x5f, 0xb7, 0x90, 0x0e, 0xb9, 0xc6, 0xd1, 0x91, 0xd1, 0x6d, 0xc7, 0xa7, 0x5f,
|
||||
0xfb, 0x1a, 0xd3, 0x29, 0xf1, 0x6c, 0x86, 0xd8, 0xef, 0xe1, 0x03, 0x63, 0x10, 0x1f, 0x7e, 0x8d,
|
||||
0xd8, 0xa7, 0xec, 0x7f, 0xab, 0x59, 0x7f, 0xf3, 0x4d, 0x25, 0xf5, 0xf6, 0x9b, 0x4a, 0xea, 0xcd,
|
||||
0x65, 0x45, 0x7a, 0x7b, 0x59, 0x91, 0xfe, 0x71, 0x59, 0x49, 0x7d, 0x7b, 0x59, 0x91, 0xfe, 0xf8,
|
||||
0xae, 0x92, 0xfa, 0xea, 0x5d, 0x45, 0x7a, 0xfb, 0xae, 0x92, 0xfa, 0xdb, 0xbb, 0x4a, 0xea, 0x24,
|
||||
0xc7, 0x35, 0xef, 0xd3, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x99, 0x31, 0x0e, 0x75, 0x0f,
|
||||
0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Hello) Marshal() (dAtA []byte, err error) {
|
||||
@@ -1456,6 +1460,15 @@ func (m *FileInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.VersionHash) > 0 {
|
||||
i -= len(m.VersionHash)
|
||||
copy(dAtA[i:], m.VersionHash)
|
||||
i = encodeVarintBep(dAtA, i, uint64(len(m.VersionHash)))
|
||||
i--
|
||||
dAtA[i] = 0x3e
|
||||
i--
|
||||
dAtA[i] = 0xca
|
||||
}
|
||||
if m.LocalFlags != 0 {
|
||||
i = encodeVarintBep(dAtA, i, uint64(m.LocalFlags))
|
||||
i--
|
||||
@@ -2210,6 +2223,10 @@ func (m *FileInfo) ProtoSize() (n int) {
|
||||
if m.LocalFlags != 0 {
|
||||
n += 2 + sovBep(uint64(m.LocalFlags))
|
||||
}
|
||||
l = len(m.VersionHash)
|
||||
if l > 0 {
|
||||
n += 2 + l + sovBep(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@@ -3913,6 +3930,40 @@ func (m *FileInfo) Unmarshal(dAtA []byte) error {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1001:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field VersionHash", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowBep
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthBep
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthBep
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.VersionHash = append(m.VersionHash[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.VersionHash == nil {
|
||||
m.VersionHash = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipBep(dAtA[iNdEx:])
|
||||
|
||||
@@ -119,6 +119,9 @@ message FileInfo {
|
||||
// received (we make sure to zero it), nonetheless we need it on our
|
||||
// struct and to be able to serialize it to/from the database.
|
||||
uint32 local_flags = 1000;
|
||||
// The version_hash is an implementation detail and not part of the wire
|
||||
// format.
|
||||
bytes version_hash = 1001;
|
||||
|
||||
bool deleted = 6;
|
||||
bool invalid = 7 [(gogoproto.customname) = "RawInvalid"];
|
||||
|
||||
@@ -23,6 +23,31 @@ const (
|
||||
Version13HelloMagic uint32 = 0x9F79BC40 // old
|
||||
)
|
||||
|
||||
// FileIntf is the set of methods implemented by both FileInfo and
|
||||
// db.FileInfoTruncated.
|
||||
type FileIntf interface {
|
||||
FileSize() int64
|
||||
FileName() string
|
||||
FileLocalFlags() uint32
|
||||
IsDeleted() bool
|
||||
IsInvalid() bool
|
||||
IsIgnored() bool
|
||||
IsUnsupported() bool
|
||||
MustRescan() bool
|
||||
IsReceiveOnlyChanged() bool
|
||||
IsDirectory() bool
|
||||
IsSymlink() bool
|
||||
ShouldConflict() bool
|
||||
HasPermissionBits() bool
|
||||
SequenceNo() int64
|
||||
BlockSize() int
|
||||
FileVersion() Vector
|
||||
FileType() FileInfoType
|
||||
FilePermissions() uint32
|
||||
FileModifiedBy() ShortID
|
||||
ModTime() time.Time
|
||||
}
|
||||
|
||||
func (m Hello) Magic() uint32 {
|
||||
return HelloMessageMagic
|
||||
}
|
||||
@@ -139,7 +164,7 @@ func (f FileInfo) FileModifiedBy() ShortID {
|
||||
|
||||
// WinsConflict returns true if "f" is the one to choose when it is in
|
||||
// conflict with "other".
|
||||
func (f FileInfo) WinsConflict(other FileInfo) bool {
|
||||
func WinsConflict(f, other FileIntf) bool {
|
||||
// If only one of the files is invalid, that one loses.
|
||||
if f.IsInvalid() != other.IsInvalid() {
|
||||
return !f.IsInvalid()
|
||||
@@ -164,7 +189,7 @@ func (f FileInfo) WinsConflict(other FileInfo) bool {
|
||||
|
||||
// The modification times were equal. Use the device ID in the version
|
||||
// vector as tie breaker.
|
||||
return f.Version.Compare(other.Version) == ConcurrentGreater
|
||||
return f.FileVersion().Compare(other.FileVersion()) == ConcurrentGreater
|
||||
}
|
||||
|
||||
func (f FileInfo) IsEmpty() bool {
|
||||
@@ -360,3 +385,16 @@ func BlocksHash(bs []BlockInfo) []byte {
|
||||
}
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
func VectorHash(v Vector) []byte {
|
||||
h := sha256.New()
|
||||
for _, c := range v.Counters {
|
||||
if err := binary.Write(h, binary.BigEndian, c.ID); err != nil {
|
||||
panic("impossible: failed to write c.ID to hash function: " + err.Error())
|
||||
}
|
||||
if err := binary.Write(h, binary.BigEndian, c.Value); err != nil {
|
||||
panic("impossible: failed to write c.Value to hash function: " + err.Error())
|
||||
}
|
||||
}
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
@@ -14,10 +14,10 @@ func TestWinsConflict(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
if !tc[0].WinsConflict(tc[1]) {
|
||||
if !WinsConflict(tc[0], tc[1]) {
|
||||
t.Errorf("%v should win over %v", tc[0], tc[1])
|
||||
}
|
||||
if tc[1].WinsConflict(tc[0]) {
|
||||
if WinsConflict(tc[1], tc[0]) {
|
||||
t.Errorf("%v should not win over %v", tc[1], tc[0])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,13 +46,13 @@ func DeviceIDFromString(s string) (DeviceID, error) {
|
||||
return n, err
|
||||
}
|
||||
|
||||
func DeviceIDFromBytes(bs []byte) DeviceID {
|
||||
func DeviceIDFromBytes(bs []byte) (DeviceID, error) {
|
||||
var n DeviceID
|
||||
if len(bs) != len(n) {
|
||||
panic("incorrect length of byte slice representing device ID")
|
||||
return n, fmt.Errorf("incorrect length of byte slice representing device ID")
|
||||
}
|
||||
copy(n[:], bs)
|
||||
return n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// String returns the canonical string representation of the device ID
|
||||
|
||||
@@ -99,8 +99,10 @@ func TestShortIDString(t *testing.T) {
|
||||
|
||||
func TestDeviceIDFromBytes(t *testing.T) {
|
||||
id0, _ := DeviceIDFromString(formatted)
|
||||
id1 := DeviceIDFromBytes(id0[:])
|
||||
if id1.String() != formatted {
|
||||
id1, err := DeviceIDFromBytes(id0[:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if id1.String() != formatted {
|
||||
t.Errorf("Wrong device ID, got %q, want %q", id1, formatted)
|
||||
}
|
||||
}
|
||||
@@ -150,7 +152,10 @@ func TestNewDeviceIDMarshalling(t *testing.T) {
|
||||
|
||||
// Verify it's the same
|
||||
|
||||
if DeviceIDFromBytes(msg2.Test) != id0 {
|
||||
id1, err := DeviceIDFromBytes(msg2.Test)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if id1 != id0 {
|
||||
t.Error("Mismatch in old -> new direction")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,7 +56,11 @@ type SessionInvitation struct {
|
||||
}
|
||||
|
||||
func (i SessionInvitation) String() string {
|
||||
return fmt.Sprintf("%s@%s", syncthingprotocol.DeviceIDFromBytes(i.From), i.AddressString())
|
||||
device := "<invalid>"
|
||||
if address, err := syncthingprotocol.DeviceIDFromBytes(i.From); err == nil {
|
||||
device = address.String()
|
||||
}
|
||||
return fmt.Sprintf("%s@%s", device, i.AddressString())
|
||||
}
|
||||
|
||||
func (i SessionInvitation) GoString() string {
|
||||
|
||||
@@ -108,26 +108,19 @@ func Blocks(ctx context.Context, r io.Reader, blocksize int, sizehint int64, cou
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// Validate quickly validates buf against the cryptohash hash (if len(hash)>0)
|
||||
// and the 32-bit hash weakHash (if not zero). It is satisfied if either hash
|
||||
// matches, or neither is given.
|
||||
func Validate(buf, hash []byte, weakHash uint32) bool {
|
||||
rd := bytes.NewReader(buf)
|
||||
if weakHash != 0 {
|
||||
whf := adler32.New()
|
||||
if _, err := io.Copy(whf, rd); err == nil && whf.Sum32() == weakHash {
|
||||
return true
|
||||
}
|
||||
// Copy error or mismatch, go to next algo.
|
||||
rd.Seek(0, io.SeekStart)
|
||||
return adler32.Checksum(buf) == weakHash
|
||||
}
|
||||
|
||||
if len(hash) > 0 {
|
||||
hf := sha256.New()
|
||||
if _, err := io.Copy(hf, rd); err == nil {
|
||||
// Sum allocates, so let's hope we don't hit this often.
|
||||
return bytes.Equal(hf.Sum(nil), hash)
|
||||
}
|
||||
hbuf := sha256.Sum256(buf)
|
||||
return bytes.Equal(hbuf[:], hash)
|
||||
}
|
||||
|
||||
// Both algos failed or no hashes were specified. Assume it's all good.
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -12,11 +12,13 @@ import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
origAdler32 "hash/adler32"
|
||||
mrand "math/rand"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
rollingAdler32 "github.com/chmduquesne/rollinghash/adler32"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
var blocksTestData = []struct {
|
||||
@@ -165,3 +167,43 @@ func TestAdler32Variants(t *testing.T) {
|
||||
hf3.Roll(data[i])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkValidate(b *testing.B) {
|
||||
type block struct {
|
||||
data []byte
|
||||
hash [sha256.Size]byte
|
||||
weakhash uint32
|
||||
}
|
||||
var blocks []block
|
||||
const blocksPerType = 100
|
||||
|
||||
r := mrand.New(mrand.NewSource(0x136bea689e851))
|
||||
|
||||
// Valid blocks.
|
||||
for i := 0; i < blocksPerType; i++ {
|
||||
var b block
|
||||
b.data = make([]byte, 128<<10)
|
||||
r.Read(b.data[:])
|
||||
b.hash = sha256.Sum256(b.data[:])
|
||||
b.weakhash = origAdler32.Checksum(b.data[:])
|
||||
blocks = append(blocks, b)
|
||||
}
|
||||
// Blocks where the hash matches, but the weakhash doesn't.
|
||||
for i := 0; i < blocksPerType; i++ {
|
||||
var b block
|
||||
b.data = make([]byte, 128<<10)
|
||||
r.Read(b.data[:])
|
||||
b.hash = sha256.Sum256(b.data[:])
|
||||
b.weakhash = 1 // Zeros causes Validate to skip the weakhash.
|
||||
blocks = append(blocks, b)
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for _, b := range blocks {
|
||||
Validate(b.data[:], b.hash[:], b.weakhash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -265,7 +265,7 @@ func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protoco
|
||||
|
||||
if ignoredParent == "" {
|
||||
// parent isn't ignored, nothing special
|
||||
return w.handleItem(ctx, path, toHashChan, finishedChan, skip)
|
||||
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
|
||||
}
|
||||
|
||||
// Part of current path below the ignored (potential) parent
|
||||
@@ -274,17 +274,22 @@ func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protoco
|
||||
// ignored path isn't actually a parent of the current path
|
||||
if rel == path {
|
||||
ignoredParent = ""
|
||||
return w.handleItem(ctx, path, toHashChan, finishedChan, skip)
|
||||
return w.handleItem(ctx, path, info, toHashChan, finishedChan, skip)
|
||||
}
|
||||
|
||||
// The previously ignored parent directories of the current, not
|
||||
// ignored path need to be handled as well.
|
||||
if err = w.handleItem(ctx, ignoredParent, toHashChan, finishedChan, skip); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, name := range strings.Split(rel, string(fs.PathSeparator)) {
|
||||
// Prepend an empty string to handle ignoredParent without anything
|
||||
// appended in the first iteration.
|
||||
for _, name := range append([]string{""}, strings.Split(rel, string(fs.PathSeparator))...) {
|
||||
ignoredParent = filepath.Join(ignoredParent, name)
|
||||
if err = w.handleItem(ctx, ignoredParent, toHashChan, finishedChan, skip); err != nil {
|
||||
info, err = w.Filesystem.Lstat(ignoredParent)
|
||||
// An error here would be weird as we've already gotten to this point, but act on it nonetheless
|
||||
if err != nil {
|
||||
w.handleError(ctx, "scan", ignoredParent, err, finishedChan)
|
||||
return skip
|
||||
}
|
||||
if err = w.handleItem(ctx, ignoredParent, info, toHashChan, finishedChan, skip); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -294,16 +299,9 @@ func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protoco
|
||||
}
|
||||
}
|
||||
|
||||
func (w *walker) handleItem(ctx context.Context, path string, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult, skip error) error {
|
||||
info, err := w.Filesystem.Lstat(path)
|
||||
// An error here would be weird as we've already gotten to this point, but act on it nonetheless
|
||||
if err != nil {
|
||||
w.handleError(ctx, "scan", path, err, finishedChan)
|
||||
return skip
|
||||
}
|
||||
|
||||
func (w *walker) handleItem(ctx context.Context, path string, info fs.FileInfo, toHashChan chan<- protocol.FileInfo, finishedChan chan<- ScanResult, skip error) error {
|
||||
oldPath := path
|
||||
path, err = w.normalizePath(path, info)
|
||||
path, err := w.normalizePath(path, info)
|
||||
if err != nil {
|
||||
w.handleError(ctx, "normalizing path", oldPath, err, finishedChan)
|
||||
return skip
|
||||
|
||||
@@ -26,6 +26,7 @@ const (
|
||||
benchmarkingDuration = 150 * time.Millisecond
|
||||
defaultImpl = "crypto/sha256"
|
||||
minioImpl = "minio/sha256-simd"
|
||||
Size = cryptoSha256.Size
|
||||
)
|
||||
|
||||
// May be switched out for another implementation
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "STDISCOSRV" "1" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "STDISCOSRV" "1" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
stdiscosrv \- Syncthing Discovery Server
|
||||
.
|
||||
@@ -286,18 +286,30 @@ Use of a subdomain name without requiring a port number added to the URL
|
||||
.IP \(bu 2
|
||||
Sharing an SSL certificate with multiple services on the same server
|
||||
.UNINDENT
|
||||
.sp
|
||||
Note that after this configuration, if the proxy uses a valid HTTPS
|
||||
certificate, \fBclients should omit the\fP \fB?id=...\fP \fBparameter from the
|
||||
discovery server URL on their configuration\fP\&. Client\-side validation will be
|
||||
done by checking the visible proxy server’s HTTPS certificate. If, however, the
|
||||
proxy uses a self\-signed or somehow invalid certificate, clients must still set
|
||||
the \fB?id=...\fP parameter with the computed hash of the proxy’s
|
||||
certificate. Using such setup is discouraged and is not covered in this page.
|
||||
Always favour using valid and widely recognised certificates.
|
||||
.SS Requirements
|
||||
.INDENT 0.0
|
||||
.IP \(bu 2
|
||||
Run the discovery server using the \-http flag \fBstdiscosrv \-http\fP\&.
|
||||
Run the discovery server using the \-http flag: \fBstdiscosrv \-http\fP\&.
|
||||
.IP \(bu 2
|
||||
SSL certificate/key configured for the reverse proxy
|
||||
SSL certificate/key configured for the reverse proxy.
|
||||
.IP \(bu 2
|
||||
The “X\-Forwarded\-For” http header must be passed through with the client’s
|
||||
real IP address
|
||||
The “X\-Forwarded\-For” HTTP header must be passed through with the client’s
|
||||
real IP address.
|
||||
.IP \(bu 2
|
||||
The “X\-SSL\-Cert” must be passed through with the PEM\-encoded client SSL
|
||||
certificate
|
||||
The “X\-SSL\-Cert” HTTP header must be passed through with the PEM\-encoded
|
||||
client SSL certificate. This will be present in POST requests and may be empty
|
||||
in GET requests from clients. If you see syncthing\-discosrv outputting
|
||||
\fBno certificates\fP when receiving POST requests, that’s because the proxy
|
||||
is not passing this header through.
|
||||
.IP \(bu 2
|
||||
The proxy must request the client SSL certificate but not require it to be
|
||||
signed by a trusted CA.
|
||||
@@ -372,6 +384,43 @@ server {
|
||||
.sp
|
||||
An example of automating the SSL certificates and reverse\-proxying the Discovery
|
||||
Server and Syncthing using Nginx, \fI\%Let’s Encrypt\fP <\fBhttps://letsencrypt.org/\fP> and Docker can be found \fI\%here\fP <\fBhttps://forum.syncthing.net/t/docker-syncthing-and-syncthing-discovery-behind-nginx-reverse-proxy-with-lets-encrypt/6880\fP>\&.
|
||||
.SS Apache
|
||||
.sp
|
||||
The following lines must be added to the configuration:
|
||||
.INDENT 0.0
|
||||
.INDENT 3.5
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
SSLProxyEngine On
|
||||
SSLVerifyClient optional_no_ca
|
||||
RequestHeader set X\-SSL\-Cert "%{SSL_CLIENT_CERT}s"
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
.UNINDENT
|
||||
.sp
|
||||
The following was observed to not be required at least under
|
||||
Apache httpd 2.4.38, as the proxy module adds the needed header by default.
|
||||
If you need to explicitly add the following directive, make sure to issue
|
||||
\fBa2enmod remoteip\fP first. Then, add the following to your Apache httpd
|
||||
configuration:
|
||||
.INDENT 0.0
|
||||
.INDENT 3.5
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
RemoteIPHeader X\-Forwarded\-For
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
.UNINDENT
|
||||
.sp
|
||||
For more details, see also the recommendations in the
|
||||
\fI\%Reverse Proxy Setup\fP <\fBhttps://docs.syncthing.net/users/reverseproxy.html\fP>
|
||||
page. Note that that page is directed at setting up a proxy for the
|
||||
Syncthing web UI. You should do the proper path and port adjustments to proxying
|
||||
the discovery server and your particular setup.
|
||||
.SH SEE ALSO
|
||||
.sp
|
||||
\fBsyncthing\-networking(7)\fP, \fBsyncthing\-faq(7)\fP
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "STRELAYSRV" "1" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "STRELAYSRV" "1" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
strelaysrv \- Syncthing Relay Server
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-BEP" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-BEP" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-bep \- Block Exchange Protocol v1
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-CONFIG" "5" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-CONFIG" "5" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-config \- Syncthing Configuration
|
||||
.
|
||||
@@ -38,8 +38,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
|
||||
.ft C
|
||||
$HOME/.config/syncthing
|
||||
$HOME/Library/Application Support/Syncthing
|
||||
%AppData%/Syncthing
|
||||
%localappdata%/Syncthing
|
||||
%LOCALAPPDATA%/Syncthing
|
||||
.ft P
|
||||
.fi
|
||||
.UNINDENT
|
||||
@@ -49,9 +48,9 @@ $HOME/Library/Application Support/Syncthing
|
||||
Syncthing uses a single directory to store configuration, crypto keys
|
||||
and index caches. The location defaults to \fB$HOME/.config/syncthing\fP
|
||||
(Unix\-like), \fB$HOME/Library/Application Support/Syncthing\fP (Mac),
|
||||
\fB%AppData%/Syncthing\fP (Windows XP) or \fB%LocalAppData%/Syncthing\fP
|
||||
(Windows 7+). It can be changed at runtime using the \fB\-home\fP flag. In this
|
||||
directory the following files are located:
|
||||
or \fB%LOCALAPPDATA%/Syncthing\fP (Windows). It can be changed at runtime
|
||||
using the \fB\-home\fP flag. In this directory the following files are
|
||||
located:
|
||||
.INDENT 0.0
|
||||
.TP
|
||||
.B \fBconfig.xml\fP
|
||||
@@ -81,14 +80,14 @@ The following shows an example of the default configuration file (IDs will diffe
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
<configuration version="26">
|
||||
<folder id="zj2AA\-q55a7" label="Default Folder" path="/Users/jb/Sync/" type="sendreceive" rescanIntervalS="60" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
|
||||
<device id="3LT2GA5\-CQI4XJM\-WTZ264P\-MLOGMHL\-MCRLDNT\-MZV4RD3\-KA745CL\-OGAERQZ"></device>
|
||||
<configuration version="30">
|
||||
<folder id="default" label="Default Folder" path="/Users/jb/Sync/" type="sendreceive" rescanIntervalS="3600" fsWatcherEnabled="true" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
|
||||
<filesystemType>basic</filesystemType>
|
||||
<device id="3LT2GA5\-CQI4XJM\-WTZ264P\-MLOGMHL\-MCRLDNT\-MZV4RD3\-KA745CL\-OGAERQZ"></device>
|
||||
<minDiskFree unit="%">1</minDiskFree>
|
||||
<versioning></versioning>
|
||||
<copiers>0</copiers>
|
||||
<pullers>0</pullers>
|
||||
<pullerMaxPendingKiB>0</pullerMaxPendingKiB>
|
||||
<hashers>0</hashers>
|
||||
<order>random</order>
|
||||
<ignoreDelete>false</ignoreDelete>
|
||||
@@ -100,18 +99,26 @@ The following shows an example of the default configuration file (IDs will diffe
|
||||
<paused>false</paused>
|
||||
<weakHashThresholdPct>25</weakHashThresholdPct>
|
||||
<markerName>.stfolder</markerName>
|
||||
<copyOwnershipFromParent>false</copyOwnershipFromParent>
|
||||
<modTimeWindowS>0</modTimeWindowS>
|
||||
</folder>
|
||||
<device id="3LT2GA5\-CQI4XJM\-WTZ264P\-MLOGMHL\-MCRLDNT\-MZV4RD3\-KA745CL\-OGAERQZ" name="syno" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
|
||||
<address>dynamic</address>
|
||||
<paused>false</paused>
|
||||
<autoAcceptFolders>false</autoAcceptFolders>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<maxRecvKbps>0</maxRecvKbps>
|
||||
<maxRequestKiB>0</maxRequestKiB>
|
||||
</device>
|
||||
<gui enabled="true" tls="false" debugging="false">
|
||||
<address>127.0.0.1:8384</address>
|
||||
<apikey>k1dnz1Dd0rzTBjjFFh7CXPnrF12C49B1</apikey>
|
||||
<theme>default</theme>
|
||||
</gui>
|
||||
<ldap></ldap>
|
||||
<options>
|
||||
<listenAddress>default</listenAddress>
|
||||
<listenAddress>tcp://0.0.0.0:8384</listenAddress>
|
||||
<listenAddress>dynamic+https://relays.syncthing.net/endpoint</listenAddress>
|
||||
<globalAnnounceServer>default</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>true</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
@@ -129,7 +136,7 @@ The following shows an example of the default configuration file (IDs will diffe
|
||||
<natTimeoutSeconds>10</natTimeoutSeconds>
|
||||
<urAccepted>0</urAccepted>
|
||||
<urSeen>0</urSeen>
|
||||
<urUniqueID>LFWe2vn3</urUniqueID>
|
||||
<urUniqueID></urUniqueID>
|
||||
<urURL>https://data.syncthing.net/newdata</urURL>
|
||||
<urPostInsecurely>false</urPostInsecurely>
|
||||
<urInitialDelayS>1800</urInitialDelayS>
|
||||
@@ -145,11 +152,16 @@ The following shows an example of the default configuration file (IDs will diffe
|
||||
<overwriteRemoteDeviceNamesOnConnect>false</overwriteRemoteDeviceNamesOnConnect>
|
||||
<tempIndexMinBlocks>10</tempIndexMinBlocks>
|
||||
<trafficClass>0</trafficClass>
|
||||
<stunServer>default</stunServer>
|
||||
<stunKeepaliveSeconds>24</stunKeepaliveSeconds>
|
||||
<defaultFolderPath>~</defaultFolderPath>
|
||||
<minHomeDiskFreePct>0</minHomeDiskFreePct>
|
||||
<setLowPriority>true</setLowPriority>
|
||||
<maxFolderConcurrency>0</maxFolderConcurrency>
|
||||
<crashReportingURL>https://crash.syncthing.net/newcrash</crashReportingURL>
|
||||
<crashReportingEnabled>true</crashReportingEnabled>
|
||||
<stunKeepaliveStartS>180</stunKeepaliveStartS>
|
||||
<stunKeepaliveMinS>20</stunKeepaliveMinS>
|
||||
<stunServer>default</stunServer>
|
||||
<databaseTuning>auto</databaseTuning>
|
||||
<maxConcurrentIncomingRequestKiB>0</maxConcurrentIncomingRequestKiB>
|
||||
</options>
|
||||
</configuration>
|
||||
.ft P
|
||||
@@ -162,10 +174,11 @@ The following shows an example of the default configuration file (IDs will diffe
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
<configuration version="26">
|
||||
<configuration version="30">
|
||||
<folder></folder>
|
||||
<device></device>
|
||||
<gui></gui>
|
||||
<ldap></ldap>
|
||||
<options></options>
|
||||
<ignoredDevice>5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU</ignoredDevice>
|
||||
<ignoredFolder>bd7q3\-zskm5</ignoredFolder>
|
||||
@@ -203,13 +216,13 @@ logged, but there will be no dialog about it in the web GUI.
|
||||
.sp
|
||||
.nf
|
||||
.ft C
|
||||
<folder id="zj2AA\-q55a7" label="Default Folder" path="/Users/jb/Sync/" type="sendreceive" rescanIntervalS="60" fsWatcherEnabled="false" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
|
||||
<device id="3LT2GA5\-CQI4XJM\-WTZ264P\-MLOGMHL\-MCRLDNT\-MZV4RD3\-KA745CL\-OGAERQZ"></device>
|
||||
<folder id="default" label="Default Folder" path="/Users/jb/Sync/" type="sendreceive" rescanIntervalS="3600" fsWatcherEnabled="true" fsWatcherDelayS="10" ignorePerms="false" autoNormalize="true">
|
||||
<filesystemType>basic</filesystemType>
|
||||
<device id="3LT2GA5\-CQI4XJM\-WTZ264P\-MLOGMHL\-MCRLDNT\-MZV4RD3\-KA745CL\-OGAERQZ"></device>
|
||||
<minDiskFree unit="%">1</minDiskFree>
|
||||
<versioning></versioning>
|
||||
<copiers>0</copiers>
|
||||
<pullers>0</pullers>
|
||||
<pullerMaxPendingKiB>0</pullerMaxPendingKiB>
|
||||
<hashers>0</hashers>
|
||||
<order>random</order>
|
||||
<ignoreDelete>false</ignoreDelete>
|
||||
@@ -221,6 +234,8 @@ logged, but there will be no dialog about it in the web GUI.
|
||||
<paused>false</paused>
|
||||
<weakHashThresholdPct>25</weakHashThresholdPct>
|
||||
<markerName>.stfolder</markerName>
|
||||
<copyOwnershipFromParent>false</copyOwnershipFromParent>
|
||||
<modTimeWindowS>0</modTimeWindowS>
|
||||
</folder>
|
||||
.ft P
|
||||
.fi
|
||||
@@ -370,8 +385,10 @@ sparse files will not be created.
|
||||
.TP
|
||||
.B disableTempIndexes
|
||||
By default, devices exchange information about blocks available in
|
||||
transfers that are still in progress. When set to true, such information
|
||||
is not exchanged for this folder.
|
||||
transfers that are still in progress, which allows other devices to
|
||||
download parts of files that are not yet fully downloaded on your own
|
||||
device, essentially making transfers more torrent like. When set to
|
||||
true, such information is not exchanged for this folder.
|
||||
.TP
|
||||
.B paused
|
||||
True if this folder is (temporarily) suspended.
|
||||
@@ -406,10 +423,15 @@ what you’re doing.
|
||||
.ft C
|
||||
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU" name="syno" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="2CYF2WQ\-AKZO2QZ\-JAKWLYD\-AGHMQUM\-BGXUOIS\-GYILW34\-HJG3DUK\-LRRYQAR">
|
||||
<address>dynamic</address>
|
||||
<paused>false</paused>
|
||||
<autoAcceptFolders>false</autoAcceptFolders>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<maxRecvKbps>0</maxRecvKbps>
|
||||
<maxRequestKiB>0</maxRequestKiB>
|
||||
</device>
|
||||
<device id="2CYF2WQ\-AKZO2QZ\-JAKWLYD\-AGHMQUM\-BGXUOIS\-GYILW34\-HJG3DUK\-LRRYQAR" name="syno local" compression="metadata" introducer="false">
|
||||
<device id="2CYF2WQ\-AKZO2QZ\-JAKWLYD\-AGHMQUM\-BGXUOIS\-GYILW34\-HJG3DUK\-LRRYQAR" name="syno local" compression="metadata" introducer="false" skipIntroductionRemovals="false" introducedBy="">
|
||||
<address>tcp://192.0.2.1:22001</address>
|
||||
<paused>true<paused>
|
||||
<paused>true</paused>
|
||||
<allowedNetwork>192.168.0.0/16</allowedNetwork>
|
||||
<autoAcceptFolders>false</autoAcceptFolders>
|
||||
<maxSendKbps>100</maxSendKbps>
|
||||
@@ -694,7 +716,8 @@ Skip verification (true or false).
|
||||
.nf
|
||||
.ft C
|
||||
<options>
|
||||
<listenAddress>default</listenAddress>
|
||||
<listenAddress>tcp://0.0.0.0:8384</listenAddress>
|
||||
<listenAddress>dynamic+https://relays.syncthing.net/endpoint</listenAddress>
|
||||
<globalAnnounceServer>default</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>true</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
@@ -711,21 +734,33 @@ Skip verification (true or false).
|
||||
<natRenewalMinutes>30</natRenewalMinutes>
|
||||
<natTimeoutSeconds>10</natTimeoutSeconds>
|
||||
<urAccepted>0</urAccepted>
|
||||
<urSeen>0</urSeen>
|
||||
<urUniqueID></urUniqueID>
|
||||
<urURL>https://data.syncthing.net/newdata</urURL>
|
||||
<urPostInsecurely>false</urPostInsecurely>
|
||||
<urInitialDelayS>1800</urInitialDelayS>
|
||||
<restartOnWakeup>true</restartOnWakeup>
|
||||
<autoUpgradeIntervalH>12</autoUpgradeIntervalH>
|
||||
<upgradeToPreReleases>false</upgradeToPreReleases>
|
||||
<keepTemporariesH>24</keepTemporariesH>
|
||||
<cacheIgnoredFiles>false</cacheIgnoredFiles>
|
||||
<progressUpdateIntervalS>5</progressUpdateIntervalS>
|
||||
<limitBandwidthInLan>false</limitBandwidthInLan>
|
||||
<minHomeDiskFree unit="%">1</minHomeDiskFree>
|
||||
<releasesURL>https://api.github.com/repos/syncthing/syncthing/releases?per_page=30</releasesURL>
|
||||
<releasesURL>https://upgrades.syncthing.net/meta.json</releasesURL>
|
||||
<overwriteRemoteDeviceNamesOnConnect>false</overwriteRemoteDeviceNamesOnConnect>
|
||||
<tempIndexMinBlocks>10</tempIndexMinBlocks>
|
||||
<trafficClass>0</trafficClass>
|
||||
<defaultFolderPath>~</defaultFolderPath>
|
||||
<setLowPriority>true</setLowPriority>
|
||||
<maxFolderConcurrency>0</maxFolderConcurrency>
|
||||
<crashReportingURL>https://crash.syncthing.net/newcrash</crashReportingURL>
|
||||
<crashReportingEnabled>true</crashReportingEnabled>
|
||||
<stunKeepaliveStartS>180</stunKeepaliveStartS>
|
||||
<stunKeepaliveMinS>20</stunKeepaliveMinS>
|
||||
<stunServer>default</stunServer>
|
||||
<databaseTuning>auto</databaseTuning>
|
||||
<maxConcurrentIncomingRequestKiB>0</maxConcurrentIncomingRequestKiB>
|
||||
</options>
|
||||
.ft P
|
||||
.fi
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-DEVICE-IDS" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-DEVICE-IDS" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-device-ids \- Understanding Device IDs
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-EVENT-API" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-EVENT-API" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-event-api \- Event API
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-FAQ" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-FAQ" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-faq \- Frequently Asked Questions
|
||||
.
|
||||
@@ -64,34 +64,34 @@ Sync uses an undocumented, closed protocol with unknown security properties.
|
||||
The following things are \fIalways\fP synchronized:
|
||||
.INDENT 0.0
|
||||
.IP \(bu 2
|
||||
File Contents
|
||||
File contents
|
||||
.IP \(bu 2
|
||||
File Modification Times
|
||||
File modification times
|
||||
.UNINDENT
|
||||
.sp
|
||||
The following may be synchronized or not, depending:
|
||||
.INDENT 0.0
|
||||
.IP \(bu 2
|
||||
File Permissions (When supported by file system. On Windows, only the
|
||||
read only bit is synchronized.)
|
||||
File permissions (when supported by file system; on Windows only the
|
||||
read only bit is synchronized)
|
||||
.IP \(bu 2
|
||||
Symbolic Links (synced, except on Windows, but never followed.)
|
||||
Symbolic links (synced, except on Windows, but never followed)
|
||||
.UNINDENT
|
||||
.sp
|
||||
The following are \fInot\fP synchronized;
|
||||
.INDENT 0.0
|
||||
.IP \(bu 2
|
||||
File or Directory Owners and Groups (not preserved)
|
||||
File or directory owners and Groups (not preserved)
|
||||
.IP \(bu 2
|
||||
Directory Modification Times (not preserved)
|
||||
Directory modification times (not preserved)
|
||||
.IP \(bu 2
|
||||
Hard Links (followed, not preserved)
|
||||
Hard links and Windows directory junctions (followed, not preserved)
|
||||
.IP \(bu 2
|
||||
Extended Attributes, Resource Forks (not preserved)
|
||||
Extended attributes, resource forks (not preserved)
|
||||
.IP \(bu 2
|
||||
Windows, POSIX or NFS ACLs (not preserved)
|
||||
.IP \(bu 2
|
||||
Devices, FIFOs, and Other Specials (ignored)
|
||||
Devices, FIFOs, and other specials (ignored)
|
||||
.IP \(bu 2
|
||||
Sparse file sparseness (will become sparse, when supported by the OS & filesystem)
|
||||
.UNINDENT
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-GLOBALDISCO" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-GLOBALDISCO" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-globaldisco \- Global Discovery Protocol v3
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-LOCALDISCO" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-LOCALDISCO" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-localdisco \- Local Discovery Protocol v4
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-NETWORKING" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-NETWORKING" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-networking \- Firewall Setup
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-RELAY" "7" "May 05, 2020" "v1" "Syncthing"
|
||||
.TH "SYNCTHING-RELAY" "7" "May 16, 2020" "v1" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-relay \- Relay Protocol v1
|
||||
.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user