Compare commits

...

24 Commits

Author SHA1 Message Date
Jakob Borg
459a3dc58c Update docs & translations 2016-02-08 17:39:45 +01:00
Jakob Borg
194a8b0922 Merge commit 'a7a9d7d' into v0.12
* commit 'a7a9d7d':
  Return correct content type for /rest/events
  Rename RawAPIKey -> APIKey in GUIConfiguration
  Add -paths option to print config, key, database paths
  Clean up error handling a bit in protocol.readMessage
  Remove old reference to moved protocol
  Support multiple API keys (command-line and config) (fixes #2747)
2016-02-08 17:38:52 +01:00
Jakob Borg
8a8336ae08 Merge branch 'master' into v0.12
* master:
  Update docs & translations
  build.sh prerelease should rebuild author credits in about dialog
  Use dialer in relay checks (fixes #2732)
  Handle null case for invalid ng-model value (fixes #2392)
  Return status code 307 instead of 302 when redirecting from HTTP to HTTPS
  Benchmark for single database update
  Add a CORS handler to deal with preflight OPTIONS requests
  Add letiemble
  Update docs and translations
  Correct order of pkill(1) arguments in debian script (fixes #2728)
2016-01-31 10:39:10 +01:00
Jakob Borg
458e0b3b8b Merge branch 'master' into v0.12
* master:
  Update docs and translations
  Model.internalScanFolder: Don't ignore special .stfolder and .stignore files.
  Model.internalScanFolderSubs: Scan only requested subs.
  A couple of protocol tests
  Humanize serialization of version vectors (again)
  FetchLatestReleases: fix the error log message
  Add postinst script to restart after upgrade
  Templatize Debian files
  Don't require restart for usage reporting changes (fixes #2704)
  RLimit comment typo
2016-01-24 08:09:28 +01:00
Jakob Borg
9758dc6422 Update docs and translations 2016-01-24 08:08:08 +01:00
Jakob Borg
e4a9fb8a27 Merge master into v0.12 (no changes) 2016-01-17 22:13:34 +01:00
Jakob Borg
f0473fde17 Docs and translations update 2016-01-17 10:57:20 +01:00
Jakob Borg
e2980a5210 Don't crash on folder remove while pulling (fixes #2705) 2016-01-17 10:55:39 +01:00
Jakob Borg
764da14440 Improve API/GUI shutdown handling (fixes #2694)
This fixes both a race condition where we could assign s.stop from one
goroutine and then read it from another without locking, and handles the
fact that listener may be nil at shutdown if we've had a bad
CommitConfiguration call in the meantime.
2016-01-17 10:55:26 +01:00
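(For reference, a minimal hedged sketch of the pattern this commit describes — a stop channel that one goroutine assigns and another reads, serialized through a mutex, with a nil check covering the failed-startup case. All names below are illustrative stand-ins, not the actual Syncthing types.)

package apisketch

import "sync"

// apiService is a hypothetical stand-in for the API/GUI service: Serve
// publishes the stop channel under the lock, Stop closes it under the same
// lock, so the two goroutines never touch the field unsynchronized.
type apiService struct {
	mut  sync.Mutex
	stop chan struct{}
}

func (s *apiService) Serve() {
	s.mut.Lock()
	s.stop = make(chan struct{})
	stop := s.stop
	s.mut.Unlock()

	<-stop // serve until Stop is called
}

// Stop tolerates the case where startup failed and the channel (or, in the
// real code, the listener) was never set.
func (s *apiService) Stop() {
	s.mut.Lock()
	defer s.mut.Unlock()
	if s.stop != nil {
		close(s.stop)
		s.stop = nil
	}
}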
Audrius Butkevicius
7da6c627fe Handle race within the job queue (fixes #1263) 2016-01-17 10:55:19 +01:00
Jakob Borg
0a092b5b7f Codesign binaries in Mac OS X distribution packages 2016-01-17 10:54:42 +01:00
Jakob Borg
9f8af2327d Update docs & translations 2016-01-13 21:11:02 +01:00
kluppy
345e24142e Update 'Edit' menu to 'Action' menu (fixes #2662) 2016-01-13 21:09:17 +01:00
kluppy
0fdd03ddee Fix location of build translation scripts. 2016-01-13 21:08:34 +01:00
Jakob Borg
a0fa288cb6 Undo the hash algorithm additions; retain flag checks 2016-01-12 16:03:06 +01:00
Jakob Borg
70bac24832 Always run relaying when enabled (fixes #2665) 2016-01-12 16:02:59 +01:00
Jakob Borg
1df40fbdeb Mend protocol tests, for sure 2016-01-12 16:02:38 +01:00
Jakob Borg
91e9ffff85 Rebuild assets 2016-01-12 14:15:00 +01:00
Jakob Borg
853df14e2f Improve protocol tests, close handling 2016-01-12 10:08:50 +01:00
Jakob Borg
e17a772bb6 Don't leak sendIndexes on disconnect (fixes #2589)
Adds a Closed() method on protocol.Connection and clears up
wireformatConnection a little too.
2016-01-12 10:08:41 +01:00
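(A hedged sketch of what such a Closed() accessor can look like: a channel that is closed exactly once when the connection goes away, so per-connection goroutines — index senders included — can select on it and exit instead of leaking. Only the Closed() name comes from the commit message; everything else is assumed for illustration.)

package connsketch

import "sync"

type rawConnection struct {
	closed    chan struct{}
	closeOnce sync.Once
}

func newRawConnection() *rawConnection {
	return &rawConnection{closed: make(chan struct{})}
}

// Closed returns the channel that signals connection teardown.
func (c *rawConnection) Closed() chan struct{} {
	return c.closed
}

// close may be reached from several paths; sync.Once keeps the channel from
// being closed twice.
func (c *rawConnection) close() {
	c.closeOnce.Do(func() { close(c.closed) })
}

// A sender goroutine would then look roughly like:
//
//	for {
//		select {
//		case <-conn.Closed():
//			return // stop cleanly on disconnect
//		case batch := <-updates:
//			send(batch)
//		}
//	}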
Audrius Butkevicius
90e027d9a4 Silence the linter 2016-01-12 10:08:32 +01:00
alessandro.g89
fdc9a5d8b0 Add dark theme by alessandro.g89
Source: https://userstyles.org/styles/122502/syncthing-dark
2016-01-12 10:08:23 +01:00
Audrius Butkevicius
543891a0a0 Add support for themes (fixes #1925) 2016-01-12 10:08:04 +01:00
Jakob Borg
06921443fc Docs & translations update 2016-01-10 10:10:53 +01:00
37 changed files with 228 additions and 613 deletions

View File

@@ -48,7 +48,7 @@ var locations = map[locationEnum]string{
locKeyFile: "${config}/key.pem",
locHTTPSCertFile: "${config}/https-cert.pem",
locHTTPSKeyFile: "${config}/https-key.pem",
locDatabase: "${config}/index-v0.13.0.db",
locDatabase: "${config}/index-v0.11.0.db",
locLogFile: "${config}/syncthing.log", // -logfile on Windows
locCsrfTokens: "${config}/csrftokens.txt",
locPanicLog: "${config}/panic-${timestamp}.log",

View File

@@ -49,16 +49,15 @@ import (
)
var (
Version = "unknown-dev"
Codename = "Copper Cockroach"
BuildStamp = "0"
BuildDate time.Time
BuildHost = "unknown"
BuildUser = "unknown"
IsRelease bool
IsBeta bool
LongVersion string
allowedVersionExp = regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z0-9]+)*(\.\d+)*(\+\d+-g[0-9a-f]+)?(-dirty)?$`)
Version = "unknown-dev"
Codename = "Beryllium Bedbug"
BuildStamp = "0"
BuildDate time.Time
BuildHost = "unknown"
BuildUser = "unknown"
IsRelease bool
IsBeta bool
LongVersion string
)
const (
@@ -90,8 +89,9 @@ const (
func init() {
if Version != "unknown-dev" {
// If not a generic dev build, version string should come from git describe
if !allowedVersionExp.MatchString(Version) {
l.Fatalf("Invalid version string %q;\n\tdoes not match regexp %v", Version, allowedVersionExp)
exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z0-9]+)*(\+\d+-g[0-9a-f]+)?(-dirty)?$`)
if !exp.MatchString(Version) {
l.Fatalf("Invalid version string %q;\n\tdoes not match regexp %v", Version, exp)
}
}
@@ -340,7 +340,7 @@ func main() {
if err != nil {
l.Fatalln("Upgrade:", err) // exits 1
}
l.Infoln("Upgraded from", options.upgradeTo)
l.Okln("Upgraded from", options.upgradeTo)
return
}
@@ -464,14 +464,14 @@ func performUpgrade(release upgrade.Release) {
if err != nil {
l.Fatalln("Upgrade:", err)
}
l.Infof("Upgraded to %q", release.Tag)
l.Okf("Upgraded to %q", release.Tag)
} else {
l.Infoln("Attempting upgrade through running Syncthing...")
err = upgradeViaRest()
if err != nil {
l.Fatalln("Upgrade:", err)
}
l.Infoln("Syncthing upgrading")
l.Okln("Syncthing upgrading")
os.Exit(exitUpgrading)
}
}
@@ -640,7 +640,6 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
dbFile := locations[locDatabase]
ldb, err := db.Open(dbFile)
if err != nil {
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}
@@ -852,7 +851,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
mainService.Stop()
l.Infoln("Exiting")
l.Okln("Exiting")
if runtimeOptions.cpuProfile {
pprof.StopCPUProfile()
@@ -1171,13 +1170,12 @@ func autoUpgrade(cfg *config.Wrapper) {
// suitable time after they have gone out of fashion.
func cleanConfigDirectory() {
patterns := map[string]time.Duration{
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
}
for pat, dur := range patterns {

View File

@@ -175,29 +175,3 @@ func TestShortIDCheck(t *testing.T) {
t.Error("Should have gotten an error")
}
}
func TestAllowedVersions(t *testing.T) {
testcases := []struct {
ver string
allowed bool
}{
{"v0.13.0", true},
{"v0.12.11+22-gabcdef0", true},
{"v0.13.0-beta0", true},
{"v0.13.0-beta47", true},
{"v0.13.0-beta47+1-gabcdef0", true},
{"v0.13.0-beta.0", true},
{"v0.13.0-beta.47", true},
{"v0.13.0-beta.0+1-gabcdef0", true},
{"v0.13.0-beta.47+1-gabcdef0", true},
{"v0.13.0-some-weird-but-allowed-tag", true},
{"v0.13.0-not.allowed.to.do.this", false},
{"v0.13.0+not.allowed.to.do.this", false},
}
for i, c := range testcases {
if allowed := allowedVersionExp.MatchString(c.ver); allowed != c.allowed {
t.Errorf("%d: incorrect result %v != %v for %q", i, allowed, c.allowed, c.ver)
}
}
}

View File

@@ -76,7 +76,7 @@
"Generate": "Generoi",
"Global Discovery": "Globaali etsintä",
"Global Discovery Server": "Globaali etsintäpalvelin",
"Global Discovery Servers": "Global Discovery Servers",
"Global Discovery Servers": "Globaalit etsintäpalvelimet",
"Global State": "Globaali tila",
"Help": "Apua",
"Home page": "Kotisivu",

View File

@@ -18,7 +18,7 @@
"Alphabetic": "Alphabétique",
"An external command handles the versioning. It has to remove the file from the synced folder.": "Une commande externe gère les versions de fichiers. Elle supprime les fichiers dans le dossier synchronisé.",
"Anonymous Usage Reporting": "Rapport anonyme de statistiques d'utilisation",
"Any devices configured on an introducer device will be added to this device as well.": "Toute machine ajoutée depuis une machine introductrice sera aussi ajoutée sur cette machine.",
"Any devices configured on an introducer device will be added to this device as well.": "Toute machine ajoutée depuis une machine initiatrice sera aussi ajoutée sur cette machine.",
"Automatic upgrades": "Mises à jour automatiques",
"Be careful!": "Faites attention !",
"Bugs": "Bugs",

View File

@@ -12,7 +12,7 @@
"Address": "Adresas",
"Addresses": "Adresai",
"Advanced": "Pažangus",
"Advanced Configuration": "Pažangus nustatymai",
"Advanced Configuration": "Išplėstinė konfigūracija",
"All Data": "Visiems duomenims",
"Allow Anonymous Usage Reporting?": "Siųsti anonimišką vartojimo ataskaitą?",
"Alphabetic": "Abėcėlės tvarka",
@@ -34,7 +34,7 @@
"Copied from original": "Nukopijuota iš originalo",
"Copyright © 2015 the following Contributors:": "Visos teisės saugomos © 2015 šių bendraautorių:",
"Danger!": "Pavojus!",
"Delete": "Trinti",
"Delete": "Ištrinti",
"Deleted": "Ištrinta",
"Device ID": "Įrenginio ID",
"Device Identification": "Įrenginio identifikacija",
@@ -111,15 +111,15 @@
"OK": "Gerai",
"Off": "Netaikoma",
"Oldest First": "Seniausi pirmiau",
"Options": "Nustatymai",
"Options": "Parametrai",
"Out of Sync": "Išsisinchronizavę",
"Out of Sync Items": "Nesutikrinta",
"Outgoing Rate Limit (KiB/s)": "Išeinančio srauto maksimalus greitis (KiB/s)",
"Override Changes": "Perrašyti pakeitimus",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Kelias iki aplanko šiame kompiuteryje. Bus sukurtas, jei neegzistuoja. Tildės simbolis (~) gali būti naudojamas kaip trumpinys",
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Kelias, kur bus saugomos versijos (palikite tuščią numatytam .stversions aplankui).",
"Pause": "Sustabdyti",
"Paused": "Sustabdyta",
"Pause": "Pristabdyti",
"Paused": "Pristabdyta",
"Please consult the release notes before performing a major upgrade.": "Peržvelkite laidos informaciją prieš atlikdami stambų atnaujinimą.",
"Please set a GUI Authentication User and Password in the Settings dialog.": "Prašome nustatymų dialoge nustatyti valdymo skydelio vartotojo vardą ir slaptažodį.",
"Please wait": "Prašome palaukti",

View File

@@ -1,5 +1,5 @@
{
"A device with that ID is already added.": "A device with that ID is already added.",
"A device with that ID is already added.": "Urządzenie o tym ID jest już dodane.",
"A negative number of days doesn't make sense.": "Ujemna ilość dni nie ma sensu.",
"A new major version may not be compatible with previous versions.": "Nowa wersja może być niekompatybilna z poprzednimi wersjami.",
"API Key": "Klucz API",
@@ -33,7 +33,7 @@
"Copied from elsewhere": "Skopiowane z innego miejsca ",
"Copied from original": "Skopiowane z oryginału",
"Copyright © 2015 the following Contributors:": "Copyright © 2015: ",
"Danger!": "Danger!",
"Danger!": "Niebezpieczne!",
"Delete": "Usuń",
"Deleted": "Usunięto",
"Device ID": "ID urządzenia",
@@ -51,7 +51,7 @@
"Edit Device": "Edytuj urządzenie",
"Edit Folder": "Edytuj folder",
"Editing": "Edytowanie",
"Enable Relaying": "Enable Relaying",
"Enable Relaying": "Włącz przekazywanie",
"Enable UPnP": "Włącz UPnP",
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Wpisz oddzielone przecinkiem adresy (\"tcp://ip:port\", \"tcp://host:port\") lub \"dynamic\" by przeprowadzić automatyczne odnalezienie adresu.",
"Enter ignore patterns, one per line.": "Wprowadź wzorce ignorowania, jeden w każdej linii.",
@@ -76,7 +76,7 @@
"Generate": "Generuj",
"Global Discovery": "Globalne odnajdywanie",
"Global Discovery Server": "Globalny serwer rozgłoszeniowy",
"Global Discovery Servers": "Global Discovery Servers",
"Global Discovery Servers": "Globalne serwery odkrywania",
"Global State": "Status globalny",
"Help": "Pomoc",
"Home page": "Strona domowa",
@@ -121,14 +121,14 @@
"Pause": "Zatrzymaj",
"Paused": "Zatrzymany",
"Please consult the release notes before performing a major upgrade.": "Zaleca się przeanalizowanie \"release notes\" przed przeprowadzeniem znaczącej aktualizacji.",
"Please set a GUI Authentication User and Password in the Settings dialog.": "Please set a GUI Authentication User and Password in the Settings dialog.",
"Please set a GUI Authentication User and Password in the Settings dialog.": "Ustaw proszę użytkownika i hasło dostępowe do GUI w Ustawieniach",
"Please wait": "Proszę czekać",
"Preview": "Podgląd",
"Preview Usage Report": "Podgląd raportu użycia.",
"Quick guide to supported patterns": "Krótki przewodnik po obsługiwanych wzorcach",
"RAM Utilization": "Użycie pamięci RAM",
"Random": "Losowo",
"Relay Servers": "Relay Servers",
"Relay Servers": "Serwery przekazywania",
"Relayed via": "Przekazane przez",
"Relays": "Przekaźniki",
"Release Notes": "Informacje o wydaniu",
@@ -142,7 +142,7 @@
"Resume": "Wznów",
"Reused": "Ponownie użyte",
"Save": "Zapisz",
"Scan Time Remaining": "Scan Time Remaining",
"Scan Time Remaining": "Pozostały czas skanowania",
"Scanning": "Skanowanie",
"Select the devices to share this folder with.": "Wybierz urządzenie, któremu udostępnić folder.",
"Select the folders to share with this device.": "Wybierz foldery do współdzielenia z tym urządzeniem.",
@@ -155,7 +155,7 @@
"Shared With": "Współdzielony z",
"Short identifier for the folder. Must be the same on all cluster devices.": "Krótki identyfikator folderu. Musi być taki sam na wszystkich urządzeniach.",
"Show ID": "Pokaż ID",
"Show QR": "Show QR",
"Show QR": "Pokaż kod QR",
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Pokazane w statusie zamiast ID urządzenia.Zostanie wysłane do innych urządzeń jako opcjonalna domyślna nazwa.",
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Pokazane w statusie zamiast ID urządzenia. Zostanie zaktualizowane do nazwy urządzenia jeżeli pozostanie puste.",
"Shutdown": "Wyłącz",
@@ -177,11 +177,11 @@
"Syncthing is upgrading.": "Aktualizowanie Syncthing",
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing wydaje się być wyłączony lub jest problem z twoim połączeniem internetowym. Próbuje ponownie...",
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing nie może przetworzyć twojego zapytania. Proszę przeładuj stronę lub zrestartuj Syncthing, jeśli problem pozostanie.",
"The Syncthing admin interface is configured to allow remote access without a password.": "The Syncthing admin interface is configured to allow remote access without a password.",
"The Syncthing admin interface is configured to allow remote access without a password.": "Interfejs administracyjny Syncthing jest skonfigurowany w sposób pozwalający na zdalny dostęp bez hasła.",
"The aggregated statistics are publicly available at {%url%}.": "Zebrane statystyki są publicznie dostępne pod adresem {{url}}.",
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Konfiguracja została zapisana lecz nie jest aktywna. Syncthing musi zostać zrestartowany aby aktywować nową konfiguracje.",
"The device ID cannot be blank.": "ID urządzenia nie może być puste.",
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).",
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "ID urządzenia może być znalezione w \"Akcja > Pokaż ID\" na innym urządzeniu. Spacje i myślniki są opcjonalne (są one ignorowane).",
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "ID urządzenia można znaleźć w \"Edytuj -> Pokaż ID\" na zdalnym urządzeniu.\nOdstępy i myślniki są opcjonalne (ignorowane)",
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Zaszyfrowane raporty użycia są wysyłane codziennie. Są one używane w celach statystycznych platform, rozmiarów katalogów i wersji programu. Jeżeli zgłaszane dane ulegną zmianie, ponownie wyświetli się ta informacja.",
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Wprowadzone ID urządzenia wygląda na niepoprawne. Musi zawierać 52 lub 56 znaków składających się z liter i cyfr. Odstępy i myślniki są opcjonalne.",
@@ -203,7 +203,7 @@
"The rate limit must be a non-negative number (0: no limit)": "Ograniczenie prędkości powinno być nieujemną liczbą całkowitą (0: brak ograniczeń)",
"The rescan interval must be a non-negative number of seconds.": "Interwał skanowania musi być niezerową liczbą sekund.",
"They are retried automatically and will be synced when the error is resolved.": "Ponowne próby zachodzą automatycznie, synchronizacja nastąpi po usunięciu usterki.",
"This can easily give hackers access to read and change any files on your computer.": "This can easily give hackers access to read and change any files on your computer.",
"This can easily give hackers access to read and change any files on your computer.": "Może to umożliwić osobom trzecim dostęp do odczytu i zmian dowolnych plików na urządzeniu.",
"This is a major version upgrade.": "To jest ważna aktualizacja",
"Trash Can File Versioning": "Kontrola werjsi plików w koszu",
"Unknown": "Nieznany",

View File

File diff suppressed because one or more lines are too long

lib/db/.gitignore vendored
View File

@@ -1,2 +1 @@
!*.zip
testdata/*.db

View File

@@ -4,9 +4,16 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package db
import (
"bytes"
"encoding/binary"
"fmt"
@@ -23,10 +30,10 @@ const maxBatchSize = 256 << 10
type BlockMap struct {
db *Instance
folder uint32
folder string
}
func NewBlockMap(db *Instance, folder uint32) *BlockMap {
func NewBlockMap(db *Instance, folder string) *BlockMap {
return &BlockMap{
db: db,
folder: folder,
@@ -116,7 +123,7 @@ func (m *BlockMap) Discard(files []protocol.FileInfo) error {
// Drop block map, removing all entries related to this block map from the db.
func (m *BlockMap) Drop() error {
batch := new(leveldb.Batch)
iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:keyPrefixLen+keyFolderLen]), nil)
iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:1+64]), nil)
defer iter.Release()
for iter.Next() {
if batch.Len() > maxBatchSize {
@@ -166,13 +173,12 @@ func (f *BlockFinder) String() string {
func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
var key []byte
for _, folder := range folders {
folderID := f.db.folderIdx.ID([]byte(folder))
key = blockKeyInto(key, hash, folderID, "")
key = blockKeyInto(key, hash, folder, "")
iter := f.db.NewIterator(util.BytesPrefix(key), nil)
defer iter.Release()
for iter.Next() && iter.Error() == nil {
file := blockKeyName(iter.Key())
folder, file := fromBlockKey(iter.Key())
index := int32(binary.BigEndian.Uint32(iter.Value()))
if iterFn(folder, osutil.NativeFilename(file), index) {
return true
@@ -188,41 +194,48 @@ func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []b
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(index))
folderID := f.db.folderIdx.ID([]byte(folder))
batch := new(leveldb.Batch)
batch.Delete(blockKeyInto(nil, oldHash, folderID, file))
batch.Put(blockKeyInto(nil, newHash, folderID, file), buf)
batch.Delete(blockKeyInto(nil, oldHash, folder, file))
batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
return f.db.Write(batch, nil)
}
// m.blockKey returns a byte slice encoding the following information:
// keyTypeBlock (1 byte)
// folder (4 bytes)
// folder (64 bytes)
// block hash (32 bytes)
// file name (variable size)
func blockKeyInto(o, hash []byte, folder uint32, file string) []byte {
reqLen := keyPrefixLen + keyFolderLen + keyHashLen + len(file)
func blockKeyInto(o, hash []byte, folder, file string) []byte {
reqLen := 1 + 64 + 32 + len(file)
if cap(o) < reqLen {
o = make([]byte, reqLen)
} else {
o = o[:reqLen]
}
o[0] = KeyTypeBlock
binary.BigEndian.PutUint32(o[keyPrefixLen:], folder)
copy(o[keyPrefixLen+keyFolderLen:], []byte(hash))
copy(o[keyPrefixLen+keyFolderLen+keyHashLen:], []byte(file))
copy(o[1:], []byte(folder))
for i := len(folder); i < 64; i++ {
o[1+i] = 0
}
copy(o[1+64:], []byte(hash))
copy(o[1+64+32:], []byte(file))
return o
}
// blockKeyName returns the file name from the block key
func blockKeyName(data []byte) string {
if len(data) < keyPrefixLen+keyFolderLen+keyHashLen+1 {
func fromBlockKey(data []byte) (string, string) {
if len(data) < 1+64+32+1 {
panic("Incorrect key length")
}
if data[0] != KeyTypeBlock {
panic("Incorrect key type")
}
file := string(data[keyPrefixLen+keyFolderLen+keyHashLen:])
return file
file := string(data[1+64+32:])
slice := data[1 : 1+64]
izero := bytes.IndexByte(slice, 0)
if izero > -1 {
return string(slice[:izero]), file
}
return string(slice), file
}
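(A hedged worked example of the fixed-width key layout used by blockKeyInto and fromBlockKey above: one type byte, a folder name zero-padded to 64 bytes, a 32-byte block hash, then the file name. The constant and helper below are local to this sketch, not the lib/db identifiers.)

package main

import (
	"bytes"
	"fmt"
)

const keyTypeBlock = 1 // placeholder value for the sketch

// makeBlockKey mirrors the layout described in the comment above blockKeyInto.
func makeBlockKey(folder string, hash []byte, file string) []byte {
	key := make([]byte, 1+64+32+len(file))
	key[0] = keyTypeBlock
	copy(key[1:1+64], folder)     // remaining folder bytes stay zero
	copy(key[1+64:1+64+32], hash) // 32-byte block hash
	copy(key[1+64+32:], file)     // variable-length file name
	return key
}

func main() {
	hash := bytes.Repeat([]byte{0xab}, 32)
	key := makeBlockKey("default", hash, "f/f2/example")
	fmt.Println(len(key)) // 1 + 64 + 32 + 12 = 109

	// Recovering the folder means trimming at the first zero byte, exactly
	// as fromBlockKey does.
	folder := key[1 : 1+64]
	if i := bytes.IndexByte(folder, 0); i >= 0 {
		folder = folder[:i]
	}
	fmt.Printf("%s %s\n", folder, key[1+64+32:]) // default f/f2/example
}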

View File

@@ -10,7 +10,6 @@ import (
"testing"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syndtr/goleveldb/leveldb/util"
)
func genBlocks(n int) []protocol.BlockInfo {
@@ -56,7 +55,7 @@ func setup() (*Instance, *BlockFinder) {
}
func dbEmpty(db *Instance) bool {
iter := db.NewIterator(util.BytesPrefix([]byte{KeyTypeBlock}), nil)
iter := db.NewIterator(nil, nil)
defer iter.Release()
if iter.Next() {
return false
@@ -71,7 +70,7 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
t.Fatal("db not empty")
}
m := NewBlockMap(db, db.folderIdx.ID([]byte("folder1")))
m := NewBlockMap(db, "folder1")
f3.Flags |= protocol.FlagDirectory
@@ -153,8 +152,8 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
func TestBlockFinderLookup(t *testing.T) {
db, f := setup()
m1 := NewBlockMap(db, db.folderIdx.ID([]byte("folder1")))
m2 := NewBlockMap(db, db.folderIdx.ID([]byte("folder2")))
m1 := NewBlockMap(db, "folder1")
m2 := NewBlockMap(db, "folder2")
err := m1.Add([]protocol.FileInfo{f1})
if err != nil {
@@ -222,7 +221,7 @@ func TestBlockFinderFix(t *testing.T) {
return true
}
m := NewBlockMap(db, db.folderIdx.ID([]byte("folder1")))
m := NewBlockMap(db, "folder1")
err := m.Add([]protocol.FileInfo{f1})
if err != nil {
t.Fatal(err)

View File

@@ -42,8 +42,6 @@ const (
KeyTypeDeviceStatistic
KeyTypeFolderStatistic
KeyTypeVirtualMtime
KeyTypeFolderIdx
KeyTypeDeviceIdx
)
type fileVersion struct {

View File

@@ -1,114 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package db
import (
"bytes"
"github.com/syndtr/goleveldb/leveldb"
)
// convertKeyFormat converts from the v0.12 to the v0.13 database format, to
// avoid having to do rescan. The change is in the key format for folder
// labels, so we basically just iterate over the database rewriting keys as
// necessary.
func convertKeyFormat(from, to *leveldb.DB) error {
l.Infoln("Converting database key format")
blocks, files, globals, unchanged := 0, 0, 0, 0
dbi := newDBInstance(to)
i := from.NewIterator(nil, nil)
for i.Next() {
key := i.Key()
switch key[0] {
case KeyTypeBlock:
folder, file := oldFromBlockKey(key)
folderIdx := dbi.folderIdx.ID([]byte(folder))
hash := key[1+64:]
newKey := blockKeyInto(nil, hash, folderIdx, file)
if err := to.Put(newKey, i.Value(), nil); err != nil {
return err
}
blocks++
case KeyTypeDevice:
newKey := dbi.deviceKey(oldDeviceKeyFolder(key), oldDeviceKeyDevice(key), oldDeviceKeyName(key))
if err := to.Put(newKey, i.Value(), nil); err != nil {
return err
}
files++
case KeyTypeGlobal:
newKey := dbi.globalKey(oldGlobalKeyFolder(key), oldGlobalKeyName(key))
if err := to.Put(newKey, i.Value(), nil); err != nil {
return err
}
globals++
case KeyTypeVirtualMtime:
// Cannot be converted, we drop it instead :(
default:
if err := to.Put(key, i.Value(), nil); err != nil {
return err
}
unchanged++
}
}
l.Infof("Converted %d blocks, %d files, %d globals (%d unchanged).", blocks, files, globals, unchanged)
return nil
}
func oldDeviceKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
return folder
}
return folder[:izero]
}
func oldDeviceKeyDevice(key []byte) []byte {
return key[1+64 : 1+64+32]
}
func oldDeviceKeyName(key []byte) []byte {
return key[1+64+32:]
}
func oldGlobalKeyName(key []byte) []byte {
return key[1+64:]
}
func oldGlobalKeyFolder(key []byte) []byte {
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
return folder
}
return folder[:izero]
}
func oldFromBlockKey(data []byte) (string, string) {
if len(data) < 1+64+32+1 {
panic("Incorrect key length")
}
if data[0] != KeyTypeBlock {
panic("Incorrect key type")
}
file := string(data[1+64+32:])
slice := data[1 : 1+64]
izero := bytes.IndexByte(slice, 0)
if izero > -1 {
return string(slice[:izero]), file
}
return string(slice), file
}

View File

@@ -1,136 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package db
import (
"archive/zip"
"io"
"os"
"path/filepath"
"testing"
"github.com/syndtr/goleveldb/leveldb"
)
func TestLabelConversion(t *testing.T) {
os.RemoveAll("testdata/oldformat.db")
defer os.RemoveAll("testdata/oldformat.db")
os.RemoveAll("testdata/newformat.db")
defer os.RemoveAll("testdata/newformat.db")
if err := unzip("testdata/oldformat.db.zip", "testdata"); err != nil {
t.Fatal(err)
}
odb, err := leveldb.OpenFile("testdata/oldformat.db", nil)
if err != nil {
t.Fatal(err)
}
ldb, err := leveldb.OpenFile("testdata/newformat.db", nil)
if err != nil {
t.Fatal(err)
}
if err = convertKeyFormat(odb, ldb); err != nil {
t.Fatal(err)
}
ldb.Close()
odb.Close()
inst, err := Open("testdata/newformat.db")
if err != nil {
t.Fatal(err)
}
fs := NewFileSet("default", inst)
files, deleted, _ := fs.GlobalSize()
if files+deleted != 953 {
// Expected number of global entries determined by
// ../../bin/stindex testdata/oldformat.db/ | grep global | grep -c default
t.Errorf("Conversion error, global list differs (%d != 953)", files+deleted)
}
files, deleted, _ = fs.LocalSize()
if files+deleted != 953 {
t.Errorf("Conversion error, device list differs (%d != 953)", files+deleted)
}
f := NewBlockFinder(inst)
// [block] F:"default" H:1c25dea9003cc16216e2a22900be1ec1cc5aaf270442904e2f9812c314e929d8 N:"f/f2/f25f1b3e6e029231b933531b2138796d" I:3
h := []byte{0x1c, 0x25, 0xde, 0xa9, 0x00, 0x3c, 0xc1, 0x62, 0x16, 0xe2, 0xa2, 0x29, 0x00, 0xbe, 0x1e, 0xc1, 0xcc, 0x5a, 0xaf, 0x27, 0x04, 0x42, 0x90, 0x4e, 0x2f, 0x98, 0x12, 0xc3, 0x14, 0xe9, 0x29, 0xd8}
found := 0
f.Iterate([]string{"default"}, h, func(folder, file string, idx int32) bool {
if folder == "default" && file == filepath.FromSlash("f/f2/f25f1b3e6e029231b933531b2138796d") && idx == 3 {
found++
}
return true
})
if found != 1 {
t.Errorf("Found %d blocks instead of expected 1", found)
}
inst.Close()
}
func unzip(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer func() {
if err := r.Close(); err != nil {
panic(err)
}
}()
os.MkdirAll(dest, 0755)
// Closure to address file descriptors issue with all the deferred .Close() methods
extractAndWriteFile := func(f *zip.File) error {
rc, err := f.Open()
if err != nil {
return err
}
defer func() {
if err := rc.Close(); err != nil {
panic(err)
}
}()
path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(path, f.Mode())
} else {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
panic(err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return err
}
}
return nil
}
for _, f := range r.File {
err := extractAndWriteFile(f)
if err != nil {
return err
}
}
return nil
}

View File

@@ -8,15 +8,11 @@ package db
import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"sort"
"strings"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -29,33 +25,14 @@ type deletionHandler func(t readWriteTransaction, folder, device, name []byte, d
type Instance struct {
*leveldb.DB
folderIdx *smallIndex
deviceIdx *smallIndex
}
const (
keyPrefixLen = 1
keyFolderLen = 4 // indexed
keyDeviceLen = 4 // indexed
keyHashLen = 32
)
func Open(file string) (*Instance, error) {
opts := &opt.Options{
OpenFilesCacheCapacity: 100,
WriteBuffer: 4 << 20,
}
if _, err := os.Stat(file); os.IsNotExist(err) {
// The file we are looking to open does not exist. This may be the
// first launch so we should look for an old version and try to
// convert it.
if err := checkConvertDatabase(file); err != nil {
l.Infoln("Converting old database:", err)
l.Infoln("Will rescan from scratch.")
}
}
db, err := leveldb.OpenFile(file, opts)
if leveldbIsCorrupted(err) {
db, err = leveldb.RecoverFile(file, opts)
@@ -83,12 +60,9 @@ func OpenMemory() *Instance {
}
func newDBInstance(db *leveldb.DB) *Instance {
i := &Instance{
return &Instance{
DB: db,
}
i.folderIdx = newSmallIndex(i, []byte{KeyTypeFolderIdx})
i.deviceIdx = newSmallIndex(i, []byte{KeyTypeDeviceIdx})
return i
}
func (db *Instance) Compact() error {
@@ -98,10 +72,13 @@ func (db *Instance) Compact() error {
func (db *Instance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) int64 {
sort.Sort(fileList(fs)) // sort list on name, same as in the database
start := db.deviceKey(folder, device, nil) // before all folder/device files
limit := db.deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
t := db.newReadWriteTransaction()
defer t.close()
dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)[:keyPrefixLen+keyFolderLen+keyDeviceLen]), nil)
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
moreDb := dbi.Next()
@@ -260,10 +237,13 @@ func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, l
}
func (db *Instance) withHave(folder, device []byte, truncate bool, fn Iterator) {
start := db.deviceKey(folder, device, nil) // before all folder/device files
limit := db.deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
t := db.newReadOnlyTransaction()
defer t.close()
dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)[:keyPrefixLen+keyFolderLen+keyDeviceLen]), nil)
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
for dbi.Next() {
@@ -278,10 +258,13 @@ func (db *Instance) withHave(folder, device []byte, truncate bool, fn Iterator)
}
func (db *Instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
start := db.deviceKey(folder, nil, nil) // before all folder/device files
limit := db.deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
t := db.newReadWriteTransaction()
defer t.close()
dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, nil, nil)[:keyPrefixLen+keyFolderLen]), nil)
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
for dbi.Next() {
@@ -376,10 +359,7 @@ func (db *Instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator
l.Debugf("vl.versions[0].device: %x", vl.versions[0].device)
l.Debugf("name: %q (%x)", name, name)
l.Debugf("fk: %q", fk)
l.Debugf("fk: %x %x %x",
fk[keyPrefixLen:keyPrefixLen+keyFolderLen],
fk[keyPrefixLen+keyFolderLen:keyPrefixLen+keyFolderLen+keyDeviceLen],
fk[keyPrefixLen+keyFolderLen+keyDeviceLen:])
l.Debugf("fk: %x %x %x", fk[1:1+64], fk[1+64:1+64+32], fk[1+64+32:])
panic(err)
}
@@ -423,10 +403,13 @@ func (db *Instance) availability(folder, file []byte) []protocol.DeviceID {
}
func (db *Instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
start := db.globalKey(folder, nil)
limit := db.globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
t := db.newReadOnlyTransaction()
defer t.close()
dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
var fk []byte
@@ -563,7 +546,9 @@ func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
t := db.newReadWriteTransaction()
defer t.close()
dbi := t.NewIterator(util.BytesPrefix(db.globalKey(folder, nil)[:keyPrefixLen+keyFolderLen]), nil)
start := db.globalKey(folder, nil)
limit := db.globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
dbi := t.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
var fk []byte
@@ -613,72 +598,71 @@ func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
// deviceKey returns a byte slice encoding the following information:
// keyTypeDevice (1 byte)
// folder (4 bytes)
// device (4 bytes)
// folder (64 bytes)
// device (32 bytes)
// name (variable size)
func (db *Instance) deviceKey(folder, device, file []byte) []byte {
return db.deviceKeyInto(nil, folder, device, file)
}
func (db *Instance) deviceKeyInto(k []byte, folder, device, file []byte) []byte {
reqLen := keyPrefixLen + keyFolderLen + keyDeviceLen + len(file)
reqLen := 1 + 64 + 32 + len(file)
if len(k) < reqLen {
k = make([]byte, reqLen)
}
k[0] = KeyTypeDevice
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
binary.BigEndian.PutUint32(k[keyPrefixLen+keyFolderLen:], db.deviceIdx.ID(device))
copy(k[keyPrefixLen+keyFolderLen+keyDeviceLen:], []byte(file))
if len(folder) > 64 {
panic("folder name too long")
}
copy(k[1:], []byte(folder))
copy(k[1+64:], device[:])
copy(k[1+64+32:], []byte(file))
return k[:reqLen]
}
// deviceKeyName returns the device ID from the key
func (db *Instance) deviceKeyName(key []byte) []byte {
return key[keyPrefixLen+keyFolderLen+keyDeviceLen:]
return key[1+64+32:]
}
// deviceKeyFolder returns the folder name from the key
func (db *Instance) deviceKeyFolder(key []byte) []byte {
folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
if !ok {
panic("bug: lookup of nonexistent folder ID")
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
return folder
}
return folder
return folder[:izero]
}
// deviceKeyDevice returns the device ID from the key
func (db *Instance) deviceKeyDevice(key []byte) []byte {
device, ok := db.deviceIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen+keyFolderLen:]))
if !ok {
panic("bug: lookup of nonexistent device ID")
}
return device
return key[1+64 : 1+64+32]
}
// globalKey returns a byte slice encoding the following information:
// keyTypeGlobal (1 byte)
// folder (4 bytes)
// folder (64 bytes)
// name (variable size)
func (db *Instance) globalKey(folder, file []byte) []byte {
k := make([]byte, keyPrefixLen+keyFolderLen+len(file))
k := make([]byte, 1+64+len(file))
k[0] = KeyTypeGlobal
binary.BigEndian.PutUint32(k[keyPrefixLen:], db.folderIdx.ID(folder))
copy(k[keyPrefixLen+keyFolderLen:], []byte(file))
if len(folder) > 64 {
panic("folder name too long")
}
copy(k[1:], []byte(folder))
copy(k[1+64:], []byte(file))
return k
}
// globalKeyName returns the filename from the key
func (db *Instance) globalKeyName(key []byte) []byte {
return key[keyPrefixLen+keyFolderLen:]
return key[1+64:]
}
// globalKeyFolder returns the folder name from the key
func (db *Instance) globalKeyFolder(key []byte) []byte {
folder, ok := db.folderIdx.Val(binary.BigEndian.Uint32(key[keyPrefixLen:]))
if !ok {
panic("bug: lookup of nonexistent folder ID")
folder := key[1 : 1+64]
izero := bytes.IndexByte(folder, 0)
if izero < 0 {
return folder
}
return folder
return folder[:izero]
}
func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
@@ -708,132 +692,3 @@ func leveldbIsCorrupted(err error) bool {
return false
}
// checkConvertDatabase tries to convert an existing old (v0.11) database to
// new (v0.13) format.
func checkConvertDatabase(dbFile string) error {
oldLoc := filepath.Join(filepath.Dir(dbFile), "index-v0.11.0.db")
if _, err := os.Stat(oldLoc); os.IsNotExist(err) {
// The old database file does not exist; that's ok, continue as if
// everything succeeded.
return nil
} else if err != nil {
// Any other error is weird.
return err
}
// There exists a database in the old format. We run a one time
// conversion from old to new.
fromDb, err := leveldb.OpenFile(oldLoc, nil)
if err != nil {
return err
}
toDb, err := leveldb.OpenFile(dbFile, nil)
if err != nil {
return err
}
err = convertKeyFormat(fromDb, toDb)
if err != nil {
return err
}
err = toDb.Close()
if err != nil {
return err
}
// We've done this one, we don't want to do it again (if the user runs
// -reset or so). We don't care too much about errors any more at this stage.
fromDb.Close()
osutil.Rename(oldLoc, oldLoc+".converted")
return nil
}
// A smallIndex is an in memory bidirectional []byte to uint32 map. It gives
// fast lookups in both directions and persists to the database. Don't use for
// storing more items than fit comfortably in RAM.
type smallIndex struct {
db *Instance
prefix []byte
id2val map[uint32]string
val2id map[string]uint32
nextID uint32
mut sync.Mutex
}
func newSmallIndex(db *Instance, prefix []byte) *smallIndex {
idx := &smallIndex{
db: db,
prefix: prefix,
id2val: make(map[uint32]string),
val2id: make(map[string]uint32),
mut: sync.NewMutex(),
}
idx.load()
return idx
}
// load iterates over the prefix space in the database and populates the in
// memory maps.
func (i *smallIndex) load() {
tr := i.db.newReadOnlyTransaction()
it := tr.NewIterator(util.BytesPrefix(i.prefix), nil)
for it.Next() {
val := string(it.Value())
id := binary.BigEndian.Uint32(it.Key()[len(i.prefix):])
i.id2val[id] = val
i.val2id[val] = id
if id >= i.nextID {
i.nextID = id + 1
}
}
it.Release()
tr.close()
}
// ID returns the index number for the given byte slice, allocating a new one
// and persisting this to the database if necessary.
func (i *smallIndex) ID(val []byte) uint32 {
i.mut.Lock()
// intentionally avoiding defer here as we want this call to be as fast as
// possible in the general case (folder ID already exists). The map lookup
// with the conversion of []byte to string is compiler optimized to not
// copy the []byte, which is why we don't assign it to a temp variable
// here.
if id, ok := i.val2id[string(val)]; ok {
i.mut.Unlock()
return id
}
id := i.nextID
i.nextID++
valStr := string(val)
i.val2id[valStr] = id
i.id2val[id] = valStr
key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
copy(key, i.prefix)
binary.BigEndian.PutUint32(key[len(i.prefix):], id)
i.db.Put(key, val, nil)
i.mut.Unlock()
return id
}
// Val returns the value for the given index number, or (nil, false) if there
// is no such index number.
func (i *smallIndex) Val(id uint32) ([]byte, bool) {
i.mut.Lock()
val, ok := i.id2val[id]
i.mut.Unlock()
if !ok {
return nil, false
}
return []byte(val), true
}

View File

@@ -16,9 +16,7 @@ func TestDeviceKey(t *testing.T) {
dev := []byte("device67890123456789012345678901")
name := []byte("name")
db := OpenMemory()
db.folderIdx.ID(fld)
db.deviceIdx.ID(dev)
db := &Instance{}
key := db.deviceKey(fld, dev, name)
@@ -40,8 +38,7 @@ func TestGlobalKey(t *testing.T) {
fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
name := []byte("name")
db := OpenMemory()
db.folderIdx.ID(fld)
db := &Instance{}
key := db.globalKey(fld, name)

View File

@@ -97,7 +97,7 @@ func NewFileSet(folder string, db *Instance) *FileSet {
localVersion: make(map[protocol.DeviceID]int64),
folder: folder,
db: db,
blockmap: NewBlockMap(db, db.folderIdx.ID([]byte(folder))),
blockmap: NewBlockMap(db, folder),
mutex: sync.NewMutex(),
}
@@ -244,7 +244,7 @@ func DropFolder(db *Instance, folder string) {
db.dropFolder([]byte(folder))
bm := &BlockMap{
db: db,
folder: db.folderIdx.ID([]byte(folder)),
folder: folder,
}
bm.Drop()
NewVirtualMtimeRepo(db, folder).Drop()

View File

Binary file not shown.

View File

@@ -7,7 +7,6 @@
package db
import (
"encoding/binary"
"fmt"
"time"
)
@@ -25,12 +24,10 @@ type VirtualMtimeRepo struct {
}
func NewVirtualMtimeRepo(ldb *Instance, folder string) *VirtualMtimeRepo {
var prefix [5]byte // key type + 4 bytes folder idx number
prefix[0] = KeyTypeVirtualMtime
binary.BigEndian.PutUint32(prefix[1:], ldb.folderIdx.ID([]byte(folder)))
prefix := string(KeyTypeVirtualMtime) + folder
return &VirtualMtimeRepo{
ns: NewNamespacedKV(ldb, string(prefix[:])),
ns: NewNamespacedKV(ldb, prefix),
}
}

View File

@@ -23,6 +23,7 @@ const (
LevelDebug LogLevel = iota
LevelVerbose
LevelInfo
LevelOK
LevelWarn
LevelFatal
NumLevels
@@ -41,6 +42,8 @@ type Logger interface {
Verbosef(format string, vals ...interface{})
Infoln(vals ...interface{})
Infof(format string, vals ...interface{})
Okln(vals ...interface{})
Okf(format string, vals ...interface{})
Warnln(vals ...interface{})
Warnf(format string, vals ...interface{})
Fatalln(vals ...interface{})
@@ -162,6 +165,24 @@ func (l *logger) Infof(format string, vals ...interface{}) {
l.callHandlers(LevelInfo, s)
}
// Okln logs a line with an OK prefix.
func (l *logger) Okln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Okf logs a formatted line with an OK prefix.
func (l *logger) Okf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Warnln logs a formatted line with a WARNING prefix.
func (l *logger) Warnln(vals ...interface{}) {
l.mut.Lock()

View File

@@ -19,6 +19,8 @@ func TestAPI(t *testing.T) {
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, &debug))
info := 0
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, &info))
ok := 0
l.AddHandler(LevelOK, checkFunc(t, LevelOK, &ok))
warn := 0
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, &warn))
@@ -26,15 +28,20 @@ func TestAPI(t *testing.T) {
l.Debugln("test", 0)
l.Infof("test %d", 1)
l.Infoln("test", 1)
l.Okf("test %d", 2)
l.Okln("test", 2)
l.Warnf("test %d", 3)
l.Warnln("test", 3)
if debug != 6 {
if debug != 8 {
t.Errorf("Debug handler called %d != 8 times", debug)
}
if info != 4 {
if info != 6 {
t.Errorf("Info handler called %d != 6 times", info)
}
if ok != 4 {
t.Errorf("Ok handler called %d != 4 times", ok)
}
if warn != 2 {
t.Errorf("Warn handler called %d != 2 times", warn)
}

View File

@@ -189,7 +189,7 @@ func (m *Model) StartFolderRW(folder string) {
m.folderRunnerTokens[folder] = append(m.folderRunnerTokens[folder], token)
m.fmut.Unlock()
l.Infoln("Ready to synchronize", folder, "(read-write)")
l.Okln("Ready to synchronize", folder, "(read-write)")
}
func (m *Model) warnAboutOverwritingProtectedFiles(folder string) {
@@ -241,7 +241,7 @@ func (m *Model) StartFolderRO(folder string) {
m.folderRunnerTokens[folder] = append(m.folderRunnerTokens[folder], token)
m.fmut.Unlock()
l.Infoln("Ready to synchronize", folder, "(read only; no external updates accepted)")
l.Okln("Ready to synchronize", folder, "(read only; no external updates accepted)")
}
func (m *Model) RemoveFolder(folder string) {

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-BEP" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-BEP" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-bep \- Block Exchange Protocol v1
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-CONFIG" "5" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-CONFIG" "5" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-config \- Syncthing Configuration
.
@@ -284,6 +284,9 @@ conflict copies altogether.
<device id="5SYI2FS\-LW6YAXI\-JJDYETS\-NDBBPIO\-256MWBO\-XDPXWVG\-24QPUM4\-PDW4UQU" name="syno" compression="metadata" introducer="false">
<address>dynamic</address>
</device>
<device id="2CYF2WQ\-AKZO2QZ\-JAKWLYD\-AGHMQUM\-BGXUOIS\-GYILW34\-HJG3DUK\-LRRYQAR" name="syno local" compression="metadata" introducer="false">
<address>tcp://192.0.2.1:22001</address>
</device>
.ft P
.fi
.UNINDENT
@@ -327,25 +330,32 @@ should copy their list of devices per folder when connecting.
.UNINDENT
.sp
In addition, one or more \fBaddress\fP child elements must be present. Each
contains an address to use when attempting to connect to this device and will
be tried in order. Accepted formats are:
contains an address or host name to use when attempting to connect to this device and will
be tried in order. Entries other than \fBdynamic\fP must be prefixed with \fBtcp://\fP\&. Accepted formats are:
.INDENT 0.0
.TP
.B IPv4 address (\fB192.0.2.42\fP)
.B IPv4 address (\fBtcp://192.0.2.42\fP)
The default port (22000) is used.
.TP
.B IPv4 address and port (\fB192.0.2.42:12345\fP)
.B IPv4 address and port (\fBtcp://192.0.2.42:12345\fP)
The address and port is used as given.
.TP
.B IPv6 address (\fB2001:db8::23:42\fP)
The default port (22000) is used.
.B IPv6 address (\fBtcp://[2001:db8::23:42]\fP)
The default port (22000) is used. The address must be enclosed in
square brackets.
.TP
.B IPv6 address and port (\fB[2001:db8::23:42]:12345\fP)
.B IPv6 address and port (\fBtcp://[2001:db8::23:42]:12345\fP)
The address and port is used as given. The address must be enclosed in
square brackets.
.TP
.B Host name (\fBtcp://fileserver\fP)
The host name will be used on the default port (22000) and connections will be attempted via both IPv4 and IPv6, depending on name resolution.
.TP
.B Host name and port (\fBtcp://fileserver:12345\fP)
The host name will be used on the given port and connections will be attempted via both IPv4 and IPv6, depending on name resolution.
.TP
.B \fBdynamic\fP
The word \fBdynamic\fP means to use local and global discovery to find the
The word \fBdynamic\fP (without \fBtcp://\fP prefix) means to use local and global discovery to find the
device.
.UNINDENT
.SH IGNOREDDEVICE ELEMENT

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-DEVICE-IDS" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-DEVICE-IDS" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-device-ids \- Understanding Device IDs
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-EVENT-API" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-EVENT-API" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-event-api \- Event API
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-FAQ" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-FAQ" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-faq \- Frequently Asked Questions
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-GLOBALDISCO" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-GLOBALDISCO" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-globaldisco \- Global Discovery Protocol v3
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-LOCALDISCO" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-LOCALDISCO" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-localdisco \- Local Discovery Protocol v3
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-NETWORKING" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-NETWORKING" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-networking \- Firewall Setup
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-RELAY" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-RELAY" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-relay \- Relay Protocol v1
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-REST-API" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-REST-API" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-rest-api \- REST API
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-SECURITY" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-SECURITY" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-security \- Security Principles
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING-STIGNORE" "5" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING-STIGNORE" "5" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing-stignore \- Prevent files from being synchronized to other nodes
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "TODO" "7" "January 30, 2016" "v0.12" "Syncthing"
.TH "TODO" "7" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
Todo \- Keep automatic backups of deleted files by other nodes
.

View File

@@ -1,6 +1,6 @@
.\" Man page generated from reStructuredText.
.
.TH "SYNCTHING" "1" "January 30, 2016" "v0.12" "Syncthing"
.TH "SYNCTHING" "1" "February 08, 2016" "v0.12" "Syncthing"
.SH NAME
syncthing \- Syncthing
.

View File

@@ -14,6 +14,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -191,34 +192,30 @@ func alterFiles(dir string) error {
return osutil.TryRename(path, newPath)
}
/*
This doesn't in fact work. Sometimes it appears to. We need to get this sorted...
// Switch between files and directories
case r == 3 && comps > 3 && rand.Float64() < 0.2:
if !info.Mode().IsRegular() {
err = removeAll(path)
if err != nil {
return err
}
d1 := []byte("I used to be a dir: " + path)
err := ioutil.WriteFile(path, d1, 0644)
if err != nil {
return err
}
} else {
err := osutil.Remove(path)
if err != nil {
return err
}
err = os.MkdirAll(path, 0755)
if err != nil {
return err
}
generateFiles(path, 10, 20, "../LICENSE")
}
// Switch between files and directories
case r == 3 && comps > 3 && rand.Float64() < 0.2:
if !info.Mode().IsRegular() {
err = removeAll(path)
if err != nil {
return err
*/
}
d1 := []byte("I used to be a dir: " + path)
err := ioutil.WriteFile(path, d1, 0644)
if err != nil {
return err
}
} else {
err := osutil.Remove(path)
if err != nil {
return err
}
err = os.MkdirAll(path, 0755)
if err != nil {
return err
}
generateFiles(path, 10, 20, "../LICENSE")
}
return err
/*
This fails. Bug?