Compare commits

16 Commits

| SHA1 |
|---|
| 223e14b0d0 |
| a58f69be04 |
| e194eb1f69 |
| 672824641b |
| 6d357211b2 |
| 8e39e2889d |
| a9ee4bb9f1 |
| 80fd6c2400 |
| 3cbe7d40d1 |
| af0bc95de5 |
| 4bf3e7485b |
| b701de60ce |
| 7ef2743964 |
| a165838cbd |
| 3c77b8388c |
| 9d16f4545d |
Binary image changes (size before → after): 12 KiB → 9.8 KiB, 23 KiB → 20 KiB, 3.4 KiB → 2.2 KiB, 48 KiB → 40 KiB, 6.4 KiB → 4.9 KiB, 24 KiB → 19 KiB, 47 KiB → 38 KiB, 12 KiB → 9.8 KiB, 12 KiB → 8.2 KiB
build.go (8 changed lines)

@@ -717,10 +717,18 @@ func getBranchSuffix() string {
}

func buildStamp() int64 {
// If SOURCE_DATE_EPOCH is set, use that.
if s, _ := strconv.ParseInt(os.Getenv("SOURCE_DATE_EPOCH"), 10, 64); s > 0 {
return s
}

// Try to get the timestamp of the latest commit.
bs, err := runError("git", "show", "-s", "--format=%ct")
if err != nil {
// Fall back to "now".
return time.Now().Unix()
}

s, _ := strconv.ParseInt(string(bs), 10, 64)
return s
}
@@ -330,6 +330,16 @@ func (s *querysrv) handleAnnounce(ctx context.Context, remote net.IP, deviceID p

ip := net.ParseIP(host)
if host == "" || ip.IsUnspecified() {
// Do not use IPv6 remote address if requested scheme is tcp4
if uri.Scheme == "tcp4" && remote.To4() == nil {
continue
}

// Do not use IPv4 remote address if requested scheme is tcp6
if uri.Scheme == "tcp6" && remote.To4() != nil {
continue
}

host = remote.String()
}
@@ -88,7 +88,7 @@ func main() {
flag.StringVar(&statusAddr, "status-srv", ":22070", "Listen address for status service (blank to disable)")
flag.StringVar(&poolAddrs, "pools", defaultPoolAddrs, "Comma separated list of relay pool addresses to join")
flag.StringVar(&providedBy, "provided-by", "", "An optional description about who provides the relay")
flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertising as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")
flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertise as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")

flag.Parse()

@@ -111,7 +111,7 @@ func main() {

go monitorLimits()
} else if err != nil && runtime.GOOS != "windows" {
log.Println("Assuming no connection limit, due to error retrievign rlimits:", err)
log.Println("Assuming no connection limit, due to error retrieving rlimits:", err)
}

sessionAddress = addr.IP[:]
@@ -86,7 +86,7 @@ type modelIntf interface {
DelayScan(folder string, next time.Duration)
ScanFolder(folder string) error
ScanFolders() map[string]error
ScanFolderSubs(folder string, subs []string) error
ScanFolderSubdirs(folder string, subs []string) error
BringToFront(folder, file string)
ConnectedTo(deviceID protocol.DeviceID) bool
GlobalSize(folder string) (nfiles, deleted int, bytes int64)

@@ -577,7 +577,7 @@ func (s *apiService) getDBStatus(w http.ResponseWriter, r *http.Request) {
func folderSummary(cfg configIntf, m modelIntf, folder string) map[string]interface{} {
var res = make(map[string]interface{})

res["invalid"] = cfg.Folders()[folder].Invalid
res["invalid"] = "" // Deprecated, retains external API for now

globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes

@@ -690,7 +690,7 @@ func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
to, err := config.ReadJSON(r.Body, myID)
r.Body.Close()
if err != nil {
l.Warnln("decoding posted config:", err)
l.Warnln("Decoding posted config:", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}

@@ -1071,7 +1071,7 @@ func (s *apiService) postDBScan(w http.ResponseWriter, r *http.Request) {
}

subs := qs["sub"]
err = s.model.ScanFolderSubs(folder, subs)
err = s.model.ScanFolderSubdirs(folder, subs)
if err != nil {
http.Error(w, err.Error(), 500)
return
@@ -11,6 +11,7 @@ import (
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"

@@ -613,3 +614,55 @@ func TestRandomString(t *testing.T) {
t.Errorf("Expected 27 random characters, got %q of length %d", res["random"], len(res["random"]))
}
}

func TestConfigPostOK(t *testing.T) {
cfg := bytes.NewBuffer([]byte(`{
"version": 15,
"folders": [
{"id": "foo"}
]
}`))

resp, err := testConfigPost(cfg)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusOK {
t.Error("Expected 200 OK, not", resp.Status)
}
}

func TestConfigPostDupFolder(t *testing.T) {
cfg := bytes.NewBuffer([]byte(`{
"version": 15,
"folders": [
{"id": "foo"},
{"id": "foo"}
]
}`))

resp, err := testConfigPost(cfg)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusBadRequest {
t.Error("Expected 400 Bad Request, not", resp.Status)
}
}

func testConfigPost(data io.Reader) (*http.Response, error) {
const testAPIKey = "foobarbaz"
cfg := new(mockedConfig)
cfg.gui.APIKey = testAPIKey
baseURL, err := startHTTP(cfg)
if err != nil {
return nil, err
}
cli := &http.Client{
Timeout: time.Second,
}

req, _ := http.NewRequest("POST", baseURL+"/rest/system/config", data)
req.Header.Set("X-API-Key", testAPIKey)
return cli.Do(req)
}
@@ -539,7 +539,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)

// Event subscription for the API; must start early to catch the early events. The LocalDiskUpdated
// Event subscription for the API; must start early to catch the early events. The LocalChangeDetected
// event might overwhelm the event reciever in some situations so we will not subscribe to it here.
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected), 1000)

@@ -863,7 +863,6 @@ func loadConfig() (*config.Wrapper, error) {
cfg, err := config.Load(cfgFile, myID)

if err != nil {
l.Infoln("Error loading config file; using defaults for now")
myName, _ := os.Hostname()
newCfg := defaultConfig(myName)
cfg = config.Wrap(cfgFile, newCfg)
@@ -85,7 +85,7 @@ func (m *mockedModel) ScanFolders() map[string]error {
return nil
}

func (m *mockedModel) ScanFolderSubs(folder string, subs []string) error {
func (m *mockedModel) ScanFolderSubdirs(folder string, subs []string) error {
return nil
}
Binary image changes (size before → after): 5.0 KiB → 4.0 KiB, 4.7 KiB → 3.5 KiB, 4.7 KiB → 3.5 KiB, 4.9 KiB → 4.0 KiB
@@ -1,6 +1,6 @@
{
"A device with that ID is already added.": "En enhet med det ID är redan tillagt.",
"A negative number of days doesn't make sense.": "Negativt antal dagar är inte troligt.",
"A negative number of days doesn't make sense.": "Ett negativt antal dagar är inte troligt.",
"A new major version may not be compatible with previous versions.": "En ny huvudversion kan eventuellt vara inkompatibel med tidigare versioner.",
"API Key": "API-nyckel",
"About": "Om",

@@ -16,16 +16,16 @@
"Advanced Configuration": "Avancerad konfiguration",
"Advanced settings": "Avancerade inställningar",
"All Data": "All data",
"Allow Anonymous Usage Reporting?": "Tillåt anonym användarstatistik?",
"Allow Anonymous Usage Reporting?": "Tillåt anonym användarstatistiksrapportering?",
"Alphabetic": "Alfabetisk",
"An external command handles the versioning. It has to remove the file from the synced folder.": "Ett externt kommando sköter versionshanteringen. Det måste ta bort filen från den synkroniserade mappen.",
"Anonymous Usage Reporting": "Anonym användarstatistik",
"Any devices configured on an introducer device will be added to this device as well.": "Enheter konfigurerade på en introduktörsenhet kommer också att läggas till den här enheten.",
"Automatic upgrades": "Automatisk uppgradering",
"Automatic upgrades": "Automatiska uppgraderingar",
"Be careful!": "Var aktsam!",
"Bugs": "Buggar",
"CPU Utilization": "CPU-användning",
"Changelog": "Changelog",
"Changelog": "Ändringslogg",
"Clean out after": "Rensa efteråt",
"Close": "Stäng",
"Command": "Kommando",

@@ -38,7 +38,7 @@
"Copyright © 2014-2016 the following Contributors:": "Copyright © 2014-2016 följande bidragande:",
"Copyright © 2015 the following Contributors:": "Copyright © 2015 följande medverkande:",
"Danger!": "Fara!",
"Delete": "Radera",
"Delete": "Ta bort",
"Deleted": "Borttaget",
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Enhet \"{{name}}\" ({{device}} på {{address}}) vill ansluta. Lägg till ny enhet?",
"Device ID": "Enhets-ID",

@@ -107,7 +107,7 @@
"Local State (Total)": "Lokal status (Total)",
"Major Upgrade": "Stor uppgradering",
"Master": "Huvud",
"Maximum Age": "Högsta åldersgräns",
"Maximum Age": "Högsta ålder",
"Metadata Only": "Endast metadata",
"Minimum Free Disk Space": "Minimum ledigt diskutrymme",
"Move to top of queue": "Flytta till överst i kön",

@@ -125,8 +125,8 @@
"Oldest First": "Äldst först",
"Optional descriptive label for the folder. Can be different on each device.": "Valfri beskrivande etikett för katalogen. Kan vara olika på varje enhet.",
"Options": "Alternativ",
"Out of Sync": "Osynkad",
"Out of Sync Items": "Osynkade poster",
"Out of Sync": "Osynkroniserad",
"Out of Sync Items": "Osynkroniserade poster",
"Outgoing Rate Limit (KiB/s)": "Max uppladdningshastighet (KiB/s)",
"Override Changes": "Skriv över ändringar",
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Sökväg till katalogen på din dator. Kommer att skapas om det inte finns. Tecknet tilde (~) kan användas som en genväg för",

@@ -144,7 +144,7 @@
"Relay Servers": "Reläservrar",
"Relayed via": "Vidarbefordras via",
"Relays": "Vidarbefordringar",
"Release Notes": "versionsnyheter",
"Release Notes": "Versionsanteckningar",
"Remote Devices": "Fjärrenheter",
"Remove": "Ta bort",
"Required identifier for the folder. Must be the same on all cluster devices.": "Krävs identifierare för katalogen. Måste vara densamma på alla kluster enheter.",

@@ -186,7 +186,7 @@
"Support": "Support",
"Sync Protocol Listen Addresses": "Address för inkommande anslutningar",
"Syncing": "Synkroniserar",
"Syncthing has been shut down.": "Syncthing har stängts ner.",
"Syncthing has been shut down.": "Syncthing har stängts.",
"Syncthing includes the following software or portions thereof:": "Syncthing innehåller följande mjukvarupaket eller delar av dem:",
"Syncthing is restarting.": "Syncthing startar om.",
"Syncthing is upgrading.": "Syncthing uppgraderas.",

@@ -219,7 +219,7 @@
"The rate limit must be a non-negative number (0: no limit)": "Frekvensgränsen måste vara ett icke-negativt tal (0: ingen gräns)",
"The rescan interval must be a non-negative number of seconds.": "Förnyelseintervallet måste vara ett positivt antal sekunder",
"They are retried automatically and will be synced when the error is resolved.": "De omprövas automatiskt och kommer att synkroniseras när felet är löst.",
"This Device": "Enheten",
"This Device": "Denna enhet",
"This can easily give hackers access to read and change any files on your computer.": "Detta kan lätt ge hackare tillgång till att läsa och ändra några filer på datorn.",
"This is a major version upgrade.": "Det här är en stor uppgradering.",
"Trash Can File Versioning": "Versionshantering på filer i papperskorgen",
@@ -1318,7 +1318,7 @@ angular.module('syncthing.core')
$scope.editingExisting = false;
$scope.folderEditor.$setPristine();
$http.get(urlbase + '/svc/random/string?length=10').success(function (data) {
$scope.currentFolder.id = data.random.substr(0, 5) + '-' + data.random.substr(5, 5);
$scope.currentFolder.id = (data.random.substr(0, 5) + '-' + data.random.substr(5, 5)).toLowerCase();
$('#editFolder').modal();
});
};
@@ -10,6 +10,7 @@ package config
import (
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/url"

@@ -81,11 +82,15 @@ func ReadXML(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
util.SetDefaults(&cfg.Options)
util.SetDefaults(&cfg.GUI)

err := xml.NewDecoder(r).Decode(&cfg)
if err := xml.NewDecoder(r).Decode(&cfg); err != nil {
return Configuration{}, err
}
cfg.OriginalVersion = cfg.Version

cfg.prepare(myID)
return cfg, err
if err := cfg.prepare(myID); err != nil {
return Configuration{}, err
}
return cfg, nil
}

func ReadJSON(r io.Reader, myID protocol.DeviceID) (Configuration, error) {

@@ -97,14 +102,16 @@ func ReadJSON(r io.Reader, myID protocol.DeviceID) (Configuration, error) {

bs, err := ioutil.ReadAll(r)
if err != nil {
return cfg, err
return Configuration{}, err
}

err = json.Unmarshal(bs, &cfg)
cfg.OriginalVersion = cfg.Version

cfg.prepare(myID)
return cfg, err
if err := cfg.prepare(myID); err != nil {
return Configuration{}, err
}
return cfg, nil
}

type Configuration struct {

@@ -154,7 +161,7 @@ func (cfg *Configuration) WriteXML(w io.Writer) error {
return err
}

func (cfg *Configuration) prepare(myID protocol.DeviceID) {
func (cfg *Configuration) prepare(myID protocol.DeviceID) error {
util.FillNilSlices(&cfg.Options)

// Initialize any empty slices

@@ -168,19 +175,19 @@ func (cfg *Configuration) prepare(myID protocol.DeviceID) {
cfg.Options.AlwaysLocalNets = []string{}
}

// Check for missing, bad or duplicate folder ID:s
var seenFolders = map[string]*FolderConfiguration{}
// Prepare folders and check for duplicates. Duplicates are bad and
// dangerous, can't currently be resolved in the GUI, and shouldn't
// happen when configured by the GUI. We return with an error in that
// situation.
seenFolders := make(map[string]struct{})
for i := range cfg.Folders {
folder := &cfg.Folders[i]
folder.prepare()

if seen, ok := seenFolders[folder.ID]; ok {
l.Warnf("Multiple folders with ID %q; disabling", folder.ID)
seen.Invalid = "duplicate folder ID"
folder.Invalid = "duplicate folder ID"
} else {
seenFolders[folder.ID] = folder
if _, ok := seenFolders[folder.ID]; ok {
return fmt.Errorf("duplicate folder ID %q in configuration", folder.ID)
}
seenFolders[folder.ID] = struct{}{}
}

cfg.Options.ListenAddresses = util.UniqueStrings(cfg.Options.ListenAddresses)

@@ -257,6 +264,8 @@ func (cfg *Configuration) prepare(myID protocol.DeviceID) {
if cfg.GUI.APIKey == "" {
cfg.GUI.APIKey = rand.String(32)
}

return nil
}

func convertV14V15(cfg *Configuration) {
@@ -595,22 +595,14 @@ func TestGUIConfigURL(t *testing.T) {
}
}

func TestRemoveDuplicateDevicesFolders(t *testing.T) {
wrapper, err := Load("testdata/duplicates.xml", device1)
func TestDuplicateDevices(t *testing.T) {
// Duplicate devices should be removed

wrapper, err := Load("testdata/dupdevices.xml", device1)
if err != nil {
t.Fatal(err)
}

// All folders are loaded, but the duplicate ones are disabled.
if l := len(wrapper.Raw().Folders); l != 3 {
t.Errorf("Incorrect number of folders, %d != 3", l)
}
for i, f := range wrapper.Raw().Folders {
if f.ID == "f1" && f.Invalid == "" {
t.Errorf("Folder %d (%q) is not set invalid", i, f.ID)
}
}

if l := len(wrapper.Raw().Devices); l != 3 {
t.Errorf("Incorrect number of devices, %d != 3", l)
}

@@ -621,6 +613,30 @@ func TestRemoveDuplicateDevicesFolders(t *testing.T) {
}
}

func TestDuplicateFolders(t *testing.T) {
// Duplicate folders are a loading error

_, err := Load("testdata/dupfolders.xml", device1)
if err == nil || !strings.HasPrefix(err.Error(), "duplicate folder ID") {
t.Fatal(`Expected error to mention "duplicate folder ID":`, err)
}
}

func TestEmptyFolderPaths(t *testing.T) {
// Empty folder paths are allowed at the loading stage, and should not
// get messed up by the prepare steps (e.g., become the current dir or
// get a slash added so that it becomes the root directory or similar).

wrapper, err := Load("testdata/nopath.xml", device1)
if err != nil {
t.Fatal(err)
}
folder := wrapper.Folders()["f1"]
if folder.Path() != "" {
t.Errorf("Expected %q to be empty", folder.Path())
}
}

func TestV14ListenAddressesMigration(t *testing.T) {
tcs := [][3][]string{
@@ -39,7 +39,6 @@ type FolderConfiguration struct {
DisableSparseFiles bool `xml:"disableSparseFiles" json:"disableSparseFiles"`
DisableTempIndexes bool `xml:"disableTempIndexes" json:"disableTempIndexes"`

Invalid string `xml:"-" json:"invalid"` // Set at runtime when there is an error, not saved
cachedPath string

DeprecatedReadOnly bool `xml:"ro,attr,omitempty" json:"-"`

@@ -70,7 +69,7 @@ func (f FolderConfiguration) Path() string {
// This is intentionally not a pointer method, because things like
// cfg.Folders["default"].Path() should be valid.

if f.cachedPath == "" {
if f.cachedPath == "" && f.RawPath != "" {
l.Infoln("bug: uncached path call (should only happen in tests)")
return f.cleanedPath()
}

@@ -108,31 +107,24 @@ func (f *FolderConfiguration) DeviceIDs() []protocol.DeviceID {
}

func (f *FolderConfiguration) prepare() {
if len(f.RawPath) == 0 {
f.Invalid = "no directory configured"
return
}
if f.RawPath != "" {
// The reason it's done like this:
// C: -> C:\ -> C:\ (issue that this is trying to fix)
// C:\somedir -> C:\somedir\ -> C:\somedir
// C:\somedir\ -> C:\somedir\\ -> C:\somedir
// This way in the tests, we get away without OS specific separators
// in the test configs.
f.RawPath = filepath.Dir(f.RawPath + string(filepath.Separator))

// The reason it's done like this:
// C: -> C:\ -> C:\ (issue that this is trying to fix)
// C:\somedir -> C:\somedir\ -> C:\somedir
// C:\somedir\ -> C:\somedir\\ -> C:\somedir
// This way in the tests, we get away without OS specific separators
// in the test configs.
f.RawPath = filepath.Dir(f.RawPath + string(filepath.Separator))

// If we're not on Windows, we want the path to end with a slash to
// penetrate symlinks. On Windows, paths must not end with a slash.
if runtime.GOOS != "windows" && f.RawPath[len(f.RawPath)-1] != filepath.Separator {
f.RawPath = f.RawPath + string(filepath.Separator)
// If we're not on Windows, we want the path to end with a slash to
// penetrate symlinks. On Windows, paths must not end with a slash.
if runtime.GOOS != "windows" && f.RawPath[len(f.RawPath)-1] != filepath.Separator {
f.RawPath = f.RawPath + string(filepath.Separator)
}
}

f.cachedPath = f.cleanedPath()

if f.ID == "" {
f.ID = "default"
}

if f.RescanIntervalS > MaxRescanIntervalS {
f.RescanIntervalS = MaxRescanIntervalS
} else if f.RescanIntervalS < 0 {

@@ -145,6 +137,10 @@ func (f *FolderConfiguration) prepare() {
}

func (f *FolderConfiguration) cleanedPath() string {
if f.RawPath == "" {
return ""
}

cleaned := f.RawPath

// Attempt tilde expansion; leave unchanged in case of error
@@ -15,15 +15,6 @@
<!-- duplicate, will be removed -->
<address>192.0.2.5</address>
</device>
<folder id="f1" directory="testdata/">
<!-- duplicate, will be disabled -->
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR"></device>
<device id="GYRZZQBIRNPV4T7TC52WEQYJ3TFDQW6MWDFLMU4SSSU6EMFBK2VA"></device>
</folder>
<folder id="f1" directory="testdata/">
<!-- duplicate, will be disabled -->
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR"></device>
</folder>
<folder id="f2" directory="testdata/">
<device id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR"></device>
<device id="GYRZZQBIRNPV4T7TC52WEQYJ3TFDQW6MWDFLMU4SSSU6EMFBK2VA"></device>

lib/config/testdata/dupfolders.xml (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
<configuration version="15">
<folder id="f1" directory="testdata/">
</folder>
<folder id="f1" directory="testdata/">
</folder>
</configuration>

lib/config/testdata/nopath.xml (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
<configuration version="15">
<folder id="f1">
</folder>
</configuration>
@@ -52,7 +52,7 @@ func (d *relayDialer) Dial(id protocol.DeviceID, uri *url.URL) (IntermediateConn
tc = tls.Client(conn, d.tlsCfg)
}

err = tc.Handshake()
err = tlsTimedHandshake(tc)
if err != nil {
tc.Close()
return IntermediateConnection{}, err

@@ -85,7 +85,7 @@ func (t *relayListener) Serve() {
tc = tls.Client(conn, t.tlsCfg)
}

err = tc.Handshake()
err = tlsTimedHandshake(tc)
if err != nil {
tc.Close()
l.Infoln("TLS handshake (BEP/relay):", err)

@@ -36,7 +36,10 @@ var (
listeners = make(map[string]listenerFactory, 0)
)

const perDeviceWarningRate = 1.0 / (15 * 60) // Once per 15 minutes
const (
perDeviceWarningRate = 1.0 / (15 * 60) // Once per 15 minutes
tlsHandshakeTimeout = 10 * time.Second
)

// Service listens and dials all configured unconnected devices, via supported
// dialers. Successful connections are handed to the model.

@@ -347,7 +350,7 @@ func (s *Service) connect() {
}

if connected && dialerFactory.Priority() >= ct.Priority {
l.Debugf("Not dialing using %s as priorty is less than current connection (%d >= %d)", dialerFactory, dialerFactory.Priority(), ct.Priority)
l.Debugf("Not dialing using %s as priority is less than current connection (%d >= %d)", dialerFactory, dialerFactory.Priority(), ct.Priority)
continue
}

@@ -607,3 +610,9 @@ func warningFor(dev protocol.DeviceID, msg string) {
l.Warnln(msg)
}
}

func tlsTimedHandshake(tc *tls.Conn) error {
tc.SetDeadline(time.Now().Add(tlsHandshakeTimeout))
defer tc.SetDeadline(time.Time{})
return tc.Handshake()
}

@@ -40,7 +40,7 @@ func (d *tcpDialer) Dial(id protocol.DeviceID, uri *url.URL) (IntermediateConnec
}

tc := tls.Client(conn, d.tlsCfg)
err = tc.Handshake()
err = tlsTimedHandshake(tc)
if err != nil {
tc.Close()
return IntermediateConnection{}, err

@@ -108,7 +108,7 @@ func (t *tcpListener) Serve() {
}

tc := tls.Server(conn, t.tlsCfg)
err = tc.Handshake()
err = tlsTimedHandshake(tc)
if err != nil {
l.Infoln("TLS handshake (BEP/tcp):", err)
tc.Close()
@@ -111,16 +111,20 @@ func (t EventType) MarshalText() ([]byte, error) {
const BufferSize = 64

type Logger struct {
subs []*Subscription
nextID int
mutex sync.Mutex
subs []*Subscription
nextSubscriptionIDs []int
nextGlobalID int
mutex sync.Mutex
}

type Event struct {
ID int `json:"id"`
Time time.Time `json:"time"`
Type EventType `json:"type"`
Data interface{} `json:"data"`
// Per-subscription sequential event ID. Named "id" for backwards compatibility with the REST API
SubscriptionID int `json:"id"`
// Global ID of the event across all subscriptions
GlobalID int `json:"globalID"`
Time time.Time `json:"time"`
Type EventType `json:"type"`
Data interface{} `json:"data"`
}

type Subscription struct {

@@ -144,16 +148,21 @@ func NewLogger() *Logger {

func (l *Logger) Log(t EventType, data interface{}) {
l.mutex.Lock()
dl.Debugln("log", l.nextID, t, data)
l.nextID++
dl.Debugln("log", l.nextGlobalID, t, data)
l.nextGlobalID++

e := Event{
ID: l.nextID,
Time: time.Now(),
Type: t,
Data: data,
GlobalID: l.nextGlobalID,
Time: time.Now(),
Type: t,
Data: data,
}
for _, s := range l.subs {

for i, s := range l.subs {
if s.mask&t != 0 {
e.SubscriptionID = l.nextSubscriptionIDs[i]
l.nextSubscriptionIDs[i]++

select {
case s.events <- e:
default:

@@ -182,6 +191,7 @@ func (l *Logger) Subscribe(mask EventType) *Subscription {
}

l.subs = append(l.subs, s)
l.nextSubscriptionIDs = append(l.nextSubscriptionIDs, 1)
l.mutex.Unlock()
return s
}

@@ -192,9 +202,15 @@ func (l *Logger) Unsubscribe(s *Subscription) {
for i, ss := range l.subs {
if s == ss {
last := len(l.subs) - 1

l.subs[i] = l.subs[last]
l.subs[last] = nil
l.subs = l.subs[:last]

l.nextSubscriptionIDs[i] = l.nextSubscriptionIDs[last]
l.nextSubscriptionIDs[last] = 0
l.nextSubscriptionIDs = l.nextSubscriptionIDs[:last]

break
}
}

@@ -234,7 +250,7 @@ type bufferedSubscription struct {
sub *Subscription
buf []Event
next int
cur int
cur int // Current SubscriptionID
mut sync.Mutex
cond *stdsync.Cond
}

@@ -270,7 +286,7 @@ func (s *bufferedSubscription) pollingLoop() {
s.mut.Lock()
s.buf[s.next] = ev
s.next = (s.next + 1) % len(s.buf)
s.cur = ev.ID
s.cur = ev.SubscriptionID
s.cond.Broadcast()
s.mut.Unlock()
}

@@ -285,12 +301,12 @@ func (s *bufferedSubscription) Since(id int, into []Event) []Event {
}

for i := s.next; i < len(s.buf); i++ {
if s.buf[i].ID > id {
if s.buf[i].SubscriptionID > id {
into = append(into, s.buf[i])
}
}
for i := 0; i < s.next; i++ {
if s.buf[i].ID > id {
if s.buf[i].SubscriptionID > id {
into = append(into, s.buf[i])
}
}
@@ -128,7 +128,7 @@ func TestUnsubscribe(t *testing.T) {
}
}

func TestIDs(t *testing.T) {
func TestGlobalIDs(t *testing.T) {
l := events.NewLogger()

s := l.Subscribe(events.AllEvents)

@@ -144,7 +144,7 @@ func TestIDs(t *testing.T) {
if ev.Data.(string) != "foo" {
t.Fatal("Incorrect event:", ev)
}
id := ev.ID
id := ev.GlobalID

ev, err = s.Poll(timeout)
if err != nil {

@@ -153,8 +153,48 @@ func TestIDs(t *testing.T) {
if ev.Data.(string) != "bar" {
t.Fatal("Incorrect event:", ev)
}
if ev.ID != id+1 {
t.Fatalf("ID not incremented (%d != %d)", ev.ID, id+1)
if ev.GlobalID != id+1 {
t.Fatalf("ID not incremented (%d != %d)", ev.GlobalID, id+1)
}
}

func TestSubscriptionIDs(t *testing.T) {
l := events.NewLogger()

s := l.Subscribe(events.DeviceConnected)
defer l.Unsubscribe(s)

l.Log(events.DeviceDisconnected, "a")
l.Log(events.DeviceConnected, "b")
l.Log(events.DeviceConnected, "c")
l.Log(events.DeviceDisconnected, "d")

ev, err := s.Poll(timeout)
if err != nil {
t.Fatal("Unexpected error:", err)
}

if ev.GlobalID != 2 {
t.Fatal("Incorrect GlobalID:", ev.GlobalID)
}
if ev.SubscriptionID != 1 {
t.Fatal("Incorrect SubscriptionID:", ev.SubscriptionID)
}

ev, err = s.Poll(timeout)
if err != nil {
t.Fatal("Unexpected error:", err)
}
if ev.GlobalID != 3 {
t.Fatal("Incorrect GlobalID:", ev.GlobalID)
}
if ev.SubscriptionID != 2 {
t.Fatal("Incorrect SubscriptionID:", ev.SubscriptionID)
}

ev, err = s.Poll(timeout)
if err != events.ErrTimeout {
t.Fatal("Unexpected error:", err)
}
}

@@ -179,11 +219,92 @@ func TestBufferedSub(t *testing.T) {
for recv < 10*events.BufferSize {
evs := bs.Since(recv, nil)
for _, ev := range evs {
if ev.ID != recv+1 {
t.Fatalf("Incorrect ID; %d != %d", ev.ID, recv+1)
if ev.GlobalID != recv+1 {
t.Fatalf("Incorrect ID; %d != %d", ev.GlobalID, recv+1)
}
recv = ev.ID
recv = ev.GlobalID
}
}
}

func BenchmarkBufferedSub(b *testing.B) {
l := events.NewLogger()

s := l.Subscribe(events.AllEvents)
defer l.Unsubscribe(s)
bufferSize := events.BufferSize
bs := events.NewBufferedSubscription(s, bufferSize)

// The coord channel paces the sender according to the receiver,
// ensuring that no events are dropped. The benchmark measures sending +
// receiving + synchronization overhead.

coord := make(chan struct{}, bufferSize)
for i := 0; i < bufferSize-1; i++ {
coord <- struct{}{}
}

// Receive the events
done := make(chan struct{})
go func() {
defer close(done)
recv := 0
var evs []events.Event
for i := 0; i < b.N; {
evs = bs.Since(recv, evs[:0])
for _, ev := range evs {
if ev.GlobalID != recv+1 {
b.Fatal("skipped event", ev.GlobalID, recv)
}
recv = ev.GlobalID
coord <- struct{}{}
}
i += len(evs)
}
}()

// Send the events
eventData := map[string]string{
"foo": "bar",
"other": "data",
"and": "something else",
}
for i := 0; i < b.N; i++ {
l.Log(events.DeviceConnected, eventData)
<-coord
}

<-done
b.ReportAllocs()
}

func TestSinceUsesSubscriptionId(t *testing.T) {
l := events.NewLogger()

s := l.Subscribe(events.DeviceConnected)
defer l.Unsubscribe(s)
bs := events.NewBufferedSubscription(s, 10*events.BufferSize)

l.Log(events.DeviceConnected, "a") // SubscriptionID = 1
l.Log(events.DeviceDisconnected, "b")
l.Log(events.DeviceDisconnected, "c")
l.Log(events.DeviceConnected, "d") // SubscriptionID = 2

// We need to loop for the events, as they may not all have been
// delivered to the buffered subscription when we get here.
t0 := time.Now()
for time.Since(t0) < time.Second {
events := bs.Since(0, nil)
if len(events) == 2 {
break
}
if len(events) > 2 {
t.Fatal("Incorrect number of events:", len(events))
}
}

events := bs.Since(1, nil)
if len(events) != 1 {
t.Fatal("Incorrect number of events:", len(events))
}
}
@@ -10,7 +10,7 @@ import "time"

type folder struct {
stateTracker
scan folderscan
scan folderScanner
model *Model
stop chan struct{}
}
@@ -1,49 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package model

import (
"math/rand"
"time"
)

type rescanRequest struct {
subdirs []string
err chan error
}

// bundle all folder scan activity
type folderscan struct {
interval time.Duration
timer *time.Timer
now chan rescanRequest
delay chan time.Duration
}

func (s *folderscan) reschedule() {
if s.interval == 0 {
return
}
// Sleep a random time between 3/4 and 5/4 of the configured interval.
sleepNanos := (s.interval.Nanoseconds()*3 + rand.Int63n(2*s.interval.Nanoseconds())) / 4
interval := time.Duration(sleepNanos) * time.Nanosecond
l.Debugln(s, "next rescan in", interval)
s.timer.Reset(interval)
}

func (s *folderscan) Scan(subdirs []string) error {
req := rescanRequest{
subdirs: subdirs,
err: make(chan error),
}
s.now <- req
return <-req.err
}

func (s *folderscan) Delay(next time.Duration) {
s.delay <- next
}

lib/model/folderscanner.go (new file, 63 lines)

@@ -0,0 +1,63 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package model

import (
"github.com/syncthing/syncthing/lib/config"
"math/rand"
"time"
)

type rescanRequest struct {
subdirs []string
err chan error
}

// bundle all folder scan activity
type folderScanner struct {
interval time.Duration
timer *time.Timer
now chan rescanRequest
delay chan time.Duration
}

func newFolderScanner(config config.FolderConfiguration) folderScanner {
return folderScanner{
interval: time.Duration(config.RescanIntervalS) * time.Second,
timer: time.NewTimer(time.Millisecond), // The first scan should be done immediately.
now: make(chan rescanRequest),
delay: make(chan time.Duration),
}
}

func (f *folderScanner) Reschedule() {
if f.interval == 0 {
return
}
// Sleep a random time between 3/4 and 5/4 of the configured interval.
sleepNanos := (f.interval.Nanoseconds()*3 + rand.Int63n(2*f.interval.Nanoseconds())) / 4
interval := time.Duration(sleepNanos) * time.Nanosecond
l.Debugln(f, "next rescan in", interval)
f.timer.Reset(interval)
}

func (f *folderScanner) Scan(subdirs []string) error {
req := rescanRequest{
subdirs: subdirs,
err: make(chan error),
}
f.now <- req
return <-req.err
}

func (f *folderScanner) Delay(next time.Duration) {
f.delay <- next
}

func (f *folderScanner) HasNoInterval() bool {
return f.interval == 0
}
@@ -46,6 +46,13 @@ type stateTracker struct {
changed time.Time
}

func newStateTracker(id string) stateTracker {
return stateTracker{
folderID: id,
mut: sync.NewMutex(),
}
}

// setState sets the new folder state, for states other than FolderError.
func (s *stateTracker) setState(newState folderState) {
if newState == FolderError {
@@ -47,18 +47,18 @@ const (
)

type service interface {
Serve()
Stop()
Jobs() ([]string, []string) // In progress, Queued
BringToFront(string)
DelayScan(d time.Duration)
IndexUpdated() // Remote index was updated notification
IndexUpdated() // Remote index was updated notification
Jobs() ([]string, []string) // In progress, Queued
Scan(subs []string) error
Serve()
Stop()

setState(state folderState)
setError(err error)
clearError()
getState() (folderState, time.Time, error)
setState(state folderState)
clearError()
setError(err error)
}

type Availability struct {

@@ -110,6 +110,7 @@ var (

// errors returned by the CheckFolderHealth method
var (
errFolderPathEmpty = errors.New("folder path empty")
errFolderPathMissing = errors.New("folder path missing")
errFolderMarkerMissing = errors.New("folder marker missing")
errHomeDiskNoSpace = errors.New("home disk has insufficient free space")

@@ -574,7 +575,7 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
}

if !ok {
l.Fatalf("Index for nonexistant folder %q", folder)
l.Fatalf("Index for nonexistent folder %q", folder)
}

m.pmut.RLock()

@@ -615,7 +616,7 @@ func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []prot
m.fmut.RUnlock()

if !ok {
l.Fatalf("IndexUpdate for nonexistant folder %q", folder)
l.Fatalf("IndexUpdate for nonexistent folder %q", folder)
}

m.pmut.RLock()

@@ -658,23 +659,19 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon

tempIndexFolders := make([]string, 0, len(cm.Folders))

m.fmut.Lock()
nextFolder:
for _, folder := range cm.Folders {
cfg := m.folderCfgs[folder.ID]

if folder.Flags&^protocol.FlagFolderAll != 0 {
// There are flags set that we don't know what they mean. Scary!
// There are flags set that we don't know what they mean. Fatal!
l.Warnf("Device %v: unknown flags for folder %s", deviceID, folder.ID)
cfg.Invalid = fmt.Sprintf("Unknown flags from device %v", deviceID)
m.cfg.SetFolder(cfg)
if srv := m.folderRunners[folder.ID]; srv != nil {
srv.setError(fmt.Errorf(cfg.Invalid))
}
continue nextFolder
m.fmut.Unlock()
m.Close(deviceID, fmt.Errorf("Unknown folder flags from device %v", deviceID))
return
}

if !m.folderSharedWithUnlocked(folder.ID, deviceID) {
m.fmut.Lock()
shared := m.folderSharedWithUnlocked(folder.ID, deviceID)
m.fmut.Unlock()
if !shared {
events.Default.Log(events.FolderRejected, map[string]string{
"folder": folder.ID,
"folderLabel": folder.Label,

@@ -687,7 +684,6 @@ nextFolder:
tempIndexFolders = append(tempIndexFolders, folder.ID)
}
}
m.fmut.Unlock()

// This breaks if we send multiple CM messages during the same connection.
if len(tempIndexFolders) > 0 {

@@ -1431,10 +1427,10 @@ func (m *Model) ScanFolders() map[string]error {
}

func (m *Model) ScanFolder(folder string) error {
return m.ScanFolderSubs(folder, nil)
return m.ScanFolderSubdirs(folder, nil)
}

func (m *Model) ScanFolderSubs(folder string, subs []string) error {
func (m *Model) ScanFolderSubdirs(folder string, subs []string) error {
m.fmut.Lock()
runner, ok := m.folderRunners[folder]
m.fmut.Unlock()

@@ -1449,13 +1445,13 @@ func (m *Model) ScanFolderSubs(folder string, subs []string) error {
return runner.Scan(subs)
}

func (m *Model) internalScanFolderSubdirs(folder string, subs []string) error {
for i, sub := range subs {
func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error {
for i, sub := range subDirs {
sub = osutil.NativeFilename(sub)
if p := filepath.Clean(filepath.Join(folder, sub)); !strings.HasPrefix(p, folder) {
return errors.New("invalid subpath")
}
subs[i] = sub
subDirs[i] = sub
}

m.fmut.Lock()

@@ -1488,7 +1484,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subs []string) error {
// Clean the list of subitems to ensure that we start at a known
// directory, and don't scan subdirectories of things we've already
// scanned.
subs = unifySubs(subs, func(f string) bool {
subDirs = unifySubs(subDirs, func(f string) bool {
_, ok := fs.Get(protocol.LocalDeviceID, f)
return ok
})

@@ -1503,7 +1499,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subs []string) error {
fchan, err := scanner.Walk(scanner.Config{
Folder: folderCfg.ID,
Dir: folderCfg.Path(),
Subs: subs,
Subs: subDirs,
Matcher: ignores,
BlockSize: protocol.BlockSize,
TempNamer: defTempNamer,

@@ -1556,15 +1552,15 @@ func (m *Model) internalScanFolderSubdirs(folder string, subs []string) error {
m.updateLocalsFromScanning(folder, batch)
}

if len(subs) == 0 {
if len(subDirs) == 0 {
// If we have no specific subdirectories to traverse, set it to one
// empty prefix so we traverse the entire folder contents once.
subs = []string{""}
subDirs = []string{""}
}

// Do a scan of the database for each prefix, to check for deleted files.
batch = batch[:0]
for _, sub := range subs {
for _, sub := range subDirs {
var iterError error

fs.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {

@@ -1953,6 +1949,10 @@ func (m *Model) CheckFolderHealth(id string) error {

// checkFolderPath returns nil if the folder path exists and has the marker file.
func (m *Model) checkFolderPath(folder config.FolderConfiguration) error {
if folder.Path() == "" {
return errFolderPathEmpty
}

if fi, err := os.Stat(folder.Path()); err != nil || !fi.IsDir() {
return errFolderPathMissing
}
@@ -642,9 +642,6 @@ func TestROScanRecovery(t *testing.T) {
waitFor := func(status string) error {
timeout := time.Now().Add(2 * time.Second)
for {
if time.Now().After(timeout) {
return fmt.Errorf("Timed out waiting for status: %s, current status: %s", status, m.cfg.Folders()["default"].Invalid)
}
_, _, err := m.State("default")
if err == nil && status == "" {
return nil

@@ -652,6 +649,10 @@ func TestROScanRecovery(t *testing.T) {
if err != nil && err.Error() == status {
return nil
}

if time.Now().After(timeout) {
return fmt.Errorf("Timed out waiting for status: %s, current status: %v", status, err)
}
time.Sleep(10 * time.Millisecond)
}
}

@@ -727,9 +728,6 @@ func TestRWScanRecovery(t *testing.T) {
waitFor := func(status string) error {
timeout := time.Now().Add(2 * time.Second)
for {
if time.Now().After(timeout) {
return fmt.Errorf("Timed out waiting for status: %s, current status: %s", status, m.cfg.Folders()["default"].Invalid)
}
_, _, err := m.State("default")
if err == nil && status == "" {
return nil

@@ -737,6 +735,10 @@ func TestRWScanRecovery(t *testing.T) {
if err != nil && err.Error() == status {
return nil
}

if time.Now().After(timeout) {
return fmt.Errorf("Timed out waiting for status: %s, current status: %v", status, err)
}
time.Sleep(10 * time.Millisecond)
}
}

@@ -1402,7 +1404,7 @@ func TestIssue3028(t *testing.T) {

os.Remove("testdata/testrm")
os.Remove("testdata/testrm2")
m.ScanFolderSubs("default", []string{"testrm", "testrm2"})
m.ScanFolderSubdirs("default", []string{"testrm", "testrm2"})

// Verify that the number of files decreased by two and the number of
// deleted files increases by two
@@ -8,10 +8,8 @@ package model

import (
"fmt"
"time"

"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/versioner"
)

@@ -23,21 +21,13 @@ type roFolder struct {
folder
}

func newROFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner) service {
func newROFolder(model *Model, config config.FolderConfiguration, ver versioner.Versioner) service {
return &roFolder{
folder: folder{
stateTracker: stateTracker{
folderID: cfg.ID,
mut: sync.NewMutex(),
},
scan: folderscan{
interval: time.Duration(cfg.RescanIntervalS) * time.Second,
timer: time.NewTimer(time.Millisecond),
now: make(chan rescanRequest),
delay: make(chan time.Duration),
},
stop: make(chan struct{}),
model: model,
stateTracker: newStateTracker(config.ID),
scan: newFolderScanner(config),
stop: make(chan struct{}),
model: model,
},
}
}

@@ -59,7 +49,7 @@ func (f *roFolder) Serve() {
case <-f.scan.timer.C:
if err := f.model.CheckFolderHealth(f.folderID); err != nil {
l.Infoln("Skipping folder", f.folderID, "scan due to folder error:", err)
f.scan.reschedule()
f.scan.Reschedule()
continue
}

@@ -71,7 +61,7 @@ func (f *roFolder) Serve() {
// the same one as returned by CheckFolderHealth, though
// duplicate set is handled by setError.
f.setError(err)
f.scan.reschedule()
f.scan.Reschedule()
continue
}

@@ -80,11 +70,11 @@ func (f *roFolder) Serve() {
initialScanCompleted = true
}

if f.scan.interval == 0 {
if f.scan.HasNoInterval() {
continue
}

f.scan.reschedule()
f.scan.Reschedule()

case req := <-f.scan.now:
req.err <- f.scanSubdirsIfHealthy(req.subdirs)
@@ -107,18 +107,10 @@ type rwFolder struct {
func newRWFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner) service {
f := &rwFolder{
folder: folder{
stateTracker: stateTracker{
folderID: cfg.ID,
mut: sync.NewMutex(),
},
scan: folderscan{
interval: time.Duration(cfg.RescanIntervalS) * time.Second,
timer: time.NewTimer(time.Millisecond), // The first scan should be done immediately.
now: make(chan rescanRequest),
delay: make(chan time.Duration),
},
stop: make(chan struct{}),
model: model,
stateTracker: newStateTracker(cfg.ID),
scan: newFolderScanner(cfg),
stop: make(chan struct{}),
model: model,
},

virtualMtimeRepo: db.NewVirtualMtimeRepo(model.db, cfg.ID),

@@ -297,7 +289,7 @@ func (f *rwFolder) Serve() {
// same time.
case <-f.scan.timer.C:
err := f.scanSubdirsIfHealthy(nil)
f.scan.reschedule()
f.scan.Reschedule()
if err != nil {
continue
}
@@ -69,10 +69,8 @@ func setUpModel(file protocol.FileInfo) *Model {
func setUpRwFolder(model *Model) rwFolder {
return rwFolder{
folder: folder{
stateTracker: stateTracker{
folderID: "default",
},
model: model,
stateTracker: newStateTracker("default"),
model: model,
},
dir: "testdata",
queue: newJobQueue(),
@@ -20,6 +20,11 @@ type tempNamer struct {

var defTempNamer tempNamer

// Real filesystems usually handle 255 bytes. encfs has varying and
// confusing file name limits. We take a safe way out and switch to hashing
// quite early.
const maxFilenameLength = 160 - len(".syncthing.") - len(".tmp")

func init() {
if runtime.GOOS == "windows" {
defTempNamer = tempNamer{"~syncthing~"}

@@ -35,7 +40,7 @@ func (t tempNamer) IsTemporary(name string) bool {
func (t tempNamer) TempName(name string) string {
tdir := filepath.Dir(name)
tbase := filepath.Base(name)
if len(tbase) > 240 {
if len(tbase) > maxFilenameLength {
hash := md5.New()
hash.Write([]byte(name))
tbase = fmt.Sprintf("%x", hash.Sum(nil))
lib/upgrade/testdata/github-releases.json (vendored, 3814 changed lines)

@@ -9,8 +9,6 @@
package upgrade

import (
"encoding/json"
"os"
"strings"
"testing"
)

@@ -59,35 +57,6 @@ func TestCompareVersions(t *testing.T) {
}
}

func TestGithubRelease(t *testing.T) {
var upgrades = map[string]string{
"v0.10.21": "v0.10.30",
"v0.10.29": "v0.10.30",
"v0.10.0-alpha": "v0.10.30",
"v0.10.0-beta": "v0.10.30",
"v0.11.0-beta0+40-g53cb66e-dirty": "v0.11.0-beta0",
}

fd, err := os.Open("testdata/github-releases.json")
if err != nil {
t.Errorf("Missing github-release test data")
}
defer fd.Close()

var rels []Release
json.NewDecoder(fd).Decode(&rels)

for old, target := range upgrades {
upgrade, err := SelectLatestRelease(old, rels)
if err != nil {
t.Error("Error retrieving latest version", err)
}
if upgrade.Tag != target {
t.Errorf("Invalid upgrade release: %v -> %v, but got %v", old, target, upgrade.Tag)
}
}
}

func TestErrorRelease(t *testing.T) {
_, err := SelectLatestRelease("v0.11.0-beta", nil)
if err == nil {
Generated man pages: in each of the following files the .TH header line was bumped from "June 26, 2016" "v0.12" to "July 02, 2016" "v0.13": syncthing-bep(7), syncthing-config(5), syncthing-device-ids(7), syncthing-event-api(7), syncthing-faq(7), syncthing-globaldisco(7), syncthing-localdisco(7), syncthing-networking(7), syncthing-relay(7), syncthing-rest-api(7), syncthing-security(7), syncthing-stignore(5), Todo(7), and syncthing(1).

syncthing-config(5):

@@ -190,8 +190,9 @@ element:
The folder ID, must be unique. (mandatory)
.TP
.B label
The label of a folder is a human readable and descriptive local name.
Can be different on each device. (optional)
The label of a folder is a human readable and descriptive local name. May
be different on each device, empty, and/or identical to other folder
labels. (optional)
.TP
.B path
The path to the directory where the folder is stored on this

syncthing-device-ids(7):

@@ -34,7 +34,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.sp
Every device is identified by a device ID. The device ID is used for address
resolution, authentication and authorization. The term "device ID" could
interchangably have been "key ID" since the device ID is a direct property of
interchangeably have been "key ID" since the device ID is a direct property of
the public key in use.
.SH KEYS
.sp

syncthing-rest-api(7):

@@ -419,13 +419,6 @@ Returns a \fB{"ping": "pong"}\fP object.
.SS POST /rest/system/ping
.sp
Returns a \fB{"ping": "pong"}\fP object.
.sp
\fBNOTE:\fP
.INDENT 0.0
.INDENT 3.5
Due to being a POST request, this method requires using an API key or CSRF token, as opposed to the GET request to the same URL.
.UNINDENT
.UNINDENT
.SS POST /rest/system/reset
.sp
Post with empty body to erase the current index database and restart
@@ -15,12 +15,10 @@ import (
"flag"
"go/format"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"text/template"
"time"
)

var tpl = template.Must(template.New("assets").Parse(`package auto

@@ -29,10 +27,6 @@ import (
"encoding/base64"
)

const (
AssetsBuildDate = "{{.BuildDate}}"
)

func Assets() map[string][]byte {
var assets = make(map[string][]byte, {{.Assets | len}})
{{range $asset := .Assets}}

@@ -86,7 +80,6 @@ func walkerFor(basePath string) filepath.WalkFunc {

type templateVars struct {
Assets []asset
BuildDate string
}

func main() {

@@ -96,7 +89,6 @@ func main() {
var buf bytes.Buffer
tpl.Execute(&buf, templateVars{
Assets: assets,
BuildDate: time.Now().UTC().Format(http.TimeFormat),
})
bs, err := format.Source(buf.Bytes())
if err != nil {