Compare commits

..

61 Commits

Author  SHA1  Message  Date
Jakob Borg  77fe8449ba  Test script for REST interface  2014-06-22 18:18:21 +02:00
Jakob Borg  33e9a35f08  Don't deadlock on connect close while sending Index (fixes #386)  2014-06-22 08:17:58 +02:00
Jakob Borg  4ab4816556  Detect deadlock in model and panic  2014-06-21 12:35:53 +02:00
Jakob Borg  8e8a579bb2  Asset update for previous commit  2014-06-20 11:40:38 +02:00
Jakob Borg  efbdf72d20  Lower CPU usage at idle by reducing db polling  2014-06-20 00:28:45 +02:00
Jakob Borg  0e59b5678a  Further clarify message ordering requirements (ref #377)  2014-06-19 01:59:58 +02:00
Jakob Borg  de75550415  Clarify requirements on config messages (ref #377)  2014-06-19 01:27:03 +02:00
Jakob Borg  4dbce32738  Simplify memory handling  2014-06-19 01:02:32 +02:00
Jakob Borg  b05fcbc9d7  Simplify usage reporting config options (fixes #370)  2014-06-18 12:54:30 +02:00
Jakob Borg  d09c71b688  Avoid build error in Go1.2  2014-06-18 11:02:59 +02:00
Jakob Borg  874d6760d4  Handle .stignore correctly on Windows (fixes #369)  2014-06-16 16:19:14 +02:00
Jakob Borg  26ebbee877  Hard override on changes from master repo  2014-06-16 10:47:02 +02:00
Jakob Borg  12eda0449a  Build and memSize impl for Solaris  2014-06-16 10:19:32 +02:00
Jakob Borg  5a98f4e47c  Mark repos with missing dir as invalid on startup (fixes #311)  2014-06-16 09:33:52 +02:00
Jakob Borg  964c903a68  Only keep track of version (not modified) for sent index  2014-06-16 07:40:17 +02:00
Jakob Borg  21b699826d  Increase reconnect delay towards max  2014-06-15 20:32:26 +02:00
Jakob Borg  5fa8f8e50c  Remove old index files on startup (fixes #366)  2014-06-15 20:31:26 +02:00
Jakob Borg  9ca87f5314  Don't attempt to use broadcast with IPv6 (ref #346)  2014-06-14 11:14:37 +02:00
Jakob Borg  537c6b3b69  Reduce ping time & timeout (ref #358)  2014-06-14 11:07:34 +02:00
Jakob Borg  48a3fac2da  Show out of sync items, rename files->items (fixes #312, fixes #352)  2014-06-14 10:58:36 +02:00
Jakob Borg  fd73682806  Don't need to sync deletes for nonexistent files  2014-06-14 10:55:44 +02:00
Jakob Borg  34bd5b9dcf  Better android detection  2014-06-13 20:45:57 +02:00
Jakob Borg  58c5e46206  Add build environment variable  2014-06-13 20:44:00 +02:00
Jakob Borg  4c61ab0f18  Request restart for GUI setting changes  2014-06-13 20:25:10 +02:00
Jakob Borg  f241b63e0e  Logo with text  2014-06-13 01:57:03 +02:00
Jakob Borg  2ffdb5a82a  Actually generate random certificate serials (fixes #361)  2014-06-13 01:49:30 +02:00
Jakob Borg  46e963443d  Include system RAM size in usage report  2014-06-12 20:47:46 +02:00
Jakob Borg  66d4e9e5d7  Prevent possible reordering of Index/IndexUpdate on send (ref #344)  2014-06-12 18:07:06 +02:00
Jakob Borg  de382e33a3  Forget go1.2  2014-06-12 02:28:03 +02:00
Jakob Borg  3c6738da73  Limit damage of previous commit to ARM arch  2014-06-12 01:11:04 +02:00
Jakob Borg  18e5cb6793  Work around broken DNS on Android for usage reporting  2014-06-12 01:05:00 +02:00
Jakob Borg  9cd6b85c09  Remove dead code from previous commit  2014-06-11 22:29:49 +02:00
Jakob Borg  f40f3b3b7b  Anonymous Usage Reporting  2014-06-11 20:06:53 +02:00
Jakob Borg  7454670b0a  Drop and warn about non-normalized file names on Linux/Windows (fixes #329)  2014-06-11 17:51:31 +02:00
Jakob Borg  e63596681d  Fix header in protocol spec (fixes #360)  2014-06-11 16:27:39 +02:00
Jakob Borg  3dbaa76dcb  Fix embarrasing badge :)  2014-06-10 17:23:00 +02:00
Jakob Borg  8752003b50  Add embarassing badge  2014-06-10 17:05:15 +02:00
Jakob Borg  8716ed5aa4  Fix coveralls.io data pushing  2014-06-10 17:05:15 +02:00
Jakob Borg  38ac4e8f79  Serialize incoming indexes (fixes #344)  2014-06-10 17:05:15 +02:00
Arthur Axel 'fREW' Schmidt  70fc8a3064  push test coverage info to coveralls.io  2014-06-10 17:05:15 +02:00
Jakob Borg  7626c5d526  Merge pull request #357 from jpjp/patch-1 (Change Name -> Node Name to match Add Repo dialog.)  2014-06-10 16:09:22 +02:00
Jakob Borg  7e04c9d048  Information about HTTP certificate issues  2014-06-10 15:40:21 +02:00
jpjp  9eda8f2c7e  Change Name -> Node Name to match Add Repo dialog.  2014-06-10 13:46:29 +02:00
Jakob Borg  456d9e870d  Integration test, API key  2014-06-08 19:17:42 +02:00
Jakob Borg  a1533696a5  Travis badge  2014-06-08 07:40:57 +02:00
Jakob Borg  92499af323  Revert "Build for Solaris" (This reverts commit 5a2328d9a5.)  2014-06-08 07:37:51 +02:00
Arthur Axel 'fREW' Schmidt  b2988cdd35  test against travis-ci  2014-06-08 07:37:42 +02:00
Arthur Axel 'fREW' Schmidt  82cfd37263  Allow prioritization of downloads based on name (fixes #174)  2014-06-08 07:16:25 +02:00
Jakob Borg  df381fd03f  Let server side decide if restart is needed on config change  2014-06-07 04:00:46 +02:00
Jakob Borg  5a2328d9a5  Build for Solaris  2014-06-07 03:56:13 +02:00
Jakob Borg  b2f66cfb60  Reject index for existing repo from unshared node (fixes #342)  2014-06-06 21:48:29 +02:00
Jakob Borg  6d24e4f122  Test case for #342  2014-06-06 21:40:04 +02:00
Jakob Borg  2e2185165c  Improve test suite, fix bug in Set.Global()  2014-06-05 15:32:11 +02:00
Jakob Borg  f0612e57c2  Integration tests with API key  2014-06-05 11:48:22 +02:00
Jakob Borg  e5d16ed08a  Remove extra whitespace around node ID (fixes #335)  2014-06-05 11:29:05 +02:00
Jakob Borg  1cff9ccc63  API key change should take effect on restart only  2014-06-05 09:16:12 +02:00
Jakob Borg  20a018db2e  Implement API keys  2014-06-04 22:00:55 +02:00
Jakob Borg  80c2b32b92  Implement CSRF protection for REST interface (fixes #287)  2014-06-04 21:20:07 +02:00
Jakob Borg  028e9bc17a  Tweak Shared With wording  2014-06-04 15:05:23 +02:00
Jakob Borg  afc2d6fda4  Clarify repo mismatch message (fixes #331)  2014-06-04 14:17:48 +02:00
Jakob Borg  bec5c76631  Use unique name and O_EXCL for temporary indexes (fixes #332)  2014-06-04 13:43:59 +02:00
48 changed files with 1797 additions and 362 deletions

3
.gitignore vendored

@@ -8,4 +8,5 @@ stcli.exe
*.sublime*
discosrv
stpidx
.jshintrc
.jshintrc
coverage.out

20
.travis.yml Normal file

@@ -0,0 +1,20 @@
language: go
go:
- tip
install:
- export PATH=$PATH:$HOME/gopath/bin
- ./build.sh setup
- go get code.google.com/p/go.tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- ./build.sh test-cov
after_success:
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/syncthing -repotoken="$COVERALS_TOKEN"
env:
global:
secure: "zEV2h2XtKHNLVdXJjM4LA/VjMfLVydm6goF+ARit+nOSGxGoH7f7jIdzJzhxgh7shKG93q61eLO1Tug+WBMYB2EpBuYnTB5AIMYhCDwNI8C4uBV6c3brHfcrie7MASNao8TID2QScASKNFFWvjv/i1Ccn5ztxdcQuhSsNjGZp8A="


@@ -1,5 +1,6 @@
Aaron Bieber <qbit@deftly.net>
Andrew Dunham <andrew@du.nham.ca>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com>
Brandon Philips <brandon@ifup.org>
James Patterson <jamespatterson@operamail.com>
Jens Diemer <github.com@jensdiemer.de>

2
Godeps/Godeps.json generated

@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/calmh/syncthing",
"GoVersion": "go1.2.2",
"GoVersion": "go1.3",
"Packages": [
"./cmd/syncthing",
"./cmd/assets",


@@ -1,4 +1,4 @@
syncthing
syncthing [![Build Status](https://travis-ci.org/calmh/syncthing.svg?branch=master)](https://travis-ci.org/calmh/syncthing) [![Coverage Status](https://img.shields.io/coveralls/calmh/syncthing.svg)](https://coveralls.io/r/calmh/syncthing?branch=master)
=========
This is the `syncthing` project. The following are the project goals:

BIN
assets/st-logo-text.pxm Normal file

Binary file not shown.


File diff suppressed because one or more lines are too long


@@ -52,7 +52,7 @@ func (b *Beacon) Recv() ([]byte, net.Addr) {
}
func (b *Beacon) reader() {
var bs = make([]byte, 65536)
bs := make([]byte, 65536)
for {
n, addr, err := b.conn.ReadFrom(bs)
if err != nil {
@@ -62,8 +62,11 @@ func (b *Beacon) reader() {
if debug {
l.Debugf("recv %d bytes from %s", n, addr)
}
c := make([]byte, n)
copy(c, bs)
select {
case b.outbox <- recv{bs[:n], addr}:
case b.outbox <- recv{c, addr}:
default:
if debug {
l.Debugln("dropping message")
@@ -83,7 +86,7 @@ func (b *Beacon) writer() {
var dsts []net.IP
for _, addr := range addrs {
if iaddr, ok := addr.(*net.IPNet); ok && iaddr.IP.IsGlobalUnicast() {
if iaddr, ok := addr.(*net.IPNet); ok && iaddr.IP.IsGlobalUnicast() && iaddr.IP.To4() != nil {
baddr := bcast(iaddr)
dsts = append(dsts, baddr.IP)
}


@@ -1,50 +0,0 @@
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.
// Package buffers manages a set of reusable byte buffers.
package buffers
const (
largeMin = 1024
)
var (
smallBuffers = make(chan []byte, 32)
largeBuffers = make(chan []byte, 32)
)
func Get(size int) []byte {
var ch = largeBuffers
if size < largeMin {
ch = smallBuffers
}
var buf []byte
select {
case buf = <-ch:
default:
}
if len(buf) < size {
return make([]byte, size)
}
return buf[:size]
}
func Put(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) == 0 {
return
}
var ch = largeBuffers
if len(buf) < largeMin {
ch = smallBuffers
}
select {
case ch <- buf:
default:
}
}


@@ -9,7 +9,8 @@ date=$(git show -s --format=%ct)
user=$(whoami)
host=$(hostname)
host=${host%%.*}
ldflags="-w -X main.Version $version -X main.BuildStamp $date -X main.BuildUser $user -X main.BuildHost $host"
bldenv=${ENVIRONMENT:-default}
ldflags="-w -X main.Version $version -X main.BuildStamp $date -X main.BuildUser $user -X main.BuildHost $host -X main.BuildEnv $bldenv"
check() {
if ! command -v godep >/dev/null ; then
@@ -31,6 +32,21 @@ assets() {
godep go run cmd/assets/assets.go gui > auto/gui.files.go
}
test-cov() {
echo "mode: set" > coverage.out
fail=0
for dir in $(go list ./...) ; do
godep go test -coverprofile=profile.out $dir || fail=1
if [ -f profile.out ] ; then
grep -v "mode: set" profile.out >> coverage.out
rm profile.out
fi
done
exit $fail
}
test() {
check
godep go test -cpu=1,2,4 ./...
@@ -61,7 +77,8 @@ zipDist() {
rm -rf "$name"
mkdir -p "$name"
for f in "${distFiles[@]}" ; do
sed 's/$/
sed 's/$/
/' < "$f" > "$name/$f.txt"
done
cp syncthing.exe "$name"
sign "$name/syncthing.exe"
@@ -100,6 +117,10 @@ case "$1" in
test
;;
test-cov)
test-cov
;;
tar)
rm -f *.tar.gz *.zip
test || exit 1
@@ -121,7 +142,7 @@ case "$1" in
godep go build ./cmd/stpidx
godep go build ./cmd/stcli
for os in darwin-amd64 linux-386 linux-amd64 freebsd-amd64 windows-amd64 windows-386 solaris-amd64 ; do
export GOOS=${os%-*}
export GOARCH=${os#*-}


@@ -16,6 +16,7 @@ import (
"net"
"net/http"
"path/filepath"
"reflect"
"runtime"
"sync"
"time"
@@ -40,6 +41,7 @@ var (
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
static func(http.ResponseWriter, *http.Request, *log.Logger)
apiKey string
)
const (
@@ -56,6 +58,8 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
if cfg.UseTLS {
cert, err := loadCert(confDir, "https-")
if err != nil {
l.Infoln("Loading HTTPS certificate:", err)
l.Infoln("Creating new HTTPS certificate")
newCertificate(confDir, "https-")
cert, err = loadCert(confDir, "https-")
}
@@ -87,6 +91,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
router.Get("/", getRoot)
router.Get("/rest/version", restGetVersion)
router.Get("/rest/model", restGetModel)
router.Get("/rest/model/version", restGetModelVersion)
router.Get("/rest/need", restGetNeed)
router.Get("/rest/connections", restGetConnections)
router.Get("/rest/config", restGetConfig)
@@ -94,6 +99,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
router.Get("/rest/system", restGetSystem)
router.Get("/rest/errors", restGetErrors)
router.Get("/rest/discovery", restGetDiscovery)
router.Get("/rest/report", restGetReport)
router.Get("/qr/:text", getQR)
router.Post("/rest/config", restPostConfig)
@@ -103,8 +109,10 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
router.Post("/rest/error", restPostError)
router.Post("/rest/error/clear", restClearErrors)
router.Post("/rest/discovery/hint", restPostDiscoveryHint)
router.Post("/rest/model/override", restPostOverride)
mr := martini.New()
mr.Use(csrfMiddleware)
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
mr.Use(basic(cfg.User, cfg.Password))
}
@@ -114,6 +122,9 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
mr.Action(router.Handle)
mr.Map(m)
apiKey = cfg.APIKey
loadCsrfTokens()
go http.Serve(listener, mr)
return nil
@@ -134,6 +145,17 @@ func restGetVersion() string {
return Version
}
func restGetModelVersion(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
var res = make(map[string]interface{})
res["version"] = m.Version(repo)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
@@ -158,24 +180,31 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
res["state"] = m.State(repo)
res["version"] = m.Version(repo)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restPostOverride(m *model.Model, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
m.Override(repo)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
files := m.NeedFilesRepo(repo)
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(files)
}
func restGetConnections(m *model.Model, w http.ResponseWriter) {
var res = m.ConnectionStats()
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
@@ -184,10 +213,11 @@ func restGetConfig(w http.ResponseWriter) {
if encCfg.GUI.Password != "" {
encCfg.GUI.Password = unchangedPassword
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(encCfg)
}
func restPostConfig(req *http.Request) {
func restPostConfig(req *http.Request, m *model.Model) {
var newCfg config.Configuration
err := json.NewDecoder(req.Body).Decode(&newCfg)
if err != nil {
@@ -205,28 +235,80 @@ func restPostConfig(req *http.Request) {
newCfg.GUI.Password = string(hash)
}
}
// Figure out if any changes require a restart
if len(cfg.Repositories) != len(newCfg.Repositories) {
configInSync = false
} else {
om := cfg.RepoMap()
nm := newCfg.RepoMap()
for id := range om {
if !reflect.DeepEqual(om[id], nm[id]) {
configInSync = false
break
}
}
}
if len(cfg.Nodes) != len(newCfg.Nodes) {
configInSync = false
} else {
om := cfg.NodeMap()
nm := newCfg.NodeMap()
for k := range om {
if _, ok := nm[k]; !ok {
configInSync = false
break
}
}
}
if newCfg.Options.URAccepted > cfg.Options.URAccepted {
// UR was enabled
newCfg.Options.URAccepted = usageReportVersion
err := sendUsageReport(m)
if err != nil {
l.Infoln("Usage report:", err)
}
go usageReportingLoop(m)
} else if newCfg.Options.URAccepted < cfg.Options.URAccepted {
// UR was disabled
newCfg.Options.URAccepted = -1
stopUsageReporting()
}
if !reflect.DeepEqual(cfg.Options, newCfg.Options) || !reflect.DeepEqual(cfg.GUI, newCfg.GUI) {
configInSync = false
}
// Activate and save
cfg = newCfg
saveConfig()
configInSync = false
}
}
func restGetConfigInSync(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
}
func restPostRestart(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
flushResponse(`{"ok": "restarting"}`, w)
go restart()
}
func restPostReset(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
flushResponse(`{"ok": "resetting repos"}`, w)
resetRepositories()
go restart()
}
func restPostShutdown(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
flushResponse(`{"ok": "shutting down"}`, w)
go shutdown()
}
@@ -261,11 +343,12 @@ func restGetSystem(w http.ResponseWriter) {
cpuUsageLock.RUnlock()
res["cpuPercent"] = cpusum / 10
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetErrors(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
guiErrorsMut.Lock()
json.NewEncoder(w).Encode(guiErrors)
guiErrorsMut.Unlock()
@@ -302,9 +385,15 @@ func restPostDiscoveryHint(r *http.Request) {
}
func restGetDiscovery(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(discoverer.All())
}
func restGetReport(w http.ResponseWriter, m *model.Model) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(reportData(m))
}
func getQR(w http.ResponseWriter, params martini.Params) {
code, err := qr.Encode(params["text"], qr.M)
if err != nil {
@@ -318,6 +407,10 @@ func getQR(w http.ResponseWriter, params martini.Params) {
func basic(username string, passhash string) http.HandlerFunc {
return func(res http.ResponseWriter, req *http.Request) {
if validAPIKey(req.Header.Get("X-API-Key")) {
return
}
error := func() {
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
res.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
@@ -355,6 +448,10 @@ func basic(username string, passhash string) http.HandlerFunc {
}
}
func validAPIKey(k string) bool {
return len(apiKey) > 0 && k == apiKey
}
func embeddedStatic() func(http.ResponseWriter, *http.Request, *log.Logger) {
var modt = time.Now().UTC().Format(http.TimeFormat)

115
cmd/syncthing/gui_csrf.go Normal file

@@ -0,0 +1,115 @@
package main
import (
"bufio"
"crypto/rand"
"encoding/base64"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/calmh/syncthing/osutil"
)
var csrfTokens []string
var csrfMut sync.Mutex
// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
// the request with 403. For / and /index.html, set a new CSRF cookie if none
// is currently set.
func csrfMiddleware(w http.ResponseWriter, r *http.Request) {
if validAPIKey(r.Header.Get("X-API-Key")) {
return
}
if strings.HasPrefix(r.URL.Path, "/rest/") {
token := r.Header.Get("X-CSRF-Token")
if !validCsrfToken(token) {
http.Error(w, "CSRF Error", 403)
}
} else if r.URL.Path == "/" || r.URL.Path == "/index.html" {
cookie, err := r.Cookie("CSRF-Token")
if err != nil || !validCsrfToken(cookie.Value) {
cookie = &http.Cookie{
Name: "CSRF-Token",
Value: newCsrfToken(),
}
http.SetCookie(w, cookie)
}
}
}
func validCsrfToken(token string) bool {
csrfMut.Lock()
defer csrfMut.Unlock()
for _, t := range csrfTokens {
if t == token {
return true
}
}
return false
}
func newCsrfToken() string {
bs := make([]byte, 30)
_, err := rand.Reader.Read(bs)
if err != nil {
l.Fatalln(err)
}
token := base64.StdEncoding.EncodeToString(bs)
csrfMut.Lock()
csrfTokens = append(csrfTokens, token)
if len(csrfTokens) > 10 {
csrfTokens = csrfTokens[len(csrfTokens)-10:]
}
defer csrfMut.Unlock()
saveCsrfTokens()
return token
}
func saveCsrfTokens() {
name := filepath.Join(confDir, "csrftokens.txt")
tmp := fmt.Sprintf("%s.tmp.%d", name, time.Now().UnixNano())
f, err := os.OpenFile(tmp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
if err != nil {
return
}
defer os.Remove(tmp)
for _, t := range csrfTokens {
_, err := fmt.Fprintln(f, t)
if err != nil {
return
}
}
err = f.Close()
if err != nil {
return
}
osutil.Rename(tmp, name)
}
func loadCsrfTokens() {
name := filepath.Join(confDir, "csrftokens.txt")
f, err := os.Open(name)
if err != nil {
return
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
csrfTokens = append(csrfTokens, s.Text())
}
}
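
As an illustration outside the changeset, a minimal Go sketch of a client for the middleware above, assuming a syncthing GUI at the placeholder address 127.0.0.1:8080: it loads / so csrfMiddleware sets the CSRF-Token cookie, echoes that value back as X-CSRF-Token on a /rest/ call, and shows the X-API-Key header that validAPIKey accepts as a bypass.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/cookiejar"
)

func main() {
	base := "http://127.0.0.1:8080" // placeholder GUI address

	jar, _ := cookiejar.New(nil)
	client := &http.Client{Jar: jar}

	// GET / so csrfMiddleware issues a CSRF-Token cookie into the jar.
	resp, err := client.Get(base + "/")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Echo the cookie value back as the X-CSRF-Token header on /rest/ calls.
	req, _ := http.NewRequest("GET", base+"/rest/version", nil)
	for _, c := range jar.Cookies(req.URL) {
		if c.Name == "CSRF-Token" {
			req.Header.Set("X-CSRF-Token", c.Value)
		}
	}
	// Alternatively, a configured API key skips the CSRF check entirely:
	// req.Header.Set("X-API-Key", "PLACEHOLDER-API-KEY")

	resp, err = client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}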


@@ -5,6 +5,7 @@
package main
import (
"crypto/sha1"
"crypto/tls"
"flag"
"fmt"
@@ -36,6 +37,7 @@ import (
var (
Version = "unknown-dev"
BuildEnv = "default"
BuildStamp = "0"
BuildDate time.Time
BuildHost = "unknown"
@@ -50,7 +52,7 @@ func init() {
BuildDate = time.Unix(int64(stamp), 0)
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
LongVersion = fmt.Sprintf("syncthing %s (%s %s-%s) %s@%s %s", Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
LongVersion = fmt.Sprintf("syncthing %s (%s %s-%s %s) %s@%s %s", Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildEnv, BuildUser, BuildHost, date)
if os.Getenv("STTRACE") != "" {
logFlags = log.Ltime | log.Ldate | log.Lmicroseconds | log.Lshortfile
@@ -107,6 +109,10 @@ The following enviroment variables are interpreted by syncthing:
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.`
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func main() {
var reset bool
var showVersion bool
@@ -273,11 +279,28 @@ func main() {
m := model.NewModel(confDir, &cfg, "syncthing", Version)
for _, repo := range cfg.Repositories {
nextRepo:
for i, repo := range cfg.Repositories {
if repo.Invalid != "" {
continue
}
repo.Directory = expandTilde(repo.Directory)
// Safety check. If the cached index contains files but the repository
// doesn't exist, we have a problem. We would assume that all files
// have been deleted which might not be the case, so abort instead.
id := fmt.Sprintf("%x", sha1.Sum([]byte(repo.Directory)))
idxFile := filepath.Join(confDir, id+".idx.gz")
if _, err := os.Stat(idxFile); err == nil {
if fi, err := os.Stat(repo.Directory); err != nil || !fi.IsDir() {
cfg.Repositories[i].Invalid = "repo directory missing"
continue nextRepo
}
}
ensureDir(repo.Directory, -1)
m.AddRepo(repo)
}
@@ -321,41 +344,40 @@ func main() {
l.Infoln("Populating repository index")
m.LoadIndexes(confDir)
for _, repo := range cfg.Repositories {
if repo.Invalid != "" {
continue
}
dir := expandTilde(repo.Directory)
// Safety check. If the cached index contains files but the repository
// doesn't exist, we have a problem. We would assume that all files
// have been deleted which might not be the case, so abort instead.
if files, _, _ := m.LocalSize(repo.ID); files > 0 {
if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
l.Warnf("Configured repository %q has index but directory %q is missing; not starting.", repo.ID, repo.Directory)
l.Fatalf("Ensure that directory is present or remove repository from configuration.")
}
}
// Ensure that repository directories exist for newly configured repositories.
ensureDir(dir, -1)
}
m.CleanRepos()
m.ScanRepos()
m.SaveIndexes(confDir)
// Remove all .idx* files that don't belong to an active repo.
validIndexes := make(map[string]bool)
for _, repo := range cfg.Repositories {
dir := expandTilde(repo.Directory)
id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
validIndexes[id] = true
}
allIndexes, err := filepath.Glob(filepath.Join(confDir, "*.idx*"))
if err == nil {
for _, idx := range allIndexes {
bn := filepath.Base(idx)
fs := strings.Split(bn, ".")
if len(fs) > 1 {
if _, ok := validIndexes[fs[0]]; !ok {
l.Infoln("Removing old index", bn)
os.Remove(idx)
}
}
}
}
// UPnP
var externalPort = 0
if cfg.Options.UPnPEnabled {
// We seed the random number generator with the node ID to get a
// repeatable sequence of random external ports.
rand.Seed(certSeed(cert.Certificate[0]))
externalPort = setupUPnP()
externalPort = setupUPnP(rand.NewSource(certSeed(cert.Certificate[0])))
}
// Routine to connect out to configured nodes
@@ -393,6 +415,21 @@ func main() {
}
}
if cfg.Options.URAccepted > 0 && cfg.Options.URAccepted < usageReportVersion {
l.Infoln("Anonymous usage report has changed; revoking acceptance")
cfg.Options.URAccepted = 0
}
if cfg.Options.URAccepted >= usageReportVersion {
go usageReportingLoop(m)
go func() {
time.Sleep(10 * time.Minute)
err := sendUsageReport(m)
if err != nil {
l.Infoln("Usage report:", err)
}
}()
}
<-stop
l.Okln("Exiting")
}
@@ -411,7 +448,7 @@ func waitForParentExit() {
l.Okln("Continuing")
}
func setupUPnP() int {
func setupUPnP(r rand.Source) int {
var externalPort = 0
if len(cfg.Options.ListenAddress) == 1 {
_, portStr, err := net.SplitHostPort(cfg.Options.ListenAddress[0])
@@ -423,7 +460,7 @@ func setupUPnP() int {
igd, err := upnp.Discover()
if err == nil {
for i := 0; i < 10; i++ {
r := 1024 + rand.Intn(65535-1024)
r := 1024 + int(r.Int63()%(65535-1024))
err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", 0)
if err == nil {
externalPort = r
@@ -569,6 +606,7 @@ func listenConnect(myID string, m *model.Model, tlsCfg *tls.Config) {
// Connect
go func() {
var delay time.Duration = 1 * time.Second
for {
nextNode:
for _, nodeCfg := range cfg.Nodes {
@@ -619,7 +657,11 @@ func listenConnect(myID string, m *model.Model, tlsCfg *tls.Config) {
}
}
time.Sleep(time.Duration(cfg.Options.ReconnectIntervalS) * time.Second)
time.Sleep(delay)
delay *= 2
if maxD := time.Duration(cfg.Options.ReconnectIntervalS) * time.Second; delay > maxD {
delay = maxD
}
}
}()
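
As an illustration outside the changeset, this sketch shows the index file naming that both the startup safety check and the stale-index cleanup above rely on: the SHA-1 of the expanded repository directory, hex-encoded, with an .idx.gz suffix. The paths below are placeholders.

package main

import (
	"crypto/sha1"
	"fmt"
	"path/filepath"
)

func main() {
	confDir := "/home/user/.config/syncthing" // placeholder config directory
	repoDir := "/home/user/Sync"              // repository directory, already tilde-expanded

	// Same derivation as in the hunk above: hex(sha1(dir)) + ".idx.gz".
	id := fmt.Sprintf("%x", sha1.Sum([]byte(repoDir)))
	idxFile := filepath.Join(confDir, id+".idx.gz")

	// At startup, any confDir/*.idx* file whose hash prefix does not match a
	// configured repository directory is treated as stale and removed.
	fmt.Println(idxFile)
}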


@@ -0,0 +1,25 @@
package main
import (
"errors"
"os/exec"
"strconv"
"strings"
)
func memorySize() (uint64, error) {
cmd := exec.Command("sysctl", "hw.memsize")
out, err := cmd.Output()
if err != nil {
return 0, err
}
fs := strings.Fields(string(out))
if len(fs) != 2 {
return 0, errors.New("sysctl parse error")
}
bytes, err := strconv.ParseUint(fs[1], 10, 64)
if err != nil {
return 0, err
}
return bytes, nil
}


@@ -0,0 +1,33 @@
package main
import (
"bufio"
"errors"
"os"
"strconv"
"strings"
)
func memorySize() (uint64, error) {
f, err := os.Open("/proc/meminfo")
if err != nil {
return 0, err
}
s := bufio.NewScanner(f)
if !s.Scan() {
return 0, errors.New("/proc/meminfo parse error 1")
}
l := s.Text()
fs := strings.Fields(l)
if len(fs) != 3 || fs[2] != "kB" {
return 0, errors.New("/proc/meminfo parse error 2")
}
kb, err := strconv.ParseUint(fs[1], 10, 64)
if err != nil {
return 0, err
}
return kb * 1024, nil
}


@@ -0,0 +1,22 @@
// +build solaris
package main
import (
"os/exec"
"strconv"
)
func memorySize() (uint64, error) {
cmd := exec.Command("prtconf", "-m")
out, err := cmd.CombinedOutput()
if err != nil {
return 0, err
}
mb, err := strconv.ParseUint(string(out), 10, 64)
if err != nil {
return 0, err
}
return mb * 1024 * 1024, nil
}


@@ -0,0 +1,9 @@
// +build freebsd
package main
import "errors"
func memorySize() (uint64, error) {
return 0, errors.New("not implemented")
}


@@ -0,0 +1,25 @@
package main
import (
"encoding/binary"
"syscall"
"unsafe"
)
var (
kernel32, _ = syscall.LoadLibrary("kernel32.dll")
globalMemoryStatusEx, _ = syscall.GetProcAddress(kernel32, "GlobalMemoryStatusEx")
)
func memorySize() (uint64, error) {
var memoryStatusEx [64]byte
binary.LittleEndian.PutUint32(memoryStatusEx[:], 64)
p := uintptr(unsafe.Pointer(&memoryStatusEx[0]))
ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, p, 0, 0)
if ret == 0 {
return 0, callErr
}
return binary.LittleEndian.Uint64(memoryStatusEx[8:]), nil
}
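
For context, outside the changeset: the 64-byte buffer above is assumed to stand in for the Win32 MEMORYSTATUSEX structure, whose dwLength field must hold the structure size before the call and whose ullTotalPhys field sits at byte offset 8. A sketch of that assumed layout:

package example

// Assumed MEMORYSTATUSEX layout behind the magic numbers above (byte offsets):
//   0  dwLength     (uint32) - set to the structure size, 64, before the call
//   4  dwMemoryLoad (uint32)
//   8  ullTotalPhys (uint64) - total physical RAM, read back little-endian
const (
	memoryStatusExSize        = 64
	memoryStatusExTotalPhysAt = 8
)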


@@ -2,6 +2,8 @@
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.
// +build !solaris,!windows
package main
import (


@@ -0,0 +1,9 @@
// +build windows solaris
package main
import "errors"
func upgrade() error {
return errors.New("Upgrade currently unsupported on Windows")
}


@@ -0,0 +1,133 @@
package main
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/json"
"net"
"net/http"
"runtime"
"strings"
"time"
"github.com/calmh/syncthing/model"
)
// Current version number of the usage report, for acceptance purposes. If
// fields are added or changed this integer must be incremented so that users
// are prompted for acceptance of the new report.
const usageReportVersion = 1
var stopUsageReportingCh = make(chan struct{})
func reportData(m *model.Model) map[string]interface{} {
res := make(map[string]interface{})
res["uniqueID"] = strings.ToLower(certID([]byte(myID)))[:6]
res["version"] = Version
res["longVersion"] = LongVersion
res["platform"] = runtime.GOOS + "-" + runtime.GOARCH
res["numRepos"] = len(cfg.Repositories)
res["numNodes"] = len(cfg.Nodes)
var totFiles, maxFiles int
var totBytes, maxBytes int64
for _, repo := range cfg.Repositories {
files, _, bytes := m.GlobalSize(repo.ID)
totFiles += files
totBytes += bytes
if files > maxFiles {
maxFiles = files
}
if bytes > maxBytes {
maxBytes = bytes
}
}
res["totFiles"] = totFiles
res["repoMaxFiles"] = maxFiles
res["totMiB"] = totBytes / 1024 / 1024
res["repoMaxMiB"] = maxBytes / 1024 / 1024
var mem runtime.MemStats
runtime.ReadMemStats(&mem)
res["memoryUsageMiB"] = mem.Sys / 1024 / 1024
var perf float64
for i := 0; i < 5; i++ {
p := cpuBench()
if p > perf {
perf = p
}
}
res["sha256Perf"] = perf
bytes, err := memorySize()
if err == nil {
res["memorySize"] = bytes / 1024 / 1024
}
return res
}
func sendUsageReport(m *model.Model) error {
d := reportData(m)
var b bytes.Buffer
json.NewEncoder(&b).Encode(d)
var client = http.DefaultClient
if BuildEnv == "android" {
// This works around the lack of DNS resolution on Android... :(
tr := &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
return net.Dial(network, "194.126.249.13:443")
},
}
client = &http.Client{Transport: tr}
}
_, err := client.Post("https://data.syncthing.net/newdata", "application/json", &b)
return err
}
func usageReportingLoop(m *model.Model) {
l.Infoln("Starting usage reporting")
t := time.NewTicker(86400 * time.Second)
loop:
for {
select {
case <-stopUsageReportingCh:
break loop
case <-t.C:
err := sendUsageReport(m)
if err != nil {
l.Infoln("Usage report:", err)
}
}
}
l.Infoln("Stopping usage reporting")
}
func stopUsageReporting() {
select {
case stopUsageReportingCh <- struct{}{}:
default:
}
}
// Returns CPU performance as a measure of single threaded SHA-256 MiB/s
func cpuBench() float64 {
chunkSize := 100 * 1 << 10
h := sha256.New()
bs := make([]byte, chunkSize)
rand.Reader.Read(bs)
t0 := time.Now()
b := 0
for time.Since(t0) < 125*time.Millisecond {
h.Write(bs)
b += chunkSize
}
h.Sum(nil)
d := time.Since(t0)
return float64(int(float64(b)/d.Seconds()/(1<<20)*100)) / 100
}
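
As an illustration outside the changeset, a minimal sketch of previewing the same data the GUI's report preview uses, by querying the /rest/report endpoint added in this changeset; the address and API key are placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://127.0.0.1:8080/rest/report", nil)
	if err != nil {
		panic(err)
	}
	// An API key skips both basic auth and the CSRF check (see the GUI changes above).
	req.Header.Set("X-API-Key", "PLACEHOLDER-API-KEY")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The same keys reportData fills in: uniqueID, version, platform,
	// numRepos, numNodes, totFiles, totMiB, sha256Perf, memorySize, ...
	var report map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", report)
}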


@@ -11,12 +11,14 @@ import (
"io"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/syncthing/logger"
"github.com/calmh/syncthing/scanner"
)
var l = logger.DefaultLogger
@@ -30,14 +32,42 @@ type Configuration struct {
XMLName xml.Name `xml:"configuration" json:"-"`
}
// SyncOrderPattern allows a user to prioritize file downloading based on a
// regular expression. If a file matches the Pattern the Priority will be
// assigned to the file. If a file matches more than one Pattern the
// Priorities are summed. This allows a user to, for example, prioritize files
// in a directory, as well as prioritize based on file type. The higher the
// priority the "sooner" a file will be downloaded. Files can be deprioritized
// by giving them a negative priority. While Priority is represented as an
// integer, the expected range is something like -1000 to 1000.
type SyncOrderPattern struct {
Pattern string `xml:"pattern,attr"`
Priority int `xml:"priority,attr"`
compiledPattern *regexp.Regexp
}
func (s *SyncOrderPattern) CompiledPattern() *regexp.Regexp {
if s.compiledPattern == nil {
re, err := regexp.Compile(s.Pattern)
if err != nil {
l.Warnln("Could not compile regexp (" + s.Pattern + "): " + err.Error())
s.compiledPattern = regexp.MustCompile("^\\0$")
} else {
s.compiledPattern = re
}
}
return s.compiledPattern
}
type RepositoryConfiguration struct {
ID string `xml:"id,attr"`
Directory string `xml:"directory,attr"`
Nodes []NodeConfiguration `xml:"node"`
ReadOnly bool `xml:"ro,attr"`
IgnorePerms bool `xml:"ignorePerms,attr"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
Versioning VersioningConfiguration `xml:"versioning"`
ID string `xml:"id,attr"`
Directory string `xml:"directory,attr"`
Nodes []NodeConfiguration `xml:"node"`
ReadOnly bool `xml:"ro,attr"`
IgnorePerms bool `xml:"ignorePerms,attr"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
Versioning VersioningConfiguration `xml:"versioning"`
SyncOrderPatterns []SyncOrderPattern `xml:"syncorder>pattern"`
nodeIDs []string
}
@@ -92,6 +122,21 @@ func (r *RepositoryConfiguration) NodeIDs() []string {
return r.nodeIDs
}
func (r RepositoryConfiguration) FileRanker() func(scanner.File) int {
if len(r.SyncOrderPatterns) <= 0 {
return nil
}
return func(f scanner.File) int {
ret := 0
for _, v := range r.SyncOrderPatterns {
if v.CompiledPattern().MatchString(f.Name) {
ret += v.Priority
}
}
return ret
}
}
type NodeConfiguration struct {
NodeID string `xml:"id,attr"`
Name string `xml:"name,attr,omitempty"`
@@ -111,7 +156,10 @@ type OptionsConfiguration struct {
MaxChangeKbps int `xml:"maxChangeKbps" default:"10000"`
StartBrowser bool `xml:"startBrowser" default:"true"`
UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
URAccepted int `xml:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)
Deprecated_UREnabled bool `xml:"urEnabled,omitempty" json:"-"`
Deprecated_URDeclined bool `xml:"urDeclined,omitempty" json:"-"`
Deprecated_ReadOnly bool `xml:"readOnly,omitempty" json:"-"`
Deprecated_GUIEnabled bool `xml:"guiEnabled,omitempty" json:"-"`
Deprecated_GUIAddress string `xml:"guiAddress,omitempty" json:"-"`
@@ -123,6 +171,23 @@ type GUIConfiguration struct {
User string `xml:"user,omitempty"`
Password string `xml:"password,omitempty"`
UseTLS bool `xml:"tls,attr"`
APIKey string `xml:"apikey,omitempty"`
}
func (cfg *Configuration) NodeMap() map[string]NodeConfiguration {
m := make(map[string]NodeConfiguration, len(cfg.Nodes))
for _, n := range cfg.Nodes {
m[n.NodeID] = n
}
return m
}
func (cfg *Configuration) RepoMap() map[string]RepositoryConfiguration {
m := make(map[string]RepositoryConfiguration, len(cfg.Repositories))
for _, r := range cfg.Repositories {
m[r.ID] = r
}
return m
}
func setDefaults(data interface{}) error {
@@ -279,6 +344,12 @@ func Load(rd io.Reader, myID string) (Configuration, error) {
}
}
if cfg.Options.Deprecated_URDeclined {
cfg.Options.URAccepted = -1
}
cfg.Options.Deprecated_URDeclined = false
cfg.Options.Deprecated_UREnabled = false
// Upgrade to v2 configuration if appropriate
if cfg.Version == 1 {
convertV1V2(&cfg)


@@ -10,6 +10,9 @@ import (
"os"
"reflect"
"testing"
"github.com/calmh/syncthing/files"
"github.com/calmh/syncthing/scanner"
)
func TestDefaultValues(t *testing.T) {
@@ -281,3 +284,96 @@ func TestStripNodeIs(t *testing.T) {
}
}
}
func TestSyncOrders(t *testing.T) {
data := []byte(`
<configuration version="2">
<node id="AAAA-BBBB-CCCC">
<address>dynamic</address>
</node>
<repository directory="~/Sync">
<syncorder>
<pattern pattern="\.jpg$" priority="1" />
</syncorder>
<node id="AAAA-BBBB-CCCC" name=""></node>
</repository>
</configuration>
`)
expected := []SyncOrderPattern{
{
Pattern: "\\.jpg$",
Priority: 1,
},
}
cfg, err := Load(bytes.NewReader(data), "n4")
if err != nil {
t.Error(err)
}
for i := range expected {
if !reflect.DeepEqual(cfg.Repositories[0].SyncOrderPatterns[i], expected[i]) {
t.Errorf("Nodes[%d] differ;\n E: %#v\n A: %#v", i, expected[i], cfg.Repositories[0].SyncOrderPatterns[i])
}
}
}
func TestFileSorter(t *testing.T) {
rcfg := RepositoryConfiguration{
SyncOrderPatterns: []SyncOrderPattern{
{"\\.jpg$", 10, nil},
{"\\.mov$", 5, nil},
{"^camera-uploads", 100, nil},
},
}
f := []scanner.File{
{Name: "bar.mov"},
{Name: "baz.txt"},
{Name: "foo.jpg"},
{Name: "frew/foo.jpg"},
{Name: "frew/lol.go"},
{Name: "frew/rofl.copter"},
{Name: "frew/bar.mov"},
{Name: "camera-uploads/foo.jpg"},
{Name: "camera-uploads/hurr.pl"},
{Name: "camera-uploads/herp.mov"},
{Name: "camera-uploads/wee.txt"},
}
files.SortBy(rcfg.FileRanker()).Sort(f)
expected := []scanner.File{
{Name: "camera-uploads/foo.jpg"},
{Name: "camera-uploads/herp.mov"},
{Name: "camera-uploads/hurr.pl"},
{Name: "camera-uploads/wee.txt"},
{Name: "foo.jpg"},
{Name: "frew/foo.jpg"},
{Name: "bar.mov"},
{Name: "frew/bar.mov"},
{Name: "frew/lol.go"},
{Name: "baz.txt"},
{Name: "frew/rofl.copter"},
}
if !reflect.DeepEqual(f, expected) {
t.Errorf(
"\n\nexpected:\n" +
formatFiles(expected) + "\n" +
"got:\n" +
formatFiles(f) + "\n\n",
)
}
}
func formatFiles(f []scanner.File) string {
ret := ""
for _, v := range f {
ret += " " + v.Name + "\n"
}
return ret
}


@@ -14,7 +14,6 @@ import (
"time"
"github.com/calmh/syncthing/beacon"
"github.com/calmh/syncthing/buffers"
)
type Discoverer struct {
@@ -329,11 +328,8 @@ func (d *Discoverer) externalLookup(node string) []string {
}
return nil
}
buffers.Put(buf)
buf = buffers.Get(2048)
defer buffers.Put(buf)
buf = make([]byte, 2048)
n, err := conn.Read(buf)
if err != nil {
if err, ok := err.(net.Error); ok && err.Timeout() {


@@ -120,7 +120,12 @@ func (m *Set) Need(id uint) []scanner.File {
continue
}
if gk.newerThan(rkID[gk.Name]) {
if rk, ok := rkID[gk.Name]; gk.newerThan(rk) {
if protocol.IsDeleted(gf.File.Flags) && (!ok || protocol.IsDeleted(m.files[rk].File.Flags)) {
// We don't need to delete files we don't have or that are already deleted
continue
}
fs = append(fs, gf.File)
}
}
@@ -297,6 +302,9 @@ func (m *Set) replace(cid uint, fs []scanner.File) {
if na != 0 {
// Someone had the file
f := m.files[nk]
f.Global = true
m.files[nk] = f
m.globalKey[n] = nk
m.globalAvailability[n] = na
} else {


@@ -2,7 +2,7 @@
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.
package files
package files_test
import (
"fmt"
@@ -11,6 +11,7 @@ import (
"testing"
"github.com/calmh/syncthing/cid"
"github.com/calmh/syncthing/files"
"github.com/calmh/syncthing/lamport"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
@@ -31,7 +32,7 @@ func (l fileList) Swap(a, b int) {
}
func TestGlobalSet(t *testing.T) {
m := NewSet()
m := files.NewSet()
local := []scanner.File{
scanner.File{Name: "a", Version: 1000},
@@ -40,7 +41,15 @@ func TestGlobalSet(t *testing.T) {
scanner.File{Name: "d", Version: 1000},
}
remote := []scanner.File{
remote0 := []scanner.File{
scanner.File{Name: "a", Version: 1000},
scanner.File{Name: "c", Version: 1002},
}
remote1 := []scanner.File{
scanner.File{Name: "b", Version: 1001},
scanner.File{Name: "e", Version: 1000},
}
remoteTot := []scanner.File{
scanner.File{Name: "a", Version: 1000},
scanner.File{Name: "b", Version: 1001},
scanner.File{Name: "c", Version: 1002},
@@ -55,25 +64,86 @@ func TestGlobalSet(t *testing.T) {
scanner.File{Name: "e", Version: 1000},
}
expectedLocalNeed := []scanner.File{
scanner.File{Name: "b", Version: 1001},
scanner.File{Name: "c", Version: 1002},
scanner.File{Name: "e", Version: 1000},
}
expectedRemoteNeed := []scanner.File{
scanner.File{Name: "d", Version: 1000},
}
m.ReplaceWithDelete(cid.LocalID, local)
m.Replace(1, remote)
m.Replace(1, remote0)
m.Update(1, remote1)
g := m.Global()
sort.Sort(fileList(g))
sort.Sort(fileList(expectedGlobal))
if !reflect.DeepEqual(g, expectedGlobal) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
}
if lb := len(m.files); lb != 7 {
t.Errorf("Num files incorrect %d != 7\n%v", lb, m.files)
h := m.Have(cid.LocalID)
sort.Sort(fileList(h))
if !reflect.DeepEqual(h, local) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, local)
}
h = m.Have(1)
sort.Sort(fileList(h))
if !reflect.DeepEqual(h, remoteTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
}
n := m.Need(cid.LocalID)
sort.Sort(fileList(n))
if !reflect.DeepEqual(n, expectedLocalNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
}
n = m.Need(1)
sort.Sort(fileList(n))
if !reflect.DeepEqual(n, expectedRemoteNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
}
f := m.Get(cid.LocalID, "b")
if !reflect.DeepEqual(f, local[1]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, local[1])
}
f = m.Get(1, "b")
if !reflect.DeepEqual(f, remote1[0]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
f = m.GetGlobal("b")
if !reflect.DeepEqual(f, remote1[0]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
a := int(m.Availability("a"))
if av := 1<<0 + 1<<1; a != av {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
}
a = int(m.Availability("b"))
if av := 1 << 1; a != av {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
}
a = int(m.Availability("d"))
if av := 1 << 0; a != av {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
}
}
func TestLocalDeleted(t *testing.T) {
m := NewSet()
m := files.NewSet()
lamport.Default = lamport.Clock{}
local1 := []scanner.File{
@@ -151,7 +221,7 @@ func Benchmark10kReplace(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m := NewSet()
m := files.NewSet()
m.ReplaceWithDelete(cid.LocalID, local)
}
}
@@ -162,7 +232,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m := NewSet()
m := files.NewSet()
m.Replace(1, remote)
var local []scanner.File
@@ -189,7 +259,7 @@ func Benchmark10kUpdateSme(b *testing.B) {
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m := NewSet()
m := files.NewSet()
m.Replace(1, remote)
var local []scanner.File
@@ -211,7 +281,7 @@ func Benchmark10kNeed2k(b *testing.B) {
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m := NewSet()
m := files.NewSet()
m.Replace(cid.LocalID+1, remote)
var local []scanner.File
@@ -239,7 +309,7 @@ func Benchmark10kHave(b *testing.B) {
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m := NewSet()
m := files.NewSet()
m.Replace(cid.LocalID+1, remote)
var local []scanner.File
@@ -267,7 +337,7 @@ func Benchmark10kGlobal(b *testing.B) {
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
}
m := NewSet()
m := files.NewSet()
m.Replace(cid.LocalID+1, remote)
var local []scanner.File
@@ -290,7 +360,7 @@ func Benchmark10kGlobal(b *testing.B) {
}
func TestGlobalReset(t *testing.T) {
m := NewSet()
m := files.NewSet()
local := []scanner.File{
scanner.File{Name: "a", Version: 1000},
@@ -306,28 +376,27 @@ func TestGlobalReset(t *testing.T) {
scanner.File{Name: "e", Version: 1000},
}
expectedGlobalKey := map[string]key{
"a": keyFor(local[0]),
"b": keyFor(local[1]),
"c": keyFor(local[2]),
"d": keyFor(local[3]),
m.ReplaceWithDelete(cid.LocalID, local)
g := m.Global()
sort.Sort(fileList(g))
if !reflect.DeepEqual(g, local) {
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
}
m.ReplaceWithDelete(cid.LocalID, local)
m.Replace(1, remote)
m.Replace(1, nil)
if !reflect.DeepEqual(m.globalKey, expectedGlobalKey) {
t.Errorf("Global incorrect;\n%v !=\n%v", m.globalKey, expectedGlobalKey)
}
g = m.Global()
sort.Sort(fileList(g))
if lb := len(m.files); lb != 4 {
t.Errorf("Num files incorrect %d != 4\n%v", lb, m.files)
if !reflect.DeepEqual(g, local) {
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
}
}
func TestNeed(t *testing.T) {
m := NewSet()
m := files.NewSet()
local := []scanner.File{
scanner.File{Name: "a", Version: 1000},
@@ -363,7 +432,7 @@ func TestNeed(t *testing.T) {
}
func TestChanges(t *testing.T) {
m := NewSet()
m := files.NewSet()
local1 := []scanner.File{
scanner.File{Name: "a", Version: 1000},

34
files/sort.go Normal file

@@ -0,0 +1,34 @@
package files
import (
"sort"
"github.com/calmh/syncthing/scanner"
)
type SortBy func(p scanner.File) int
func (by SortBy) Sort(files []scanner.File) {
ps := &fileSorter{
files: files,
by: by,
}
sort.Sort(ps)
}
type fileSorter struct {
files []scanner.File
by func(p1 scanner.File) int
}
func (s *fileSorter) Len() int {
return len(s.files)
}
func (s *fileSorter) Swap(i, j int) {
s.files[i], s.files[j] = s.files[j], s.files[i]
}
func (s *fileSorter) Less(i, j int) bool {
return s.by(s.files[i]) > s.by(s.files[j])
}

BIN
files/testdata/index.db vendored Normal file

Binary file not shown.


@@ -10,6 +10,11 @@
var syncthing = angular.module('syncthing', []);
var urlbase = 'rest';
syncthing.config(function ($httpProvider) {
$httpProvider.defaults.xsrfHeaderName = 'X-CSRF-Token';
$httpProvider.defaults.xsrfCookieName = 'CSRF-Token';
});
syncthing.controller('SyncthingCtrl', function ($scope, $http) {
var prevDate = 0;
var getOK = true;
@@ -25,21 +30,37 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.seenError = '';
$scope.model = {};
$scope.repos = {};
$scope.reportData = {};
$scope.reportPreview = false;
$scope.needActions = {
'rm': 'Del',
'rmdir': 'Del (dir)',
'sync': 'Sync',
'touch': 'Update',
}
$scope.needIcons = {
'rm': 'remove',
'rmdir': 'remove',
'sync': 'download',
'touch': 'asterisk',
}
// Strings before bools look better
$scope.settings = [
{id: 'ListenStr', descr: 'Sync Protocol Listen Addresses', type: 'text', restart: true},
{id: 'MaxSendKbps', descr: 'Outgoing Rate Limit (KiB/s)', type: 'number', restart: true},
{id: 'RescanIntervalS', descr: 'Rescan Interval (s)', type: 'number', restart: true},
{id: 'ReconnectIntervalS', descr: 'Reconnect Interval (s)', type: 'number', restart: true},
{id: 'ParallelRequests', descr: 'Max Outstanding Requests', type: 'number', restart: true},
{id: 'MaxChangeKbps', descr: 'Max File Change Rate (KiB/s)', type: 'number', restart: true},
{id: 'ListenStr', descr: 'Sync Protocol Listen Addresses', type: 'text'},
{id: 'MaxSendKbps', descr: 'Outgoing Rate Limit (KiB/s)', type: 'number'},
{id: 'RescanIntervalS', descr: 'Rescan Interval (s)', type: 'number'},
{id: 'ReconnectIntervalS', descr: 'Reconnect Interval (s)', type: 'number'},
{id: 'ParallelRequests', descr: 'Max Outstanding Requests', type: 'number'},
{id: 'MaxChangeKbps', descr: 'Max File Change Rate (KiB/s)', type: 'number'},
{id: 'GlobalAnnEnabled', descr: 'Global Discovery', type: 'bool', restart: true},
{id: 'LocalAnnEnabled', descr: 'Local Discovery', type: 'bool', restart: true},
{id: 'LocalAnnPort', descr: 'Local Discovery Port', type: 'number', restart: true},
{id: 'LocalAnnPort', descr: 'Local Discovery Port', type: 'number'},
{id: 'LocalAnnEnabled', descr: 'Local Discovery', type: 'bool'},
{id: 'GlobalAnnEnabled', descr: 'Global Discovery', type: 'bool'},
{id: 'StartBrowser', descr: 'Start Browser', type: 'bool'},
{id: 'UPnPEnabled', descr: 'Enable UPnP', type: 'bool'},
{id: 'UREnabled', descr: 'Anonymous Usage Reporting', type: 'bool'},
];
$scope.guiSettings = [
@@ -47,6 +68,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
{id: 'User', descr: 'GUI Authentication User', type: 'text', restart: true},
{id: 'Password', descr: 'GUI Authentication Password', type: 'password', restart: true},
{id: 'UseTLS', descr: 'Use HTTPS for GUI', type: 'bool', restart: true},
{id: 'APIKey', descr: 'API Key', type: 'apikey'},
];
function getSucceeded() {
@@ -81,9 +103,20 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
getFailed();
});
Object.keys($scope.repos).forEach(function (id) {
$http.get(urlbase + '/model?repo=' + encodeURIComponent(id)).success(function (data) {
$scope.model[id] = data;
});
if (typeof $scope.model[id] === 'undefined') {
// Never fetched before
$http.get(urlbase + '/model?repo=' + encodeURIComponent(id)).success(function (data) {
$scope.model[id] = data;
});
} else {
$http.get(urlbase + '/model/version?repo=' + encodeURIComponent(id)).success(function (data) {
if (data.version > $scope.model[id].version) {
$http.get(urlbase + '/model?repo=' + encodeURIComponent(id)).success(function (data) {
$scope.model[id] = data;
});
}
});
}
});
$http.get(urlbase + '/connections').success(function (data) {
var now = Date.now(),
@@ -257,28 +290,45 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.editSettings = function () {
// Make a working copy
$scope.config.workingOptions = angular.copy($scope.config.Options);
$scope.config.workingGUI = angular.copy($scope.config.GUI);
$scope.tmpOptions = angular.copy($scope.config.Options);
$scope.tmpOptions.UREnabled = ($scope.tmpOptions.URAccepted > 0);
$scope.tmpGUI = angular.copy($scope.config.GUI);
$('#settings').modal({backdrop: 'static', keyboard: true});
};
$scope.saveConfig = function() {
var cfg = JSON.stringify($scope.config);
var opts = {headers: {'Content-Type': 'application/json'}};
$http.post(urlbase + '/config', cfg, opts).success(function () {
$http.get(urlbase + '/config/sync').success(function (data) {
$scope.configInSync = data.configInSync;
});
});
};
$scope.saveSettings = function () {
// Make sure something changed
var changed = ! angular.equals($scope.config.Options, $scope.config.workingOptions) ||
! angular.equals($scope.config.GUI, $scope.config.workingGUI);
if(changed){
// see if protocol will need to be changed on restart
if($scope.config.GUI.UseTLS !== $scope.config.workingGUI.UseTLS){
var changed = !angular.equals($scope.config.Options, $scope.tmpOptions) ||
!angular.equals($scope.config.GUI, $scope.tmpGUI);
if (changed) {
// Check if usage reporting has been enabled or disabled
if ($scope.tmpOptions.UREnabled && $scope.tmpOptions.URAccepted <= 0) {
$scope.tmpOptions.URAccepted = 1000;
} else if (!$scope.tmpOptions.UREnabled && $scope.tmpOptions.URAccepted > 0){
$scope.tmpOptions.URAccepted = -1;
}
// Check if protocol will need to be changed on restart
if($scope.config.GUI.UseTLS !== $scope.tmpGUI.UseTLS){
$scope.protocolChanged = true;
}
// Apply new settings locally
$scope.config.Options = angular.copy($scope.config.workingOptions);
$scope.config.GUI = angular.copy($scope.config.workingGUI);
$scope.configInSync = false;
$scope.config.Options = angular.copy($scope.tmpOptions);
$scope.config.GUI = angular.copy($scope.tmpGUI);
$scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) { return x.trim(); });
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$scope.saveConfig();
}
$('#settings').modal("hide");
@@ -352,14 +402,12 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
});
}
$scope.configInSync = false;
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$scope.saveConfig();
};
$scope.saveNode = function () {
var nodeCfg, done, i;
$scope.configInSync = false;
$('#editNode').modal('hide');
nodeCfg = $scope.currentNode;
nodeCfg.NodeID = nodeCfg.NodeID.replace(/ /g, '').replace(/-/g, '').toUpperCase().trim();
@@ -381,7 +429,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.nodes.sort(nodeCompare);
$scope.config.Nodes = $scope.nodes;
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$scope.saveConfig();
};
$scope.otherNodes = function () {
@@ -456,7 +504,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.saveRepo = function () {
var repoCfg, done, i;
$scope.configInSync = false;
$('#editRepo').modal('hide');
repoCfg = $scope.currentRepo;
repoCfg.Nodes = [];
@@ -484,7 +531,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.repos[repoCfg.ID] = repoCfg;
$scope.config.Repositories = repoList($scope.repos);
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$scope.saveConfig();
};
$scope.sharesRepo = function(repoCfg) {
@@ -505,8 +552,11 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
delete $scope.repos[$scope.currentRepo.ID];
$scope.config.Repositories = repoList($scope.repos);
$scope.configInSync = false;
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$scope.saveConfig();
};
$scope.setAPIKey = function (cfg) {
cfg.APIKey = randomString(30, 32);
};
$scope.init = function() {
@@ -529,11 +579,75 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.repos = repoMap($scope.config.Repositories);
$scope.refresh();
if ($scope.config.Options.URAccepted == 0) {
// If usage reporting has been neither accepted nor declined,
// we want to ask the user to make a choice. But we don't want
// to bug them during initial setup, so we set a cookie with
// the time of the first visit. When that cookie is present
// and the time is more than four hours ago, we ask the
// question.
var firstVisit = document.cookie.replace(/(?:(?:^|.*;\s*)firstVisit\s*\=\s*([^;]*).*$)|^.*$/, "$1");
if (!firstVisit) {
document.cookie = "firstVisit=" + Date.now() + ";max-age=" + 30*24*3600;
} else {
if (+firstVisit < Date.now() - 4*3600*1000){
$('#ur').modal({backdrop: 'static', keyboard: false});
}
}
}
});
$http.get(urlbase + '/config/sync').success(function (data) {
$scope.configInSync = data.configInSync;
});
$http.get(urlbase + '/report').success(function (data) {
$scope.reportData = data;
});
};
$scope.acceptUR = function () {
$scope.config.Options.URAccepted = 1000; // Larger than the largest existing report version
$scope.saveConfig();
$('#ur').modal('hide');
};
$scope.declineUR = function () {
$scope.config.Options.URAccepted = -1;
$scope.saveConfig();
$('#ur').modal('hide');
};
$scope.showNeed = function (repo) {
$scope.neededLoaded = false;
$('#needed').modal({backdrop: 'static', keyboard: true});
$http.get(urlbase + "/need?repo=" + encodeURIComponent(repo)).success(function (data) {
$scope.needed = data;
$scope.neededLoaded = true;
});
};
$scope.needAction = function (file) {
var fDelete = 4096;
var fDirectory = 16384;
if ((file.Flags & (fDelete+fDirectory)) === fDelete+fDirectory) {
return 'rmdir';
} else if ((file.Flags & fDelete) === fDelete) {
return 'rm';
} else if ((file.Flags & fDirectory) === fDirectory) {
return 'touch';
} else {
return 'sync';
}
};
$scope.override = function (repo) {
$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo)).success(function () {
$scope.refresh();
});
};
$scope.init();
@@ -588,6 +702,18 @@ function decimals(val, num) {
return decs;
}
function randomString(len, bits)
{
bits = bits || 36;
var outStr = "", newStr;
while (outStr.length < len)
{
newStr = Math.random().toString(bits).slice(2);
outStr += newStr.slice(0, Math.min(newStr.length, (len - outStr.length)));
}
return outStr.toUpperCase();
}
syncthing.filter('natural', function () {
return function (input, valid) {
return input.toFixed(decimals(input, valid));
@@ -674,6 +800,18 @@ syncthing.filter('shortPath', function () {
};
});
syncthing.filter('basename', function () {
return function (input) {
if (input === undefined)
return "";
var parts = input.split(/[\/\\]/);
if (!parts || parts.length < 1) {
return input;
}
return parts[parts.length-1];
};
});
syncthing.filter('clean', function () {
return function (input) {
return encodeURIComponent(input).replace(/%/g, '');


@@ -65,7 +65,7 @@ found in the LICENSE file.
}
.table th {
white-space:nowrap;
white-space: nowrap;
font-weight: 400;
}
@@ -73,6 +73,10 @@ found in the LICENSE file.
padding-left: 20px !important;
}
.table td.small-data {
white-space: nowrap;
}
@media (max-width:767px) {
.table-responsive>.table>tbody>tr>td {
/* revert a bootstrap setting e.g.:
@@ -168,15 +172,18 @@ found in the LICENSE file.
</tr>
<tr>
<th><span class="glyphicon glyphicon-globe"></span>&emsp;Global Repository</th>
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} files, {{model[repo.ID].globalBytes | binary}}B</td>
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} items, {{model[repo.ID].globalBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-home"></span>&emsp;Local Repository</th>
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} files, {{model[repo.ID].localBytes | binary}}B</td>
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} items, {{model[repo.ID].localBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-cloud-download"></span>&emsp;Out of Sync</th>
<td class="text-right">{{model[repo.ID].needFiles | alwaysNumber}} files, {{model[repo.ID].needBytes | binary}}B</td>
<td class="text-right">
<a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} items, {{model[repo.ID].needBytes | binary}}B</a>
<span ng-if="model[repo.ID].needFiles == 0">0 items, 0 B</span>
</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-lock"></span>&emsp;Master Repository</th>
@@ -199,7 +206,10 @@ found in the LICENSE file.
</tbody>
</table>
</div>
<span class="pull-right"><a class="btn btn-sm btn-primary" href="" ng-click="editRepo(repo)"><span class="glyphicon glyphicon-pencil"></span>&emsp;Edit</a></span>
<span class="pull-right">
<a class="btn btn-sm btn-primary" href="" ng-click="editRepo(repo)"><span class="glyphicon glyphicon-pencil"></span>&emsp;Edit</a>
<a class="btn btn-sm btn-danger" ng-if="repo.ReadOnly && model[repo.ID].needFiles > 0" ng-click="override(repo.ID)" href=""><span class="glyphicon glyphicon-upload"></span>&emsp;Override Changes</a>
</span>
</div>
</div>
</div>
@@ -408,9 +418,7 @@ found in the LICENSE file.
</h4>
</div>
<div class="modal-body">
<div class="well well-sm text-monospace text-center">
{{myID | chunkID}}
</div>
<div class="well well-sm text-monospace text-center">{{myID | chunkID}}</div>
<img ng-if="myID" class="center-block img-thumbnail" src="qr/{{myID | chunkID}}"/>
</div>
<div class="modal-footer">
@@ -444,7 +452,7 @@ found in the LICENSE file.
</p>
</div>
<div class="form-group">
<label for="name">Name</label>
<label for="name">Node Name</label>
<input placeholder="Home Server" id="name" class="form-control" type="text" ng-model="currentNode.Name"></input>
<p class="help-block">Shown instead of Node ID in the cluster status.</p>
</div>
@@ -516,7 +524,7 @@ found in the LICENSE file.
<p class="help-block">File permission bits are ignored when looking for changes. Use on FAT filesystems.</p>
</div>
<div class="form-group">
<label for="nodes">Nodes</label>
<label for="nodes">Share With Nodes</label>
<div class="checkbox" ng-repeat="node in otherNodes()">
<label>
<input type="checkbox" ng-model="currentRepo.selectedNodes[node.NodeID]"> {{nodeName(node)}}
@@ -566,7 +574,7 @@ found in the LICENSE file.
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<h4 class="modal-title"> Settings</h4>
<h4 class="modal-title">Settings</h4>
</div>
<div class="modal-body">
<form role="form">
@@ -575,11 +583,11 @@ found in the LICENSE file.
<div class="form-group" ng-repeat="setting in settings">
<div ng-if="setting.type == 'text' || setting.type == 'number'">
<label for="{{setting.id}}">{{setting.descr}}</label>
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="config.workingOptions[setting.id]"></input>
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="tmpOptions[setting.id]"></input>
</div>
<div class="checkbox" ng-if="setting.type == 'bool'">
<label>
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="config.workingOptions[setting.id]"></input>
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="tmpOptions[setting.id]"></input>
</label>
</div>
</div>
@@ -588,13 +596,18 @@ found in the LICENSE file.
<div class="form-group" ng-repeat="setting in guiSettings">
<div ng-if="setting.type == 'text' || setting.type == 'number' || setting.type == 'password'">
<label for="{{setting.id}}">{{setting.descr}}</label>
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="config.workingGUI[setting.id]"></input>
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="tmpGUI[setting.id]"></input>
</div>
<div class="checkbox" ng-if="setting.type == 'bool'">
<label>
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="config.workingGUI[setting.id]"></input>
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="tmpGUI[setting.id]"></input>
</label>
</div>
<div ng-if="setting.type == 'apikey'">
<label>{{setting.descr}} (<a href="http://discourse.syncthing.net/t/v0-8-14-api-keys/335">Usage</a>)</label>
<div class="well well-sm text-monospace">{{tmpGUI[setting.id] || "-"}}</div>
<button type="button" class="btn btn-sm btn-default" ng-click="setAPIKey(tmpGUI)">Generate</button>
</div>
</div>
</div>
</div>
@@ -608,6 +621,56 @@ found in the LICENSE file.
</div>
</div>
<!-- Usage report modal -->
<div id="ur" class="modal fade">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header alert alert-success">
<h4 class="modal-title">Allow Anonymous Usage Reporting?</h4>
</div>
<div class="modal-body">
<p>
The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.
</p>
<p>
The aggregated statistics are publicly available at <a href="https://data.syncthing.net/">https://data.syncthing.net/</a>.
</p>
<button type="button" class="btn btn-default" ng-show="!reportPreview" ng-click="reportPreview = true">Preview Usage Report</button>
<pre ng-if="reportPreview"><small>{{reportData | json}}</small></pre>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-success" ng-click="acceptUR()"><span class="glyphicon glyphicon-ok"></span>&emsp;Yes</button>
<button type="button" class="btn btn-danger" ng-click="declineUR()"><span class="glyphicon glyphicon-remove"></span>&emsp;No</button>
</div>
</div>
</div>
</div>
<!-- Needed files modal -->
<div id="needed" class="modal fade">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header alert alert-info">
<h4 class="modal-title">Out of Sync Items</h4>
</div>
<div class="modal-body">
<table class="table table-striped table-condensed">
<tr ng-repeat="f in needed" ng-init="a = needAction(f)">
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
<td title="{{f.Name}}">{{f.Name | basename}}</td>
<td class="text-right small-data"><span ng-if="f.Size > 0">{{f.Size | binary}}B</span></td>
</tr>
</table>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span>&emsp;Close</button>
</div>
</div>
</div>
</div>
<script src="angular.min.js"></script>
<script src="jquery-2.0.3.min.js"></script>

View File

@@ -12,3 +12,6 @@ json
*.idx.gz
dirs-*
*.out
csrftokens.txt
s4d
http

View File

@@ -1,27 +1,32 @@
<configuration version="1">
<repository directory="s1" ro="true">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
<address>127.0.0.1:22002</address>
</node>
<configuration version="2">
<repository id="default" directory="s1" ro="true" ignorePerms="false">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
<syncorder></syncorder>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
<address>127.0.0.1:22002</address>
</node>
<gui enabled="true" tls="false">
<address>127.0.0.1:8081</address>
<apikey>abc123</apikey>
</gui>
<options>
<listenAddress>127.0.0.1:22001</listenAddress>
<readOnly>true</readOnly>
<allowDelete>true</allowDelete>
<followSymlinks>true</followSymlinks>
<guiEnabled>true</guiEnabled>
<guiAddress>127.0.0.1:8081</guiAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<localAnnouncePort>21025</localAnnouncePort>
<parallelRequests>16</parallelRequests>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>10</rescanIntervalS>
<reconnectionIntervalS>5</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>true</upnpEnabled>
</options>
</configuration>

View File

@@ -1,27 +1,32 @@
<configuration version="1">
<repository directory="s2">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
<address>127.0.0.1:22002</address>
</node>
<configuration version="2">
<repository id="default" directory="s2" ro="false" ignorePerms="false">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
<syncorder></syncorder>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
<address>127.0.0.1:22002</address>
</node>
<gui enabled="true" tls="false">
<address>127.0.0.1:8082</address>
<apikey>abc123</apikey>
</gui>
<options>
<listenAddress>127.0.0.1:22002</listenAddress>
<readOnly>false</readOnly>
<allowDelete>true</allowDelete>
<followSymlinks>true</followSymlinks>
<guiEnabled>true</guiEnabled>
<guiAddress>127.0.0.1:8082</guiAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<localAnnouncePort>21025</localAnnouncePort>
<parallelRequests>16</parallelRequests>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>15</rescanIntervalS>
<reconnectionIntervalS>5</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>true</upnpEnabled>
</options>
</configuration>

View File

@@ -20,7 +20,7 @@ start() {
stop() {
echo "Stopping..."
for i in 1 2 ; do
curl -X POST "http://localhost:808$i/rest/shutdown"
curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
done
}
@@ -39,8 +39,8 @@ setup() {
testConvergence() {
while true ; do
sleep 5
s1comp=$(curl -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
s2comp=$(curl -s "http://localhost:8081/rest/connections" | ./json "$id2/Completion")
s1comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
s2comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/connections" | ./json "$id2/Completion")
s1comp=${s1comp:-0}
s2comp=${s2comp:-0}
tot=$(($s1comp + $s2comp))

View File

@@ -1,41 +1,45 @@
<configuration version="1">
<repository directory="s1">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4">
<address>127.0.0.1:22004</address>
</node>
<configuration version="2">
<repository id="default" directory="s1" ro="false" ignorePerms="false">
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
</repository>
<repository id="s12" directory="s12-1">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<repository id="s12" directory="s12-1" ro="false" ignorePerms="false">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4">
<address>127.0.0.1:22004</address>
</node>
<gui enabled="true" tls="false">
<address>127.0.0.1:8081</address>
<apikey>abc123</apikey>
<user>testuser</user>
<password>testpass</password>
</gui>
<options>
<listenAddress>127.0.0.1:22001</listenAddress>
<readOnly>false</readOnly>
<allowDelete>true</allowDelete>
<followSymlinks>true</followSymlinks>
<guiEnabled>true</guiEnabled>
<guiAddress>127.0.0.1:8081</guiAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<localAnnouncePort>21025</localAnnouncePort>
<parallelRequests>16</parallelRequests>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>10</rescanIntervalS>
<reconnectionIntervalS>5</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>true</upnpEnabled>
</options>
</configuration>

View File

@@ -1,46 +1,45 @@
<configuration version="1">
<repository directory="s2">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<configuration version="2">
<repository id="default" directory="s2" ro="false" ignorePerms="false">
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
</repository>
<repository id="s12" directory="s12-2">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<repository id="s12" directory="s12-2" ro="false" ignorePerms="false">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
</repository>
<repository id="s23" directory="s23-2">
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<repository id="s23" directory="s23-2" ro="false" ignorePerms="false">
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<gui enabled="true" tls="false">
<address>127.0.0.1:8082</address>
<apikey>abc123</apikey>
</gui>
<options>
<listenAddress>127.0.0.1:22002</listenAddress>
<readOnly>false</readOnly>
<allowDelete>true</allowDelete>
<followSymlinks>true</followSymlinks>
<guiEnabled>true</guiEnabled>
<guiAddress>127.0.0.1:8082</guiAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<localAnnouncePort>21025</localAnnouncePort>
<parallelRequests>16</parallelRequests>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>15</rescanIntervalS>
<reconnectionIntervalS>5</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>true</upnpEnabled>
</options>
</configuration>

View File

@@ -1,38 +1,40 @@
<configuration version="1">
<repository directory="s3">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<configuration version="2">
<repository id="default" directory="s3" ro="false" ignorePerms="false">
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
</repository>
<repository id="s23" directory="s23-3">
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<repository id="s23" directory="s23-3" ro="false" ignorePerms="false">
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
<versioning></versioning>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<gui enabled="true" tls="false">
<address>127.0.0.1:8083</address>
<apikey>abc123</apikey>
</gui>
<options>
<listenAddress>127.0.0.1:22003</listenAddress>
<readOnly>false</readOnly>
<allowDelete>true</allowDelete>
<followSymlinks>true</followSymlinks>
<guiEnabled>true</guiEnabled>
<guiAddress>127.0.0.1:8083</guiAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<localAnnouncePort>21025</localAnnouncePort>
<parallelRequests>16</parallelRequests>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>20</rescanIntervalS>
<reconnectionIntervalS>5</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>true</upnpEnabled>
</options>
</configuration>

View File

@@ -5,6 +5,9 @@
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3"></node>
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4"></node>
</repository>
<repository id="default" directory="s4d" ro="false">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1"></node>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
@@ -19,6 +22,7 @@
</node>
<gui enabled="true">
<address>127.0.0.1:8084</address>
<apikey>abc123</apikey>
</gui>
<options>
<listenAddress>:22004</listenAddress>

integration/http.go (new file, 232 lines)
View File

@@ -0,0 +1,232 @@
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.
// +build ignore
package main
import (
"bufio"
"flag"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"testing"
)
var (
target string
authUser string
authPass string
csrfToken string
csrfFile string
apiKey string
)
var jsonEndpoints = []string{
"/rest/model?repo=default",
"/rest/model/version?repo=default",
"/rest/need",
"/rest/connections",
"/rest/config",
"/rest/config/sync",
"/rest/system",
"/rest/errors",
// "/rest/discovery",
"/rest/report",
}
func main() {
flag.StringVar(&target, "target", "localhost:8080", "Test target")
flag.StringVar(&authUser, "user", "", "Username")
flag.StringVar(&authPass, "pass", "", "Password")
flag.StringVar(&csrfFile, "csrf", "", "CSRF token file")
flag.StringVar(&apiKey, "api", "", "API key")
flag.Parse()
if len(csrfFile) > 0 {
fd, err := os.Open(csrfFile)
if err != nil {
log.Fatal(err)
}
s := bufio.NewScanner(fd)
for s.Scan() {
csrfToken = s.Text()
}
fd.Close()
}
var tests []testing.InternalTest
tests = append(tests, testing.InternalTest{"TestGetIndex", TestGetIndex})
tests = append(tests, testing.InternalTest{"TestGetVersion", TestGetVersion})
tests = append(tests, testing.InternalTest{"TestGetVersionNoCSRF", TestGetVersion})
tests = append(tests, testing.InternalTest{"TestJSONEndpoints", TestJSONEndpoints})
if len(authUser) > 0 || len(apiKey) > 0 {
tests = append(tests, testing.InternalTest{"TestJSONEndpointsNoAuth", TestJSONEndpointsNoAuth})
tests = append(tests, testing.InternalTest{"TestJSONEndpointsIncorrectAuth", TestJSONEndpointsIncorrectAuth})
}
if len(csrfToken) > 0 {
tests = append(tests, testing.InternalTest{"TestJSONEndpointsNoCSRF", TestJSONEndpointsNoCSRF})
}
testing.Main(matcher, tests, nil, nil)
}
func matcher(s0, s1 string) (bool, error) {
return true, nil
}
func TestGetIndex(t *testing.T) {
res, err := get("/index.html")
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Errorf("Status %d != 200", res.StatusCode)
}
if res.ContentLength < 1024 {
t.Errorf("Length %d < 1024", res.ContentLength)
}
res.Body.Close()
res, err = get("/")
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Errorf("Status %d != 200", res.StatusCode)
}
if res.ContentLength < 1024 {
t.Errorf("Length %d < 1024", res.ContentLength)
}
res.Body.Close()
}
func TestGetVersion(t *testing.T) {
res, err := get("/rest/version")
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Status %d != 200", res.StatusCode)
}
ver, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if !regexp.MustCompile(`v\d+\.\d+\.\d+`).Match(ver) {
t.Errorf("Invalid version %q", ver)
}
}
func TestGetVersionNoCSRF(t *testing.T) {
r, err := http.NewRequest("GET", "http://"+target+"/rest/version", nil)
if err != nil {
t.Fatal(err)
}
if len(authUser) > 0 {
r.SetBasicAuth(authUser, authPass)
}
res, err := http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 403 {
t.Fatalf("Status %d != 403", res.StatusCode)
}
}
func TestJSONEndpoints(t *testing.T) {
for _, p := range jsonEndpoints {
res, err := get(p)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Errorf("Status %d != 200 for %q", res.StatusCode, p)
}
if ct := res.Header.Get("Content-Type"); ct != "application/json; charset=utf-8" {
t.Errorf("Content-Type %q != \"application/json\" for %q", ct, p)
}
}
}
func TestJSONEndpointsNoCSRF(t *testing.T) {
for _, p := range jsonEndpoints {
r, err := http.NewRequest("GET", "http://"+target+p, nil)
if err != nil {
t.Fatal(err)
}
if len(authUser) > 0 {
r.SetBasicAuth(authUser, authPass)
}
res, err := http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 403 && res.StatusCode != 401 {
t.Fatalf("Status %d != 403/401 for %q", res.StatusCode, p)
}
}
}
func TestJSONEndpointsNoAuth(t *testing.T) {
for _, p := range jsonEndpoints {
r, err := http.NewRequest("GET", "http://"+target+p, nil)
if err != nil {
t.Fatal(err)
}
if len(csrfToken) > 0 {
r.Header.Set("X-CSRF-Token", csrfToken)
}
res, err := http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 403 && res.StatusCode != 401 {
t.Fatalf("Status %d != 403/401 for %q", res.StatusCode, p)
}
}
}
func TestJSONEndpointsIncorrectAuth(t *testing.T) {
for _, p := range jsonEndpoints {
r, err := http.NewRequest("GET", "http://"+target+p, nil)
if err != nil {
t.Fatal(err)
}
if len(csrfToken) > 0 {
r.Header.Set("X-CSRF-Token", csrfToken)
}
r.SetBasicAuth("wronguser", "wrongpass")
res, err := http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 403 && res.StatusCode != 401 {
t.Fatalf("Status %d != 403/401 for %q", res.StatusCode, p)
}
}
}
func get(path string) (*http.Response, error) {
r, err := http.NewRequest("GET", "http://"+target+path, nil)
if err != nil {
return nil, err
}
if len(authUser) > 0 {
r.SetBasicAuth(authUser, authPass)
}
if len(csrfToken) > 0 {
r.Header.Set("X-CSRF-Token", csrfToken)
}
if len(apiKey) > 0 {
r.Header.Set("X-API-Key", apiKey)
}
return http.DefaultClient.Do(r)
}

View File

@@ -13,20 +13,47 @@ id3=373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA
go build genfiles.go
go build md5r.go
go build json.go
go build http.go
start() {
echo "Starting..."
for i in 1 2 3 4 ; do
STPROFILER=":909$i" syncthing -home "h$i" > "$i.out" 2>&1 &
done
# Test REST API
sleep 2
curl -s -o /dev/null http://testuser:testpass@localhost:8081/index.html
curl -s -o /dev/null http://localhost:8082/index.html
sleep 1
./http -target localhost:8081 -user testuser -pass testpass -csrf h1/csrftokens.txt || stop 1
./http -target localhost:8081 -api abc123 || stop 1
./http -target localhost:8082 -csrf h2/csrftokens.txt || stop 1
./http -target localhost:8082 -api abc123 || stop 1
}
stop() {
for i in 1 2 3 4 ; do
curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
done
exit $1
}
clean() {
if [[ $(uname -s) == "Linux" ]] ; then
grep -v utf8-nfd
else
cat
fi
}
testConvergence() {
while true ; do
sleep 5
s1comp=$(curl -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
s2comp=$(curl -s "http://localhost:8083/rest/connections" | ./json "$id2/Completion")
s3comp=$(curl -s "http://localhost:8081/rest/connections" | ./json "$id3/Completion")
s1comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
s2comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8083/rest/connections" | ./json "$id2/Completion")
s3comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/connections" | ./json "$id3/Completion")
s1comp=${s1comp:-0}
s2comp=${s2comp:-0}
s3comp=${s3comp:-0}
@@ -38,13 +65,13 @@ testConvergence() {
done
echo "Verifying..."
cat md5-? | sort | uniq > md5-tot
cat md5-12-? | sort | uniq > md5-12-tot
cat md5-23-? | sort | uniq > md5-23-tot
cat md5-? | sort | clean | uniq > md5-tot
cat md5-12-? | sort | clean | uniq > md5-12-tot
cat md5-23-? | sort | clean | uniq > md5-23-tot
for i in 1 2 3 12-1 12-2 23-2 23-3; do
pushd "s$i" >/dev/null
../md5r -l | sort > ../md5-$i
../md5r -l | sort | clean > ../md5-$i
popd >/dev/null
done
@@ -74,8 +101,7 @@ testConvergence() {
fi
done
if [[ $ok != 7 ]] ; then
pkill syncthing
exit 1
stop 1
fi
}
@@ -105,10 +131,11 @@ alterFiles() {
pkill -CONT syncthing
}
rm -f h?/*.idx.gz
rm -rf s? s??-? s4d
echo "Setting up files..."
for i in 1 2 3 12-1 12-2 23-2 23-3; do
rm -f h$i/*.idx.gz
rm -rf "s$i"
mkdir "s$i"
pushd "s$i" >/dev/null
echo " $i: random nonoverlapping"
@@ -117,9 +144,17 @@ for i in 1 2 3 12-1 12-2 23-2 23-3; do
touch "empty-$i"
echo " $i: large file"
dd if=/dev/urandom of=large-$i bs=1024k count=55 2>/dev/null
echo " $i: weird encodings"
echo somedata > "$(echo -e utf8-nfc-\\xc3\\xad)-$i"
echo somedata > "$(echo -e utf8-nfd-i\\xcc\\x81)-$i"
echo somedata > "$(echo -e cp850-\\xa1)-$i"
touch "empty-$i"
popd >/dev/null
done
mkdir s4d
echo somerandomdata > s4d/extrafile
echo "MD5-summing..."
for i in 1 2 3 12-1 12-2 23-2 23-3 ; do
pushd "s$i" >/dev/null
@@ -139,6 +174,4 @@ for ((t = 1; t <= $iterations; t++)) ; do
testConvergence
done
for i in 1 2 3 4 ; do
curl -X POST "http://localhost:808$i/rest/shutdown"
done
stop 0

View File

@@ -16,7 +16,6 @@ import (
"sync"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/cid"
"github.com/calmh/syncthing/config"
"github.com/calmh/syncthing/files"
@@ -98,6 +97,9 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
sup: suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
}
deadlockDetect(&m.rmut, 60*time.Second)
deadlockDetect(&m.smut, 60*time.Second)
deadlockDetect(&m.pmut, 60*time.Second)
go m.broadcastIndexLoop()
return m
}
@@ -248,7 +250,11 @@ func (m *Model) NeedFilesRepo(repo string) []scanner.File {
m.rmut.RLock()
defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok {
return rf.Need(cid.LocalID)
f := rf.Need(cid.LocalID)
if r := m.repoCfgs[repo].FileRanker(); r != nil {
files.SortBy(r).Sort(f)
}
return f
}
return nil
}
@@ -260,6 +266,11 @@ func (m *Model) Index(nodeID string, repo string, fs []protocol.FileInfo) {
l.Debugf("IDX(in): %s %q: %d files", nodeID, repo, len(fs))
}
if !m.repoSharedWith(repo, nodeID) {
l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
return
}
var files = make([]scanner.File, len(fs))
for i := range fs {
f := fs[i]
@@ -279,8 +290,7 @@ func (m *Model) Index(nodeID string, repo string, fs []protocol.FileInfo) {
if r, ok := m.repoFiles[repo]; ok {
r.Replace(id, files)
} else {
l.Warnf("Index from %s for unexpected repo %q; verify configuration", nodeID, repo)
l.Fatalf("Index for nonexistant repo %q", repo)
}
m.rmut.RUnlock()
}
@@ -292,6 +302,11 @@ func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo)
l.Debugf("IDXUP(in): %s / %q: %d files", nodeID, repo, len(fs))
}
if !m.repoSharedWith(repo, nodeID) {
l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
return
}
var files = make([]scanner.File, len(fs))
for i := range fs {
f := fs[i]
@@ -311,11 +326,22 @@ func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo)
if r, ok := m.repoFiles[repo]; ok {
r.Update(id, files)
} else {
l.Warnf("Index update from %s for nonexistant repo %q; dropping", nodeID, repo)
l.Fatalf("IndexUpdate for nonexistant repo %q", repo)
}
m.rmut.RUnlock()
}
func (m *Model) repoSharedWith(repo, nodeID string) bool {
m.rmut.RLock()
defer m.rmut.RUnlock()
for _, nrepo := range m.nodeRepos[nodeID] {
if nrepo == repo {
return true
}
}
return false
}
func (m *Model) ClusterConfig(nodeID string, config protocol.ClusterConfigMessage) {
compErr := compareClusterConfig(m.clusterConfig(nodeID), config)
if debug {
@@ -409,7 +435,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
}
defer fd.Close()
buf := buffers.Get(int(size))
buf := make([]byte, size)
_, err = fd.ReadAt(buf, offset)
if err != nil {
return nil, err
@@ -716,11 +742,12 @@ func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) error
id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoCfgs[repo].Directory)))
name := id + ".idx.gz"
name = filepath.Join(dir, name)
idxf, err := os.Create(name + ".tmp")
tmp := fmt.Sprintf("%s.tmp.%d", name, time.Now().UnixNano())
idxf, err := os.OpenFile(tmp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer os.Remove(tmp)
gzw := gzip.NewWriter(idxf)
@@ -748,7 +775,7 @@ func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) error
l.Debugln("wrote index,", n, "bytes uncompressed")
}
return osutil.Rename(name+".tmp", name)
return osutil.Rename(tmp, name)
}
func (m *Model) loadIndex(repo string, dir string) []protocol.FileInfo {
@@ -826,3 +853,41 @@ func (m *Model) State(repo string) string {
return "unknown"
}
}
func (m *Model) Override(repo string) {
fs := m.NeedFilesRepo(repo)
m.rmut.Lock()
r := m.repoFiles[repo]
for i := range fs {
f := &fs[i]
h := r.Get(cid.LocalID, f.Name)
if h.Name != f.Name {
// We are missing the file
f.Flags |= protocol.FlagDeleted
f.Blocks = nil
} else {
// We have the file, replace with our version
*f = h
}
f.Version = lamport.Default.Tick(f.Version)
}
m.rmut.Unlock()
r.Update(cid.LocalID, fs)
}
// Version returns the change version for the given repository. This is
// guaranteed to increment if the contents of the local or global repository
// have changed.
func (m *Model) Version(repo string) uint64 {
var ver uint64
m.rmut.Lock()
for _, n := range m.repoNodes[repo] {
ver += m.repoFiles[repo].Changes(m.cm.Get(n))
}
m.rmut.Unlock()
return ver
}

View File

@@ -12,7 +12,6 @@ import (
"runtime"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/cid"
"github.com/calmh/syncthing/config"
"github.com/calmh/syncthing/osutil"
@@ -137,6 +136,7 @@ func (p *puller) run() {
walkTicker := time.Tick(time.Duration(p.cfg.Options.RescanIntervalS) * time.Second)
timeout := time.Tick(5 * time.Second)
changed := true
var prevVer uint64
for {
// Run the pulling loop as long as there are blocks to fetch
@@ -199,8 +199,11 @@ func (p *puller) run() {
default:
}
// Queue more blocks to fetch, if any
p.queueNeededBlocks()
if v := p.model.Version(p.repoCfg.ID); v > prevVer {
// Queue more blocks to fetch, if any
p.queueNeededBlocks()
prevVer = v
}
}
}
@@ -339,7 +342,6 @@ func (p *puller) handleRequestResult(res requestResult) {
}
_, of.err = of.file.WriteAt(res.data, res.offset)
buffers.Put(res.data)
of.outstanding--
p.openFiles[f.Name] = of
@@ -490,12 +492,11 @@ func (p *puller) handleCopyBlock(b bqBlock) {
defer exfd.Close()
for _, b := range b.copy {
bs := buffers.Get(int(b.Size))
bs := make([]byte, b.Size)
_, of.err = exfd.ReadAt(bs, b.Offset)
if of.err == nil {
_, of.err = of.file.WriteAt(bs, b.Offset)
}
buffers.Put(bs)
if of.err != nil {
if debug {
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)

View File

@@ -7,6 +7,8 @@ package model
import (
"fmt"
"path/filepath"
"sync"
"time"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
@@ -90,3 +92,27 @@ func compareClusterConfig(local, remote protocol.ClusterConfigMessage) error {
return nil
}
func deadlockDetect(mut sync.Locker, timeout time.Duration) {
go func() {
for {
time.Sleep(timeout / 4)
ok := make(chan bool, 2)
go func() {
mut.Lock()
mut.Unlock()
ok <- true
}()
go func() {
time.Sleep(timeout)
ok <- false
}()
if r := <-ok; !r {
panic("deadlock detected")
}
}
}()
}
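A self-contained sketch of how the detector above behaves when a lock really is held past the timeout; the short timeout and the toy main are illustrative only, not part of the change.
package main
import (
	"sync"
	"time"
)
// deadlockDetect is reproduced from the change above: every timeout/4 it
// races a lock/unlock probe against a timer, and panics if the timer wins.
func deadlockDetect(mut sync.Locker, timeout time.Duration) {
	go func() {
		for {
			time.Sleep(timeout / 4)
			ok := make(chan bool, 2)
			go func() {
				mut.Lock()
				mut.Unlock()
				ok <- true
			}()
			go func() {
				time.Sleep(timeout)
				ok <- false
			}()
			if r := <-ok; !r {
				panic("deadlock detected")
			}
		}
	}()
}
func main() {
	var mut sync.Mutex
	deadlockDetect(&mut, 500*time.Millisecond) // short timeout for the demo
	mut.Lock()                                 // hold the lock well past the timeout
	time.Sleep(2 * time.Second)                // the detector panics before this returns
}
In the model hunk further down the same detector is attached to rmut, smut and pmut with a 60 second timeout.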

View File

@@ -59,10 +59,11 @@ or certificate pinning combined with some out of band first
verification. The reference implementation uses preshared certificate
fingerprints (SHA-256) referred to as "Node IDs".
There is no required order or synchronization among BEP messages - any
message type may be sent at any time and the sender need not await a
response to one message before sending another. Responses MUST however
be sent in the same order as the requests are received.
There is no required order or synchronization among BEP messages except
as noted per message type - any message type may be sent at any time and
the sender need not await a response to one message before sending
another. Responses MUST however be sent in the same order as the
requests are received.
The underlying transport protocol MUST be TCP.
@@ -70,12 +71,13 @@ Messages
--------
Every message starts with one 32 bit word indicating the message
version, type and ID.
version, type and ID. The header is in network byte order, i.e. big
endian.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Ver | Type | Message ID | Reply To |
| Ver | Message ID | Type | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
For BEP v1 the Version field is set to zero. Future versions with
@@ -84,19 +86,19 @@ with an unknown version is a protocol error and MUST result in the
connection being terminated. A client supporting multiple versions MAY
retry with a different protocol version upon disconnection.
The Message ID is set to a unique value for each transmitted request
message. In response messages it is set to the Message ID of the
corresponding request message. The uniqueness requirement implies that
no more than 4096 messages may be outstanding at any given moment. The
ordering requirement implies that a response to a given message ID also
means that all preceding messages have been received, specifically those
which do not otherwise demand a response. Hence their message ID:s may
be reused.
The Type field indicates the type of data following the message header
and is one of the integers defined below. A message of an unknown type
is a protocol error and MUST result in the connection being terminated.
The Message ID is set to a unique value for each transmitted message. In
request messages the Reply To is set to zero. In response messages it is
set to the message ID of the corresponding request. The uniqueness
requirement implies that no more than 4096 messages may be outstanding
at any given moment. The ordering requirement implies that a response to
a given message ID also means that all preceding messages have been
received, specifically those which do not otherwise demand a response.
Hence their message IDs may be reused.
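A minimal sketch of packing and unpacking the new header word, assuming the field widths shown in the diagram above (4 bits version, 12 bits message ID, 8 bits type, 8 bits reserved) and consistent with the header layout test added elsewhere in this change set; the type and function names are illustrative.
package bep
// header mirrors the fields in the diagram above.
type header struct {
	version int
	msgID   int
	msgType int
}
// encodeHeader packs the fields big endian into one 32-bit word:
// 4 bits version, 12 bits message ID, 8 bits type, 8 bits reserved (zero).
func encodeHeader(h header) uint32 {
	return uint32(h.version&0xf)<<28 |
		uint32(h.msgID&0xfff)<<16 |
		uint32(h.msgType&0xff)<<8
}
// decodeHeader is the inverse; the reserved byte is ignored.
func decodeHeader(u uint32) header {
	return header{
		version: int(u >> 28 & 0xf),
		msgID:   int(u >> 16 & 0xfff),
		msgType: int(u >> 8 & 0xff),
	}
}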
All data following the message header MUST be in XDR (RFC 1014)
encoding. All fields shorter than 32 bits and all variable length data
MUST be padded to a multiple of 32 bits. The actual data types in use by
@@ -117,8 +119,9 @@ normalization form C.
### Cluster Config (Type = 0)
This informational message provides information about the cluster
configuration, as it pertains to the current connection. It is sent by
both sides after connection establishment.
configuration as it pertains to the current connection. A Cluster Config
message MUST be the first message sent on a BEP connection. Additional
Cluster Config messages MUST NOT be sent after the initial exchange.
#### Graphical Representation
@@ -294,11 +297,12 @@ peers acting in a specific manner as a result of sent options.
### Index (Type = 1)
The Index message defines the contents of the senders repository. An
Index message MUST be sent by each node immediately upon connection. A
node with no data to advertise MUST send an empty Index message (a file
list of zero length). If the repository contents change from non-empty
to empty, an empty Index message MUST be sent. There is no response to
the Index message.
Index message MUST be sent for each repository mentioned in the Cluster
Config message. An Index message for a repository MUST be sent before
any other message referring to that repository. A node with no data to
advertise MUST send an empty Index message (a file list of zero length).
If the repository contents change from non-empty to empty, an empty
Index message MUST be sent. There is no response to the Index message.
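A hypothetical sketch of the start-up ordering these two sections require; the interface and method names are illustrative stand-ins, not the actual syncthing API.
package bep
// FileInfo stands in for the real file record; only the name matters here.
type FileInfo struct{ Name string }
// Wire is a stand-in for whatever sends BEP messages on a connection.
type Wire interface {
	SendClusterConfig(repos []string)
	SendIndex(repo string, files []FileInfo)
}
// handshake sends the messages in the order the spec mandates: Cluster
// Config first, then exactly one Index per repository it mentioned, before
// any other message referring to those repositories.
func handshake(w Wire, repoFiles map[string][]FileInfo) {
	repos := make([]string, 0, len(repoFiles))
	for r := range repoFiles {
		repos = append(repos, r)
	}
	w.SendClusterConfig(repos)
	for _, r := range repos {
		// An empty file list is still sent as an (empty) Index message.
		w.SendIndex(r, repoFiles[r])
	}
}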
#### Graphical Representation

View File

@@ -81,10 +81,12 @@ type rawConnection struct {
xw *xdr.Writer
wmut sync.Mutex
indexSent map[string]map[string][2]int64
indexSent map[string]map[string]uint64
awaiting []chan asyncResult
imut sync.Mutex
idxMut sync.Mutex // ensures serialization of Index calls
nextID chan int
outbox chan []encodable
closed chan struct{}
@@ -96,8 +98,8 @@ type asyncResult struct {
}
const (
pingTimeout = 300 * time.Second
pingIdleTime = 600 * time.Second
pingTimeout = 30 * time.Second
pingIdleTime = 60 * time.Second
)
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) Connection {
@@ -122,12 +124,13 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
wb: wb,
xw: xdr.NewWriter(wb),
awaiting: make([]chan asyncResult, 0x1000),
indexSent: make(map[string]map[string][2]int64),
indexSent: make(map[string]map[string]uint64),
outbox: make(chan []encodable),
nextID: make(chan int),
closed: make(chan struct{}),
}
go c.indexSerializerLoop()
go c.readerLoop()
go c.writerLoop()
go c.pingerLoop()
@@ -142,31 +145,36 @@ func (c *rawConnection) ID() string {
// Index writes the list of file information to the connected peer node
func (c *rawConnection) Index(repo string, idx []FileInfo) {
c.idxMut.Lock()
defer c.idxMut.Unlock()
c.imut.Lock()
var msgType int
if c.indexSent[repo] == nil {
// This is the first time we send an index.
msgType = messageTypeIndex
c.indexSent[repo] = make(map[string][2]int64)
c.indexSent[repo] = make(map[string]uint64)
for _, f := range idx {
c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
c.indexSent[repo][f.Name] = f.Version
}
} else {
// We have sent one full index. Only send updates now.
msgType = messageTypeIndexUpdate
var diff []FileInfo
for _, f := range idx {
if vs, ok := c.indexSent[repo][f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
if vs, ok := c.indexSent[repo][f.Name]; !ok || f.Version != vs {
diff = append(diff, f)
c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
c.indexSent[repo][f.Name] = f.Version
}
}
idx = diff
}
c.imut.Unlock()
c.send(header{0, -1, msgType}, IndexMessage{repo, idx})
if len(idx) > 0 {
c.send(header{0, -1, msgType}, IndexMessage{repo, idx})
}
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
@@ -285,6 +293,31 @@ func (c *rawConnection) readerLoop() (err error) {
}
}
type incomingIndex struct {
update bool
id string
repo string
files []FileInfo
}
var incomingIndexes = make(chan incomingIndex, 100) // should be enough for anyone, right?
func (c *rawConnection) indexSerializerLoop() {
// We must avoid blocking the reader loop when processing large indexes.
// There is otherwise a potential deadlock where both sides have the model
// locked because each is sending a large index update and cannot receive
// the large index update from the other side. But we must also process the
// indexes in the order they are received, hence the separate routine and
// buffered channel.
for ii := range incomingIndexes {
if ii.update {
c.receiver.IndexUpdate(ii.id, ii.repo, ii.files)
} else {
c.receiver.Index(ii.id, ii.repo, ii.files)
}
}
}
func (c *rawConnection) handleIndex() error {
var im IndexMessage
im.decodeXDR(c.xr)
@@ -299,7 +332,7 @@ func (c *rawConnection) handleIndex() error {
// update and can't receive the large index update from the
// other side.
go c.receiver.Index(c.id, im.Repository, im.Files)
incomingIndexes <- incomingIndex{false, c.id, im.Repository, im.Files}
}
return nil
}
@@ -310,7 +343,7 @@ func (c *rawConnection) handleIndexUpdate() error {
if err := c.xr.Error(); err != nil {
return err
} else {
go c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
incomingIndexes <- incomingIndex{true, c.id, im.Repository, im.Files}
}
return nil
}

View File

@@ -25,6 +25,31 @@ func TestHeaderFunctions(t *testing.T) {
}
}
func TestHeaderLayout(t *testing.T) {
var e, a uint32
// Version are the first four bits
e = 0xf0000000
a = encodeHeader(header{0xf, 0, 0})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
// Message ID are the following 12 bits
e = 0x0fff0000
a = encodeHeader(header{0, 0xfff, 0})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
// Type are the last 8 bits before reserved
e = 0x0000ff00
a = encodeHeader(header{0, 0, 0xff})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
}
func TestPing(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()

View File

@@ -7,12 +7,14 @@ package scanner
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"code.google.com/p/go.text/unicode/norm"
"github.com/calmh/syncthing/lamport"
"github.com/calmh/syncthing/protocol"
@@ -103,13 +105,15 @@ func (w *Walker) loadIgnoreFiles(dir string, ign map[string][]string) filepath.W
}
if pn, sn := filepath.Split(rn); sn == w.IgnoreFile {
pn := strings.Trim(pn, "/")
pn := filepath.Clean(pn)
l.Debugf("pn: %q", pn)
bs, _ := ioutil.ReadFile(p)
lines := bytes.Split(bs, []byte("\n"))
var patterns []string
for _, line := range lines {
if len(line) > 0 {
patterns = append(patterns, string(line))
lineStr := strings.TrimSpace(string(line))
if len(lineStr) > 0 {
patterns = append(patterns, lineStr)
}
}
ign[pn] = patterns
@@ -159,6 +163,11 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
return nil
}
if (runtime.GOOS == "linux" || runtime.GOOS == "windows") && !norm.NFC.IsNormalString(rn) {
l.Warnf("File %q contains non-NFC UTF-8 sequences and cannot be synced. Consider renaming.", rn)
return nil
}
if info.Mode().IsDir() {
if w.CurrentFiler != nil {
cf := w.CurrentFiler.CurrentFile(rn)
@@ -276,8 +285,9 @@ func (w *Walker) cleanTempFile(path string, info os.FileInfo, err error) error {
func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool {
first, last := filepath.Split(file)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
if prefix == "." || prefix == first || strings.HasPrefix(first, fmt.Sprintf("%s%c", prefix, os.PathSeparator)) {
for _, pattern := range pats {
l.Debugf("%q %q", pattern, last)
if match, _ := filepath.Match(pattern, last); match {
return true
}

View File

@@ -22,7 +22,7 @@ var testdata = []struct {
}
var correctIgnores = map[string][]string{
"": {".*", "quux"},
".": {".*", "quux"},
}
func TestWalk(t *testing.T) {
@@ -88,7 +88,7 @@ func TestWalkError(t *testing.T) {
func TestIgnore(t *testing.T) {
var patterns = map[string][]string{
"": {"t2"},
".": {"t2"},
"foo": {"bar", "z*"},
"foo/baz": {"quux", ".*"},
}
@@ -97,6 +97,7 @@ func TestIgnore(t *testing.T) {
r bool
}{
{"foo/bar", true},
{"foofoo", false},
{"foo/quux", false},
{"foo/zuux", true},
{"foo/qzuux", false},