mirror of
https://github.com/syncthing/syncthing.git
synced 2026-01-03 19:39:20 -05:00
Compare commits
169 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
58cc108c0c | ||
|
|
50b37f1366 | ||
|
|
a7b6e35467 | ||
|
|
37d83a4e2e | ||
|
|
a720f90a70 | ||
|
|
4a6b43bcae | ||
|
|
2f5a822ca4 | ||
|
|
bc1d04f0b9 | ||
|
|
381795d6d0 | ||
|
|
6ade27641d | ||
|
|
53898d2c60 | ||
|
|
91c4ff6009 | ||
|
|
0aa067a726 | ||
|
|
5353659f9f | ||
|
|
7ac00e189b | ||
|
|
a2da31056b | ||
|
|
2383579a64 | ||
|
|
68750211ef | ||
|
|
db3e3ade80 | ||
|
|
e6f04ed238 | ||
|
|
a6eb690e31 | ||
|
|
77fe8449ba | ||
|
|
33e9a35f08 | ||
|
|
4ab4816556 | ||
|
|
8e8a579bb2 | ||
|
|
efbdf72d20 | ||
|
|
0e59b5678a | ||
|
|
de75550415 | ||
|
|
4dbce32738 | ||
|
|
b05fcbc9d7 | ||
|
|
d09c71b688 | ||
|
|
874d6760d4 | ||
|
|
26ebbee877 | ||
|
|
12eda0449a | ||
|
|
5a98f4e47c | ||
|
|
964c903a68 | ||
|
|
21b699826d | ||
|
|
5fa8f8e50c | ||
|
|
9ca87f5314 | ||
|
|
537c6b3b69 | ||
|
|
48a3fac2da | ||
|
|
fd73682806 | ||
|
|
34bd5b9dcf | ||
|
|
58c5e46206 | ||
|
|
4c61ab0f18 | ||
|
|
f241b63e0e | ||
|
|
2ffdb5a82a | ||
|
|
46e963443d | ||
|
|
66d4e9e5d7 | ||
|
|
de382e33a3 | ||
|
|
3c6738da73 | ||
|
|
18e5cb6793 | ||
|
|
9cd6b85c09 | ||
|
|
f40f3b3b7b | ||
|
|
7454670b0a | ||
|
|
e63596681d | ||
|
|
3dbaa76dcb | ||
|
|
8752003b50 | ||
|
|
8716ed5aa4 | ||
|
|
38ac4e8f79 | ||
|
|
70fc8a3064 | ||
|
|
7626c5d526 | ||
|
|
7e04c9d048 | ||
|
|
9eda8f2c7e | ||
|
|
456d9e870d | ||
|
|
a1533696a5 | ||
|
|
92499af323 | ||
|
|
b2988cdd35 | ||
|
|
82cfd37263 | ||
|
|
df381fd03f | ||
|
|
5a2328d9a5 | ||
|
|
b2f66cfb60 | ||
|
|
6d24e4f122 | ||
|
|
2e2185165c | ||
|
|
f0612e57c2 | ||
|
|
e5d16ed08a | ||
|
|
1cff9ccc63 | ||
|
|
20a018db2e | ||
|
|
80c2b32b92 | ||
|
|
028e9bc17a | ||
|
|
afc2d6fda4 | ||
|
|
bec5c76631 | ||
|
|
d87051ca99 | ||
|
|
3798cebad0 | ||
|
|
a477989950 | ||
|
|
5065d1d0b4 | ||
|
|
829990c9ef | ||
|
|
ac037e0fa3 | ||
|
|
da42d51008 | ||
|
|
99027813ef | ||
|
|
9112ba8f0b | ||
|
|
843fd9bdbd | ||
|
|
26c33c4a69 | ||
|
|
2db76ae786 | ||
|
|
a0b15d006d | ||
|
|
23b27fa24a | ||
|
|
b6f580cbc2 | ||
|
|
f2459ef331 | ||
|
|
0a37fac794 | ||
|
|
2d9a822ed7 | ||
|
|
98622ca4d0 | ||
|
|
f7a25adcbd | ||
|
|
9bf13b253c | ||
|
|
2e8b639a34 | ||
|
|
672f7a010f | ||
|
|
37e15c4368 | ||
|
|
4d7837ba96 | ||
|
|
a6c8423905 | ||
|
|
832ed556d9 | ||
|
|
7c6fb018ca | ||
|
|
9c5c06bf31 | ||
|
|
61e3daaead | ||
|
|
9c0fde795e | ||
|
|
ce4f565e2f | ||
|
|
5369a62fd5 | ||
|
|
b44016ff70 | ||
|
|
9f76c87880 | ||
|
|
42ae2898e1 | ||
|
|
dd649a6be4 | ||
|
|
593f098276 | ||
|
|
4a87221f16 | ||
|
|
7745ed34d3 | ||
|
|
8fe546c4a2 | ||
|
|
381f6aeaf6 | ||
|
|
9154bacced | ||
|
|
dc0dc8efb4 | ||
|
|
b062d5dd7f | ||
|
|
c519e582b5 | ||
|
|
6b9dce36bf | ||
|
|
8e0520887a | ||
|
|
cfd1fdb38e | ||
|
|
c6ba0208d0 | ||
|
|
3d055bbb79 | ||
|
|
dd971b56e5 | ||
|
|
4031f5e24b | ||
|
|
1cd7cc6869 | ||
|
|
9de2864db3 | ||
|
|
c27861cbaf | ||
|
|
c2f75d3689 | ||
|
|
5454ca1cf7 | ||
|
|
8644bf30a9 | ||
|
|
db3341a178 | ||
|
|
e2cb0219c7 | ||
|
|
217f29de76 | ||
|
|
8661afcb4f | ||
|
|
ed07fc0f2c | ||
|
|
4af3f77a9a | ||
|
|
8c4f07ef1b | ||
|
|
1a231d39a5 | ||
|
|
17e3d14272 | ||
|
|
03182c7714 | ||
|
|
963078f6ac | ||
|
|
8356b58b1d | ||
|
|
303ce02271 | ||
|
|
bcdc3ecdae | ||
|
|
b60d648e22 | ||
|
|
7bc36cbbd1 | ||
|
|
04130fcb15 | ||
|
|
52d8e4c691 | ||
|
|
ae0193b724 | ||
|
|
2e1c33206f | ||
|
|
0c642ec7cf | ||
|
|
b3ca96eeba | ||
|
|
ae0e033178 | ||
|
|
a97985b428 | ||
|
|
63c0f11458 | ||
|
|
b336b2c336 | ||
|
|
8a5a573851 | ||
|
|
358862c7ad |
5
.gitignore
vendored
5
.gitignore
vendored
@@ -5,3 +5,8 @@ stcli.exe
|
||||
*.tar.gz
|
||||
*.zip
|
||||
*.asc
|
||||
*.sublime*
|
||||
discosrv
|
||||
stpidx
|
||||
.jshintrc
|
||||
coverage.out
|
||||
|
||||
20
.travis.yml
Normal file
20
.travis.yml
Normal file
@@ -0,0 +1,20 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- tip
|
||||
|
||||
install:
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
- ./build.sh setup
|
||||
- go get code.google.com/p/go.tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- ./build.sh test-cov
|
||||
|
||||
after_success:
|
||||
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/syncthing -repotoken="$COVERALS_TOKEN"
|
||||
|
||||
env:
|
||||
global:
|
||||
secure: "zEV2h2XtKHNLVdXJjM4LA/VjMfLVydm6goF+ARit+nOSGxGoH7f7jIdzJzhxgh7shKG93q61eLO1Tug+WBMYB2EpBuYnTB5AIMYhCDwNI8C4uBV6c3brHfcrie7MASNao8TID2QScASKNFFWvjv/i1Ccn5ztxdcQuhSsNjGZp8A="
|
||||
@@ -1,7 +1,9 @@
|
||||
Aaron Bieber <qbit@deftly.net>
|
||||
Andrew Dunham <andrew@du.nham.ca>
|
||||
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com>
|
||||
Brandon Philips <brandon@ifup.org>
|
||||
James Patterson <jamespatterson@operamail.com>
|
||||
Jens Diemer <github.com@jensdiemer.de>
|
||||
Philippe Schommers <philippe@schommers.be>
|
||||
Ryan Sullivan <kayoticsully@gmail.com>
|
||||
Veeti Paananen <veeti.paananen@rojekti.fi>
|
||||
|
||||
2
Godeps/Godeps.json
generated
2
Godeps/Godeps.json
generated
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"ImportPath": "github.com/calmh/syncthing",
|
||||
"GoVersion": "go1.2.2",
|
||||
"GoVersion": "go1.3",
|
||||
"Packages": [
|
||||
"./cmd/syncthing",
|
||||
"./cmd/assets",
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
syncthing
|
||||
syncthing [](https://travis-ci.org/calmh/syncthing) [](https://coveralls.io/r/calmh/syncthing?branch=master)
|
||||
=========
|
||||
|
||||
This is the `syncthing` project. The following are the project goals:
|
||||
|
||||
BIN
assets/st-logo-text.pxm
Normal file
BIN
assets/st-logo-text.pxm
Normal file
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package beacon
|
||||
|
||||
import "net"
|
||||
@@ -48,7 +52,7 @@ func (b *Beacon) Recv() ([]byte, net.Addr) {
|
||||
}
|
||||
|
||||
func (b *Beacon) reader() {
|
||||
var bs = make([]byte, 65536)
|
||||
bs := make([]byte, 65536)
|
||||
for {
|
||||
n, addr, err := b.conn.ReadFrom(bs)
|
||||
if err != nil {
|
||||
@@ -58,8 +62,11 @@ func (b *Beacon) reader() {
|
||||
if debug {
|
||||
l.Debugf("recv %d bytes from %s", n, addr)
|
||||
}
|
||||
|
||||
c := make([]byte, n)
|
||||
copy(c, bs)
|
||||
select {
|
||||
case b.outbox <- recv{bs[:n], addr}:
|
||||
case b.outbox <- recv{c, addr}:
|
||||
default:
|
||||
if debug {
|
||||
l.Debugln("dropping message")
|
||||
@@ -79,7 +86,7 @@ func (b *Beacon) writer() {
|
||||
|
||||
var dsts []net.IP
|
||||
for _, addr := range addrs {
|
||||
if iaddr, ok := addr.(*net.IPNet); ok && iaddr.IP.IsGlobalUnicast() {
|
||||
if iaddr, ok := addr.(*net.IPNet); ok && iaddr.IP.IsGlobalUnicast() && iaddr.IP.To4() != nil {
|
||||
baddr := bcast(iaddr)
|
||||
dsts = append(dsts, baddr.IP)
|
||||
}
|
||||
@@ -102,9 +109,7 @@ func (b *Beacon) writer() {
|
||||
if debug {
|
||||
l.Debugln(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if debug {
|
||||
} else if debug {
|
||||
l.Debugf("sent %d bytes to %s", len(bs), dst)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/beacon"
|
||||
)
|
||||
|
||||
func main() {
|
||||
b, err := beacon.NewBeacon(21025)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
bs, addr := b.Recv()
|
||||
log.Printf("Received %d bytes from %s: %x %x", len(bs), addr, bs[:8], bs[8:])
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
bs := [16]byte{}
|
||||
binary.BigEndian.PutUint64(bs[:], uint64(time.Now().UnixNano()))
|
||||
log.Printf("My ID: %x", bs[:8])
|
||||
for {
|
||||
binary.BigEndian.PutUint64(bs[8:], uint64(time.Now().UnixNano()))
|
||||
b.Send(bs[:])
|
||||
log.Printf("Sent %d bytes", len(bs[:]))
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
}()
|
||||
select {}
|
||||
}
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package beacon
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,2 +1,6 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package beacon implements an UDP broadcast beacon
|
||||
package beacon
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
// Package buffers manages a set of reusable byte buffers.
|
||||
package buffers
|
||||
|
||||
const (
|
||||
largeMin = 1024
|
||||
)
|
||||
|
||||
var (
|
||||
smallBuffers = make(chan []byte, 32)
|
||||
largeBuffers = make(chan []byte, 32)
|
||||
)
|
||||
|
||||
func Get(size int) []byte {
|
||||
var ch = largeBuffers
|
||||
if size < largeMin {
|
||||
ch = smallBuffers
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
select {
|
||||
case buf = <-ch:
|
||||
default:
|
||||
}
|
||||
|
||||
if len(buf) < size {
|
||||
return make([]byte, size)
|
||||
}
|
||||
return buf[:size]
|
||||
}
|
||||
|
||||
func Put(buf []byte) {
|
||||
buf = buf[:cap(buf)]
|
||||
if len(buf) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var ch = largeBuffers
|
||||
if len(buf) < largeMin {
|
||||
ch = smallBuffers
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- buf:
|
||||
default:
|
||||
}
|
||||
}
|
||||
40
build.sh
40
build.sh
@@ -1,14 +1,16 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
export COPYFILE_DISABLE=true
|
||||
export GO386=387 # Don't use SSE on 32 bit builds
|
||||
|
||||
distFiles=(README.md LICENSE) # apart from the binary itself
|
||||
distFiles=(README.md LICENSE CONTRIBUTORS) # apart from the binary itself
|
||||
version=$(git describe --always --dirty)
|
||||
date=$(git show -s --format=%ct)
|
||||
user=$(whoami)
|
||||
host=$(hostname)
|
||||
host=${host%%.*}
|
||||
ldflags="-w -X main.Version $version -X main.BuildStamp $date -X main.BuildUser $user -X main.BuildHost $host"
|
||||
bldenv=${ENVIRONMENT:-default}
|
||||
ldflags="-w -X main.Version $version -X main.BuildStamp $date -X main.BuildUser $user -X main.BuildHost $host -X main.BuildEnv $bldenv"
|
||||
|
||||
check() {
|
||||
if ! command -v godep >/dev/null ; then
|
||||
@@ -30,6 +32,21 @@ assets() {
|
||||
godep go run cmd/assets/assets.go gui > auto/gui.files.go
|
||||
}
|
||||
|
||||
test-cov() {
|
||||
echo "mode: set" > coverage.out
|
||||
fail=0
|
||||
|
||||
for dir in $(go list ./...) ; do
|
||||
godep go test -coverprofile=profile.out $dir || fail=1
|
||||
if [ -f profile.out ] ; then
|
||||
grep -v "mode: set" profile.out >> coverage.out
|
||||
rm profile.out
|
||||
fi
|
||||
done
|
||||
|
||||
exit $fail
|
||||
}
|
||||
|
||||
test() {
|
||||
check
|
||||
godep go test -cpu=1,2,4 ./...
|
||||
@@ -59,7 +76,11 @@ zipDist() {
|
||||
name="$1"
|
||||
rm -rf "$name"
|
||||
mkdir -p "$name"
|
||||
cp syncthing.exe "${distFiles[@]}" "$name"
|
||||
for f in "${distFiles[@]}" ; do
|
||||
sed 's/$/
|
||||
/' < "$f" > "$name/$f.txt"
|
||||
done
|
||||
cp syncthing.exe "$name"
|
||||
sign "$name/syncthing.exe"
|
||||
zip -r "$name.zip" "$name"
|
||||
rm -rf "$name"
|
||||
@@ -88,13 +109,18 @@ case "$1" in
|
||||
;;
|
||||
|
||||
guidev)
|
||||
build -tags guidev
|
||||
echo "Syncthing is already built for GUI developments. Try:"
|
||||
echo " STGUIASSETS=~/someDir/gui syncthing"
|
||||
;;
|
||||
|
||||
test)
|
||||
test
|
||||
;;
|
||||
|
||||
test-cov)
|
||||
test-cov
|
||||
;;
|
||||
|
||||
tar)
|
||||
rm -f *.tar.gz *.zip
|
||||
test || exit 1
|
||||
@@ -112,7 +138,11 @@ case "$1" in
|
||||
test || exit 1
|
||||
assets
|
||||
|
||||
for os in darwin-amd64 linux-386 linux-amd64 freebsd-amd64 windows-amd64 windows-386 ; do
|
||||
godep go build ./discover/cmd/discosrv
|
||||
godep go build ./cmd/stpidx
|
||||
godep go build ./cmd/stcli
|
||||
|
||||
for os in darwin-amd64 linux-386 linux-amd64 freebsd-amd64 windows-amd64 windows-386 solaris-amd64 ; do
|
||||
export GOOS=${os%-*}
|
||||
export GOARCH=${os#*-}
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package cid provides a manager for mappings between node ID:s and connection ID:s.
|
||||
package cid
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package cid
|
||||
|
||||
import "testing"
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,20 +1,29 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"crypto/tls"
|
||||
"code.google.com/p/go.crypto/bcrypt"
|
||||
"github.com/calmh/syncthing/auto"
|
||||
"github.com/calmh/syncthing/config"
|
||||
"github.com/calmh/syncthing/logger"
|
||||
"github.com/calmh/syncthing/model"
|
||||
@@ -31,8 +40,8 @@ var (
|
||||
configInSync = true
|
||||
guiErrors = []guiError{}
|
||||
guiErrorsMut sync.Mutex
|
||||
static = embeddedStatic()
|
||||
staticFunc = static.(func(http.ResponseWriter, *http.Request, *log.Logger))
|
||||
static func(http.ResponseWriter, *http.Request, *log.Logger)
|
||||
apiKey string
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -43,12 +52,14 @@ func init() {
|
||||
l.AddHandler(logger.LevelWarn, showGuiError)
|
||||
}
|
||||
|
||||
func startGUI(cfg config.GUIConfiguration, m *model.Model) error {
|
||||
func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) error {
|
||||
var listener net.Listener
|
||||
var err error
|
||||
if cfg.UseTLS {
|
||||
cert, err := loadCert(confDir, "https-")
|
||||
if err != nil {
|
||||
l.Infoln("Loading HTTPS certificate:", err)
|
||||
l.Infoln("Creating new HTTPS certificate")
|
||||
newCertificate(confDir, "https-")
|
||||
cert, err = loadCert(confDir, "https-")
|
||||
}
|
||||
@@ -70,10 +81,17 @@ func startGUI(cfg config.GUIConfiguration, m *model.Model) error {
|
||||
}
|
||||
}
|
||||
|
||||
if len(assetDir) > 0 {
|
||||
static = martini.Static(assetDir).(func(http.ResponseWriter, *http.Request, *log.Logger))
|
||||
} else {
|
||||
static = embeddedStatic()
|
||||
}
|
||||
|
||||
router := martini.NewRouter()
|
||||
router.Get("/", getRoot)
|
||||
router.Get("/rest/version", restGetVersion)
|
||||
router.Get("/rest/model", restGetModel)
|
||||
router.Get("/rest/model/version", restGetModelVersion)
|
||||
router.Get("/rest/need", restGetNeed)
|
||||
router.Get("/rest/connections", restGetConnections)
|
||||
router.Get("/rest/config", restGetConfig)
|
||||
@@ -81,6 +99,7 @@ func startGUI(cfg config.GUIConfiguration, m *model.Model) error {
|
||||
router.Get("/rest/system", restGetSystem)
|
||||
router.Get("/rest/errors", restGetErrors)
|
||||
router.Get("/rest/discovery", restGetDiscovery)
|
||||
router.Get("/rest/report", restGetReport)
|
||||
router.Get("/qr/:text", getQR)
|
||||
|
||||
router.Post("/rest/config", restPostConfig)
|
||||
@@ -90,8 +109,10 @@ func startGUI(cfg config.GUIConfiguration, m *model.Model) error {
|
||||
router.Post("/rest/error", restPostError)
|
||||
router.Post("/rest/error/clear", restClearErrors)
|
||||
router.Post("/rest/discovery/hint", restPostDiscoveryHint)
|
||||
router.Post("/rest/model/override", restPostOverride)
|
||||
|
||||
mr := martini.New()
|
||||
mr.Use(csrfMiddleware)
|
||||
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
|
||||
mr.Use(basic(cfg.User, cfg.Password))
|
||||
}
|
||||
@@ -101,6 +122,9 @@ func startGUI(cfg config.GUIConfiguration, m *model.Model) error {
|
||||
mr.Action(router.Handle)
|
||||
mr.Map(m)
|
||||
|
||||
apiKey = cfg.APIKey
|
||||
loadCsrfTokens()
|
||||
|
||||
go http.Serve(listener, mr)
|
||||
|
||||
return nil
|
||||
@@ -108,7 +132,7 @@ func startGUI(cfg config.GUIConfiguration, m *model.Model) error {
|
||||
|
||||
func getRoot(w http.ResponseWriter, r *http.Request) {
|
||||
r.URL.Path = "/index.html"
|
||||
staticFunc(w, r, nil)
|
||||
static(w, r, nil)
|
||||
}
|
||||
|
||||
func restMiddleware(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -121,6 +145,17 @@ func restGetVersion() string {
|
||||
return Version
|
||||
}
|
||||
|
||||
func restGetModelVersion(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var repo = qs.Get("repo")
|
||||
var res = make(map[string]interface{})
|
||||
|
||||
res["version"] = m.Version(repo)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var repo = qs.Get("repo")
|
||||
@@ -145,24 +180,31 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
|
||||
|
||||
res["state"] = m.State(repo)
|
||||
res["version"] = m.Version(repo)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func restPostOverride(m *model.Model, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var repo = qs.Get("repo")
|
||||
m.Override(repo)
|
||||
}
|
||||
|
||||
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var repo = qs.Get("repo")
|
||||
|
||||
files := m.NeedFilesRepo(repo)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(files)
|
||||
}
|
||||
|
||||
func restGetConnections(m *model.Model, w http.ResponseWriter) {
|
||||
var res = m.ConnectionStats()
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
@@ -171,48 +213,102 @@ func restGetConfig(w http.ResponseWriter) {
|
||||
if encCfg.GUI.Password != "" {
|
||||
encCfg.GUI.Password = unchangedPassword
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(encCfg)
|
||||
}
|
||||
|
||||
func restPostConfig(req *http.Request) {
|
||||
var prevPassHash = cfg.GUI.Password
|
||||
err := json.NewDecoder(req.Body).Decode(&cfg)
|
||||
func restPostConfig(req *http.Request, m *model.Model) {
|
||||
var newCfg config.Configuration
|
||||
err := json.NewDecoder(req.Body).Decode(&newCfg)
|
||||
if err != nil {
|
||||
l.Warnln(err)
|
||||
} else {
|
||||
if cfg.GUI.Password == "" {
|
||||
if newCfg.GUI.Password == "" {
|
||||
// Leave it empty
|
||||
} else if cfg.GUI.Password != unchangedPassword {
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(cfg.GUI.Password), 0)
|
||||
} else if newCfg.GUI.Password == unchangedPassword {
|
||||
newCfg.GUI.Password = cfg.GUI.Password
|
||||
} else {
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
|
||||
if err != nil {
|
||||
l.Warnln(err)
|
||||
} else {
|
||||
cfg.GUI.Password = string(hash)
|
||||
newCfg.GUI.Password = string(hash)
|
||||
}
|
||||
} else {
|
||||
cfg.GUI.Password = prevPassHash
|
||||
}
|
||||
|
||||
// Figure out if any changes require a restart
|
||||
|
||||
if len(cfg.Repositories) != len(newCfg.Repositories) {
|
||||
configInSync = false
|
||||
} else {
|
||||
om := cfg.RepoMap()
|
||||
nm := newCfg.RepoMap()
|
||||
for id := range om {
|
||||
if !reflect.DeepEqual(om[id], nm[id]) {
|
||||
configInSync = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(cfg.Nodes) != len(newCfg.Nodes) {
|
||||
configInSync = false
|
||||
} else {
|
||||
om := cfg.NodeMap()
|
||||
nm := newCfg.NodeMap()
|
||||
for k := range om {
|
||||
if _, ok := nm[k]; !ok {
|
||||
configInSync = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if newCfg.Options.URAccepted > cfg.Options.URAccepted {
|
||||
// UR was enabled
|
||||
newCfg.Options.URAccepted = usageReportVersion
|
||||
err := sendUsageReport(m)
|
||||
if err != nil {
|
||||
l.Infoln("Usage report:", err)
|
||||
}
|
||||
go usageReportingLoop(m)
|
||||
} else if newCfg.Options.URAccepted < cfg.Options.URAccepted {
|
||||
// UR was disabled
|
||||
newCfg.Options.URAccepted = -1
|
||||
stopUsageReporting()
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cfg.Options, newCfg.Options) || !reflect.DeepEqual(cfg.GUI, newCfg.GUI) {
|
||||
configInSync = false
|
||||
}
|
||||
|
||||
// Activate and save
|
||||
|
||||
cfg = newCfg
|
||||
saveConfig()
|
||||
configInSync = false
|
||||
}
|
||||
}
|
||||
|
||||
func restGetConfigInSync(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
|
||||
}
|
||||
|
||||
func restPostRestart(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
flushResponse(`{"ok": "restarting"}`, w)
|
||||
go restart()
|
||||
}
|
||||
|
||||
func restPostReset(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
flushResponse(`{"ok": "resetting repos"}`, w)
|
||||
resetRepositories()
|
||||
go restart()
|
||||
}
|
||||
|
||||
func restPostShutdown(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
flushResponse(`{"ok": "shutting down"}`, w)
|
||||
go shutdown()
|
||||
}
|
||||
@@ -235,7 +331,7 @@ func restGetSystem(w http.ResponseWriter) {
|
||||
res["goroutines"] = runtime.NumGoroutine()
|
||||
res["alloc"] = m.Alloc
|
||||
res["sys"] = m.Sys
|
||||
res["tilde"] = expandTilde("~/")
|
||||
res["tilde"] = expandTilde("~")
|
||||
if cfg.Options.GlobalAnnEnabled && discoverer != nil {
|
||||
res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
|
||||
}
|
||||
@@ -247,11 +343,12 @@ func restGetSystem(w http.ResponseWriter) {
|
||||
cpuUsageLock.RUnlock()
|
||||
res["cpuPercent"] = cpusum / 10
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func restGetErrors(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
guiErrorsMut.Lock()
|
||||
json.NewEncoder(w).Encode(guiErrors)
|
||||
guiErrorsMut.Unlock()
|
||||
@@ -288,9 +385,15 @@ func restPostDiscoveryHint(r *http.Request) {
|
||||
}
|
||||
|
||||
func restGetDiscovery(w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(discoverer.All())
|
||||
}
|
||||
|
||||
func restGetReport(w http.ResponseWriter, m *model.Model) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(reportData(m))
|
||||
}
|
||||
|
||||
func getQR(w http.ResponseWriter, params martini.Params) {
|
||||
code, err := qr.Encode(params["text"], qr.M)
|
||||
if err != nil {
|
||||
@@ -304,6 +407,10 @@ func getQR(w http.ResponseWriter, params martini.Params) {
|
||||
|
||||
func basic(username string, passhash string) http.HandlerFunc {
|
||||
return func(res http.ResponseWriter, req *http.Request) {
|
||||
if validAPIKey(req.Header.Get("X-API-Key")) {
|
||||
return
|
||||
}
|
||||
|
||||
error := func() {
|
||||
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
|
||||
res.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
|
||||
@@ -340,3 +447,33 @@ func basic(username string, passhash string) http.HandlerFunc {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func validAPIKey(k string) bool {
|
||||
return len(apiKey) > 0 && k == apiKey
|
||||
}
|
||||
|
||||
func embeddedStatic() func(http.ResponseWriter, *http.Request, *log.Logger) {
|
||||
var modt = time.Now().UTC().Format(http.TimeFormat)
|
||||
|
||||
return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
|
||||
file := req.URL.Path
|
||||
|
||||
if file[0] == '/' {
|
||||
file = file[1:]
|
||||
}
|
||||
|
||||
bs, ok := auto.Assets[file]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
mtype := mime.TypeByExtension(filepath.Ext(req.URL.Path))
|
||||
if len(mtype) != 0 {
|
||||
res.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
res.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
res.Header().Set("Last-Modified", modt)
|
||||
|
||||
res.Write(bs)
|
||||
}
|
||||
}
|
||||
|
||||
115
cmd/syncthing/gui_csrf.go
Normal file
115
cmd/syncthing/gui_csrf.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/osutil"
|
||||
)
|
||||
|
||||
var csrfTokens []string
|
||||
var csrfMut sync.Mutex
|
||||
|
||||
// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
|
||||
// the request with 403. For / and /index.html, set a new CSRF cookie if none
|
||||
// is currently set.
|
||||
func csrfMiddleware(w http.ResponseWriter, r *http.Request) {
|
||||
if validAPIKey(r.Header.Get("X-API-Key")) {
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(r.URL.Path, "/rest/") {
|
||||
token := r.Header.Get("X-CSRF-Token")
|
||||
if !validCsrfToken(token) {
|
||||
http.Error(w, "CSRF Error", 403)
|
||||
}
|
||||
} else if r.URL.Path == "/" || r.URL.Path == "/index.html" {
|
||||
cookie, err := r.Cookie("CSRF-Token")
|
||||
if err != nil || !validCsrfToken(cookie.Value) {
|
||||
cookie = &http.Cookie{
|
||||
Name: "CSRF-Token",
|
||||
Value: newCsrfToken(),
|
||||
}
|
||||
http.SetCookie(w, cookie)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func validCsrfToken(token string) bool {
|
||||
csrfMut.Lock()
|
||||
defer csrfMut.Unlock()
|
||||
for _, t := range csrfTokens {
|
||||
if t == token {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func newCsrfToken() string {
|
||||
bs := make([]byte, 30)
|
||||
_, err := rand.Reader.Read(bs)
|
||||
if err != nil {
|
||||
l.Fatalln(err)
|
||||
}
|
||||
|
||||
token := base64.StdEncoding.EncodeToString(bs)
|
||||
|
||||
csrfMut.Lock()
|
||||
csrfTokens = append(csrfTokens, token)
|
||||
if len(csrfTokens) > 10 {
|
||||
csrfTokens = csrfTokens[len(csrfTokens)-10:]
|
||||
}
|
||||
defer csrfMut.Unlock()
|
||||
|
||||
saveCsrfTokens()
|
||||
|
||||
return token
|
||||
}
|
||||
|
||||
func saveCsrfTokens() {
|
||||
name := filepath.Join(confDir, "csrftokens.txt")
|
||||
tmp := fmt.Sprintf("%s.tmp.%d", name, time.Now().UnixNano())
|
||||
|
||||
f, err := os.OpenFile(tmp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.Remove(tmp)
|
||||
|
||||
for _, t := range csrfTokens {
|
||||
_, err := fmt.Fprintln(f, t)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
osutil.Rename(tmp, name)
|
||||
}
|
||||
|
||||
func loadCsrfTokens() {
|
||||
name := filepath.Join(confDir, "csrftokens.txt")
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
csrfTokens = append(csrfTokens, s.Text())
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
//+build guidev
|
||||
|
||||
package main
|
||||
|
||||
import "github.com/codegangsta/martini"
|
||||
|
||||
func embeddedStatic() interface{} {
|
||||
return martini.Static("gui")
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
//+build !guidev
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"mime"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/auto"
|
||||
)
|
||||
|
||||
func embeddedStatic() interface{} {
|
||||
var modt = time.Now().UTC().Format(http.TimeFormat)
|
||||
|
||||
return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
|
||||
file := req.URL.Path
|
||||
|
||||
if file[0] == '/' {
|
||||
file = file[1:]
|
||||
}
|
||||
|
||||
bs, ok := auto.Assets[file]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
mtype := mime.TypeByExtension(filepath.Ext(req.URL.Path))
|
||||
if len(mtype) != 0 {
|
||||
res.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
res.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
res.Header().Set("Last-Modified", modt)
|
||||
|
||||
res.Write(bs)
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//+build solaris
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//+build !windows,!solaris
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//+build locktrace
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -13,6 +18,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"runtime/pprof"
|
||||
@@ -24,6 +30,7 @@ import (
|
||||
"github.com/calmh/syncthing/discover"
|
||||
"github.com/calmh/syncthing/logger"
|
||||
"github.com/calmh/syncthing/model"
|
||||
"github.com/calmh/syncthing/osutil"
|
||||
"github.com/calmh/syncthing/protocol"
|
||||
"github.com/calmh/syncthing/upnp"
|
||||
"github.com/juju/ratelimit"
|
||||
@@ -31,6 +38,7 @@ import (
|
||||
|
||||
var (
|
||||
Version = "unknown-dev"
|
||||
BuildEnv = "default"
|
||||
BuildStamp = "0"
|
||||
BuildDate time.Time
|
||||
BuildHost = "unknown"
|
||||
@@ -41,14 +49,22 @@ var (
|
||||
var l = logger.DefaultLogger
|
||||
|
||||
func init() {
|
||||
if Version != "unknown-dev" {
|
||||
// If not a generic dev build, version string should come from git describe
|
||||
exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-\d+-g[0-9a-f]+)?(-dirty)?$`)
|
||||
if !exp.MatchString(Version) {
|
||||
l.Fatalf("Invalid version string %q;\n\tdoes not match regexp %v", Version, exp)
|
||||
}
|
||||
}
|
||||
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf("syncthing %s (%s %s-%s) %s@%s %s", Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
LongVersion = fmt.Sprintf("syncthing %s (%s %s-%s %s) %s@%s %s", Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildEnv, BuildUser, BuildHost, date)
|
||||
|
||||
if os.Getenv("STTRACE") != "" {
|
||||
l.SetFlags(log.Ltime | log.Ldate | log.Lmicroseconds | log.Lshortfile)
|
||||
logFlags = log.Ltime | log.Ldate | log.Lmicroseconds | log.Lshortfile
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,6 +72,7 @@ var (
|
||||
cfg config.Configuration
|
||||
myID string
|
||||
confDir string
|
||||
logFlags int = log.Ltime
|
||||
rateBucket *ratelimit.Bucket
|
||||
stop = make(chan bool)
|
||||
discoverer *discover.Discoverer
|
||||
@@ -63,7 +80,19 @@ var (
|
||||
|
||||
const (
|
||||
usage = "syncthing [options]"
|
||||
extraUsage = `The following enviroment variables are interpreted by syncthing:
|
||||
extraUsage = `The value for the -logflags option is a sum of the following:
|
||||
|
||||
1 Date
|
||||
2 Time
|
||||
4 Microsecond time
|
||||
8 Long filename
|
||||
16 Short filename
|
||||
|
||||
I.e. to prefix each log line with date and time, set -logflags=3 (1 + 2 from
|
||||
above). The value 0 is used to disable all of the above. The default is to
|
||||
show time only (2).
|
||||
|
||||
The following enviroment variables are interpreted by syncthing:
|
||||
|
||||
STNORESTART Do not attempt to restart when requested to, instead just exit.
|
||||
Set this variable when running under a service manager such as
|
||||
@@ -84,9 +113,17 @@ const (
|
||||
- "xdr" (the xdr package)
|
||||
- "all" (all of the above)
|
||||
|
||||
STCPUPROFILE Write CPU profile to the specified file.`
|
||||
STCPUPROFILE Write CPU profile to the specified file.
|
||||
|
||||
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.
|
||||
|
||||
STDEADLOCKTIMEOUT Alter deadlock detection timeout (seconds; default 1200).`
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func main() {
|
||||
var reset bool
|
||||
var showVersion bool
|
||||
@@ -95,19 +132,17 @@ func main() {
|
||||
flag.BoolVar(&reset, "reset", false, "Prepare to resync from cluster")
|
||||
flag.BoolVar(&showVersion, "version", false, "Show version")
|
||||
flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
|
||||
flag.IntVar(&logFlags, "logflags", logFlags, "Set log flags")
|
||||
flag.Usage = usageFor(flag.CommandLine, usage, extraUsage)
|
||||
flag.Parse()
|
||||
|
||||
if len(os.Getenv("STRESTART")) > 0 {
|
||||
// Give the parent process time to exit and release sockets etc.
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
if showVersion {
|
||||
fmt.Println(LongVersion)
|
||||
return
|
||||
}
|
||||
|
||||
l.SetFlags(logFlags)
|
||||
|
||||
if doUpgrade {
|
||||
err := upgrade()
|
||||
if err != nil {
|
||||
@@ -133,7 +168,12 @@ func main() {
|
||||
// continue. We don't much care if this fails at this point, we will
|
||||
// be checking that later.
|
||||
|
||||
oldDefault := expandTilde("~/.syncthing")
|
||||
var oldDefault string
|
||||
if runtime.GOOS == "windows" {
|
||||
oldDefault = filepath.Join(os.Getenv("AppData"), "Syncthing")
|
||||
} else {
|
||||
oldDefault = expandTilde("~/.syncthing")
|
||||
}
|
||||
if _, err := os.Stat(oldDefault); err == nil {
|
||||
os.MkdirAll(filepath.Dir(confDir), 0700)
|
||||
if err := os.Rename(oldDefault, confDir); err == nil {
|
||||
@@ -200,9 +240,9 @@ func main() {
|
||||
l.FatalErr(err)
|
||||
cfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)
|
||||
|
||||
port, err = getFreePort("", 22000)
|
||||
port, err = getFreePort("0.0.0.0", 22000)
|
||||
l.FatalErr(err)
|
||||
cfg.Options.ListenAddress = []string{fmt.Sprintf(":%d", port)}
|
||||
cfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}
|
||||
|
||||
saveConfig()
|
||||
l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
|
||||
@@ -224,6 +264,10 @@ func main() {
|
||||
}()
|
||||
}
|
||||
|
||||
if len(os.Getenv("STRESTART")) > 0 {
|
||||
waitForParentExit()
|
||||
}
|
||||
|
||||
// The TLS configuration is used for both the listening socket and outgoing
|
||||
// connections.
|
||||
|
||||
@@ -246,12 +290,29 @@ func main() {
|
||||
|
||||
m := model.NewModel(confDir, &cfg, "syncthing", Version)
|
||||
|
||||
for _, repo := range cfg.Repositories {
|
||||
nextRepo:
|
||||
for i, repo := range cfg.Repositories {
|
||||
if repo.Invalid != "" {
|
||||
continue
|
||||
}
|
||||
dir := expandTilde(repo.Directory)
|
||||
m.AddRepo(repo.ID, dir, repo.Nodes)
|
||||
|
||||
repo.Directory = expandTilde(repo.Directory)
|
||||
|
||||
// Safety check. If the cached index contains files but the repository
|
||||
// doesn't exist, we have a problem. We would assume that all files
|
||||
// have been deleted which might not be the case, so abort instead.
|
||||
|
||||
id := fmt.Sprintf("%x", sha1.Sum([]byte(repo.Directory)))
|
||||
idxFile := filepath.Join(confDir, id+".idx.gz")
|
||||
if _, err := os.Stat(idxFile); err == nil {
|
||||
if fi, err := os.Stat(repo.Directory); err != nil || !fi.IsDir() {
|
||||
cfg.Repositories[i].Invalid = "repo directory missing"
|
||||
continue nextRepo
|
||||
}
|
||||
}
|
||||
|
||||
ensureDir(repo.Directory, -1)
|
||||
m.AddRepo(repo)
|
||||
}
|
||||
|
||||
// GUI
|
||||
@@ -279,7 +340,7 @@ func main() {
|
||||
}
|
||||
|
||||
l.Infof("Starting web GUI on %s://%s:%d/", proto, hostShow, addr.Port)
|
||||
err := startGUI(cfg.GUI, m)
|
||||
err := startGUI(cfg.GUI, os.Getenv("STGUIASSETS"), m)
|
||||
if err != nil {
|
||||
l.Fatalln("Cannot start GUI:", err)
|
||||
}
|
||||
@@ -294,41 +355,40 @@ func main() {
|
||||
|
||||
l.Infoln("Populating repository index")
|
||||
m.LoadIndexes(confDir)
|
||||
|
||||
for _, repo := range cfg.Repositories {
|
||||
if repo.Invalid != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
dir := expandTilde(repo.Directory)
|
||||
|
||||
// Safety check. If the cached index contains files but the repository
|
||||
// doesn't exist, we have a problem. We would assume that all files
|
||||
// have been deleted which might not be the case, so abort instead.
|
||||
|
||||
if files, _, _ := m.LocalSize(repo.ID); files > 0 {
|
||||
if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
|
||||
l.Warnf("Configured repository %q has index but directory %q is missing; not starting.", repo.ID, repo.Directory)
|
||||
l.Fatalf("Ensure that directory is present or remove repository from configuration.")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that repository directories exist for newly configured repositories.
|
||||
ensureDir(dir, -1)
|
||||
}
|
||||
|
||||
m.CleanRepos()
|
||||
m.ScanRepos()
|
||||
m.SaveIndexes(confDir)
|
||||
|
||||
// Remove all .idx* files that don't belong to an active repo.
|
||||
|
||||
validIndexes := make(map[string]bool)
|
||||
for _, repo := range cfg.Repositories {
|
||||
dir := expandTilde(repo.Directory)
|
||||
id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
|
||||
validIndexes[id] = true
|
||||
}
|
||||
|
||||
allIndexes, err := filepath.Glob(filepath.Join(confDir, "*.idx*"))
|
||||
if err == nil {
|
||||
for _, idx := range allIndexes {
|
||||
bn := filepath.Base(idx)
|
||||
fs := strings.Split(bn, ".")
|
||||
if len(fs) > 1 {
|
||||
if _, ok := validIndexes[fs[0]]; !ok {
|
||||
l.Infoln("Removing old index", bn)
|
||||
os.Remove(idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UPnP
|
||||
|
||||
var externalPort = 0
|
||||
if cfg.Options.UPnPEnabled {
|
||||
// We seed the random number generator with the node ID to get a
|
||||
// repeatable sequence of random external ports.
|
||||
rand.Seed(certSeed(cert.Certificate[0]))
|
||||
externalPort = setupUPnP()
|
||||
externalPort = setupUPnP(rand.NewSource(certSeed(cert.Certificate[0])))
|
||||
}
|
||||
|
||||
// Routine to connect out to configured nodes
|
||||
@@ -360,10 +420,46 @@ func main() {
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
for _, node := range cfg.Nodes {
|
||||
if len(node.Name) > 0 {
|
||||
l.Infof("Node %s is %q at %v", node.NodeID, node.Name, node.Addresses)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Options.URAccepted > 0 && cfg.Options.URAccepted < usageReportVersion {
|
||||
l.Infoln("Anonymous usage report has changed; revoking acceptance")
|
||||
cfg.Options.URAccepted = 0
|
||||
}
|
||||
if cfg.Options.URAccepted >= usageReportVersion {
|
||||
go usageReportingLoop(m)
|
||||
go func() {
|
||||
time.Sleep(10 * time.Minute)
|
||||
err := sendUsageReport(m)
|
||||
if err != nil {
|
||||
l.Infoln("Usage report:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
<-stop
|
||||
l.Okln("Exiting")
|
||||
}
|
||||
|
||||
func setupUPnP() int {
|
||||
func waitForParentExit() {
|
||||
l.Infoln("Waiting for parent to exit...")
|
||||
// Wait for the listen address to become free, indicating that the parent has exited.
|
||||
for {
|
||||
ln, err := net.Listen("tcp", cfg.Options.ListenAddress[0])
|
||||
if err == nil {
|
||||
ln.Close()
|
||||
break
|
||||
}
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
l.Okln("Continuing")
|
||||
}
|
||||
|
||||
func setupUPnP(r rand.Source) int {
|
||||
var externalPort = 0
|
||||
if len(cfg.Options.ListenAddress) == 1 {
|
||||
_, portStr, err := net.SplitHostPort(cfg.Options.ListenAddress[0])
|
||||
@@ -375,7 +471,7 @@ func setupUPnP() int {
|
||||
igd, err := upnp.Discover()
|
||||
if err == nil {
|
||||
for i := 0; i < 10; i++ {
|
||||
r := 1024 + rand.Intn(65535-1024)
|
||||
r := 1024 + int(r.Int63()%(65535-1024))
|
||||
err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", 0)
|
||||
if err == nil {
|
||||
externalPort = r
|
||||
@@ -387,7 +483,10 @@ func setupUPnP() int {
|
||||
l.Warnln("Failed to create UPnP port mapping")
|
||||
}
|
||||
} else {
|
||||
l.Infof("No UPnP IGD device found, no port mapping created (%v)", err)
|
||||
l.Infof("No UPnP gateway detected")
|
||||
if debugNet {
|
||||
l.Debugf("UPnP: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -471,7 +570,7 @@ func saveConfigLoop(cfgFile string) {
|
||||
continue
|
||||
}
|
||||
|
||||
err = model.Rename(cfgFile+".tmp", cfgFile)
|
||||
err = osutil.Rename(cfgFile+".tmp", cfgFile)
|
||||
if err != nil {
|
||||
l.Warnln(err)
|
||||
}
|
||||
@@ -521,6 +620,7 @@ func listenConnect(myID string, m *model.Model, tlsCfg *tls.Config) {
|
||||
|
||||
// Connect
|
||||
go func() {
|
||||
var delay time.Duration = 1 * time.Second
|
||||
for {
|
||||
nextNode:
|
||||
for _, nodeCfg := range cfg.Nodes {
|
||||
@@ -571,7 +671,11 @@ func listenConnect(myID string, m *model.Model, tlsCfg *tls.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(cfg.Options.ReconnectIntervalS) * time.Second)
|
||||
time.Sleep(delay)
|
||||
delay *= 2
|
||||
if maxD := time.Duration(cfg.Options.ReconnectIntervalS) * time.Second; delay > maxD {
|
||||
delay = maxD
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -604,6 +708,9 @@ next:
|
||||
wr = &limitedWriter{conn, rateBucket}
|
||||
}
|
||||
protoConn := protocol.NewConnection(remoteID, conn, wr, m)
|
||||
|
||||
l.Infof("Connection to %s established at %v", remoteID, conn.RemoteAddr())
|
||||
|
||||
m.AddConnection(conn, protoConn)
|
||||
continue next
|
||||
}
|
||||
@@ -615,7 +722,7 @@ next:
|
||||
}
|
||||
|
||||
func discovery(extPort int) *discover.Discoverer {
|
||||
disc, err := discover.NewDiscoverer(myID, cfg.Options.ListenAddress)
|
||||
disc, err := discover.NewDiscoverer(myID, cfg.Options.ListenAddress, cfg.Options.LocalAnnPort)
|
||||
if err != nil {
|
||||
l.Warnf("No discovery possible (%v)", err)
|
||||
return nil
|
||||
@@ -648,7 +755,7 @@ func ensureDir(dir string, mode int) {
|
||||
func getDefaultConfDir() string {
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
return filepath.Join(os.Getenv("AppData"), "Syncthing")
|
||||
return filepath.Join(os.Getenv("LocalAppData"), "Syncthing")
|
||||
|
||||
case "darwin":
|
||||
return expandTilde("~/Library/Application Support/Syncthing")
|
||||
@@ -663,7 +770,12 @@ func getDefaultConfDir() string {
|
||||
}
|
||||
|
||||
func expandTilde(p string) string {
|
||||
if runtime.GOOS == "windows" || !strings.HasPrefix(p, "~/") {
|
||||
if p == "~" {
|
||||
return getHomeDir()
|
||||
}
|
||||
|
||||
p = filepath.FromSlash(p)
|
||||
if !strings.HasPrefix(p, fmt.Sprintf("~%c", os.PathSeparator)) {
|
||||
return p
|
||||
}
|
||||
|
||||
|
||||
25
cmd/syncthing/memsize_darwin.go
Normal file
25
cmd/syncthing/memsize_darwin.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func memorySize() (uint64, error) {
|
||||
cmd := exec.Command("sysctl", "hw.memsize")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
fs := strings.Fields(string(out))
|
||||
if len(fs) != 2 {
|
||||
return 0, errors.New("sysctl parse error")
|
||||
}
|
||||
bytes, err := strconv.ParseUint(fs[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
33
cmd/syncthing/memsize_linux.go
Normal file
33
cmd/syncthing/memsize_linux.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func memorySize() (uint64, error) {
|
||||
f, err := os.Open("/proc/meminfo")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
if !s.Scan() {
|
||||
return 0, errors.New("/proc/meminfo parse error 1")
|
||||
}
|
||||
|
||||
l := s.Text()
|
||||
fs := strings.Fields(l)
|
||||
if len(fs) != 3 || fs[2] != "kB" {
|
||||
return 0, errors.New("/proc/meminfo parse error 2")
|
||||
}
|
||||
|
||||
kb, err := strconv.ParseUint(fs[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return kb * 1024, nil
|
||||
}
|
||||
22
cmd/syncthing/memsize_solaris.go
Normal file
22
cmd/syncthing/memsize_solaris.go
Normal file
@@ -0,0 +1,22 @@
|
||||
// +build solaris
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func memorySize() (uint64, error) {
|
||||
cmd := exec.Command("prtconf", "-m")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
mb, err := strconv.ParseUint(string(out), 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return mb * 1024 * 1024, nil
|
||||
}
|
||||
9
cmd/syncthing/memsize_unimpl.go
Normal file
9
cmd/syncthing/memsize_unimpl.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// +build freebsd
|
||||
|
||||
package main
|
||||
|
||||
import "errors"
|
||||
|
||||
func memorySize() (uint64, error) {
|
||||
return 0, errors.New("not implemented")
|
||||
}
|
||||
25
cmd/syncthing/memsize_windows.go
Normal file
25
cmd/syncthing/memsize_windows.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel32, _ = syscall.LoadLibrary("kernel32.dll")
|
||||
globalMemoryStatusEx, _ = syscall.GetProcAddress(kernel32, "GlobalMemoryStatusEx")
|
||||
)
|
||||
|
||||
func memorySize() (uint64, error) {
|
||||
var memoryStatusEx [64]byte
|
||||
binary.LittleEndian.PutUint32(memoryStatusEx[:], 64)
|
||||
p := uintptr(unsafe.Pointer(&memoryStatusEx[0]))
|
||||
|
||||
ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, p, 0, 0)
|
||||
if ret == 0 {
|
||||
return 0, callErr
|
||||
}
|
||||
|
||||
return binary.LittleEndian.Uint64(memoryStatusEx[8:]), nil
|
||||
}
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -11,6 +15,7 @@ import (
|
||||
"encoding/binary"
|
||||
"encoding/pem"
|
||||
"math/big"
|
||||
mr "math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -50,7 +55,7 @@ func newCertificate(dir string, prefix string) {
|
||||
notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: new(big.Int).SetInt64(0),
|
||||
SerialNumber: new(big.Int).SetInt64(mr.Int63()),
|
||||
Subject: pkix.Name{
|
||||
CommonName: tlsName,
|
||||
},
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
// +build !windows
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build !solaris,!windows
|
||||
|
||||
package main
|
||||
|
||||
@@ -6,6 +10,7 @@ import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -14,8 +19,10 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"bytes"
|
||||
"bitbucket.org/kardianos/osext"
|
||||
)
|
||||
|
||||
@@ -33,6 +40,10 @@ type githubAsset struct {
|
||||
var GoArchExtra string // "", "v5", "v6", "v7"
|
||||
|
||||
func upgrade() error {
|
||||
if runtime.GOOS == "windows" {
|
||||
return errors.New("Upgrade currently unsupported on Windows")
|
||||
}
|
||||
|
||||
path, err := osext.Executable()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -52,14 +63,15 @@ func upgrade() error {
|
||||
}
|
||||
rel := rels[0]
|
||||
|
||||
if rel.Tag > Version {
|
||||
l.Infof("Attempting upgrade to %s...", rel.Tag)
|
||||
} else if rel.Tag == Version {
|
||||
l.Okf("Already running the latest version, %s. Not upgrading.", Version)
|
||||
return nil
|
||||
} else {
|
||||
switch compareVersions(rel.Tag, Version) {
|
||||
case -1:
|
||||
l.Okf("Current version %s is newer than latest release %s. Not upgrading.", Version, rel.Tag)
|
||||
return nil
|
||||
case 0:
|
||||
l.Okf("Already running the latest version, %s. Not upgrading.", Version)
|
||||
return nil
|
||||
default:
|
||||
l.Infof("Attempting upgrade to %s...", rel.Tag)
|
||||
}
|
||||
|
||||
expectedRelease := fmt.Sprintf("syncthing-%s-%s%s-%s.", runtime.GOOS, runtime.GOARCH, GoArchExtra, rel.Tag)
|
||||
@@ -147,3 +159,18 @@ func readTarGZ(url string, dir string) (string, error) {
|
||||
|
||||
return "", fmt.Errorf("No upgrade found")
|
||||
}
|
||||
|
||||
func compareVersions(a, b string) int {
|
||||
return bytes.Compare(versionParts(a), versionParts(b))
|
||||
}
|
||||
|
||||
func versionParts(v string) []byte {
|
||||
parts := strings.Split(v, "-")
|
||||
fields := strings.Split(parts[0], ".")
|
||||
res := make([]byte, len(fields))
|
||||
for i, s := range fields {
|
||||
v, _ := strconv.Atoi(s)
|
||||
res[i] = byte(v)
|
||||
}
|
||||
return res
|
||||
}
|
||||
31
cmd/syncthing/upgrade_test.go
Normal file
31
cmd/syncthing/upgrade_test.go
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import "testing"
|
||||
|
||||
var testcases = []struct {
|
||||
a, b string
|
||||
r int
|
||||
}{
|
||||
{"0.1.2", "0.1.2", 0},
|
||||
{"0.1.3", "0.1.2", 1},
|
||||
{"0.1.1", "0.1.2", -1},
|
||||
{"0.3.0", "0.1.2", 1},
|
||||
{"0.0.9", "0.1.2", -1},
|
||||
{"1.1.2", "0.1.2", 1},
|
||||
{"0.1.2", "1.1.2", -1},
|
||||
{"0.1.10", "0.1.9", 1},
|
||||
{"0.10.0", "0.2.0", 1},
|
||||
{"30.10.0", "4.9.0", 1},
|
||||
}
|
||||
|
||||
func TestCompareVersions(t *testing.T) {
|
||||
for _, tc := range testcases {
|
||||
if r := compareVersions(tc.a, tc.b); r != tc.r {
|
||||
t.Errorf("compareVersions(%q, %q): %d != %d", tc.a, tc.b, r, tc.r)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build windows
|
||||
// +build windows solaris
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
133
cmd/syncthing/usage_report.go
Normal file
133
cmd/syncthing/usage_report.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/model"
|
||||
)
|
||||
|
||||
// Current version number of the usage report, for acceptance purposes. If
|
||||
// fields are added or changed this integer must be incremented so that users
|
||||
// are prompted for acceptance of the new report.
|
||||
const usageReportVersion = 1
|
||||
|
||||
var stopUsageReportingCh = make(chan struct{})
|
||||
|
||||
func reportData(m *model.Model) map[string]interface{} {
|
||||
res := make(map[string]interface{})
|
||||
res["uniqueID"] = strings.ToLower(certID([]byte(myID)))[:6]
|
||||
res["version"] = Version
|
||||
res["longVersion"] = LongVersion
|
||||
res["platform"] = runtime.GOOS + "-" + runtime.GOARCH
|
||||
res["numRepos"] = len(cfg.Repositories)
|
||||
res["numNodes"] = len(cfg.Nodes)
|
||||
|
||||
var totFiles, maxFiles int
|
||||
var totBytes, maxBytes int64
|
||||
for _, repo := range cfg.Repositories {
|
||||
files, _, bytes := m.GlobalSize(repo.ID)
|
||||
totFiles += files
|
||||
totBytes += bytes
|
||||
if files > maxFiles {
|
||||
maxFiles = files
|
||||
}
|
||||
if bytes > maxBytes {
|
||||
maxBytes = bytes
|
||||
}
|
||||
}
|
||||
|
||||
res["totFiles"] = totFiles
|
||||
res["repoMaxFiles"] = maxFiles
|
||||
res["totMiB"] = totBytes / 1024 / 1024
|
||||
res["repoMaxMiB"] = maxBytes / 1024 / 1024
|
||||
|
||||
var mem runtime.MemStats
|
||||
runtime.ReadMemStats(&mem)
|
||||
res["memoryUsageMiB"] = mem.Sys / 1024 / 1024
|
||||
|
||||
var perf float64
|
||||
for i := 0; i < 5; i++ {
|
||||
p := cpuBench()
|
||||
if p > perf {
|
||||
perf = p
|
||||
}
|
||||
}
|
||||
res["sha256Perf"] = perf
|
||||
|
||||
bytes, err := memorySize()
|
||||
if err == nil {
|
||||
res["memorySize"] = bytes / 1024 / 1024
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func sendUsageReport(m *model.Model) error {
|
||||
d := reportData(m)
|
||||
var b bytes.Buffer
|
||||
json.NewEncoder(&b).Encode(d)
|
||||
|
||||
var client = http.DefaultClient
|
||||
if BuildEnv == "android" {
|
||||
// This works around the lack of DNS resolution on Android... :(
|
||||
tr := &http.Transport{
|
||||
Dial: func(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, "194.126.249.13:443")
|
||||
},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
}
|
||||
_, err := client.Post("https://data.syncthing.net/newdata", "application/json", &b)
|
||||
return err
|
||||
}
|
||||
|
||||
func usageReportingLoop(m *model.Model) {
|
||||
l.Infoln("Starting usage reporting")
|
||||
t := time.NewTicker(86400 * time.Second)
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-stopUsageReportingCh:
|
||||
break loop
|
||||
case <-t.C:
|
||||
err := sendUsageReport(m)
|
||||
if err != nil {
|
||||
l.Infoln("Usage report:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
l.Infoln("Stopping usage reporting")
|
||||
}
|
||||
|
||||
func stopUsageReporting() {
|
||||
select {
|
||||
case stopUsageReportingCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Returns CPU performance as a measure of single threaded SHA-256 MiB/s
|
||||
func cpuBench() float64 {
|
||||
chunkSize := 100 * 1 << 10
|
||||
h := sha256.New()
|
||||
bs := make([]byte, chunkSize)
|
||||
rand.Reader.Read(bs)
|
||||
|
||||
t0 := time.Now()
|
||||
b := 0
|
||||
for time.Since(t0) < 125*time.Millisecond {
|
||||
h.Write(bs)
|
||||
b += chunkSize
|
||||
}
|
||||
h.Sum(nil)
|
||||
d := time.Since(t0)
|
||||
return float64(int(float64(b)/d.Seconds()/(1<<20)*100)) / 100
|
||||
}
|
||||
135
config/config.go
135
config/config.go
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package config implements reading and writing of the syncthing configuration file.
|
||||
package config
|
||||
|
||||
@@ -7,12 +11,14 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"code.google.com/p/go.crypto/bcrypt"
|
||||
"github.com/calmh/syncthing/logger"
|
||||
"github.com/calmh/syncthing/scanner"
|
||||
)
|
||||
|
||||
var l = logger.DefaultLogger
|
||||
@@ -26,13 +32,85 @@ type Configuration struct {
|
||||
XMLName xml.Name `xml:"configuration" json:"-"`
|
||||
}
|
||||
|
||||
// SyncOrderPattern allows a user to prioritize file downloading based on a
|
||||
// regular expression. If a file matches the Pattern the Priority will be
|
||||
// assigned to the file. If a file matches more than one Pattern the
|
||||
// Priorities are summed. This allows a user to, for example, prioritize files
|
||||
// in a directory, as well as prioritize based on file type. The higher the
|
||||
// priority the "sooner" a file will be downloaded. Files can be deprioritized
|
||||
// by giving them a negative priority. While Priority is represented as an
|
||||
// integer, the expected range is something like -1000 to 1000.
|
||||
type SyncOrderPattern struct {
|
||||
Pattern string `xml:"pattern,attr"`
|
||||
Priority int `xml:"priority,attr"`
|
||||
compiledPattern *regexp.Regexp
|
||||
}
|
||||
|
||||
func (s *SyncOrderPattern) CompiledPattern() *regexp.Regexp {
|
||||
if s.compiledPattern == nil {
|
||||
re, err := regexp.Compile(s.Pattern)
|
||||
if err != nil {
|
||||
l.Warnln("Could not compile regexp (" + s.Pattern + "): " + err.Error())
|
||||
s.compiledPattern = regexp.MustCompile("^\\0$")
|
||||
} else {
|
||||
s.compiledPattern = re
|
||||
}
|
||||
}
|
||||
return s.compiledPattern
|
||||
}
|
||||
|
||||
type RepositoryConfiguration struct {
|
||||
ID string `xml:"id,attr"`
|
||||
Directory string `xml:"directory,attr"`
|
||||
Nodes []NodeConfiguration `xml:"node"`
|
||||
ReadOnly bool `xml:"ro,attr"`
|
||||
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
|
||||
nodeIDs []string
|
||||
ID string `xml:"id,attr"`
|
||||
Directory string `xml:"directory,attr"`
|
||||
Nodes []NodeConfiguration `xml:"node"`
|
||||
ReadOnly bool `xml:"ro,attr"`
|
||||
IgnorePerms bool `xml:"ignorePerms,attr"`
|
||||
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
|
||||
Versioning VersioningConfiguration `xml:"versioning"`
|
||||
SyncOrderPatterns []SyncOrderPattern `xml:"syncorder>pattern"`
|
||||
|
||||
nodeIDs []string
|
||||
}
|
||||
|
||||
type VersioningConfiguration struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Params map[string]string
|
||||
}
|
||||
|
||||
type InternalVersioningConfiguration struct {
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
Params []InternalParam `xml:"param"`
|
||||
}
|
||||
|
||||
type InternalParam struct {
|
||||
Key string `xml:"key,attr"`
|
||||
Val string `xml:"val,attr"`
|
||||
}
|
||||
|
||||
func (c *VersioningConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
var tmp InternalVersioningConfiguration
|
||||
tmp.Type = c.Type
|
||||
for k, v := range c.Params {
|
||||
tmp.Params = append(tmp.Params, InternalParam{k, v})
|
||||
}
|
||||
|
||||
return e.EncodeElement(tmp, start)
|
||||
|
||||
}
|
||||
|
||||
func (c *VersioningConfiguration) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var tmp InternalVersioningConfiguration
|
||||
err := d.DecodeElement(&tmp, &start)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Type = tmp.Type
|
||||
c.Params = make(map[string]string, len(tmp.Params))
|
||||
for _, p := range tmp.Params {
|
||||
c.Params[p.Key] = p.Val
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RepositoryConfiguration) NodeIDs() []string {
|
||||
@@ -44,6 +122,21 @@ func (r *RepositoryConfiguration) NodeIDs() []string {
|
||||
return r.nodeIDs
|
||||
}
|
||||
|
||||
func (r RepositoryConfiguration) FileRanker() func(scanner.File) int {
|
||||
if len(r.SyncOrderPatterns) <= 0 {
|
||||
return nil
|
||||
}
|
||||
return func(f scanner.File) int {
|
||||
ret := 0
|
||||
for _, v := range r.SyncOrderPatterns {
|
||||
if v.CompiledPattern().MatchString(f.Name) {
|
||||
ret += v.Priority
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
type NodeConfiguration struct {
|
||||
NodeID string `xml:"id,attr"`
|
||||
Name string `xml:"name,attr,omitempty"`
|
||||
@@ -55,6 +148,7 @@ type OptionsConfiguration struct {
|
||||
GlobalAnnServer string `xml:"globalAnnounceServer" default:"announce.syncthing.net:22025"`
|
||||
GlobalAnnEnabled bool `xml:"globalAnnounceEnabled" default:"true"`
|
||||
LocalAnnEnabled bool `xml:"localAnnounceEnabled" default:"true"`
|
||||
LocalAnnPort int `xml:"localAnnouncePort" default:"21025"`
|
||||
ParallelRequests int `xml:"parallelRequests" default:"16"`
|
||||
MaxSendKbps int `xml:"maxSendKbps"`
|
||||
RescanIntervalS int `xml:"rescanIntervalS" default:"60"`
|
||||
@@ -62,7 +156,10 @@ type OptionsConfiguration struct {
|
||||
MaxChangeKbps int `xml:"maxChangeKbps" default:"10000"`
|
||||
StartBrowser bool `xml:"startBrowser" default:"true"`
|
||||
UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
|
||||
URAccepted int `xml:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)
|
||||
|
||||
Deprecated_UREnabled bool `xml:"urEnabled,omitempty" json:"-"`
|
||||
Deprecated_URDeclined bool `xml:"urDeclined,omitempty" json:"-"`
|
||||
Deprecated_ReadOnly bool `xml:"readOnly,omitempty" json:"-"`
|
||||
Deprecated_GUIEnabled bool `xml:"guiEnabled,omitempty" json:"-"`
|
||||
Deprecated_GUIAddress string `xml:"guiAddress,omitempty" json:"-"`
|
||||
@@ -74,6 +171,23 @@ type GUIConfiguration struct {
|
||||
User string `xml:"user,omitempty"`
|
||||
Password string `xml:"password,omitempty"`
|
||||
UseTLS bool `xml:"tls,attr"`
|
||||
APIKey string `xml:"apikey,omitempty"`
|
||||
}
|
||||
|
||||
func (cfg *Configuration) NodeMap() map[string]NodeConfiguration {
|
||||
m := make(map[string]NodeConfiguration, len(cfg.Nodes))
|
||||
for _, n := range cfg.Nodes {
|
||||
m[n.NodeID] = n
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (cfg *Configuration) RepoMap() map[string]RepositoryConfiguration {
|
||||
m := make(map[string]RepositoryConfiguration, len(cfg.Repositories))
|
||||
for _, r := range cfg.Repositories {
|
||||
m[r.ID] = r
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func setDefaults(data interface{}) error {
|
||||
@@ -189,6 +303,7 @@ func Load(rd io.Reader, myID string) (Configuration, error) {
|
||||
// Strip spaces and dashes
|
||||
node.NodeID = strings.Replace(node.NodeID, "-", "", -1)
|
||||
node.NodeID = strings.Replace(node.NodeID, " ", "", -1)
|
||||
node.NodeID = strings.ToUpper(node.NodeID)
|
||||
}
|
||||
|
||||
// Check for missing, bad or duplicate repository ID:s
|
||||
@@ -198,7 +313,7 @@ func Load(rd io.Reader, myID string) (Configuration, error) {
|
||||
repo := &cfg.Repositories[i]
|
||||
|
||||
if len(repo.Directory) == 0 {
|
||||
repo.Invalid = "empty directory"
|
||||
repo.Invalid = "no directory configured"
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -229,6 +344,12 @@ func Load(rd io.Reader, myID string) (Configuration, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Options.Deprecated_URDeclined {
|
||||
cfg.Options.URAccepted = -1
|
||||
}
|
||||
cfg.Options.Deprecated_URDeclined = false
|
||||
cfg.Options.Deprecated_UREnabled = false
|
||||
|
||||
// Upgrade to v2 configuration if appropriate
|
||||
if cfg.Version == 1 {
|
||||
convertV1V2(&cfg)
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
@@ -6,6 +10,9 @@ import (
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/syncthing/files"
|
||||
"github.com/calmh/syncthing/scanner"
|
||||
)
|
||||
|
||||
func TestDefaultValues(t *testing.T) {
|
||||
@@ -14,6 +21,7 @@ func TestDefaultValues(t *testing.T) {
|
||||
GlobalAnnServer: "announce.syncthing.net:22025",
|
||||
GlobalAnnEnabled: true,
|
||||
LocalAnnEnabled: true,
|
||||
LocalAnnPort: 21025,
|
||||
ParallelRequests: 16,
|
||||
MaxSendKbps: 0,
|
||||
RescanIntervalS: 60,
|
||||
@@ -37,10 +45,10 @@ func TestNodeConfig(t *testing.T) {
|
||||
v1data := []byte(`
|
||||
<configuration version="1">
|
||||
<repository id="test" directory="~/Sync">
|
||||
<node id="node1" name="node one">
|
||||
<node id="NODE1" name="node one">
|
||||
<address>a</address>
|
||||
</node>
|
||||
<node id="node2" name="node two">
|
||||
<node id="NODE2" name="node two">
|
||||
<address>b</address>
|
||||
</node>
|
||||
</repository>
|
||||
@@ -53,20 +61,20 @@ func TestNodeConfig(t *testing.T) {
|
||||
v2data := []byte(`
|
||||
<configuration version="2">
|
||||
<repository id="test" directory="~/Sync" ro="true">
|
||||
<node id="node1"/>
|
||||
<node id="node2"/>
|
||||
<node id="NODE1"/>
|
||||
<node id="NODE2"/>
|
||||
</repository>
|
||||
<node id="node1" name="node one">
|
||||
<node id="NODE1" name="node one">
|
||||
<address>a</address>
|
||||
</node>
|
||||
<node id="node2" name="node two">
|
||||
<node id="NODE2" name="node two">
|
||||
<address>b</address>
|
||||
</node>
|
||||
</configuration>
|
||||
`)
|
||||
|
||||
for i, data := range [][]byte{v1data, v2data} {
|
||||
cfg, err := Load(bytes.NewReader(data), "node1")
|
||||
cfg, err := Load(bytes.NewReader(data), "NODE1")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -75,23 +83,23 @@ func TestNodeConfig(t *testing.T) {
|
||||
{
|
||||
ID: "test",
|
||||
Directory: "~/Sync",
|
||||
Nodes: []NodeConfiguration{{NodeID: "node1"}, {NodeID: "node2"}},
|
||||
Nodes: []NodeConfiguration{{NodeID: "NODE1"}, {NodeID: "NODE2"}},
|
||||
ReadOnly: true,
|
||||
},
|
||||
}
|
||||
expectedNodes := []NodeConfiguration{
|
||||
{
|
||||
NodeID: "node1",
|
||||
NodeID: "NODE1",
|
||||
Name: "node one",
|
||||
Addresses: []string{"a"},
|
||||
},
|
||||
{
|
||||
NodeID: "node2",
|
||||
NodeID: "NODE2",
|
||||
Name: "node two",
|
||||
Addresses: []string{"b"},
|
||||
},
|
||||
}
|
||||
expectedNodeIDs := []string{"node1", "node2"}
|
||||
expectedNodeIDs := []string{"NODE1", "NODE2"}
|
||||
|
||||
if cfg.Version != 2 {
|
||||
t.Errorf("%d: Incorrect version %d != 2", i, cfg.Version)
|
||||
@@ -145,6 +153,7 @@ func TestOverriddenValues(t *testing.T) {
|
||||
<globalAnnounceServer>syncthing.nym.se:22025</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>false</localAnnounceEnabled>
|
||||
<localAnnouncePort>42123</localAnnouncePort>
|
||||
<parallelRequests>32</parallelRequests>
|
||||
<maxSendKbps>1234</maxSendKbps>
|
||||
<rescanIntervalS>600</rescanIntervalS>
|
||||
@@ -161,6 +170,7 @@ func TestOverriddenValues(t *testing.T) {
|
||||
GlobalAnnServer: "syncthing.nym.se:22025",
|
||||
GlobalAnnEnabled: false,
|
||||
LocalAnnEnabled: false,
|
||||
LocalAnnPort: 42123,
|
||||
ParallelRequests: 32,
|
||||
MaxSendKbps: 1234,
|
||||
RescanIntervalS: 600,
|
||||
@@ -197,25 +207,25 @@ func TestNodeAddresses(t *testing.T) {
|
||||
name, _ := os.Hostname()
|
||||
expected := []NodeConfiguration{
|
||||
{
|
||||
NodeID: "n1",
|
||||
NodeID: "N1",
|
||||
Addresses: []string{"dynamic"},
|
||||
},
|
||||
{
|
||||
NodeID: "n2",
|
||||
NodeID: "N2",
|
||||
Addresses: []string{"dynamic"},
|
||||
},
|
||||
{
|
||||
NodeID: "n3",
|
||||
NodeID: "N3",
|
||||
Addresses: []string{"dynamic"},
|
||||
},
|
||||
{
|
||||
NodeID: "n4",
|
||||
NodeID: "N4",
|
||||
Name: name, // Set when auto created
|
||||
Addresses: []string{"dynamic"},
|
||||
},
|
||||
}
|
||||
|
||||
cfg, err := Load(bytes.NewReader(data), "n4")
|
||||
cfg, err := Load(bytes.NewReader(data), "N4")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -274,3 +284,96 @@ func TestStripNodeIs(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncOrders(t *testing.T) {
|
||||
data := []byte(`
|
||||
<configuration version="2">
|
||||
<node id="AAAA-BBBB-CCCC">
|
||||
<address>dynamic</address>
|
||||
</node>
|
||||
<repository directory="~/Sync">
|
||||
<syncorder>
|
||||
<pattern pattern="\.jpg$" priority="1" />
|
||||
</syncorder>
|
||||
<node id="AAAA-BBBB-CCCC" name=""></node>
|
||||
</repository>
|
||||
</configuration>
|
||||
`)
|
||||
|
||||
expected := []SyncOrderPattern{
|
||||
{
|
||||
Pattern: "\\.jpg$",
|
||||
Priority: 1,
|
||||
},
|
||||
}
|
||||
|
||||
cfg, err := Load(bytes.NewReader(data), "n4")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
for i := range expected {
|
||||
if !reflect.DeepEqual(cfg.Repositories[0].SyncOrderPatterns[i], expected[i]) {
|
||||
t.Errorf("Nodes[%d] differ;\n E: %#v\n A: %#v", i, expected[i], cfg.Repositories[0].SyncOrderPatterns[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileSorter(t *testing.T) {
|
||||
rcfg := RepositoryConfiguration{
|
||||
SyncOrderPatterns: []SyncOrderPattern{
|
||||
{"\\.jpg$", 10, nil},
|
||||
{"\\.mov$", 5, nil},
|
||||
{"^camera-uploads", 100, nil},
|
||||
},
|
||||
}
|
||||
|
||||
f := []scanner.File{
|
||||
{Name: "bar.mov"},
|
||||
{Name: "baz.txt"},
|
||||
{Name: "foo.jpg"},
|
||||
{Name: "frew/foo.jpg"},
|
||||
{Name: "frew/lol.go"},
|
||||
{Name: "frew/rofl.copter"},
|
||||
{Name: "frew/bar.mov"},
|
||||
{Name: "camera-uploads/foo.jpg"},
|
||||
{Name: "camera-uploads/hurr.pl"},
|
||||
{Name: "camera-uploads/herp.mov"},
|
||||
{Name: "camera-uploads/wee.txt"},
|
||||
}
|
||||
|
||||
files.SortBy(rcfg.FileRanker()).Sort(f)
|
||||
|
||||
expected := []scanner.File{
|
||||
{Name: "camera-uploads/foo.jpg"},
|
||||
{Name: "camera-uploads/herp.mov"},
|
||||
{Name: "camera-uploads/hurr.pl"},
|
||||
{Name: "camera-uploads/wee.txt"},
|
||||
{Name: "foo.jpg"},
|
||||
{Name: "frew/foo.jpg"},
|
||||
{Name: "bar.mov"},
|
||||
{Name: "frew/bar.mov"},
|
||||
{Name: "frew/lol.go"},
|
||||
{Name: "baz.txt"},
|
||||
{Name: "frew/rofl.copter"},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(f, expected) {
|
||||
t.Errorf(
|
||||
"\n\nexpected:\n" +
|
||||
formatFiles(expected) + "\n" +
|
||||
"got:\n" +
|
||||
formatFiles(f) + "\n\n",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func formatFiles(f []scanner.File) string {
|
||||
ret := ""
|
||||
|
||||
for _, v := range f {
|
||||
ret += " " + v.Name + "\n"
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -5,6 +9,7 @@ import (
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
@@ -16,18 +21,18 @@ import (
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
type Node struct {
|
||||
Addresses []Address
|
||||
Updated time.Time
|
||||
type node struct {
|
||||
addresses []address
|
||||
updated time.Time
|
||||
}
|
||||
|
||||
type Address struct {
|
||||
IP []byte
|
||||
Port uint16
|
||||
type address struct {
|
||||
ip []byte
|
||||
port uint16
|
||||
}
|
||||
|
||||
var (
|
||||
nodes = make(map[string]Node)
|
||||
nodes = make(map[string]node)
|
||||
lock sync.Mutex
|
||||
queries = 0
|
||||
announces = 0
|
||||
@@ -134,7 +139,7 @@ func limit(addr *net.UDPAddr) bool {
|
||||
func handleAnnounceV2(addr *net.UDPAddr, buf []byte) {
|
||||
var pkt discover.AnnounceV2
|
||||
err := pkt.UnmarshalXDR(buf)
|
||||
if err != nil {
|
||||
if err != nil && err != io.EOF {
|
||||
log.Println("AnnounceV2 Unmarshal:", err)
|
||||
log.Println(hex.Dump(buf))
|
||||
return
|
||||
@@ -152,25 +157,25 @@ func handleAnnounceV2(addr *net.UDPAddr, buf []byte) {
|
||||
ip = addr.IP.To16()
|
||||
}
|
||||
|
||||
var addrs []Address
|
||||
for _, addr := range pkt.Addresses {
|
||||
var addrs []address
|
||||
for _, addr := range pkt.This.Addresses {
|
||||
tip := addr.IP
|
||||
if len(tip) == 0 {
|
||||
tip = ip
|
||||
}
|
||||
addrs = append(addrs, Address{
|
||||
IP: tip,
|
||||
Port: addr.Port,
|
||||
addrs = append(addrs, address{
|
||||
ip: tip,
|
||||
port: addr.Port,
|
||||
})
|
||||
}
|
||||
|
||||
node := Node{
|
||||
Addresses: addrs,
|
||||
Updated: time.Now(),
|
||||
node := node{
|
||||
addresses: addrs,
|
||||
updated: time.Now(),
|
||||
}
|
||||
|
||||
lock.Lock()
|
||||
nodes[pkt.NodeID] = node
|
||||
nodes[pkt.This.ID] = node
|
||||
lock.Unlock()
|
||||
}
|
||||
|
||||
@@ -191,19 +196,21 @@ func handleQueryV2(conn *net.UDPConn, addr *net.UDPAddr, buf []byte) {
|
||||
queries++
|
||||
lock.Unlock()
|
||||
|
||||
if ok && len(node.Addresses) > 0 {
|
||||
pkt := discover.AnnounceV2{
|
||||
Magic: discover.AnnouncementMagicV2,
|
||||
NodeID: pkt.NodeID,
|
||||
if ok && len(node.addresses) > 0 {
|
||||
ann := discover.AnnounceV2{
|
||||
Magic: discover.AnnouncementMagicV2,
|
||||
This: discover.Node{
|
||||
ID: pkt.NodeID,
|
||||
},
|
||||
}
|
||||
for _, addr := range node.Addresses {
|
||||
pkt.Addresses = append(pkt.Addresses, discover.Address{IP: addr.IP, Port: addr.Port})
|
||||
for _, addr := range node.addresses {
|
||||
ann.This.Addresses = append(ann.This.Addresses, discover.Address{IP: addr.ip, Port: addr.port})
|
||||
}
|
||||
if debug {
|
||||
log.Printf("-> %v %#v", addr, pkt)
|
||||
}
|
||||
|
||||
tb := pkt.MarshalXDR()
|
||||
tb := ann.MarshalXDR()
|
||||
_, _, err = conn.WriteMsgUDP(tb, nil, addr)
|
||||
if err != nil {
|
||||
log.Println("QueryV2 response write:", err)
|
||||
@@ -235,7 +242,7 @@ func logStats(file string, intv int) {
|
||||
|
||||
var deleted = 0
|
||||
for id, node := range nodes {
|
||||
if time.Since(node.Updated) > 60*time.Minute {
|
||||
if time.Since(node.updated) > 60*time.Minute {
|
||||
delete(nodes, id)
|
||||
deleted++
|
||||
}
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package discover
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package discover
|
||||
|
||||
import (
|
||||
@@ -10,11 +14,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/beacon"
|
||||
"github.com/calmh/syncthing/buffers"
|
||||
)
|
||||
|
||||
const (
|
||||
AnnouncementPort = 21025
|
||||
)
|
||||
|
||||
type Discoverer struct {
|
||||
@@ -42,8 +41,8 @@ var (
|
||||
// When we hit this many errors in succession, we stop.
|
||||
const maxErrors = 30
|
||||
|
||||
func NewDiscoverer(id string, addresses []string) (*Discoverer, error) {
|
||||
b, err := beacon.New(21025)
|
||||
func NewDiscoverer(id string, addresses []string, localPort int) (*Discoverer, error) {
|
||||
b, err := beacon.New(localPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -169,16 +168,21 @@ func (d *Discoverer) sendLocalAnnouncements() {
|
||||
}
|
||||
|
||||
func (d *Discoverer) sendExternalAnnouncements() {
|
||||
// this should go in the Discoverer struct
|
||||
errorRetryIntv := 60 * time.Second
|
||||
|
||||
remote, err := net.ResolveUDPAddr("udp", d.extServer)
|
||||
if err != nil {
|
||||
l.Warnf("Global discovery: %v; no external announcements", err)
|
||||
return
|
||||
for err != nil {
|
||||
l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv)
|
||||
time.Sleep(errorRetryIntv)
|
||||
remote, err = net.ResolveUDPAddr("udp", d.extServer)
|
||||
}
|
||||
|
||||
conn, err := net.ListenUDP("udp", nil)
|
||||
if err != nil {
|
||||
l.Warnf("Global discovery: %v; no external announcements", err)
|
||||
return
|
||||
for err != nil {
|
||||
l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv)
|
||||
time.Sleep(errorRetryIntv)
|
||||
conn, err = net.ListenUDP("udp", nil)
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
@@ -191,25 +195,21 @@ func (d *Discoverer) sendExternalAnnouncements() {
|
||||
} else {
|
||||
buf = d.announcementPkt()
|
||||
}
|
||||
var errCounter = 0
|
||||
|
||||
for errCounter < maxErrors {
|
||||
for {
|
||||
var ok bool
|
||||
|
||||
if debug {
|
||||
l.Debugf("discover: send announcement -> %v\n%s", remote, hex.Dump(buf))
|
||||
}
|
||||
|
||||
_, err = conn.WriteTo(buf, remote)
|
||||
_, err := conn.WriteTo(buf, remote)
|
||||
if err != nil {
|
||||
if debug {
|
||||
l.Debugln("discover: warning:", err)
|
||||
}
|
||||
errCounter++
|
||||
ok = false
|
||||
} else {
|
||||
errCounter = 0
|
||||
|
||||
// Verify that the announce server responds positively for our node ID
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
@@ -218,7 +218,6 @@ func (d *Discoverer) sendExternalAnnouncements() {
|
||||
l.Debugln("discover: external lookup check:", res)
|
||||
}
|
||||
ok = len(res) > 0
|
||||
|
||||
}
|
||||
|
||||
d.extAnnounceOKmut.Lock()
|
||||
@@ -228,10 +227,9 @@ func (d *Discoverer) sendExternalAnnouncements() {
|
||||
if ok {
|
||||
time.Sleep(d.globalBcastIntv)
|
||||
} else {
|
||||
time.Sleep(60 * time.Second)
|
||||
time.Sleep(errorRetryIntv)
|
||||
}
|
||||
}
|
||||
l.Warnf("Global discovery: %v: stopping due to too many errors: %v", remote, err)
|
||||
}
|
||||
|
||||
func (d *Discoverer) recvAnnouncements() {
|
||||
@@ -335,11 +333,8 @@ func (d *Discoverer) externalLookup(node string) []string {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
buffers.Put(buf)
|
||||
|
||||
buf = buffers.Get(2048)
|
||||
defer buffers.Put(buf)
|
||||
|
||||
buf = make([]byte, 2048)
|
||||
n, err := conn.Read(buf)
|
||||
if err != nil {
|
||||
if err, ok := err.(net.Error); ok && err.Timeout() {
|
||||
|
||||
@@ -1,2 +1,6 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package discover implements the node discovery protocol.
|
||||
package discover
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package discover
|
||||
|
||||
const (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package discover
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package files
|
||||
|
||||
import (
|
||||
|
||||
21
files/set.go
21
files/set.go
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package files provides a set type to track local/remote files with newness checks.
|
||||
package files
|
||||
|
||||
@@ -76,7 +80,7 @@ func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) {
|
||||
for _, ck := range m.remoteKey[cid.LocalID] {
|
||||
if _, ok := nf[ck.Name]; !ok {
|
||||
cf := m.files[ck].File
|
||||
if cf.Flags&protocol.FlagDeleted != protocol.FlagDeleted {
|
||||
if !protocol.IsDeleted(cf.Flags) {
|
||||
cf.Flags |= protocol.FlagDeleted
|
||||
cf.Blocks = nil
|
||||
cf.Size = 0
|
||||
@@ -116,7 +120,12 @@ func (m *Set) Need(id uint) []scanner.File {
|
||||
continue
|
||||
}
|
||||
|
||||
if gk.newerThan(rkID[gk.Name]) {
|
||||
if rk, ok := rkID[gk.Name]; gk.newerThan(rk) {
|
||||
if protocol.IsDeleted(gf.File.Flags) && (!ok || protocol.IsDeleted(m.files[rk].File.Flags)) {
|
||||
// We don't need to delete files we don't have or that are already deleted
|
||||
continue
|
||||
}
|
||||
|
||||
fs = append(fs, gf.File)
|
||||
}
|
||||
}
|
||||
@@ -193,7 +202,7 @@ func (m *Set) equals(id uint, fs []scanner.File) bool {
|
||||
curWithoutDeleted := make(map[string]key)
|
||||
for _, k := range m.remoteKey[id] {
|
||||
f := m.files[k].File
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
if !protocol.IsDeleted(f.Flags) {
|
||||
curWithoutDeleted[f.Name] = k
|
||||
}
|
||||
}
|
||||
@@ -210,6 +219,9 @@ func (m *Set) equals(id uint, fs []scanner.File) bool {
|
||||
|
||||
func (m *Set) update(cid uint, fs []scanner.File) {
|
||||
remFiles := m.remoteKey[cid]
|
||||
if remFiles == nil {
|
||||
l.Fatalln("update before replace for cid", cid)
|
||||
}
|
||||
for _, f := range fs {
|
||||
n := f.Name
|
||||
fk := keyFor(f)
|
||||
@@ -290,6 +302,9 @@ func (m *Set) replace(cid uint, fs []scanner.File) {
|
||||
|
||||
if na != 0 {
|
||||
// Someone had the file
|
||||
f := m.files[nk]
|
||||
f.Global = true
|
||||
m.files[nk] = f
|
||||
m.globalKey[n] = nk
|
||||
m.globalAvailability[n] = na
|
||||
} else {
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//+build anal
|
||||
|
||||
package files
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//+build !anal
|
||||
|
||||
package files
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
package files
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package files_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -7,6 +11,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/syncthing/cid"
|
||||
"github.com/calmh/syncthing/files"
|
||||
"github.com/calmh/syncthing/lamport"
|
||||
"github.com/calmh/syncthing/protocol"
|
||||
"github.com/calmh/syncthing/scanner"
|
||||
@@ -27,7 +32,7 @@ func (l fileList) Swap(a, b int) {
|
||||
}
|
||||
|
||||
func TestGlobalSet(t *testing.T) {
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
|
||||
local := []scanner.File{
|
||||
scanner.File{Name: "a", Version: 1000},
|
||||
@@ -36,7 +41,15 @@ func TestGlobalSet(t *testing.T) {
|
||||
scanner.File{Name: "d", Version: 1000},
|
||||
}
|
||||
|
||||
remote := []scanner.File{
|
||||
remote0 := []scanner.File{
|
||||
scanner.File{Name: "a", Version: 1000},
|
||||
scanner.File{Name: "c", Version: 1002},
|
||||
}
|
||||
remote1 := []scanner.File{
|
||||
scanner.File{Name: "b", Version: 1001},
|
||||
scanner.File{Name: "e", Version: 1000},
|
||||
}
|
||||
remoteTot := []scanner.File{
|
||||
scanner.File{Name: "a", Version: 1000},
|
||||
scanner.File{Name: "b", Version: 1001},
|
||||
scanner.File{Name: "c", Version: 1002},
|
||||
@@ -51,25 +64,86 @@ func TestGlobalSet(t *testing.T) {
|
||||
scanner.File{Name: "e", Version: 1000},
|
||||
}
|
||||
|
||||
expectedLocalNeed := []scanner.File{
|
||||
scanner.File{Name: "b", Version: 1001},
|
||||
scanner.File{Name: "c", Version: 1002},
|
||||
scanner.File{Name: "e", Version: 1000},
|
||||
}
|
||||
|
||||
expectedRemoteNeed := []scanner.File{
|
||||
scanner.File{Name: "d", Version: 1000},
|
||||
}
|
||||
|
||||
m.ReplaceWithDelete(cid.LocalID, local)
|
||||
m.Replace(1, remote)
|
||||
m.Replace(1, remote0)
|
||||
m.Update(1, remote1)
|
||||
|
||||
g := m.Global()
|
||||
|
||||
sort.Sort(fileList(g))
|
||||
sort.Sort(fileList(expectedGlobal))
|
||||
|
||||
if !reflect.DeepEqual(g, expectedGlobal) {
|
||||
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
|
||||
}
|
||||
|
||||
if lb := len(m.files); lb != 7 {
|
||||
t.Errorf("Num files incorrect %d != 7\n%v", lb, m.files)
|
||||
h := m.Have(cid.LocalID)
|
||||
sort.Sort(fileList(h))
|
||||
|
||||
if !reflect.DeepEqual(h, local) {
|
||||
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, local)
|
||||
}
|
||||
|
||||
h = m.Have(1)
|
||||
sort.Sort(fileList(h))
|
||||
|
||||
if !reflect.DeepEqual(h, remoteTot) {
|
||||
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
|
||||
}
|
||||
|
||||
n := m.Need(cid.LocalID)
|
||||
sort.Sort(fileList(n))
|
||||
|
||||
if !reflect.DeepEqual(n, expectedLocalNeed) {
|
||||
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
|
||||
}
|
||||
|
||||
n = m.Need(1)
|
||||
sort.Sort(fileList(n))
|
||||
|
||||
if !reflect.DeepEqual(n, expectedRemoteNeed) {
|
||||
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
|
||||
}
|
||||
|
||||
f := m.Get(cid.LocalID, "b")
|
||||
if !reflect.DeepEqual(f, local[1]) {
|
||||
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, local[1])
|
||||
}
|
||||
|
||||
f = m.Get(1, "b")
|
||||
if !reflect.DeepEqual(f, remote1[0]) {
|
||||
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
|
||||
}
|
||||
|
||||
f = m.GetGlobal("b")
|
||||
if !reflect.DeepEqual(f, remote1[0]) {
|
||||
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
|
||||
}
|
||||
|
||||
a := int(m.Availability("a"))
|
||||
if av := 1<<0 + 1<<1; a != av {
|
||||
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
|
||||
}
|
||||
a = int(m.Availability("b"))
|
||||
if av := 1 << 1; a != av {
|
||||
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
|
||||
}
|
||||
a = int(m.Availability("d"))
|
||||
if av := 1 << 0; a != av {
|
||||
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalDeleted(t *testing.T) {
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
lamport.Default = lamport.Clock{}
|
||||
|
||||
local1 := []scanner.File{
|
||||
@@ -147,7 +221,7 @@ func Benchmark10kReplace(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
m.ReplaceWithDelete(cid.LocalID, local)
|
||||
}
|
||||
}
|
||||
@@ -158,7 +232,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
|
||||
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
|
||||
}
|
||||
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
m.Replace(1, remote)
|
||||
|
||||
var local []scanner.File
|
||||
@@ -185,7 +259,7 @@ func Benchmark10kUpdateSme(b *testing.B) {
|
||||
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
|
||||
}
|
||||
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
m.Replace(1, remote)
|
||||
|
||||
var local []scanner.File
|
||||
@@ -207,7 +281,7 @@ func Benchmark10kNeed2k(b *testing.B) {
|
||||
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
|
||||
}
|
||||
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
m.Replace(cid.LocalID+1, remote)
|
||||
|
||||
var local []scanner.File
|
||||
@@ -235,7 +309,7 @@ func Benchmark10kHave(b *testing.B) {
|
||||
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
|
||||
}
|
||||
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
m.Replace(cid.LocalID+1, remote)
|
||||
|
||||
var local []scanner.File
|
||||
@@ -263,7 +337,7 @@ func Benchmark10kGlobal(b *testing.B) {
|
||||
remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d", i), Version: 1000})
|
||||
}
|
||||
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
m.Replace(cid.LocalID+1, remote)
|
||||
|
||||
var local []scanner.File
|
||||
@@ -286,7 +360,7 @@ func Benchmark10kGlobal(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestGlobalReset(t *testing.T) {
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
|
||||
local := []scanner.File{
|
||||
scanner.File{Name: "a", Version: 1000},
|
||||
@@ -302,28 +376,27 @@ func TestGlobalReset(t *testing.T) {
|
||||
scanner.File{Name: "e", Version: 1000},
|
||||
}
|
||||
|
||||
expectedGlobalKey := map[string]key{
|
||||
"a": keyFor(local[0]),
|
||||
"b": keyFor(local[1]),
|
||||
"c": keyFor(local[2]),
|
||||
"d": keyFor(local[3]),
|
||||
m.ReplaceWithDelete(cid.LocalID, local)
|
||||
g := m.Global()
|
||||
sort.Sort(fileList(g))
|
||||
|
||||
if !reflect.DeepEqual(g, local) {
|
||||
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
|
||||
}
|
||||
|
||||
m.ReplaceWithDelete(cid.LocalID, local)
|
||||
m.Replace(1, remote)
|
||||
m.Replace(1, nil)
|
||||
|
||||
if !reflect.DeepEqual(m.globalKey, expectedGlobalKey) {
|
||||
t.Errorf("Global incorrect;\n%v !=\n%v", m.globalKey, expectedGlobalKey)
|
||||
}
|
||||
g = m.Global()
|
||||
sort.Sort(fileList(g))
|
||||
|
||||
if lb := len(m.files); lb != 4 {
|
||||
t.Errorf("Num files incorrect %d != 4\n%v", lb, m.files)
|
||||
if !reflect.DeepEqual(g, local) {
|
||||
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeed(t *testing.T) {
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
|
||||
local := []scanner.File{
|
||||
scanner.File{Name: "a", Version: 1000},
|
||||
@@ -359,7 +432,7 @@ func TestNeed(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChanges(t *testing.T) {
|
||||
m := NewSet()
|
||||
m := files.NewSet()
|
||||
|
||||
local1 := []scanner.File{
|
||||
scanner.File{Name: "a", Version: 1000},
|
||||
|
||||
34
files/sort.go
Normal file
34
files/sort.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package files
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/calmh/syncthing/scanner"
|
||||
)
|
||||
|
||||
type SortBy func(p scanner.File) int
|
||||
|
||||
func (by SortBy) Sort(files []scanner.File) {
|
||||
ps := &fileSorter{
|
||||
files: files,
|
||||
by: by,
|
||||
}
|
||||
sort.Sort(ps)
|
||||
}
|
||||
|
||||
type fileSorter struct {
|
||||
files []scanner.File
|
||||
by func(p1 scanner.File) int
|
||||
}
|
||||
|
||||
func (s *fileSorter) Len() int {
|
||||
return len(s.files)
|
||||
}
|
||||
|
||||
func (s *fileSorter) Swap(i, j int) {
|
||||
s.files[i], s.files[j] = s.files[j], s.files[i]
|
||||
}
|
||||
|
||||
func (s *fileSorter) Less(i, j int) bool {
|
||||
return s.by(s.files[i]) > s.by(s.files[j])
|
||||
}
|
||||
BIN
files/testdata/index.db
vendored
Normal file
BIN
files/testdata/index.db
vendored
Normal file
Binary file not shown.
303
gui/app.js
303
gui/app.js
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
/*jslint browser: true, continue: true, plusplus: true */
|
||||
/*global $: false, angular: false */
|
||||
|
||||
@@ -6,6 +10,11 @@
|
||||
var syncthing = angular.module('syncthing', []);
|
||||
var urlbase = 'rest';
|
||||
|
||||
syncthing.config(function ($httpProvider) {
|
||||
$httpProvider.defaults.xsrfHeaderName = 'X-CSRF-Token';
|
||||
$httpProvider.defaults.xsrfCookieName = 'CSRF-Token';
|
||||
});
|
||||
|
||||
syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
var prevDate = 0;
|
||||
var getOK = true;
|
||||
@@ -16,24 +25,42 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
$scope.myID = '';
|
||||
$scope.nodes = [];
|
||||
$scope.configInSync = true;
|
||||
$scope.protocolChanged = false;
|
||||
$scope.errors = [];
|
||||
$scope.seenError = '';
|
||||
$scope.model = {};
|
||||
$scope.repos = {};
|
||||
$scope.reportData = {};
|
||||
$scope.reportPreview = false;
|
||||
|
||||
$scope.needActions = {
|
||||
'rm': 'Del',
|
||||
'rmdir': 'Del (dir)',
|
||||
'sync': 'Sync',
|
||||
'touch': 'Update',
|
||||
}
|
||||
$scope.needIcons = {
|
||||
'rm': 'remove',
|
||||
'rmdir': 'remove',
|
||||
'sync': 'download',
|
||||
'touch': 'asterisk',
|
||||
}
|
||||
|
||||
// Strings before bools look better
|
||||
$scope.settings = [
|
||||
{id: 'ListenStr', descr: 'Sync Protocol Listen Addresses', type: 'text', restart: true},
|
||||
{id: 'MaxSendKbps', descr: 'Outgoing Rate Limit (KiB/s)', type: 'number', restart: true},
|
||||
{id: 'RescanIntervalS', descr: 'Rescan Interval (s)', type: 'number', restart: true},
|
||||
{id: 'ReconnectIntervalS', descr: 'Reconnect Interval (s)', type: 'number', restart: true},
|
||||
{id: 'ParallelRequests', descr: 'Max Outstanding Requests', type: 'number', restart: true},
|
||||
{id: 'MaxChangeKbps', descr: 'Max File Change Rate (KiB/s)', type: 'number', restart: true},
|
||||
{id: 'ListenStr', descr: 'Sync Protocol Listen Addresses', type: 'text'},
|
||||
{id: 'MaxSendKbps', descr: 'Outgoing Rate Limit (KiB/s)', type: 'number'},
|
||||
{id: 'RescanIntervalS', descr: 'Rescan Interval (s)', type: 'number'},
|
||||
{id: 'ReconnectIntervalS', descr: 'Reconnect Interval (s)', type: 'number'},
|
||||
{id: 'ParallelRequests', descr: 'Max Outstanding Requests', type: 'number'},
|
||||
{id: 'MaxChangeKbps', descr: 'Max File Change Rate (KiB/s)', type: 'number'},
|
||||
|
||||
{id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true},
|
||||
{id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true},
|
||||
{id: 'LocalAnnPort', descr: 'Local Discovery Port', type: 'number'},
|
||||
{id: 'LocalAnnEnabled', descr: 'Local Discovery', type: 'bool'},
|
||||
{id: 'GlobalAnnEnabled', descr: 'Global Discovery', type: 'bool'},
|
||||
{id: 'StartBrowser', descr: 'Start Browser', type: 'bool'},
|
||||
{id: 'UPnPEnabled', descr: 'Enable UPnP', type: 'bool'},
|
||||
{id: 'UREnabled', descr: 'Anonymous Usage Reporting', type: 'bool'},
|
||||
];
|
||||
|
||||
$scope.guiSettings = [
|
||||
@@ -41,6 +68,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
{id: 'User', descr: 'GUI Authentication User', type: 'text', restart: true},
|
||||
{id: 'Password', descr: 'GUI Authentication Password', type: 'password', restart: true},
|
||||
{id: 'UseTLS', descr: 'Use HTTPS for GUI', type: 'bool', restart: true},
|
||||
{id: 'APIKey', descr: 'API Key', type: 'apikey'},
|
||||
];
|
||||
|
||||
function getSucceeded() {
|
||||
@@ -52,6 +80,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
if (restarting) {
|
||||
$scope.init();
|
||||
$('#restarting').modal('hide');
|
||||
$('#shutdown').modal('hide');
|
||||
restarting = false;
|
||||
}
|
||||
}
|
||||
@@ -74,9 +103,20 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
getFailed();
|
||||
});
|
||||
Object.keys($scope.repos).forEach(function (id) {
|
||||
$http.get(urlbase + '/model?repo=' + encodeURIComponent(id)).success(function (data) {
|
||||
$scope.model[id] = data;
|
||||
});
|
||||
if (typeof $scope.model[id] === 'undefined') {
|
||||
// Never fetched before
|
||||
$http.get(urlbase + '/model?repo=' + encodeURIComponent(id)).success(function (data) {
|
||||
$scope.model[id] = data;
|
||||
});
|
||||
} else {
|
||||
$http.get(urlbase + '/model/version?repo=' + encodeURIComponent(id)).success(function (data) {
|
||||
if (data.version > $scope.model[id].version) {
|
||||
$http.get(urlbase + '/model?repo=' + encodeURIComponent(id)).success(function (data) {
|
||||
$scope.model[id] = data;
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
$http.get(urlbase + '/connections').success(function (data) {
|
||||
var now = Date.now(),
|
||||
@@ -84,9 +124,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
id;
|
||||
|
||||
prevDate = now;
|
||||
$scope.inbps = 0;
|
||||
$scope.outbps = 0;
|
||||
|
||||
for (id in data) {
|
||||
if (!data.hasOwnProperty(id)) {
|
||||
continue;
|
||||
@@ -98,8 +135,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
data[id].inbps = 0;
|
||||
data[id].outbps = 0;
|
||||
}
|
||||
$scope.inbps += data[id].inbps;
|
||||
$scope.outbps += data[id].outbps;
|
||||
}
|
||||
$scope.connections = data;
|
||||
});
|
||||
@@ -125,7 +160,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
}
|
||||
|
||||
return state;
|
||||
}
|
||||
};
|
||||
|
||||
$scope.repoClass = function (repo) {
|
||||
if (typeof $scope.model[repo] === 'undefined') {
|
||||
@@ -144,7 +179,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
return 'primary';
|
||||
}
|
||||
return 'info';
|
||||
}
|
||||
};
|
||||
|
||||
$scope.syncPercentage = function (repo) {
|
||||
if (typeof $scope.model[repo] === 'undefined') {
|
||||
@@ -162,7 +197,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
var conn = $scope.connections[nodeCfg.NodeID];
|
||||
if (conn) {
|
||||
if (conn.Completion === 100) {
|
||||
return 'In Sync';
|
||||
return 'Up to Date';
|
||||
} else {
|
||||
return 'Syncing (' + conn.Completion + '%)';
|
||||
}
|
||||
@@ -254,13 +289,48 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
};
|
||||
|
||||
$scope.editSettings = function () {
|
||||
// Make a working copy
|
||||
$scope.tmpOptions = angular.copy($scope.config.Options);
|
||||
$scope.tmpOptions.UREnabled = ($scope.tmpOptions.URAccepted > 0);
|
||||
$scope.tmpGUI = angular.copy($scope.config.GUI);
|
||||
$('#settings').modal({backdrop: 'static', keyboard: true});
|
||||
}
|
||||
};
|
||||
|
||||
$scope.saveConfig = function() {
|
||||
var cfg = JSON.stringify($scope.config);
|
||||
var opts = {headers: {'Content-Type': 'application/json'}};
|
||||
$http.post(urlbase + '/config', cfg, opts).success(function () {
|
||||
$http.get(urlbase + '/config/sync').success(function (data) {
|
||||
$scope.configInSync = data.configInSync;
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
$scope.saveSettings = function () {
|
||||
$scope.configInSync = false;
|
||||
$scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) { return x.trim(); });
|
||||
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
|
||||
// Make sure something changed
|
||||
var changed = !angular.equals($scope.config.Options, $scope.tmpOptions) ||
|
||||
!angular.equals($scope.config.GUI, $scope.tmpGUI);
|
||||
if (changed) {
|
||||
// Check if usage reporting has been enabled or disabled
|
||||
if ($scope.tmpOptions.UREnabled && $scope.tmpOptions.URAccepted <= 0) {
|
||||
$scope.tmpOptions.URAccepted = 1000;
|
||||
} else if (!$scope.tmpOptions.UREnabled && $scope.tmpOptions.URAccepted > 0){
|
||||
$scope.tmpOptions.URAccepted = -1;
|
||||
}
|
||||
|
||||
// Check if protocol will need to be changed on restart
|
||||
if($scope.config.GUI.UseTLS !== $scope.tmpGUI.UseTLS){
|
||||
$scope.protocolChanged = true;
|
||||
}
|
||||
|
||||
// Apply new settings locally
|
||||
$scope.config.Options = angular.copy($scope.tmpOptions);
|
||||
$scope.config.GUI = angular.copy($scope.tmpGUI);
|
||||
$scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) { return x.trim(); });
|
||||
|
||||
$scope.saveConfig();
|
||||
}
|
||||
|
||||
$('#settings').modal("hide");
|
||||
};
|
||||
|
||||
@@ -269,6 +339,21 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
$('#restarting').modal({backdrop: 'static', keyboard: false});
|
||||
$http.post(urlbase + '/restart');
|
||||
$scope.configInSync = true;
|
||||
|
||||
// Switch webpage protocol if needed
|
||||
if($scope.protocolChanged){
|
||||
var protocol = 'http';
|
||||
|
||||
if($scope.config.GUI.UseTLS){
|
||||
protocol = 'https';
|
||||
}
|
||||
|
||||
setTimeout(function(){
|
||||
window.location.protocol = protocol;
|
||||
}, 1000);
|
||||
|
||||
$scope.protocolChanged = false;
|
||||
}
|
||||
};
|
||||
|
||||
$scope.shutdown = function () {
|
||||
@@ -317,17 +402,15 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
});
|
||||
}
|
||||
|
||||
$scope.configInSync = false;
|
||||
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
|
||||
$scope.saveConfig();
|
||||
};
|
||||
|
||||
$scope.saveNode = function () {
|
||||
var nodeCfg, done, i;
|
||||
|
||||
$scope.configInSync = false;
|
||||
$('#editNode').modal('hide');
|
||||
nodeCfg = $scope.currentNode;
|
||||
nodeCfg.NodeID = nodeCfg.NodeID.replace(/ /g, '').replace(/-/g, '').trim();
|
||||
nodeCfg.NodeID = nodeCfg.NodeID.replace(/ /g, '').replace(/-/g, '').toUpperCase().trim();
|
||||
nodeCfg.Addresses = nodeCfg.AddressesStr.split(',').map(function (x) { return x.trim(); });
|
||||
|
||||
done = false;
|
||||
@@ -346,7 +429,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
$scope.nodes.sort(nodeCompare);
|
||||
$scope.config.Nodes = $scope.nodes;
|
||||
|
||||
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
|
||||
$scope.saveConfig();
|
||||
};
|
||||
|
||||
$scope.otherNodes = function () {
|
||||
@@ -393,13 +476,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
|
||||
$scope.repoList = function () {
|
||||
return repoList($scope.repos);
|
||||
}
|
||||
};
|
||||
|
||||
$scope.editRepo = function (nodeCfg) {
|
||||
$scope.currentRepo = $.extend({selectedNodes: {}}, nodeCfg);
|
||||
$scope.currentRepo = angular.copy(nodeCfg);
|
||||
$scope.currentRepo.selectedNodes = {};
|
||||
$scope.currentRepo.Nodes.forEach(function (n) {
|
||||
$scope.currentRepo.selectedNodes[n.NodeID] = true;
|
||||
});
|
||||
if ($scope.currentRepo.Versioning && $scope.currentRepo.Versioning.Type === "simple") {
|
||||
$scope.currentRepo.simpleFileVersioning = true;
|
||||
$scope.currentRepo.simpleKeep = +$scope.currentRepo.Versioning.Params.keep;
|
||||
}
|
||||
$scope.currentRepo.simpleKeep = $scope.currentRepo.simpleKeep || 5;
|
||||
$scope.editingExisting = true;
|
||||
$scope.repoEditor.$setPristine();
|
||||
$('#editRepo').modal({backdrop: 'static', keyboard: true});
|
||||
@@ -415,7 +504,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
$scope.saveRepo = function () {
|
||||
var repoCfg, done, i;
|
||||
|
||||
$scope.configInSync = false;
|
||||
$('#editRepo').modal('hide');
|
||||
repoCfg = $scope.currentRepo;
|
||||
repoCfg.Nodes = [];
|
||||
@@ -427,10 +515,32 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
}
|
||||
delete repoCfg.selectedNodes;
|
||||
|
||||
if (repoCfg.simpleFileVersioning) {
|
||||
repoCfg.Versioning = {
|
||||
'Type': 'simple',
|
||||
'Params': {
|
||||
'keep': '' + repoCfg.simpleKeep,
|
||||
}
|
||||
};
|
||||
delete repoCfg.simpleFileVersioning;
|
||||
delete repoCfg.simpleKeep;
|
||||
} else {
|
||||
delete repoCfg.Versioning;
|
||||
}
|
||||
|
||||
$scope.repos[repoCfg.ID] = repoCfg;
|
||||
$scope.config.Repositories = repoList($scope.repos);
|
||||
|
||||
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
|
||||
$scope.saveConfig();
|
||||
};
|
||||
|
||||
$scope.sharesRepo = function(repoCfg) {
|
||||
var names = [];
|
||||
repoCfg.Nodes.forEach(function (node) {
|
||||
names.push($scope.nodeName($scope.findNode(node.NodeID)));
|
||||
});
|
||||
names.sort();
|
||||
return names.join(", ");
|
||||
};
|
||||
|
||||
$scope.deleteRepo = function () {
|
||||
@@ -442,8 +552,11 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
delete $scope.repos[$scope.currentRepo.ID];
|
||||
$scope.config.Repositories = repoList($scope.repos);
|
||||
|
||||
$scope.configInSync = false;
|
||||
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
|
||||
$scope.saveConfig();
|
||||
};
|
||||
|
||||
$scope.setAPIKey = function (cfg) {
|
||||
cfg.APIKey = randomString(30, 32);
|
||||
};
|
||||
|
||||
$scope.init = function() {
|
||||
@@ -466,11 +579,75 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
$scope.repos = repoMap($scope.config.Repositories);
|
||||
|
||||
$scope.refresh();
|
||||
|
||||
if ($scope.config.Options.URAccepted == 0) {
|
||||
// If usage reporting has been neither accepted nor declined,
|
||||
// we want to ask the user to make a choice. But we don't want
|
||||
// to bug them during initial setup, so we set a cookie with
|
||||
// the time of the first visit. When that cookie is present
|
||||
// and the time is more than four hours ago, we ask the
|
||||
// question.
|
||||
|
||||
var firstVisit = document.cookie.replace(/(?:(?:^|.*;\s*)firstVisit\s*\=\s*([^;]*).*$)|^.*$/, "$1");
|
||||
if (!firstVisit) {
|
||||
document.cookie = "firstVisit=" + Date.now() + ";max-age=" + 30*24*3600;
|
||||
} else {
|
||||
if (+firstVisit < Date.now() - 4*3600*1000){
|
||||
$('#ur').modal({backdrop: 'static', keyboard: false});
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
$http.get(urlbase + '/config/sync').success(function (data) {
|
||||
$scope.configInSync = data.configInSync;
|
||||
});
|
||||
|
||||
$http.get(urlbase + '/report').success(function (data) {
|
||||
$scope.reportData = data;
|
||||
});
|
||||
};
|
||||
|
||||
$scope.acceptUR = function () {
|
||||
$scope.config.Options.URAccepted = 1000; // Larger than the largest existing report version
|
||||
$scope.saveConfig();
|
||||
$('#ur').modal('hide');
|
||||
};
|
||||
|
||||
$scope.declineUR = function () {
|
||||
$scope.config.Options.URAccepted = -1;
|
||||
$scope.saveConfig();
|
||||
$('#ur').modal('hide');
|
||||
};
|
||||
|
||||
$scope.showNeed = function (repo) {
|
||||
$scope.neededLoaded = false;
|
||||
$('#needed').modal({backdrop: 'static', keyboard: true});
|
||||
$http.get(urlbase + "/need?repo=" + encodeURIComponent(repo)).success(function (data) {
|
||||
$scope.needed = data;
|
||||
$scope.neededLoaded = true;
|
||||
});
|
||||
};
|
||||
|
||||
$scope.needAction = function (file) {
|
||||
var fDelete = 4096;
|
||||
var fDirectory = 16384;
|
||||
|
||||
if ((file.Flags & (fDelete+fDirectory)) === fDelete+fDirectory) {
|
||||
return 'rmdir';
|
||||
} else if ((file.Flags & fDelete) === fDelete) {
|
||||
return 'rm';
|
||||
} else if ((file.Flags & fDirectory) === fDirectory) {
|
||||
return 'touch';
|
||||
} else {
|
||||
return 'sync';
|
||||
}
|
||||
};
|
||||
|
||||
$scope.override = function (repo) {
|
||||
$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo)).success(function () {
|
||||
$scope.refresh();
|
||||
});
|
||||
};
|
||||
|
||||
$scope.init();
|
||||
@@ -507,7 +684,7 @@ function repoMap(l) {
|
||||
function repoList(m) {
|
||||
var l = [];
|
||||
for (var id in m) {
|
||||
l.push(m[id])
|
||||
l.push(m[id]);
|
||||
}
|
||||
l.sort(repoCompare);
|
||||
return l;
|
||||
@@ -525,6 +702,18 @@ function decimals(val, num) {
|
||||
return decs;
|
||||
}
|
||||
|
||||
function randomString(len, bits)
|
||||
{
|
||||
bits = bits || 36;
|
||||
var outStr = "", newStr;
|
||||
while (outStr.length < len)
|
||||
{
|
||||
newStr = Math.random().toString(bits).slice(2);
|
||||
outStr += newStr.slice(0, Math.min(newStr.length, (len - outStr.length)));
|
||||
}
|
||||
return outStr.toUpperCase();
|
||||
}
|
||||
|
||||
syncthing.filter('natural', function () {
|
||||
return function (input, valid) {
|
||||
return input.toFixed(decimals(input, valid));
|
||||
@@ -596,7 +785,7 @@ syncthing.filter('chunkID', function () {
|
||||
if (!parts)
|
||||
return "";
|
||||
return parts.join('-');
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.filter('shortPath', function () {
|
||||
@@ -608,7 +797,25 @@ syncthing.filter('shortPath', function () {
|
||||
return input;
|
||||
}
|
||||
return ".../" + parts.slice(parts.length-2).join("/");
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.filter('basename', function () {
|
||||
return function (input) {
|
||||
if (input === undefined)
|
||||
return "";
|
||||
var parts = input.split(/[\/\\]/);
|
||||
if (!parts || parts.length < 1) {
|
||||
return input;
|
||||
}
|
||||
return parts[parts.length-1];
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.filter('clean', function () {
|
||||
return function (input) {
|
||||
return encodeURIComponent(input).replace(/%/g, '');
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.directive('optionEditor', function () {
|
||||
@@ -643,3 +850,25 @@ syncthing.directive('uniqueRepo', function() {
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.directive('validNodeid', function() {
|
||||
return {
|
||||
require: 'ngModel',
|
||||
link: function(scope, elm, attrs, ctrl) {
|
||||
ctrl.$parsers.unshift(function(viewValue) {
|
||||
if (scope.editingExisting) {
|
||||
// we shouldn't validate
|
||||
ctrl.$setValidity('validNodeid', true);
|
||||
} else {
|
||||
var cleaned = viewValue.replace(/ /g, '').replace(/-/g, '').toUpperCase().trim();
|
||||
if (cleaned.match(/^[A-Z2-7]{52}$/)) {
|
||||
ctrl.$setValidity('validNodeid', true);
|
||||
} else {
|
||||
ctrl.$setValidity('validNodeid', false);
|
||||
}
|
||||
}
|
||||
return viewValue;
|
||||
});
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
501
gui/index.html
501
gui/index.html
@@ -1,4 +1,9 @@
|
||||
<!DOCTYPE html>
|
||||
<!--
|
||||
Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
Use of this source code is governed by an MIT-style license that can be
|
||||
found in the LICENSE file.
|
||||
-->
|
||||
<html lang="en" ng-app="syncthing" ng-controller="SyncthingCtrl" class="ng-cloak">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
@@ -60,12 +65,26 @@
|
||||
}
|
||||
|
||||
.table th {
|
||||
white-space: nowrap;
|
||||
font-weight: 400;
|
||||
}
|
||||
|
||||
.table td {
|
||||
padding-left: 20px !important;
|
||||
}
|
||||
|
||||
.table td.small-data {
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
@media (max-width:767px) {
|
||||
.table-responsive>.table>tbody>tr>td {
|
||||
/* revert a bootstrap setting e.g.:
|
||||
* for mobile phones to allow linebreaks in long repro folder/shared with
|
||||
* columns. */
|
||||
white-space: normal;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
@@ -77,20 +96,20 @@
|
||||
<div class="container">
|
||||
<span class="navbar-brand"><img class="logo" src="st-logo-128.png" width="32" height="32" /> Syncthing<small class="hidden-xs"> <span class="text-muted">|</span> {{thisNodeName()}}</small></span>
|
||||
<ul class="nav navbar-nav navbar-right">
|
||||
<li class="dropdown">
|
||||
<li class="dropdown">
|
||||
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Edit <b class="caret"></b></a>
|
||||
<ul class="dropdown-menu">
|
||||
<li><a href="" ng-click="addRepo()"><span class="glyphicon glyphicon-hdd"></span> Add Repository</a></li>
|
||||
<li><a href="" ng-click="addNode()"><span class="glyphicon glyphicon-retweet"></span> Add Node</a></li>
|
||||
<li class="divider"></li>
|
||||
<li><a href="" ng-click="editSettings()"><span class="glyphicon glyphicon-cog"></span> Settings</a></li>
|
||||
<li><a href="" ng-click="idNode()"><span class="glyphicon glyphicon-qrcode"></span> Show ID</a></span>
|
||||
<li><a href="" ng-click="idNode()"><span class="glyphicon glyphicon-qrcode"></span> Show ID</a></li>
|
||||
<li class="divider"></li>
|
||||
<li><a href="" ng-click="shutdown()"><span class="glyphicon glyphicon-off"></span> Shutdown</a></li>
|
||||
<li><a href="" ng-click="restart()"><span class="glyphicon glyphicon-refresh"></span> Restart</a></li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
@@ -120,164 +139,177 @@
|
||||
<!-- Repository list (top left) -->
|
||||
|
||||
<div class="col-md-6">
|
||||
<div class="panel-group" id="repositories">
|
||||
<div class="panel panel-{{repoClass(repo.ID)}}" ng-repeat="repo in repoList()">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<a data-toggle="collapse" data-parent="#repositories" href="#repo-{{repo.ID}}">
|
||||
<span class="glyphicon glyphicon-hdd"></span> {{repo.Directory | shortPath}}
|
||||
<span class="pull-right">{{repoStatus(repo.ID)}}</span>
|
||||
</a>
|
||||
</h3>
|
||||
</div>
|
||||
<div id="repo-{{repo.ID}}" class="panel-collapse collapse">
|
||||
<div class="panel-body">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-condensed table-striped">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tag"></span> Repository ID</th>
|
||||
<td class="text-right">{{repo.ID}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-folder-open"></span> Folder</th>
|
||||
<td class="text-right">{{repo.Directory}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-comment"></span> Synchronization</th>
|
||||
<td class="text-right">{{repoStatus(repo.ID)}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-globe"></span> Global Repository</th>
|
||||
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} files, {{model[repo.ID].globalBytes | binary}}B</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-home"></span> Local Repository</th>
|
||||
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} files, {{model[repo.ID].localBytes | binary}}B</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-download"></span> Out of Sync</th>
|
||||
<td class="text-right">{{model[repo.ID].needFiles | alwaysNumber}} files, {{model[repo.ID].needBytes | binary}}B</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-lock"></span> Master Repository</th>
|
||||
<td class="text-right">
|
||||
<span ng-if="repo.ReadOnly">Yes</span>
|
||||
<span ng-if="!repo.ReadOnly">No</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-share-alt"></span> Shared With</th>
|
||||
<td class="text-right">
|
||||
<span ng-repeat="n in repo.Nodes">
|
||||
{{nodeName(findNode(n.NodeID))}}<span ng-if="!$last">, </span>
|
||||
</span>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<div class="panel-group" id="repositories">
|
||||
<div class="panel panel-{{repoClass(repo.ID)}}" ng-repeat="repo in repoList()">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<a data-toggle="collapse" data-parent="#repositories" href="#repo-{{$index}}">
|
||||
<span class="glyphicon glyphicon-hdd"></span> {{repo.Directory | shortPath}}
|
||||
<span class="pull-right hidden-xs">{{repoStatus(repo.ID)}}</span>
|
||||
</a>
|
||||
</h3>
|
||||
</div>
|
||||
<div id="repo-{{$index}}" class="panel-collapse collapse">
|
||||
<div class="panel-body">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-condensed table-striped">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tag"></span> Repository ID</th>
|
||||
<td class="text-right">{{repo.ID}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-folder-open"></span> Folder</th>
|
||||
<td class="text-right">{{repo.Directory}}</td>
|
||||
</tr>
|
||||
<tr ng-if="model[repo.ID].invalid">
|
||||
<th><span class="glyphicon glyphicon-warning-sign"></span> Error</th>
|
||||
<td class="text-right">{{model[repo.ID].invalid}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-comment"></span> Synchronization</th>
|
||||
<td class="text-right">{{repoStatus(repo.ID)}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-globe"></span> Global Repository</th>
|
||||
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} items, {{model[repo.ID].globalBytes | binary}}B</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-home"></span> Local Repository</th>
|
||||
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} items, {{model[repo.ID].localBytes | binary}}B</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-download"></span> Out of Sync</th>
|
||||
<td class="text-right">
|
||||
<a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} items, {{model[repo.ID].needBytes | binary}}B</a>
|
||||
<span ng-if="model[repo.ID].needFiles == 0">0 items, 0 B</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-lock"></span> Master Repository</th>
|
||||
<td class="text-right">
|
||||
<span ng-if="repo.ReadOnly">Yes</span>
|
||||
<span ng-if="!repo.ReadOnly">No</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-unchecked"></span> Ignore Permissions</th>
|
||||
<td class="text-right">
|
||||
<span ng-if="repo.IgnorePerms">Yes</span>
|
||||
<span ng-if="!repo.IgnorePerms">No</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-share-alt"></span> Shared With</th>
|
||||
<td class="text-right">{{sharesRepo(repo)}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<span class="pull-right">
|
||||
<a class="btn btn-sm btn-primary" href="" ng-click="editRepo(repo)"><span class="glyphicon glyphicon-pencil"></span> Edit</a>
|
||||
<a class="btn btn-sm btn-danger" ng-if="repo.ReadOnly && model[repo.ID].needFiles > 0" ng-click="override(repo.ID)" href=""><span class="glyphicon glyphicon-upload"></span> Override Changes</a>
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
<span class="pull-right"><a class="btn btn-sm btn-primary" href="" ng-click="editRepo(repo)"><span class="glyphicon glyphicon-pencil"></span> Edit</a></span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Node list (top right) -->
|
||||
|
||||
<div class="col-md-6">
|
||||
<div class="panel-group" id="nodes">
|
||||
<div class="panel panel-default" ng-repeat="nodeCfg in [thisNode()]">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<a data-toggle="collapse" data-parent="#nodes" href="#node-{{nodeCfg.NodeID}}"><span class="glyphicon glyphicon-home"></span> {{nodeName(nodeCfg)}}</a>
|
||||
</h3>
|
||||
</div>
|
||||
<div id="node-{{nodeCfg.NodeID}}" class="panel-collapse collapse in">
|
||||
<div class="panel-body">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-condensed table-striped">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-th"></span> RAM Utilization</th>
|
||||
<td class="text-right">{{system.sys | binary}}B</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tasks"></span> CPU Utilization</th>
|
||||
<td class="text-right">{{system.cpuPercent | alwaysNumber | natural:1}}%</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-download"></span> Download Rate</th>
|
||||
<td class="text-right">{{inbps | metric}}bps</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-upload"></span> Upload Rate</th>
|
||||
<td class="text-right">{{outbps | metric}}bps </td>
|
||||
</tr>
|
||||
<tr ng-if="system.extAnnounceOK != undefined">
|
||||
<th><span class="glyphicon glyphicon-bullhorn"></span> Announce Server</th>
|
||||
<td class="text-right">
|
||||
<span class="data text-success" ng-if="system.extAnnounceOK">Online</span>
|
||||
<span class="data text-danger" ng-if="!system.extAnnounceOK">Offline</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tag"></span> Version</th>
|
||||
<td class="text-right">{{version}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<div class="panel-group" id="nodes">
|
||||
<div class="panel panel-default" ng-repeat="nodeCfg in [thisNode()]">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<a data-toggle="collapse" data-parent="#nodes" href="#node-this"><span class="glyphicon glyphicon-home"></span> {{nodeName(nodeCfg)}}</a>
|
||||
</h3>
|
||||
</div>
|
||||
<div id="node-this" class="panel-collapse collapse in">
|
||||
<div class="panel-body">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-condensed table-striped">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-th"></span> RAM Utilization</th>
|
||||
<td class="text-right">{{system.sys | binary}}B</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tasks"></span> CPU Utilization</th>
|
||||
<td class="text-right">{{system.cpuPercent | alwaysNumber | natural:1}}%</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-download"></span> Download Rate</th>
|
||||
<td class="text-right">{{connections['total'].inbps | metric}}bps ({{connections['total'].InBytesTotal | binary}}B)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-upload"></span> Upload Rate</th>
|
||||
<td class="text-right">{{connections['total'].outbps | metric}}bps ({{connections['total'].OutBytesTotal | binary}}B)</td>
|
||||
</tr>
|
||||
<tr ng-if="system.extAnnounceOK != undefined">
|
||||
<th><span class="glyphicon glyphicon-bullhorn"></span> Announce Server</th>
|
||||
<td class="text-right">
|
||||
<span class="data text-success" ng-if="system.extAnnounceOK">Online</span>
|
||||
<span class="data text-danger" ng-if="!system.extAnnounceOK">Offline</span>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tag"></span> Version</th>
|
||||
<td class="text-right">{{version}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<span class="pull-right"><a class="btn btn-sm btn-primary" href="" ng-click="editNode(nodeCfg)"><span class="glyphicon glyphicon-pencil"></span> Edit</a></span>
|
||||
</div>
|
||||
</div>
|
||||
<span class="pull-right"><a class="btn btn-sm btn-primary" href="" ng-click="editNode(nodeCfg)"><span class="glyphicon glyphicon-pencil"></span> Edit</a></span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="panel panel-{{nodeClass(nodeCfg)}}" ng-repeat="nodeCfg in otherNodes()">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<a data-toggle="collapse" data-parent="#nodes" href="#node-{{nodeCfg.NodeID}}">
|
||||
<span class="glyphicon glyphicon-retweet"></span>
|
||||
{{nodeName(nodeCfg)}}
|
||||
<span class="pull-right">{{nodeStatus(nodeCfg)}}</span>
|
||||
</a>
|
||||
</h3>
|
||||
</div>
|
||||
<div id="node-{{nodeCfg.NodeID}}" class="panel-collapse collapse">
|
||||
<div class="panel-body">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-condensed table-striped">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-link"></span> Address</th>
|
||||
<td class="text-right">{{nodeAddr(nodeCfg)}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-comment"></span> Synchronization</th>
|
||||
<td class="text-right">{{nodeStatus(nodeCfg)}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-download"></span> Download Rate</th>
|
||||
<td class="text-right">{{connections[nodeCfg.NodeID].inbps | metric}}bps</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-upload"></span> Upload Rate</th>
|
||||
<td class="text-right">{{connections[nodeCfg.NodeID].outbps | metric}}bps </td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tag"></span> Version</th>
|
||||
<td class="text-right">{{nodeVer(nodeCfg)}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<div class="panel panel-{{nodeClass(nodeCfg)}}" ng-repeat="nodeCfg in otherNodes()">
|
||||
<div class="panel-heading">
|
||||
<h3 class="panel-title">
|
||||
<a data-toggle="collapse" data-parent="#nodes" href="#node-{{$index}}">
|
||||
<span class="glyphicon glyphicon-retweet"></span>
|
||||
{{nodeName(nodeCfg)}}
|
||||
<span class="pull-right hidden-xs">{{nodeStatus(nodeCfg)}}</span>
|
||||
</a>
|
||||
</h3>
|
||||
</div>
|
||||
<div id="node-{{$index}}" class="panel-collapse collapse">
|
||||
<div class="panel-body">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-condensed table-striped">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-link"></span> Address</th>
|
||||
<td class="text-right">{{nodeAddr(nodeCfg)}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-comment"></span> Synchronization</th>
|
||||
<td class="text-right">{{nodeStatus(nodeCfg)}}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-download"></span> Download Rate</th>
|
||||
<td class="text-right">{{connections[nodeCfg.NodeID].inbps | metric}}bps ({{connections[nodeCfg.NodeID].InBytesTotal | binary}}B)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-cloud-upload"></span> Upload Rate</th>
|
||||
<td class="text-right">{{connections[nodeCfg.NodeID].outbps | metric}}bps ({{connections[nodeCfg.NodeID].OutBytesTotal | binary}}B)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th><span class="glyphicon glyphicon-tag"></span> Version</th>
|
||||
<td class="text-right">{{nodeVer(nodeCfg)}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
<span class="pull-right"><a class="btn btn-sm btn-primary" href="" ng-click="editNode(nodeCfg)"><span class="glyphicon glyphicon-pencil"></span> Edit</a></span>
|
||||
</div>
|
||||
</div>
|
||||
<span class="pull-right"><a class="btn btn-sm btn-primary" href="" ng-click="editNode(nodeCfg)"><span class="glyphicon glyphicon-pencil"></span> Edit</a></span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div> <!-- /row -->
|
||||
|
||||
<!-- Errors -->
|
||||
@@ -386,14 +418,12 @@
|
||||
</h4>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<div class="well well-sm text-monospace text-center">
|
||||
{{myID | chunkID}}
|
||||
</div>
|
||||
<img ng-if="myID" class="center-block img-thumbnail" src="qr/{{myID | chunkID}}"/>
|
||||
<div class="well well-sm text-monospace text-center">{{myID | chunkID}}</div>
|
||||
<img ng-if="myID" class="center-block img-thumbnail" src="qr/{{myID | chunkID}}"/>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-default" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> Close</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -411,17 +441,18 @@
|
||||
<form role="form" name="nodeEditor">
|
||||
<div class="form-group" ng-class="{'has-error': nodeEditor.nodeID.$invalid && nodeEditor.nodeID.$dirty}">
|
||||
<label for="nodeID">Node ID</label>
|
||||
<input ng-if="!editingExisting" name="nodeID" id="nodeID" class="form-control text-monospace" type="text" ng-model="currentNode.NodeID" required></input>
|
||||
<input ng-if="!editingExisting" name="nodeID" id="nodeID" class="form-control text-monospace" type="text" ng-model="currentNode.NodeID" required valid-nodeid></input>
|
||||
<div ng-if="editingExisting" class="well well-sm text-monospace">{{currentNode.NodeID | chunkID}}</div>
|
||||
<p class="help-block">
|
||||
<span ng-if="nodeEditor.nodeID.$valid || nodeEditor.nodeID.$pristine">The node ID to enter here can be found in the "Edit > Show ID" dialog on the other node. Spaces and dashes are optional (ignored).
|
||||
<span ng-show="!editingExisting">When adding a new node, keep in mind that <em>this node</em> must be added on the other side too.</span>
|
||||
<span ng-show="!editingExisting">When adding a new node, keep in mind that <em>this node</em> must be added on the other side too.</span>
|
||||
</span>
|
||||
<span ng-if="nodeEditor.nodeID.$error.required && nodeEditor.nodeID.$dirty">The node ID cannot be blank.</span>
|
||||
<span ng-if="nodeEditor.nodeID.$error.validNodeid && nodeEditor.nodeID.$dirty">The entered node ID does not look valid. It should be a 52 character string consisting of letters and numbers, with spaces and dashes being optional.</span>
|
||||
</p>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label for="name">Name</label>
|
||||
<label for="name">Node Name</label>
|
||||
<input placeholder="Home Server" id="name" class="form-control" type="text" ng-model="currentNode.Name"></input>
|
||||
<p class="help-block">Shown instead of Node ID in the cluster status.</p>
|
||||
</div>
|
||||
@@ -452,39 +483,76 @@
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<form role="form" name="repoEditor">
|
||||
<div class="form-group" ng-class="{'has-error': repoEditor.repoID.$invalid && repoEditor.repoID.$dirty}">
|
||||
<label for="repoID">Repository ID</label>
|
||||
<input name="repoID" placeholder="documents" ng-disabled="editingExisting" id="repoID" class="form-control" type="text" ng-model="currentRepo.ID" required unique-repo></input>
|
||||
<p class="help-block">
|
||||
<span ng-if="repoEditor.repoID.$valid || repoEditor.repoID.$pristine">Short identifier for the repository. Must be the same on all cluster nodes.</span>
|
||||
<span ng-if="repoEditor.repoID.$error.uniqueRepo">The repository ID must be unique.</span>
|
||||
<span ng-if="repoEditor.repoID.$error.required && repoEditor.repoID.$dirty">The repository ID cannot be blank.</span>
|
||||
</p>
|
||||
</div>
|
||||
<div class="form-group" ng-class="{'has-error': repoEditor.repoPath.$invalid && repoEditor.repoPath.$dirty}">
|
||||
<label for="repoPath">Repository Path</label>
|
||||
<input name="repoPath" placeholder="~/Documents" id="repoPath" class="form-control" type="text" ng-model="currentRepo.Directory" required></input>
|
||||
<p class="help-block">
|
||||
<span ng-if="repoEditor.repoPath.$valid || repoEditor.repoPath.$pristine">Path to the repository on the local computer. Will be created if it does not exist. The tilde character <code>~</code> can be used as a shortcut for <code>{{system.tilde}}</code>.</span>
|
||||
<span ng-if="repoEditor.repoPath.$error.required && repoEditor.repoPath.$dirty">The repository path cannot be blank.</span>
|
||||
</p>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<div class="checkbox">
|
||||
<label>
|
||||
<input type="checkbox" ng-model="currentRepo.ReadOnly"> Repository Master
|
||||
</label>
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="form-group" ng-class="{'has-error': repoEditor.repoID.$invalid && repoEditor.repoID.$dirty}">
|
||||
<label for="repoID">Repository ID</label>
|
||||
<input name="repoID" placeholder="documents" ng-disabled="editingExisting" id="repoID" class="form-control" type="text" ng-model="currentRepo.ID" required unique-repo ng-pattern="/^[a-zA-Z0-9-_.]{1,64}$/"></input>
|
||||
<p class="help-block">
|
||||
<span ng-if="repoEditor.repoID.$valid || repoEditor.repoID.$pristine">Short identifier for the repository. Must be the same on all cluster nodes.</span>
|
||||
<span ng-if="repoEditor.repoID.$error.uniqueRepo">The repository ID must be unique.</span>
|
||||
<span ng-if="repoEditor.repoID.$error.required && repoEditor.repoID.$dirty">The repository ID cannot be blank.</span>
|
||||
<span ng-if="repoEditor.repoID.$error.pattern && repoEditor.repoID.$dirty">The repository ID must be a short identifier (64 characters or less) consisting of letters, numbers and the <code>-_.</code> characters only.</span>
|
||||
</p>
|
||||
</div>
|
||||
<div class="form-group" ng-class="{'has-error': repoEditor.repoPath.$invalid && repoEditor.repoPath.$dirty}">
|
||||
<label for="repoPath">Repository Path</label>
|
||||
<input name="repoPath" placeholder="~/Documents" id="repoPath" class="form-control" type="text" ng-model="currentRepo.Directory" required></input>
|
||||
<p class="help-block">
|
||||
<span ng-if="repoEditor.repoPath.$valid || repoEditor.repoPath.$pristine">Path to the repository on the local computer. Will be created if it does not exist. The tilde character <code>~</code> can be used as a shortcut for <code>{{system.tilde}}</code>.</span>
|
||||
<span ng-if="repoEditor.repoPath.$error.required && repoEditor.repoPath.$dirty">The repository path cannot be blank.</span>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<p class="help-block">Files are protected from changes made on other nodes, but changes made on <em>this</em> node will be sent to the rest of the cluster.</p>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label for="nodes">Nodes</label>
|
||||
<div class="checkbox" ng-repeat="node in otherNodes()">
|
||||
<label>
|
||||
<input type="checkbox" ng-model="currentRepo.selectedNodes[node.NodeID]"> {{nodeName(node)}}
|
||||
</label>
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<div class="form-group">
|
||||
<div class="checkbox">
|
||||
<label>
|
||||
<input type="checkbox" ng-model="currentRepo.ReadOnly"> Repository Master
|
||||
</label>
|
||||
</div>
|
||||
<p class="help-block">Files are protected from changes made on other nodes, but changes made on <em>this</em> node will be sent to the rest of the cluster.</p>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<div class="checkbox">
|
||||
<label>
|
||||
<input type="checkbox" ng-model="currentRepo.IgnorePerms"> Ignore Permissions
|
||||
</label>
|
||||
</div>
|
||||
<p class="help-block">File permission bits are ignored when looking for changes. Use on FAT filesystems.</p>
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label for="nodes">Share With Nodes</label>
|
||||
<div class="checkbox" ng-repeat="node in otherNodes()">
|
||||
<label>
|
||||
<input type="checkbox" ng-model="currentRepo.selectedNodes[node.NodeID]"> {{nodeName(node)}}
|
||||
</label>
|
||||
</div>
|
||||
<p class="help-block">Select the nodes to share this repository with.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<div class="form-group">
|
||||
<div class="checkbox">
|
||||
<label>
|
||||
<input type="checkbox" ng-model="currentRepo.simpleFileVersioning"> File Versioning
|
||||
</label>
|
||||
</div>
|
||||
<p class="help-block">Files are moved to date stamped versions in a <code>.stversions</code> folder when replaced or deleted by syncthing.</p>
|
||||
</div>
|
||||
<div class="form-group" ng-if="currentRepo.simpleFileVersioning" ng-class="{'has-error': repoEditor.simpleKeep.$invalid && repoEditor.simpleKeep.$dirty}">
|
||||
<label for="simpleKeep">Keep Versions</label>
|
||||
<input name="simpleKeep" id="simpleKeep" class="form-control" type="number" ng-model="currentRepo.simpleKeep" required min="1"></input>
|
||||
<p class="help-block">
|
||||
<span ng-if="repoEditor.simpleKeep.$valid || repoEditor.simpleKeep.$pristine">The number of old versions to keep, per file.</span>
|
||||
<span ng-if="repoEditor.simpleKeep.$error.required && repoEditor.simpleKeep.$dirty">The number of versions must be a number and cannot be blank.</span>
|
||||
<span ng-if="repoEditor.simpleKeep.$error.min && repoEditor.simpleKeep.$dirty">You must keep at least one version.</span>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<p class="help-block">Select the nodes to share this repository with.</p>
|
||||
</div>
|
||||
</form>
|
||||
<div ng-show="!editingExisting">
|
||||
@@ -506,7 +574,7 @@
|
||||
<div class="modal-dialog modal-lg">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header">
|
||||
<h4 class="modal-title"> Settings</h4>
|
||||
<h4 class="modal-title">Settings</h4>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<form role="form">
|
||||
@@ -515,11 +583,11 @@
|
||||
<div class="form-group" ng-repeat="setting in settings">
|
||||
<div ng-if="setting.type == 'text' || setting.type == 'number'">
|
||||
<label for="{{setting.id}}">{{setting.descr}}</label>
|
||||
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="config.Options[setting.id]"></input>
|
||||
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="tmpOptions[setting.id]"></input>
|
||||
</div>
|
||||
<div class="checkbox" ng-if="setting.type == 'bool'">
|
||||
<label>
|
||||
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="config.Options[setting.id]"></input>
|
||||
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="tmpOptions[setting.id]"></input>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
@@ -528,13 +596,18 @@
|
||||
<div class="form-group" ng-repeat="setting in guiSettings">
|
||||
<div ng-if="setting.type == 'text' || setting.type == 'number' || setting.type == 'password'">
|
||||
<label for="{{setting.id}}">{{setting.descr}}</label>
|
||||
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="config.GUI[setting.id]"></input>
|
||||
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="tmpGUI[setting.id]"></input>
|
||||
</div>
|
||||
<div class="checkbox" ng-if="setting.type == 'bool'">
|
||||
<label>
|
||||
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="config.GUI[setting.id]"></input>
|
||||
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="tmpGUI[setting.id]"></input>
|
||||
</label>
|
||||
</div>
|
||||
<div ng-if="setting.type == 'apikey'">
|
||||
<label>{{setting.descr}} (<a href="http://discourse.syncthing.net/t/v0-8-14-api-keys/335">Usage</a>)</label>
|
||||
<div class="well well-sm text-monospace">{{tmpGUI[setting.id] || "-"}}</div>
|
||||
<button type="button" class="btn btn-sm btn-default" ng-click="setAPIKey(tmpGUI)">Generate</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -548,6 +621,56 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Usage report modal -->
|
||||
|
||||
<div id="ur" class="modal fade">
|
||||
<div class="modal-dialog modal-lg">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header alert alert-success">
|
||||
<h4 class="modal-title">Allow Anonymous Usage Reporting?</h4>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<p>
|
||||
The encrypted usage report is sent daily. It is used to track common platforms, repo sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.
|
||||
</p>
|
||||
<p>
|
||||
The aggregated statistics are publicly available at <a href="https://data.syncthing.net/">https://data.syncthing.net/</a>.
|
||||
</p>
|
||||
<button type="button" class="btn btn-default" ng-show="!reportPreview" ng-click="reportPreview = true">Preview Usage Report</button>
|
||||
<pre ng-if="reportPreview"><small>{{reportData | json}}</small></pre>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-success" ng-click="acceptUR()"><span class="glyphicon glyphicon-ok"></span> Yes</button>
|
||||
<button type="button" class="btn btn-danger" ng-click="declineUR()"><span class="glyphicon glyphicon-remove"></span> No</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Needed files modal -->
|
||||
|
||||
<div id="needed" class="modal fade">
|
||||
<div class="modal-dialog modal-lg">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header alert alert-info">
|
||||
<h4 class="modal-title">Out of Sync Items</h4>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<table class="table table-striped table-condensed">
|
||||
<tr ng-repeat="f in needed" ng-init="a = needAction(f)">
|
||||
<td class="small-data"><span class="glyphicon glyphicon-{{needIcons[a]}}"></span> {{needActions[a]}}</td>
|
||||
<td title="{{f.Name}}">{{f.Name | basename}}</td>
|
||||
<td class="text-right small-data"><span ng-if="f.Size > 0">{{f.Size | binary}}B</span></td>
|
||||
</tr>
|
||||
</table>
|
||||
</div>
|
||||
<div class="modal-footer">
|
||||
<button type="button" class="btn btn-default" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span> Close</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<script src="angular.min.js"></script>
|
||||
<script src="jquery-2.0.3.min.js"></script>
|
||||
|
||||
3 integration/.gitignore vendored
@@ -12,3 +12,6 @@ json
*.idx.gz
dirs-*
*.out
csrftokens.txt
s4d
http
@@ -1,27 +1,32 @@
|
||||
<configuration version="1">
|
||||
<repository directory="s1" ro="true">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<configuration version="2">
|
||||
<repository id="default" directory="s1" ro="true" ignorePerms="false">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
<syncorder></syncorder>
|
||||
</repository>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8081</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>127.0.0.1:22001</listenAddress>
|
||||
<readOnly>true</readOnly>
|
||||
<allowDelete>true</allowDelete>
|
||||
<followSymlinks>true</followSymlinks>
|
||||
<guiEnabled>true</guiEnabled>
|
||||
<guiAddress>127.0.0.1:8081</guiAddress>
|
||||
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<parallelRequests>16</parallelRequests>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<rescanIntervalS>10</rescanIntervalS>
|
||||
<reconnectionIntervalS>5</reconnectionIntervalS>
|
||||
<maxChangeKbps>10000</maxChangeKbps>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>true</upnpEnabled>
|
||||
</options>
|
||||
</configuration>
|
||||
|
||||
@@ -1,27 +1,32 @@
|
||||
<configuration version="1">
|
||||
<repository directory="s2">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<configuration version="2">
|
||||
<repository id="default" directory="s2" ro="false" ignorePerms="false">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
<syncorder></syncorder>
|
||||
</repository>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="f1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="f2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8082</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>127.0.0.1:22002</listenAddress>
|
||||
<readOnly>false</readOnly>
|
||||
<allowDelete>true</allowDelete>
|
||||
<followSymlinks>true</followSymlinks>
|
||||
<guiEnabled>true</guiEnabled>
|
||||
<guiAddress>127.0.0.1:8082</guiAddress>
|
||||
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<parallelRequests>16</parallelRequests>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<rescanIntervalS>15</rescanIntervalS>
|
||||
<reconnectionIntervalS>5</reconnectionIntervalS>
|
||||
<maxChangeKbps>10000</maxChangeKbps>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>true</upnpEnabled>
|
||||
</options>
|
||||
</configuration>
|
||||
|
||||
@@ -1,5 +1,9 @@
#!/bin/bash

# Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file.

iterations=${1:-5}

id1=I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA
@@ -16,7 +20,7 @@ start() {
stop() {
echo "Stopping..."
for i in 1 2 ; do
curl -X POST "http://localhost:808$i/rest/shutdown"
curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
done
}

@@ -35,8 +39,8 @@ setup() {
testConvergence() {
while true ; do
sleep 5
s1comp=$(curl -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
s2comp=$(curl -s "http://localhost:8081/rest/connections" | ./json "$id2/Completion")
s1comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
s2comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/connections" | ./json "$id2/Completion")
s1comp=${s1comp:-0}
s2comp=${s2comp:-0}
tot=$(($s1comp + $s2comp))
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,41 +1,45 @@
|
||||
<configuration version="1">
|
||||
<repository directory="s1">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4">
|
||||
<address>127.0.0.1:22004</address>
|
||||
</node>
|
||||
<configuration version="2">
|
||||
<repository id="default" directory="s1" ro="false" ignorePerms="false">
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
</repository>
|
||||
<repository id="s12" directory="s12-1">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<repository id="s12" directory="s12-1" ro="false" ignorePerms="false">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
</repository>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4">
|
||||
<address>127.0.0.1:22004</address>
|
||||
</node>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8081</address>
|
||||
<apikey>abc123</apikey>
|
||||
<user>testuser</user>
|
||||
<password>testpass</password>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>127.0.0.1:22001</listenAddress>
|
||||
<readOnly>false</readOnly>
|
||||
<allowDelete>true</allowDelete>
|
||||
<followSymlinks>true</followSymlinks>
|
||||
<guiEnabled>true</guiEnabled>
|
||||
<guiAddress>127.0.0.1:8081</guiAddress>
|
||||
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<parallelRequests>16</parallelRequests>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<rescanIntervalS>10</rescanIntervalS>
|
||||
<reconnectionIntervalS>5</reconnectionIntervalS>
|
||||
<maxChangeKbps>10000</maxChangeKbps>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>true</upnpEnabled>
|
||||
</options>
|
||||
</configuration>
|
||||
|
||||
@@ -1,46 +1,45 @@
|
||||
<configuration version="1">
|
||||
<repository directory="s2">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<configuration version="2">
|
||||
<repository id="default" directory="s2" ro="false" ignorePerms="false">
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
</repository>
|
||||
<repository id="s12" directory="s12-2">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<repository id="s12" directory="s12-2" ro="false" ignorePerms="false">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
</repository>
|
||||
<repository id="s23" directory="s23-2">
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<repository id="s23" directory="s23-2" ro="false" ignorePerms="false">
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
</repository>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8082</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>127.0.0.1:22002</listenAddress>
|
||||
<readOnly>false</readOnly>
|
||||
<allowDelete>true</allowDelete>
|
||||
<followSymlinks>true</followSymlinks>
|
||||
<guiEnabled>true</guiEnabled>
|
||||
<guiAddress>127.0.0.1:8082</guiAddress>
|
||||
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<parallelRequests>16</parallelRequests>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<rescanIntervalS>15</rescanIntervalS>
|
||||
<reconnectionIntervalS>5</reconnectionIntervalS>
|
||||
<maxChangeKbps>10000</maxChangeKbps>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>true</upnpEnabled>
|
||||
</options>
|
||||
</configuration>
|
||||
|
||||
@@ -1,38 +1,40 @@
|
||||
<configuration version="1">
|
||||
<repository directory="s3">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<configuration version="2">
|
||||
<repository id="default" directory="s3" ro="false" ignorePerms="false">
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
</repository>
|
||||
<repository id="s23" directory="s23-3">
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<repository id="s23" directory="s23-3" ro="false" ignorePerms="false">
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA"></node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ"></node>
|
||||
<versioning></versioning>
|
||||
</repository>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
|
||||
<address>127.0.0.1:22002</address>
|
||||
</node>
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
|
||||
<address>127.0.0.1:22003</address>
|
||||
</node>
|
||||
<gui enabled="true" tls="false">
|
||||
<address>127.0.0.1:8083</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>127.0.0.1:22003</listenAddress>
|
||||
<readOnly>false</readOnly>
|
||||
<allowDelete>true</allowDelete>
|
||||
<followSymlinks>true</followSymlinks>
|
||||
<guiEnabled>true</guiEnabled>
|
||||
<guiAddress>127.0.0.1:8083</guiAddress>
|
||||
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
|
||||
<globalAnnounceEnabled>false</globalAnnounceEnabled>
|
||||
<localAnnounceEnabled>true</localAnnounceEnabled>
|
||||
<localAnnouncePort>21025</localAnnouncePort>
|
||||
<parallelRequests>16</parallelRequests>
|
||||
<maxSendKbps>0</maxSendKbps>
|
||||
<rescanIntervalS>20</rescanIntervalS>
|
||||
<reconnectionIntervalS>5</reconnectionIntervalS>
|
||||
<maxChangeKbps>10000</maxChangeKbps>
|
||||
<startBrowser>false</startBrowser>
|
||||
<upnpEnabled>true</upnpEnabled>
|
||||
</options>
|
||||
</configuration>
|
||||
|
||||
@@ -5,6 +5,9 @@
|
||||
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3"></node>
|
||||
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4"></node>
|
||||
</repository>
|
||||
<repository id="default" directory="s4d" ro="false">
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1"></node>
|
||||
</repository>
|
||||
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
|
||||
<address>127.0.0.1:22001</address>
|
||||
</node>
|
||||
@@ -19,6 +22,7 @@
|
||||
</node>
|
||||
<gui enabled="true">
|
||||
<address>127.0.0.1:8084</address>
|
||||
<apikey>abc123</apikey>
|
||||
</gui>
|
||||
<options>
|
||||
<listenAddress>:22004</listenAddress>
|
||||
|
||||
232 integration/http.go Normal file
@@ -0,0 +1,232 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
target string
|
||||
authUser string
|
||||
authPass string
|
||||
csrfToken string
|
||||
csrfFile string
|
||||
apiKey string
|
||||
)
|
||||
|
||||
var jsonEndpoints = []string{
|
||||
"/rest/model?repo=default",
|
||||
"/rest/model/version?repo=default",
|
||||
"/rest/need",
|
||||
"/rest/connections",
|
||||
"/rest/config",
|
||||
"/rest/config/sync",
|
||||
"/rest/system",
|
||||
"/rest/errors",
|
||||
// "/rest/discovery",
|
||||
"/rest/report",
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&target, "target", "localhost:8080", "Test target")
|
||||
flag.StringVar(&authUser, "user", "", "Username")
|
||||
flag.StringVar(&authPass, "pass", "", "Password")
|
||||
flag.StringVar(&csrfFile, "csrf", "", "CSRF token file")
|
||||
flag.StringVar(&apiKey, "api", "", "API key")
|
||||
flag.Parse()
|
||||
|
||||
if len(csrfFile) > 0 {
|
||||
fd, err := os.Open(csrfFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s := bufio.NewScanner(fd)
|
||||
for s.Scan() {
|
||||
csrfToken = s.Text()
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
|
||||
var tests []testing.InternalTest
|
||||
tests = append(tests, testing.InternalTest{"TestGetIndex", TestGetIndex})
|
||||
tests = append(tests, testing.InternalTest{"TestGetVersion", TestGetVersion})
|
||||
tests = append(tests, testing.InternalTest{"TestGetVersionNoCSRF", TestGetVersion})
|
||||
tests = append(tests, testing.InternalTest{"TestJSONEndpoints", TestJSONEndpoints})
|
||||
if len(authUser) > 0 || len(apiKey) > 0 {
|
||||
tests = append(tests, testing.InternalTest{"TestJSONEndpointsNoAuth", TestJSONEndpointsNoAuth})
|
||||
tests = append(tests, testing.InternalTest{"TestJSONEndpointsIncorrectAuth", TestJSONEndpointsIncorrectAuth})
|
||||
}
|
||||
if len(csrfToken) > 0 {
|
||||
tests = append(tests, testing.InternalTest{"TestJSONEndpointsNoCSRF", TestJSONEndpointsNoCSRF})
|
||||
}
|
||||
|
||||
testing.Main(matcher, tests, nil, nil)
|
||||
}
|
||||
|
||||
func matcher(s0, s1 string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func TestGetIndex(t *testing.T) {
|
||||
res, err := get("/index.html")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
t.Errorf("Status %d != 200", res.StatusCode)
|
||||
}
|
||||
if res.ContentLength < 1024 {
|
||||
t.Errorf("Length %d < 1024", res.ContentLength)
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
res, err = get("/")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
t.Errorf("Status %d != 200", res.StatusCode)
|
||||
}
|
||||
if res.ContentLength < 1024 {
|
||||
t.Errorf("Length %d < 1024", res.ContentLength)
|
||||
}
|
||||
res.Body.Close()
|
||||
}
|
||||
|
||||
func TestGetVersion(t *testing.T) {
|
||||
res, err := get("/rest/version")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
t.Fatalf("Status %d != 200", res.StatusCode)
|
||||
}
|
||||
ver, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
if !regexp.MustCompile(`v\d+\.\d+\.\d+`).Match(ver) {
|
||||
t.Errorf("Invalid version %q", ver)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetVersionNoCSRF(t *testing.T) {
|
||||
r, err := http.NewRequest("GET", "http://"+target+"/rest/version", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(authUser) > 0 {
|
||||
r.SetBasicAuth(authUser, authPass)
|
||||
}
|
||||
res, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 403 {
|
||||
t.Fatalf("Status %d != 403", res.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEndpoints(t *testing.T) {
|
||||
for _, p := range jsonEndpoints {
|
||||
res, err := get(p)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
t.Errorf("Status %d != 200 for %q", res.StatusCode, p)
|
||||
}
|
||||
if ct := res.Header.Get("Content-Type"); ct != "application/json; charset=utf-8" {
|
||||
t.Errorf("Content-Type %q != \"application/json\" for %q", ct, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEndpointsNoCSRF(t *testing.T) {
|
||||
for _, p := range jsonEndpoints {
|
||||
r, err := http.NewRequest("GET", "http://"+target+p, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(authUser) > 0 {
|
||||
r.SetBasicAuth(authUser, authPass)
|
||||
}
|
||||
res, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 403 && res.StatusCode != 401 {
|
||||
t.Fatalf("Status %d != 403/401 for %q", res.StatusCode, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEndpointsNoAuth(t *testing.T) {
|
||||
for _, p := range jsonEndpoints {
|
||||
r, err := http.NewRequest("GET", "http://"+target+p, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(csrfToken) > 0 {
|
||||
r.Header.Set("X-CSRF-Token", csrfToken)
|
||||
}
|
||||
res, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 403 && res.StatusCode != 401 {
|
||||
t.Fatalf("Status %d != 403/401 for %q", res.StatusCode, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEndpointsIncorrectAuth(t *testing.T) {
|
||||
for _, p := range jsonEndpoints {
|
||||
r, err := http.NewRequest("GET", "http://"+target+p, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(csrfToken) > 0 {
|
||||
r.Header.Set("X-CSRF-Token", csrfToken)
|
||||
}
|
||||
r.SetBasicAuth("wronguser", "wrongpass")
|
||||
res, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.StatusCode != 403 && res.StatusCode != 401 {
|
||||
t.Fatalf("Status %d != 403/401 for %q", res.StatusCode, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func get(path string) (*http.Response, error) {
|
||||
r, err := http.NewRequest("GET", "http://"+target+path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(authUser) > 0 {
|
||||
r.SetBasicAuth(authUser, authPass)
|
||||
}
|
||||
if len(csrfToken) > 0 {
|
||||
r.Header.Set("X-CSRF-Token", csrfToken)
|
||||
}
|
||||
if len(apiKey) > 0 {
|
||||
r.Header.Set("X-API-Key", apiKey)
|
||||
}
|
||||
return http.DefaultClient.Do(r)
|
||||
}
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
# Use of this source code is governed by an MIT-style license that can be
|
||||
# found in the LICENSE file.
|
||||
|
||||
iterations=${1:-5}
|
||||
|
||||
id1=I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA
|
||||
@@ -9,20 +13,47 @@ id3=373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA
|
||||
go build genfiles.go
|
||||
go build md5r.go
|
||||
go build json.go
|
||||
go build http.go
|
||||
|
||||
start() {
|
||||
echo "Starting..."
|
||||
for i in 1 2 3 4 ; do
|
||||
STPROFILER=":909$i" syncthing -home "h$i" > "$i.out" 2>&1 &
|
||||
done
|
||||
|
||||
# Test REST API
|
||||
sleep 2
|
||||
curl -s -o /dev/null http://testuser:testpass@localhost:8081/index.html
|
||||
curl -s -o /dev/null http://localhost:8082/index.html
|
||||
sleep 1
|
||||
./http -target localhost:8081 -user testuser -pass testpass -csrf h1/csrftokens.txt || stop 1
|
||||
./http -target localhost:8081 -api abc123 || stop 1
|
||||
./http -target localhost:8082 -csrf h2/csrftokens.txt || stop 1
|
||||
./http -target localhost:8082 -api abc123 || stop 1
|
||||
}
|
||||
|
||||
stop() {
|
||||
for i in 1 2 3 4 ; do
|
||||
curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
|
||||
done
|
||||
exit $1
|
||||
}
|
||||
|
||||
clean() {
|
||||
if [[ $(uname -s) == "Linux" ]] ; then
|
||||
grep -v utf8-nfd
|
||||
else
|
||||
cat
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
testConvergence() {
|
||||
while true ; do
|
||||
sleep 5
|
||||
s1comp=$(curl -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
|
||||
s2comp=$(curl -s "http://localhost:8083/rest/connections" | ./json "$id2/Completion")
|
||||
s3comp=$(curl -s "http://localhost:8081/rest/connections" | ./json "$id3/Completion")
|
||||
s1comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8082/rest/connections" | ./json "$id1/Completion")
|
||||
s2comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8083/rest/connections" | ./json "$id2/Completion")
|
||||
s3comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/connections" | ./json "$id3/Completion")
|
||||
s1comp=${s1comp:-0}
|
||||
s2comp=${s2comp:-0}
|
||||
s3comp=${s3comp:-0}
|
||||
@@ -34,13 +65,13 @@ testConvergence() {
|
||||
done
|
||||
|
||||
echo "Verifying..."
|
||||
cat md5-? | sort | uniq > md5-tot
|
||||
cat md5-12-? | sort | uniq > md5-12-tot
|
||||
cat md5-23-? | sort | uniq > md5-23-tot
|
||||
cat md5-? | sort | clean | uniq > md5-tot
|
||||
cat md5-12-? | sort | clean | uniq > md5-12-tot
|
||||
cat md5-23-? | sort | clean | uniq > md5-23-tot
|
||||
|
||||
for i in 1 2 3 12-1 12-2 23-2 23-3; do
|
||||
pushd "s$i" >/dev/null
|
||||
../md5r -l | sort > ../md5-$i
|
||||
../md5r -l | sort | clean > ../md5-$i
|
||||
popd >/dev/null
|
||||
done
|
||||
|
||||
@@ -70,8 +101,7 @@ testConvergence() {
|
||||
fi
|
||||
done
|
||||
if [[ $ok != 7 ]] ; then
|
||||
pkill syncthing
|
||||
exit 1
|
||||
stop 1
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -101,10 +131,11 @@ alterFiles() {
|
||||
pkill -CONT syncthing
|
||||
}
|
||||
|
||||
rm -f h?/*.idx.gz
|
||||
rm -rf s? s??-? s4d
|
||||
|
||||
echo "Setting up files..."
|
||||
for i in 1 2 3 12-1 12-2 23-2 23-3; do
|
||||
rm -f h$i/*.idx.gz
|
||||
rm -rf "s$i"
|
||||
mkdir "s$i"
|
||||
pushd "s$i" >/dev/null
|
||||
echo " $i: random nonoverlapping"
|
||||
@@ -113,9 +144,17 @@ for i in 1 2 3 12-1 12-2 23-2 23-3; do
|
||||
touch "empty-$i"
|
||||
echo " $i: large file"
|
||||
dd if=/dev/urandom of=large-$i bs=1024k count=55 2>/dev/null
|
||||
echo " $i: weird encodings"
|
||||
echo somedata > "$(echo -e utf8-nfc-\\xc3\\xad)-$i"
|
||||
echo somedata > "$(echo -e utf8-nfd-i\\xcc\\x81)-$i"
|
||||
echo somedata > "$(echo -e cp850-\\xa1)-$i"
|
||||
touch "empty-$i"
|
||||
popd >/dev/null
|
||||
done
|
||||
|
||||
mkdir s4d
|
||||
echo somerandomdata > s4d/extrafile
|
||||
|
||||
echo "MD5-summing..."
|
||||
for i in 1 2 3 12-1 12-2 23-2 23-3 ; do
|
||||
pushd "s$i" >/dev/null
|
||||
@@ -135,6 +174,4 @@ for ((t = 1; t <= $iterations; t++)) ; do
|
||||
testConvergence
|
||||
done
|
||||
|
||||
for i in 1 2 3 4 ; do
|
||||
curl -X POST "http://localhost:808$i/rest/shutdown"
|
||||
done
|
||||
stop 0
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package lamport implements a simple Lamport Clock for versioning
|
||||
package lamport
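As background for the versioning calls elsewhere in this change set (lamport.Default.Tick(f.Version)), a Lamport clock can be sketched in a few lines of Go. The type and field choices below are illustrative only; the package's real exported API may differ.

// Minimal Lamport clock sketch, for illustration only.
package lamport

import "sync"

// Clock is a monotonically increasing logical clock.
type Clock struct {
	mut sync.Mutex
	val uint64
}

// Tick observes a version seen elsewhere and returns a new version that is
// strictly greater than both it and anything this clock returned before.
func (c *Clock) Tick(v uint64) uint64 {
	c.mut.Lock()
	defer c.mut.Unlock()
	if v > c.val {
		c.val = v
	}
	c.val++
	return c.val
}

// Default is the shared clock that callers such as lamport.Default.Tick use.
var Default Clock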
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package logger implements a standardized logger with callback functionality
|
||||
package logger
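The callback functionality the package comment refers to can be pictured as a list of handlers that every log call fans out to. A hedged sketch, with invented type names rather than the package's actual API:

// Illustrative callback-logger sketch, not the real package contents.
package logger

import (
	"fmt"
	"sync"
)

// MessageHandler receives every formatted log line.
type MessageHandler func(level int, msg string)

type Logger struct {
	mut      sync.Mutex
	handlers []MessageHandler
}

// AddHandler registers a callback invoked for each logged message.
func (l *Logger) AddHandler(h MessageHandler) {
	l.mut.Lock()
	defer l.mut.Unlock()
	l.handlers = append(l.handlers, h)
}

// Infof formats a message, prints it, and fans it out to the callbacks.
func (l *Logger) Infof(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	l.mut.Lock()
	defer l.mut.Unlock()
	fmt.Println(msg)
	for _, h := range l.handlers {
		h(0, msg)
	}
}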
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
@@ -16,6 +20,7 @@ type bqBlock struct {
|
||||
file scanner.File
|
||||
block scanner.Block // get this block from the network
|
||||
copy []scanner.Block // copy these blocks from the old version of the file
|
||||
first bool
|
||||
last bool
|
||||
}
|
||||
|
||||
@@ -47,24 +52,30 @@ func (q *blockQueue) addBlock(a bqAdd) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
l := len(a.need)
|
||||
|
||||
if len(a.have) > 0 {
|
||||
// First queue a copy operation
|
||||
q.queued = append(q.queued, bqBlock{
|
||||
file: a.file,
|
||||
copy: a.have,
|
||||
file: a.file,
|
||||
copy: a.have,
|
||||
first: true,
|
||||
last: l == 0,
|
||||
})
|
||||
}
|
||||
|
||||
// Queue the needed blocks individually
|
||||
l := len(a.need)
|
||||
for i, b := range a.need {
|
||||
q.queued = append(q.queued, bqBlock{
|
||||
file: a.file,
|
||||
block: b,
|
||||
first: len(a.have) == 0 && i == 0,
|
||||
last: i == l-1,
|
||||
})
|
||||
}
|
||||
|
||||
if l == 0 {
|
||||
if len(a.need)+len(a.have) == 0 {
|
||||
// If we didn't have anything to fetch, queue an empty block with the "last" flag set to close the file.
|
||||
q.queued = append(q.queued, bqBlock{
|
||||
file: a.file,
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,2 +1,6 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package model implements repository abstraction and file pulling mechanisms
|
||||
package model
|
||||
|
||||
model/model.go
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
@@ -9,14 +13,15 @@ import (
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/buffers"
|
||||
"github.com/calmh/syncthing/cid"
|
||||
"github.com/calmh/syncthing/config"
|
||||
"github.com/calmh/syncthing/files"
|
||||
"github.com/calmh/syncthing/lamport"
|
||||
"github.com/calmh/syncthing/osutil"
|
||||
"github.com/calmh/syncthing/protocol"
|
||||
"github.com/calmh/syncthing/scanner"
|
||||
)
|
||||
@@ -43,12 +48,12 @@ type Model struct {
|
||||
clientName string
|
||||
clientVersion string
|
||||
|
||||
repoDirs map[string]string // repo -> dir
|
||||
repoFiles map[string]*files.Set // repo -> files
|
||||
repoNodes map[string][]string // repo -> nodeIDs
|
||||
nodeRepos map[string][]string // nodeID -> repos
|
||||
suppressor map[string]*suppressor // repo -> suppressor
|
||||
rmut sync.RWMutex // protects the above
|
||||
repoCfgs map[string]config.RepositoryConfiguration // repo -> cfg
|
||||
repoFiles map[string]*files.Set // repo -> files
|
||||
repoNodes map[string][]string // repo -> nodeIDs
|
||||
nodeRepos map[string][]string // nodeID -> repos
|
||||
suppressor map[string]*suppressor // repo -> suppressor
|
||||
rmut sync.RWMutex // protects the above
|
||||
|
||||
repoState map[string]repoState // repo -> state
|
||||
smut sync.RWMutex
|
||||
@@ -80,7 +85,7 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
|
||||
cfg: cfg,
|
||||
clientName: clientName,
|
||||
clientVersion: clientVersion,
|
||||
repoDirs: make(map[string]string),
|
||||
repoCfgs: make(map[string]config.RepositoryConfiguration),
|
||||
repoFiles: make(map[string]*files.Set),
|
||||
repoNodes: make(map[string][]string),
|
||||
nodeRepos: make(map[string][]string),
|
||||
@@ -93,6 +98,16 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
|
||||
sup: suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
|
||||
}
|
||||
|
||||
var timeout = 20 * 60 // seconds
|
||||
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
|
||||
it, err := strconv.Atoi(t)
|
||||
if err == nil {
|
||||
timeout = it
|
||||
}
|
||||
}
|
||||
deadlockDetect(&m.rmut, time.Duration(timeout)*time.Second)
|
||||
deadlockDetect(&m.smut, time.Duration(timeout)*time.Second)
|
||||
deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)
|
||||
go m.broadcastIndexLoop()
|
||||
return m
|
||||
}
|
||||
@@ -104,10 +119,10 @@ func (m *Model) StartRepoRW(repo string, threads int) {
|
||||
m.rmut.RLock()
|
||||
defer m.rmut.RUnlock()
|
||||
|
||||
if dir, ok := m.repoDirs[repo]; !ok {
|
||||
if cfg, ok := m.repoCfgs[repo]; !ok {
|
||||
panic("cannot start without repo")
|
||||
} else {
|
||||
newPuller(repo, dir, m, threads, m.cfg)
|
||||
newPuller(cfg, m, threads, m.cfg)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,9 +164,9 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
|
||||
|
||||
for _, repo := range m.nodeRepos[node] {
|
||||
for _, f := range m.repoFiles[repo].Global() {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
if !protocol.IsDeleted(f.Flags) {
|
||||
size := f.Size
|
||||
if f.Flags&protocol.FlagDirectory != 0 {
|
||||
if protocol.IsDirectory(f.Flags) {
|
||||
size = zeroEntrySize
|
||||
}
|
||||
tot += size
|
||||
@@ -160,9 +175,9 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
|
||||
}
|
||||
|
||||
for _, f := range m.repoFiles[repo].Need(m.cm.Get(node)) {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
if !protocol.IsDeleted(f.Flags) {
|
||||
size := f.Size
|
||||
if f.Flags&protocol.FlagDirectory != 0 {
|
||||
if protocol.IsDirectory(f.Flags) {
|
||||
size = zeroEntrySize
|
||||
}
|
||||
have -= size
|
||||
@@ -181,14 +196,23 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
|
||||
m.rmut.RUnlock()
|
||||
m.pmut.RUnlock()
|
||||
|
||||
in, out := protocol.TotalInOut()
|
||||
res["total"] = ConnectionInfo{
|
||||
Statistics: protocol.Statistics{
|
||||
At: time.Now(),
|
||||
InBytesTotal: in,
|
||||
OutBytesTotal: out,
|
||||
},
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func sizeOf(fs []scanner.File) (files, deleted int, bytes int64) {
|
||||
for _, f := range fs {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
if !protocol.IsDeleted(f.Flags) {
|
||||
files++
|
||||
if f.Flags&protocol.FlagDirectory == 0 {
|
||||
if !protocol.IsDirectory(f.Flags) {
|
||||
bytes += f.Size
|
||||
} else {
|
||||
bytes += zeroEntrySize
|
||||
@@ -234,7 +258,11 @@ func (m *Model) NeedFilesRepo(repo string) []scanner.File {
|
||||
m.rmut.RLock()
|
||||
defer m.rmut.RUnlock()
|
||||
if rf, ok := m.repoFiles[repo]; ok {
|
||||
return rf.Need(cid.LocalID)
|
||||
f := rf.Need(cid.LocalID)
|
||||
if r := m.repoCfgs[repo].FileRanker(); r != nil {
|
||||
files.SortBy(r).Sort(f)
|
||||
}
|
||||
return f
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -246,13 +274,18 @@ func (m *Model) Index(nodeID string, repo string, fs []protocol.FileInfo) {
|
||||
l.Debugf("IDX(in): %s %q: %d files", nodeID, repo, len(fs))
|
||||
}
|
||||
|
||||
if !m.repoSharedWith(repo, nodeID) {
|
||||
l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
|
||||
return
|
||||
}
|
||||
|
||||
var files = make([]scanner.File, len(fs))
|
||||
for i := range fs {
|
||||
f := fs[i]
|
||||
lamport.Default.Tick(f.Version)
|
||||
if debug {
|
||||
var flagComment string
|
||||
if f.Flags&protocol.FlagDeleted != 0 {
|
||||
if protocol.IsDeleted(f.Flags) {
|
||||
flagComment = " (deleted)"
|
||||
}
|
||||
l.Debugf("IDX(in): %s %q/%q m=%d f=%o%s v=%d (%d blocks)", nodeID, repo, f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
|
||||
@@ -265,7 +298,7 @@ func (m *Model) Index(nodeID string, repo string, fs []protocol.FileInfo) {
|
||||
if r, ok := m.repoFiles[repo]; ok {
|
||||
r.Replace(id, files)
|
||||
} else {
|
||||
l.Warnf("Index from %s for nonexistant repo %q; dropping", nodeID, repo)
|
||||
l.Fatalf("Index for nonexistant repo %q", repo)
|
||||
}
|
||||
m.rmut.RUnlock()
|
||||
}
|
||||
@@ -277,13 +310,18 @@ func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo)
|
||||
l.Debugf("IDXUP(in): %s / %q: %d files", nodeID, repo, len(fs))
|
||||
}
|
||||
|
||||
if !m.repoSharedWith(repo, nodeID) {
|
||||
l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
|
||||
return
|
||||
}
|
||||
|
||||
var files = make([]scanner.File, len(fs))
|
||||
for i := range fs {
|
||||
f := fs[i]
|
||||
lamport.Default.Tick(f.Version)
|
||||
if debug {
|
||||
var flagComment string
|
||||
if f.Flags&protocol.FlagDeleted != 0 {
|
||||
if protocol.IsDeleted(f.Flags) {
|
||||
flagComment = " (deleted)"
|
||||
}
|
||||
l.Debugf("IDXUP(in): %s %q/%q m=%d f=%o%s v=%d (%d blocks)", nodeID, repo, f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
|
||||
@@ -296,11 +334,22 @@ func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo)
|
||||
if r, ok := m.repoFiles[repo]; ok {
|
||||
r.Update(id, files)
|
||||
} else {
|
||||
l.Warnf("Index update from %s for nonexistant repo %q; dropping", nodeID, repo)
|
||||
l.Fatalf("IndexUpdate for nonexistant repo %q", repo)
|
||||
}
|
||||
m.rmut.RUnlock()
|
||||
}
|
||||
|
||||
func (m *Model) repoSharedWith(repo, nodeID string) bool {
|
||||
m.rmut.RLock()
|
||||
defer m.rmut.RUnlock()
|
||||
for _, nrepo := range m.nodeRepos[nodeID] {
|
||||
if nrepo == repo {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Model) ClusterConfig(nodeID string, config protocol.ClusterConfigMessage) {
|
||||
compErr := compareClusterConfig(m.clusterConfig(nodeID), config)
|
||||
if debug {
|
||||
@@ -320,20 +369,14 @@ func (m *Model) ClusterConfig(nodeID string, config protocol.ClusterConfigMessag
|
||||
m.nodeVer[nodeID] = config.ClientName + " " + config.ClientVersion
|
||||
}
|
||||
m.pmut.Unlock()
|
||||
|
||||
l.Infof(`Node %s client is "%s %s"`, nodeID, config.ClientName, config.ClientVersion)
|
||||
}
|
||||
|
||||
// Close removes the peer from the model and closes the underlying connection if possible.
|
||||
// Implements the protocol.Model interface.
|
||||
func (m *Model) Close(node string, err error) {
|
||||
if debug {
|
||||
l.Debugf("%s: %v", node, err)
|
||||
}
|
||||
|
||||
if err != io.EOF {
|
||||
l.Warnf("Connection to %s closed: %v", node, err)
|
||||
} else if _, ok := err.(ClusterConfigMismatch); ok {
|
||||
l.Warnf("Connection to %s closed: %v", node, err)
|
||||
}
|
||||
l.Infof("Connection to %s closed: %v", node, err)
|
||||
|
||||
cid := m.cm.Get(node)
|
||||
m.rmut.RLock()
|
||||
@@ -368,7 +411,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
|
||||
}
|
||||
|
||||
lf := r.Get(cid.LocalID, name)
|
||||
if lf.Suppressed || lf.Flags&protocol.FlagDeleted != 0 {
|
||||
if lf.Suppressed || protocol.IsDeleted(lf.Flags) {
|
||||
if debug {
|
||||
l.Debugf("REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", nodeID, repo, name, offset, size, lf)
|
||||
}
|
||||
@@ -386,7 +429,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
|
||||
l.Debugf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
|
||||
}
|
||||
m.rmut.RLock()
|
||||
fn := filepath.Join(m.repoDirs[repo], name)
|
||||
fn := filepath.Join(m.repoCfgs[repo].Directory, name)
|
||||
m.rmut.RUnlock()
|
||||
fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
|
||||
if err != nil {
|
||||
@@ -394,7 +437,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
buf := buffers.Get(int(size))
|
||||
buf := make([]byte, size)
|
||||
_, err = fd.ReadAt(buf, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -410,19 +453,6 @@ func (m *Model) ReplaceLocal(repo string, fs []scanner.File) {
|
||||
m.rmut.RUnlock()
|
||||
}
|
||||
|
||||
func (m *Model) SeedLocal(repo string, fs []protocol.FileInfo) {
|
||||
var sfs = make([]scanner.File, len(fs))
|
||||
for i := 0; i < len(fs); i++ {
|
||||
lamport.Default.Tick(fs[i].Version)
|
||||
sfs[i] = fileFromFileInfo(fs[i])
|
||||
sfs[i].Suppressed = false // we might have saved an index with files that were suppressed; the should not be on startup
|
||||
}
|
||||
|
||||
m.rmut.RLock()
|
||||
m.repoFiles[repo].Replace(cid.LocalID, sfs)
|
||||
m.rmut.RUnlock()
|
||||
}
|
||||
|
||||
func (m *Model) CurrentRepoFile(repo string, file string) scanner.File {
|
||||
m.rmut.RLock()
|
||||
f := m.repoFiles[repo].Get(cid.LocalID, file)
|
||||
@@ -487,7 +517,14 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
|
||||
if debug {
|
||||
l.Debugf("IDX(out/initial): %s: %q: %d files", nodeID, repo, len(idx))
|
||||
}
|
||||
protoConn.Index(repo, idx)
|
||||
const batchSize = 1000
|
||||
for i := 0; i < len(idx); i += batchSize {
|
||||
if len(idx[i:]) < batchSize {
|
||||
protoConn.Index(repo, idx[i:])
|
||||
} else {
|
||||
protoConn.Index(repo, idx[i:i+batchSize])
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -502,7 +539,7 @@ func (m *Model) protocolIndex(repo string) []protocol.FileInfo {
|
||||
mf := fileInfoFromFile(f)
|
||||
if debug {
|
||||
var flagComment string
|
||||
if mf.Flags&protocol.FlagDeleted != 0 {
|
||||
if protocol.IsDeleted(mf.Flags) {
|
||||
flagComment = " (deleted)"
|
||||
}
|
||||
l.Debugf("IDX(out): %q/%q m=%d f=%o%s v=%d (%d blocks)", repo, mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
|
||||
@@ -556,7 +593,10 @@ func (m *Model) broadcastIndexLoop() {
|
||||
idx := m.protocolIndex(repo)
|
||||
indexWg.Add(1)
|
||||
go func() {
|
||||
m.saveIndex(repo, m.indexDir, idx)
|
||||
err := m.saveIndex(repo, m.indexDir, idx)
|
||||
if err != nil {
|
||||
l.Infof("Saving index for %q: %v", repo, err)
|
||||
}
|
||||
indexWg.Done()
|
||||
}()
|
||||
|
||||
@@ -582,23 +622,23 @@ func (m *Model) broadcastIndexLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Model) AddRepo(id, dir string, nodes []config.NodeConfiguration) {
|
||||
func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
|
||||
if m.started {
|
||||
panic("cannot add repo to started model")
|
||||
}
|
||||
if len(id) == 0 {
|
||||
if len(cfg.ID) == 0 {
|
||||
panic("cannot add empty repo id")
|
||||
}
|
||||
|
||||
m.rmut.Lock()
|
||||
m.repoDirs[id] = dir
|
||||
m.repoFiles[id] = files.NewSet()
|
||||
m.suppressor[id] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}
|
||||
m.repoCfgs[cfg.ID] = cfg
|
||||
m.repoFiles[cfg.ID] = files.NewSet()
|
||||
m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}
|
||||
|
||||
m.repoNodes[id] = make([]string, len(nodes))
|
||||
for i, node := range nodes {
|
||||
m.repoNodes[id][i] = node.NodeID
|
||||
m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], id)
|
||||
m.repoNodes[cfg.ID] = make([]string, len(cfg.Nodes))
|
||||
for i, node := range cfg.Nodes {
|
||||
m.repoNodes[cfg.ID][i] = node.NodeID
|
||||
m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], cfg.ID)
|
||||
}
|
||||
|
||||
m.addedRepo = true
|
||||
@@ -607,8 +647,8 @@ func (m *Model) AddRepo(id, dir string, nodes []config.NodeConfiguration) {
|
||||
|
||||
func (m *Model) ScanRepos() {
|
||||
m.rmut.RLock()
|
||||
var repos = make([]string, 0, len(m.repoDirs))
|
||||
for repo := range m.repoDirs {
|
||||
var repos = make([]string, 0, len(m.repoCfgs))
|
||||
for repo := range m.repoCfgs {
|
||||
repos = append(repos, repo)
|
||||
}
|
||||
m.rmut.RUnlock()
|
||||
@@ -618,7 +658,10 @@ func (m *Model) ScanRepos() {
|
||||
for _, repo := range repos {
|
||||
repo := repo
|
||||
go func() {
|
||||
m.ScanRepo(repo)
|
||||
err := m.ScanRepo(repo)
|
||||
if err != nil {
|
||||
invalidateRepo(m.cfg, repo, err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
@@ -627,9 +670,9 @@ func (m *Model) ScanRepos() {
|
||||
|
||||
func (m *Model) CleanRepos() {
|
||||
m.rmut.RLock()
|
||||
var dirs = make([]string, 0, len(m.repoDirs))
|
||||
for _, dir := range m.repoDirs {
|
||||
dirs = append(dirs, dir)
|
||||
var dirs = make([]string, 0, len(m.repoCfgs))
|
||||
for _, cfg := range m.repoCfgs {
|
||||
dirs = append(dirs, cfg.Directory)
|
||||
}
|
||||
m.rmut.RUnlock()
|
||||
|
||||
@@ -651,12 +694,13 @@ func (m *Model) CleanRepos() {
|
||||
func (m *Model) ScanRepo(repo string) error {
|
||||
m.rmut.RLock()
|
||||
w := &scanner.Walker{
|
||||
Dir: m.repoDirs[repo],
|
||||
Dir: m.repoCfgs[repo].Directory,
|
||||
IgnoreFile: ".stignore",
|
||||
BlockSize: scanner.StandardBlockSize,
|
||||
TempNamer: defTempNamer,
|
||||
Suppressor: m.suppressor[repo],
|
||||
CurrentFiler: cFiler{m, repo},
|
||||
IgnorePerms: m.repoCfgs[repo].IgnorePerms,
|
||||
}
|
||||
m.rmut.RUnlock()
|
||||
m.setState(repo, RepoScanning)
|
||||
@@ -671,46 +715,75 @@ func (m *Model) ScanRepo(repo string) error {
|
||||
|
||||
func (m *Model) SaveIndexes(dir string) {
|
||||
m.rmut.RLock()
|
||||
for repo := range m.repoDirs {
|
||||
for repo := range m.repoCfgs {
|
||||
fs := m.protocolIndex(repo)
|
||||
m.saveIndex(repo, dir, fs)
|
||||
err := m.saveIndex(repo, dir, fs)
|
||||
if err != nil {
|
||||
l.Infof("Saving index for %q: %v", repo, err)
|
||||
}
|
||||
}
|
||||
m.rmut.RUnlock()
|
||||
}
|
||||
|
||||
func (m *Model) LoadIndexes(dir string) {
|
||||
m.rmut.RLock()
|
||||
for repo := range m.repoDirs {
|
||||
for repo := range m.repoCfgs {
|
||||
fs := m.loadIndex(repo, dir)
|
||||
m.SeedLocal(repo, fs)
|
||||
|
||||
var sfs = make([]scanner.File, len(fs))
|
||||
for i := 0; i < len(fs); i++ {
|
||||
lamport.Default.Tick(fs[i].Version)
|
||||
sfs[i] = fileFromFileInfo(fs[i])
|
||||
sfs[i].Suppressed = false // we might have saved an index with files that were suppressed; the should not be on startup
|
||||
}
|
||||
|
||||
m.repoFiles[repo].Replace(cid.LocalID, sfs)
|
||||
}
|
||||
m.rmut.RUnlock()
|
||||
}
|
||||
|
||||
func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) {
|
||||
id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoDirs[repo])))
|
||||
func (m *Model) saveIndex(repo string, dir string, fs []protocol.FileInfo) error {
|
||||
id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoCfgs[repo].Directory)))
|
||||
name := id + ".idx.gz"
|
||||
name = filepath.Join(dir, name)
|
||||
|
||||
idxf, err := os.Create(name + ".tmp")
|
||||
tmp := fmt.Sprintf("%s.tmp.%d", name, time.Now().UnixNano())
|
||||
idxf, err := os.OpenFile(tmp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tmp)
|
||||
|
||||
gzw := gzip.NewWriter(idxf)
|
||||
|
||||
protocol.IndexMessage{
|
||||
n, err := protocol.IndexMessage{
|
||||
Repository: repo,
|
||||
Files: fs,
|
||||
}.EncodeXDR(gzw)
|
||||
gzw.Close()
|
||||
idxf.Close()
|
||||
if err != nil {
|
||||
gzw.Close()
|
||||
idxf.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
Rename(name+".tmp", name)
|
||||
err = gzw.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idxf.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if debug {
|
||||
l.Debugln("wrote index,", n, "bytes uncompressed")
|
||||
}
|
||||
|
||||
return osutil.Rename(tmp, name)
|
||||
}
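Stripped of the index specifics, the pattern the reworked saveIndex follows is the usual write-to-a-unique-temp-file-then-rename dance. A generic sketch of that pattern; the helper below is mine, not part of the change, and the real code goes through osutil.Rename so that the Windows remove-before-rename quirk is handled:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"
)

// writeAtomic writes whatever fill produces to a unique temp file next to
// name, then renames it into place. On any earlier failure the temp file is
// cleaned up and the existing file, if any, is left untouched.
func writeAtomic(name string, fill func(io.Writer) error) error {
	tmp := fmt.Sprintf("%s.tmp.%d", name, time.Now().UnixNano())
	fd, err := os.OpenFile(tmp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer os.Remove(tmp) // becomes a no-op once the rename has happened

	if err := fill(fd); err != nil {
		fd.Close()
		return err
	}
	if err := fd.Close(); err != nil {
		return err
	}
	return os.Rename(tmp, name) // syncthing uses osutil.Rename here instead
}

func main() {
	err := writeAtomic("example.idx", func(w io.Writer) error {
		_, err := io.Copy(w, strings.NewReader("index contents\n"))
		return err
	})
	fmt.Println(err)
}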
|
||||
|
||||
func (m *Model) loadIndex(repo string, dir string) []protocol.FileInfo {
|
||||
id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoDirs[repo])))
|
||||
id := fmt.Sprintf("%x", sha1.Sum([]byte(m.repoCfgs[repo].Directory)))
|
||||
name := id + ".idx.gz"
|
||||
name = filepath.Join(dir, name)
|
||||
|
||||
@@ -784,3 +857,42 @@ func (m *Model) State(repo string) string {
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Model) Override(repo string) {
|
||||
fs := m.NeedFilesRepo(repo)
|
||||
|
||||
m.rmut.RLock()
|
||||
r := m.repoFiles[repo]
|
||||
m.rmut.RUnlock()
|
||||
|
||||
for i := range fs {
|
||||
f := &fs[i]
|
||||
h := r.Get(cid.LocalID, f.Name)
|
||||
if h.Name != f.Name {
|
||||
// We are missing the file
|
||||
f.Flags |= protocol.FlagDeleted
|
||||
f.Blocks = nil
|
||||
} else {
|
||||
// We have the file, replace with our version
|
||||
*f = h
|
||||
}
|
||||
f.Version = lamport.Default.Tick(f.Version)
|
||||
}
|
||||
|
||||
r.Update(cid.LocalID, fs)
|
||||
}
|
||||
|
||||
// Version returns the change version for the given repository. This is
|
||||
// guaranteed to increment if the contents of the local or global repository
|
||||
// has changed.
|
||||
func (m *Model) Version(repo string) uint64 {
|
||||
var ver uint64
|
||||
|
||||
m.rmut.Lock()
|
||||
for _, n := range m.repoNodes[repo] {
|
||||
ver += m.repoFiles[repo].Changes(m.cm.Get(n))
|
||||
}
|
||||
m.rmut.Unlock()
|
||||
|
||||
return ver
|
||||
}
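Version is what the reworked puller loop keys on further down in this diff: it only calls queueNeededBlocks again when the number reported here has grown (the prevVer check in puller.run). A standalone sketch of that consumption pattern, with invented names, assuming package model context with "time" imported:

// pollRepoChanges invokes fn whenever the repository's change version
// advances; calling Version is cheap compared to re-listing needed files
// on every tick.
func pollRepoChanges(m *Model, repo string, interval time.Duration, fn func()) {
	var prevVer uint64
	for range time.Tick(interval) {
		if v := m.Version(repo); v > prevVer {
			prevVer = v
			fn()
		}
	}
}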
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
@@ -49,7 +53,7 @@ func init() {
|
||||
|
||||
func TestRequest(t *testing.T) {
|
||||
m := NewModel("/tmp", &config.Configuration{}, "syncthing", "dev")
|
||||
m.AddRepo("default", "testdata", nil)
|
||||
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
|
||||
m.ScanRepo("default")
|
||||
|
||||
bs, err := m.Request("some node", "default", "foo", 0, 6)
|
||||
@@ -85,7 +89,7 @@ func genFiles(n int) []protocol.FileInfo {
|
||||
|
||||
func BenchmarkIndex10000(b *testing.B) {
|
||||
m := NewModel("/tmp", nil, "syncthing", "dev")
|
||||
m.AddRepo("default", "testdata", nil)
|
||||
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
|
||||
m.ScanRepo("default")
|
||||
files := genFiles(10000)
|
||||
|
||||
@@ -97,7 +101,7 @@ func BenchmarkIndex10000(b *testing.B) {
|
||||
|
||||
func BenchmarkIndex00100(b *testing.B) {
|
||||
m := NewModel("/tmp", nil, "syncthing", "dev")
|
||||
m.AddRepo("default", "testdata", nil)
|
||||
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
|
||||
m.ScanRepo("default")
|
||||
files := genFiles(100)
|
||||
|
||||
@@ -109,7 +113,7 @@ func BenchmarkIndex00100(b *testing.B) {
|
||||
|
||||
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
|
||||
m := NewModel("/tmp", nil, "syncthing", "dev")
|
||||
m.AddRepo("default", "testdata", nil)
|
||||
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
|
||||
m.ScanRepo("default")
|
||||
files := genFiles(10000)
|
||||
m.Index("42", "default", files)
|
||||
@@ -122,7 +126,7 @@ func BenchmarkIndexUpdate10000f10000(b *testing.B) {
|
||||
|
||||
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
|
||||
m := NewModel("/tmp", nil, "syncthing", "dev")
|
||||
m.AddRepo("default", "testdata", nil)
|
||||
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
|
||||
m.ScanRepo("default")
|
||||
files := genFiles(10000)
|
||||
m.Index("42", "default", files)
|
||||
@@ -136,7 +140,7 @@ func BenchmarkIndexUpdate10000f00100(b *testing.B) {
|
||||
|
||||
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
|
||||
m := NewModel("/tmp", nil, "syncthing", "dev")
|
||||
m.AddRepo("default", "testdata", nil)
|
||||
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
|
||||
m.ScanRepo("default")
|
||||
files := genFiles(10000)
|
||||
m.Index("42", "default", files)
|
||||
@@ -183,7 +187,7 @@ func (FakeConnection) Statistics() protocol.Statistics {
|
||||
|
||||
func BenchmarkRequest(b *testing.B) {
|
||||
m := NewModel("/tmp", nil, "syncthing", "dev")
|
||||
m.AddRepo("default", "testdata", nil)
|
||||
m.AddRepo(config.RepositoryConfiguration{ID: "default", Directory: "testdata"})
|
||||
m.ScanRepo("default")
|
||||
|
||||
const n = 1000
|
||||
|
||||
model/puller.go
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
@@ -5,13 +9,15 @@ import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/buffers"
|
||||
"github.com/calmh/syncthing/cid"
|
||||
"github.com/calmh/syncthing/config"
|
||||
"github.com/calmh/syncthing/osutil"
|
||||
"github.com/calmh/syncthing/protocol"
|
||||
"github.com/calmh/syncthing/scanner"
|
||||
"github.com/calmh/syncthing/versioner"
|
||||
)
|
||||
|
||||
type requestResult struct {
|
||||
@@ -63,8 +69,7 @@ var errNoNode = errors.New("no available source node")
|
||||
|
||||
type puller struct {
|
||||
cfg *config.Configuration
|
||||
repo string
|
||||
dir string
|
||||
repoCfg config.RepositoryConfiguration
|
||||
bq *blockQueue
|
||||
model *Model
|
||||
oustandingPerNode activityMap
|
||||
@@ -72,13 +77,13 @@ type puller struct {
|
||||
requestSlots chan bool
|
||||
blocks chan bqBlock
|
||||
requestResults chan requestResult
|
||||
versioner versioner.Versioner
|
||||
}
|
||||
|
||||
func newPuller(repo, dir string, model *Model, slots int, cfg *config.Configuration) *puller {
|
||||
func newPuller(repoCfg config.RepositoryConfiguration, model *Model, slots int, cfg *config.Configuration) *puller {
|
||||
p := &puller{
|
||||
repoCfg: repoCfg,
|
||||
cfg: cfg,
|
||||
repo: repo,
|
||||
dir: dir,
|
||||
bq: newBlockQueue(),
|
||||
model: model,
|
||||
oustandingPerNode: make(activityMap),
|
||||
@@ -88,19 +93,27 @@ func newPuller(repo, dir string, model *Model, slots int, cfg *config.Configurat
|
||||
requestResults: make(chan requestResult),
|
||||
}
|
||||
|
||||
if len(repoCfg.Versioning.Type) > 0 {
|
||||
factory, ok := versioner.Factories[repoCfg.Versioning.Type]
|
||||
if !ok {
|
||||
l.Fatalf("Requested versioning type %q that does not exist", repoCfg.Versioning.Type)
|
||||
}
|
||||
p.versioner = factory(repoCfg.Versioning.Params)
|
||||
}
|
||||
|
||||
if slots > 0 {
|
||||
// Read/write
|
||||
for i := 0; i < slots; i++ {
|
||||
p.requestSlots <- true
|
||||
}
|
||||
if debug {
|
||||
l.Debugf("starting puller; repo %q dir %q slots %d", repo, dir, slots)
|
||||
l.Debugf("starting puller; repo %q dir %q slots %d", repoCfg.ID, repoCfg.Directory, slots)
|
||||
}
|
||||
go p.run()
|
||||
} else {
|
||||
// Read only
|
||||
if debug {
|
||||
l.Debugf("starting puller; repo %q dir %q (read only)", repo, dir)
|
||||
l.Debugf("starting puller; repo %q dir %q (read only)", repoCfg.ID, repoCfg.Directory)
|
||||
}
|
||||
go p.runRO()
|
||||
}
|
||||
@@ -114,7 +127,7 @@ func (p *puller) run() {
|
||||
<-p.requestSlots
|
||||
b := p.bq.get()
|
||||
if debug {
|
||||
l.Debugf("filler: queueing %q / %q offset %d copy %d", p.repo, b.file.Name, b.block.Offset, len(b.copy))
|
||||
l.Debugf("filler: queueing %q / %q offset %d copy %d", p.repoCfg.ID, b.file.Name, b.block.Offset, len(b.copy))
|
||||
}
|
||||
p.blocks <- b
|
||||
}
|
||||
@@ -123,6 +136,7 @@ func (p *puller) run() {
|
||||
walkTicker := time.Tick(time.Duration(p.cfg.Options.RescanIntervalS) * time.Second)
|
||||
timeout := time.Tick(5 * time.Second)
|
||||
changed := true
|
||||
var prevVer uint64
|
||||
|
||||
for {
|
||||
// Run the pulling loop as long as there are blocks to fetch
|
||||
@@ -130,13 +144,13 @@ func (p *puller) run() {
|
||||
for {
|
||||
select {
|
||||
case res := <-p.requestResults:
|
||||
p.model.setState(p.repo, RepoSyncing)
|
||||
p.model.setState(p.repoCfg.ID, RepoSyncing)
|
||||
changed = true
|
||||
p.requestSlots <- true
|
||||
p.handleRequestResult(res)
|
||||
|
||||
case b := <-p.blocks:
|
||||
p.model.setState(p.repo, RepoSyncing)
|
||||
p.model.setState(p.repoCfg.ID, RepoSyncing)
|
||||
changed = true
|
||||
if p.handleBlock(b) {
|
||||
// Block was fully handled, free up the slot
|
||||
@@ -149,7 +163,7 @@ func (p *puller) run() {
|
||||
break pull
|
||||
}
|
||||
if debug {
|
||||
l.Debugf("%q: idle but have %d open files", p.repo, len(p.openFiles))
|
||||
l.Debugf("%q: idle but have %d open files", p.repoCfg.ID, len(p.openFiles))
|
||||
i := 5
|
||||
for _, f := range p.openFiles {
|
||||
l.Debugf(" %v", f)
|
||||
@@ -163,30 +177,33 @@ func (p *puller) run() {
|
||||
}
|
||||
|
||||
if changed {
|
||||
p.model.setState(p.repo, RepoCleaning)
|
||||
p.model.setState(p.repoCfg.ID, RepoCleaning)
|
||||
p.fixupDirectories()
|
||||
changed = false
|
||||
}
|
||||
|
||||
p.model.setState(p.repo, RepoIdle)
|
||||
p.model.setState(p.repoCfg.ID, RepoIdle)
|
||||
|
||||
// Do a rescan if it's time for it
|
||||
select {
|
||||
case <-walkTicker:
|
||||
if debug {
|
||||
l.Debugf("%q: time for rescan", p.repo)
|
||||
l.Debugf("%q: time for rescan", p.repoCfg.ID)
|
||||
}
|
||||
err := p.model.ScanRepo(p.repo)
|
||||
err := p.model.ScanRepo(p.repoCfg.ID)
|
||||
if err != nil {
|
||||
invalidateRepo(p.cfg, p.repo, err)
|
||||
invalidateRepo(p.cfg, p.repoCfg.ID, err)
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
}
|
||||
|
||||
// Queue more blocks to fetch, if any
|
||||
p.queueNeededBlocks()
|
||||
if v := p.model.Version(p.repoCfg.ID); v > prevVer {
|
||||
// Queue more blocks to fetch, if any
|
||||
p.queueNeededBlocks()
|
||||
prevVer = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -195,11 +212,11 @@ func (p *puller) runRO() {
|
||||
|
||||
for _ = range walkTicker {
|
||||
if debug {
|
||||
l.Debugf("%q: time for rescan", p.repo)
|
||||
l.Debugf("%q: time for rescan", p.repoCfg.ID)
|
||||
}
|
||||
err := p.model.ScanRepo(p.repo)
|
||||
err := p.model.ScanRepo(p.repoCfg.ID)
|
||||
if err != nil {
|
||||
invalidateRepo(p.cfg, p.repo, err)
|
||||
invalidateRepo(p.cfg, p.repoCfg.ID, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -210,11 +227,15 @@ func (p *puller) fixupDirectories() {
|
||||
var changed = 0
|
||||
|
||||
var walkFn = func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
rn, err := filepath.Rel(p.dir, path)
|
||||
rn, err := filepath.Rel(p.repoCfg.Directory, path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
@@ -223,7 +244,11 @@ func (p *puller) fixupDirectories() {
|
||||
return nil
|
||||
}
|
||||
|
||||
cur := p.model.CurrentRepoFile(p.repo, rn)
|
||||
if filepath.Base(rn) == ".stversions" {
|
||||
return nil
|
||||
}
|
||||
|
||||
cur := p.model.CurrentRepoFile(p.repoCfg.ID, rn)
|
||||
if cur.Name != rn {
|
||||
// No matching dir in current list; weird
|
||||
if debug {
|
||||
@@ -232,7 +257,7 @@ func (p *puller) fixupDirectories() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if cur.Flags&protocol.FlagDeleted != 0 {
|
||||
if protocol.IsDeleted(cur.Flags) {
|
||||
if debug {
|
||||
l.Debugf("queue delete dir: %v", cur)
|
||||
}
|
||||
@@ -245,10 +270,10 @@ func (p *puller) fixupDirectories() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if cur.Flags&uint32(os.ModePerm) != uint32(info.Mode()&os.ModePerm) {
|
||||
if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(cur.Flags) && !scanner.PermsEqual(cur.Flags, uint32(info.Mode())) {
|
||||
err := os.Chmod(path, os.FileMode(cur.Flags)&os.ModePerm)
|
||||
if err != nil {
|
||||
l.Warnln("Restoring folder flags: %q: %v", path, err)
|
||||
l.Warnf("Restoring folder flags: %q: %v", path, err)
|
||||
} else {
|
||||
changed++
|
||||
if debug {
|
||||
@@ -261,7 +286,10 @@ func (p *puller) fixupDirectories() {
|
||||
t := time.Unix(cur.Modified, 0)
|
||||
err := os.Chtimes(path, t, t)
|
||||
if err != nil {
|
||||
l.Warnln("Restoring folder modtime: %q: %v", path, err)
|
||||
if runtime.GOOS != "windows" {
|
||||
// https://code.google.com/p/go/issues/detail?id=8090
|
||||
l.Warnf("Restoring folder modtime: %q: %v", path, err)
|
||||
}
|
||||
} else {
|
||||
changed++
|
||||
if debug {
|
||||
@@ -276,7 +304,7 @@ func (p *puller) fixupDirectories() {
|
||||
for {
|
||||
deleteDirs = nil
|
||||
changed = 0
|
||||
filepath.Walk(p.dir, walkFn)
|
||||
filepath.Walk(p.repoCfg.Directory, walkFn)
|
||||
|
||||
var deleted = 0
|
||||
// Delete any queued directories
|
||||
@@ -286,10 +314,10 @@ func (p *puller) fixupDirectories() {
|
||||
l.Debugln("delete dir:", dir)
|
||||
}
|
||||
err := os.Remove(dir)
|
||||
if err != nil {
|
||||
l.Warnln(err)
|
||||
} else {
|
||||
if err == nil {
|
||||
deleted++
|
||||
} else if p.versioner == nil { // Failures are expected in the presence of versioning
|
||||
l.Warnln(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,13 +342,12 @@ func (p *puller) handleRequestResult(res requestResult) {
|
||||
}
|
||||
|
||||
_, of.err = of.file.WriteAt(res.data, res.offset)
|
||||
buffers.Put(res.data)
|
||||
|
||||
of.outstanding--
|
||||
p.openFiles[f.Name] = of
|
||||
|
||||
if debug {
|
||||
l.Debugf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repo, f.Name, res.offset, of.outstanding, of.done)
|
||||
l.Debugf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repoCfg.ID, f.Name, res.offset, of.outstanding, of.done)
|
||||
}
|
||||
|
||||
if of.done && of.outstanding == 0 {
|
||||
@@ -336,9 +363,9 @@ func (p *puller) handleBlock(b bqBlock) bool {
|
||||
|
||||
// For directories, making sure they exist is enough.
|
||||
// Deleted directories we mark as handled and delete later.
|
||||
if f.Flags&protocol.FlagDirectory != 0 {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
path := filepath.Join(p.dir, f.Name)
|
||||
if protocol.IsDirectory(f.Flags) {
|
||||
if !protocol.IsDeleted(f.Flags) {
|
||||
path := filepath.Join(p.repoCfg.Directory, f.Name)
|
||||
_, err := os.Stat(path)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
if debug {
|
||||
@@ -352,7 +379,30 @@ func (p *puller) handleBlock(b bqBlock) bool {
|
||||
} else if debug {
|
||||
l.Debugf("ignore delete dir: %v", f)
|
||||
}
|
||||
p.model.updateLocal(p.repo, f)
|
||||
p.model.updateLocal(p.repoCfg.ID, f)
|
||||
return true
|
||||
}
|
||||
|
||||
if len(b.copy) > 0 && len(b.copy) == len(b.file.Blocks) && b.last {
|
||||
// We are supposed to copy the entire file, and then fetch nothing.
|
||||
// We don't actually need to make the copy.
|
||||
if debug {
|
||||
l.Debugln("taking shortcut:", f)
|
||||
}
|
||||
fp := filepath.Join(p.repoCfg.Directory, f.Name)
|
||||
t := time.Unix(f.Modified, 0)
|
||||
err := os.Chtimes(fp, t, t)
|
||||
if debug && err != nil {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
|
||||
err = os.Chmod(fp, os.FileMode(f.Flags&0777))
|
||||
if debug && err != nil {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
p.model.updateLocal(p.repoCfg.ID, f)
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -361,12 +411,12 @@ func (p *puller) handleBlock(b bqBlock) bool {
|
||||
|
||||
if !ok {
|
||||
if debug {
|
||||
l.Debugf("pull: %q: opening file %q", p.repo, f.Name)
|
||||
l.Debugf("pull: %q: opening file %q", p.repoCfg.ID, f.Name)
|
||||
}
|
||||
|
||||
of.availability = uint64(p.model.repoFiles[p.repo].Availability(f.Name))
|
||||
of.filepath = filepath.Join(p.dir, f.Name)
|
||||
of.temp = filepath.Join(p.dir, defTempNamer.TempName(f.Name))
|
||||
of.availability = uint64(p.model.repoFiles[p.repoCfg.ID].Availability(f.Name))
|
||||
of.filepath = filepath.Join(p.repoCfg.Directory, f.Name)
|
||||
of.temp = filepath.Join(p.repoCfg.Directory, defTempNamer.TempName(f.Name))
|
||||
|
||||
dirName := filepath.Dir(of.filepath)
|
||||
_, err := os.Stat(dirName)
|
||||
@@ -374,26 +424,26 @@ func (p *puller) handleBlock(b bqBlock) bool {
|
||||
err = os.MkdirAll(dirName, 0777)
|
||||
}
|
||||
if err != nil {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
|
||||
of.file, of.err = os.Create(of.temp)
|
||||
if of.err != nil {
|
||||
if debug {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
|
||||
}
|
||||
if !b.last {
|
||||
p.openFiles[f.Name] = of
|
||||
}
|
||||
return true
|
||||
}
|
||||
defTempNamer.Hide(of.temp)
|
||||
osutil.HideFile(of.temp)
|
||||
}
|
||||
|
||||
if of.err != nil {
|
||||
// We have already failed this file.
|
||||
if debug {
|
||||
l.Debugf("pull: error: %q / %q has already failed: %v", p.repo, f.Name, of.err)
|
||||
l.Debugf("pull: error: %q / %q has already failed: %v", p.repoCfg.ID, f.Name, of.err)
|
||||
}
|
||||
if b.last {
|
||||
delete(p.openFiles, f.Name)
|
||||
@@ -424,14 +474,14 @@ func (p *puller) handleCopyBlock(b bqBlock) {
|
||||
of := p.openFiles[f.Name]
|
||||
|
||||
if debug {
|
||||
l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repo, f.Name)
|
||||
l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repoCfg.ID, f.Name)
|
||||
}
|
||||
|
||||
var exfd *os.File
|
||||
exfd, of.err = os.Open(of.filepath)
|
||||
if of.err != nil {
|
||||
if debug {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
|
||||
}
|
||||
of.file.Close()
|
||||
of.file = nil
|
||||
@@ -442,15 +492,14 @@ func (p *puller) handleCopyBlock(b bqBlock) {
|
||||
defer exfd.Close()
|
||||
|
||||
for _, b := range b.copy {
|
||||
bs := buffers.Get(int(b.Size))
|
||||
bs := make([]byte, b.Size)
|
||||
_, of.err = exfd.ReadAt(bs, b.Offset)
|
||||
if of.err == nil {
|
||||
_, of.err = of.file.WriteAt(bs, b.Offset)
|
||||
}
|
||||
buffers.Put(bs)
|
||||
if of.err != nil {
|
||||
if debug {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, of.err)
|
||||
}
|
||||
exfd.Close()
|
||||
of.file.Close()
|
||||
@@ -493,10 +542,10 @@ func (p *puller) handleRequestBlock(b bqBlock) bool {
|
||||
|
||||
go func(node string, b bqBlock) {
|
||||
if debug {
|
||||
l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repo, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
|
||||
l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repoCfg.ID, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
|
||||
}
|
||||
|
||||
bs, err := p.model.requestGlobal(node, p.repo, f.Name, b.block.Offset, int(b.block.Size), nil)
|
||||
bs, err := p.model.requestGlobal(node, p.repoCfg.ID, f.Name, b.block.Offset, int(b.block.Size), nil)
|
||||
p.requestResults <- requestResult{
|
||||
node: node,
|
||||
file: f,
|
||||
@@ -520,31 +569,35 @@ func (p *puller) handleEmptyBlock(b bqBlock) {
|
||||
}
|
||||
}
|
||||
|
||||
if f.Flags&protocol.FlagDeleted != 0 {
|
||||
if protocol.IsDeleted(f.Flags) {
|
||||
if debug {
|
||||
l.Debugf("pull: delete %q", f.Name)
|
||||
}
|
||||
os.Remove(of.temp)
|
||||
os.Chmod(of.filepath, 0666)
|
||||
if err := os.Remove(of.filepath); err == nil || os.IsNotExist(err) {
|
||||
p.model.updateLocal(p.repo, f)
|
||||
if p.versioner != nil {
|
||||
if err := p.versioner.Archive(of.filepath); err == nil {
|
||||
p.model.updateLocal(p.repoCfg.ID, f)
|
||||
}
|
||||
} else if err := os.Remove(of.filepath); err == nil || os.IsNotExist(err) {
|
||||
p.model.updateLocal(p.repoCfg.ID, f)
|
||||
}
|
||||
} else {
|
||||
if debug {
|
||||
l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repo, f.Name)
|
||||
l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repoCfg.ID, f.Name)
|
||||
}
|
||||
t := time.Unix(f.Modified, 0)
|
||||
if os.Chtimes(of.temp, t, t) != nil {
|
||||
delete(p.openFiles, f.Name)
|
||||
return
|
||||
}
|
||||
if os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
|
||||
if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) && os.Chmod(of.temp, os.FileMode(f.Flags&0777)) != nil {
|
||||
delete(p.openFiles, f.Name)
|
||||
return
|
||||
}
|
||||
defTempNamer.Show(of.temp)
|
||||
if Rename(of.temp, of.filepath) == nil {
|
||||
p.model.updateLocal(p.repo, f)
|
||||
osutil.ShowFile(of.temp)
|
||||
if osutil.Rename(of.temp, of.filepath) == nil {
|
||||
p.model.updateLocal(p.repoCfg.ID, f)
|
||||
}
|
||||
}
|
||||
delete(p.openFiles, f.Name)
|
||||
@@ -552,8 +605,8 @@ func (p *puller) handleEmptyBlock(b bqBlock) {
|
||||
|
||||
func (p *puller) queueNeededBlocks() {
|
||||
queued := 0
|
||||
for _, f := range p.model.NeedFilesRepo(p.repo) {
|
||||
lf := p.model.CurrentRepoFile(p.repo, f.Name)
|
||||
for _, f := range p.model.NeedFilesRepo(p.repoCfg.ID) {
|
||||
lf := p.model.CurrentRepoFile(p.repoCfg.ID, f.Name)
|
||||
have, need := scanner.BlockDiff(lf.Blocks, f.Blocks)
|
||||
if debug {
|
||||
l.Debugf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need)
|
||||
@@ -566,13 +619,13 @@ func (p *puller) queueNeededBlocks() {
|
||||
})
|
||||
}
|
||||
if debug && queued > 0 {
|
||||
l.Debugf("%q: queued %d blocks", p.repo, queued)
|
||||
l.Debugf("%q: queued %d blocks", p.repoCfg.ID, queued)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *puller) closeFile(f scanner.File) {
|
||||
if debug {
|
||||
l.Debugf("pull: closing %q / %q", p.repo, f.Name)
|
||||
l.Debugf("pull: closing %q / %q", p.repoCfg.ID, f.Name)
|
||||
}
|
||||
|
||||
of := p.openFiles[f.Name]
|
||||
@@ -584,7 +637,7 @@ func (p *puller) closeFile(f scanner.File) {
|
||||
fd, err := os.Open(of.temp)
|
||||
if err != nil {
|
||||
if debug {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -593,29 +646,49 @@ func (p *puller) closeFile(f scanner.File) {
|
||||
|
||||
if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
|
||||
if debug {
|
||||
l.Debugf("pull: %q / %q: nblocks %d != %d", p.repo, f.Name, l0, l1)
|
||||
l.Debugf("pull: %q / %q: nblocks %d != %d", p.repoCfg.ID, f.Name, l0, l1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for i := range hb {
|
||||
if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
|
||||
l.Debugf("pull: %q / %q: block %d hash mismatch", p.repo, f.Name, i)
|
||||
l.Debugf("pull: %q / %q: block %d hash mismatch", p.repoCfg.ID, f.Name, i)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
t := time.Unix(f.Modified, 0)
|
||||
os.Chtimes(of.temp, t, t)
|
||||
os.Chmod(of.temp, os.FileMode(f.Flags&0777))
|
||||
defTempNamer.Show(of.temp)
|
||||
if debug {
|
||||
l.Debugf("pull: rename %q / %q: %q", p.repo, f.Name, of.filepath)
|
||||
err = os.Chtimes(of.temp, t, t)
|
||||
if debug && err != nil {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
if err := Rename(of.temp, of.filepath); err == nil {
|
||||
p.model.updateLocal(p.repo, f)
|
||||
if !p.repoCfg.IgnorePerms && protocol.HasPermissionBits(f.Flags) {
|
||||
err = os.Chmod(of.temp, os.FileMode(f.Flags&0777))
|
||||
if debug && err != nil {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
osutil.ShowFile(of.temp)
|
||||
|
||||
if p.versioner != nil {
|
||||
err := p.versioner.Archive(of.filepath)
|
||||
if err != nil {
|
||||
if debug {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
l.Debugf("pull: rename %q / %q: %q", p.repoCfg.ID, f.Name, of.filepath)
|
||||
}
|
||||
if err := osutil.Rename(of.temp, of.filepath); err == nil {
|
||||
p.model.updateLocal(p.repoCfg.ID, f)
|
||||
} else {
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
|
||||
l.Debugf("pull: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package model
|
||||
@@ -23,11 +27,3 @@ func (t tempNamer) TempName(name string) string {
|
||||
tname := fmt.Sprintf("%s.%s", t.prefix, filepath.Base(name))
|
||||
return filepath.Join(tdir, tname)
|
||||
}
|
||||
|
||||
func (t tempNamer) Hide(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t tempNamer) Show(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package model
|
||||
@@ -6,7 +10,6 @@ import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type tempNamer struct {
|
||||
@@ -24,33 +27,3 @@ func (t tempNamer) TempName(name string) string {
|
||||
tname := fmt.Sprintf("%s.%s.tmp", t.prefix, filepath.Base(name))
|
||||
return filepath.Join(tdir, tname)
|
||||
}
|
||||
|
||||
func (t tempNamer) Hide(path string) error {
|
||||
p, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs, err := syscall.GetFileAttributes(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs |= syscall.FILE_ATTRIBUTE_HIDDEN
|
||||
return syscall.SetFileAttributes(p, attrs)
|
||||
}
|
||||
|
||||
func (t tempNamer) Show(path string) error {
|
||||
p, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs, err := syscall.GetFileAttributes(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs &^= syscall.FILE_ATTRIBUTE_HIDDEN
|
||||
return syscall.SetFileAttributes(p, attrs)
|
||||
}
|
||||
|
||||
@@ -1,27 +1,19 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/protocol"
|
||||
"github.com/calmh/syncthing/scanner"
|
||||
)
|
||||
|
||||
func Rename(from, to string) error {
|
||||
if runtime.GOOS == "windows" {
|
||||
os.Chmod(to, 0666) // Make sure the file is user writeable
|
||||
err := os.Remove(to)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
l.Warnln(err)
|
||||
}
|
||||
}
|
||||
defer os.Remove(from) // Don't leave a dangling temp file in case of rename error
|
||||
return os.Rename(from, to)
|
||||
}
|
||||
|
||||
func fileFromFileInfo(f protocol.FileInfo) scanner.File {
|
||||
var blocks = make([]scanner.Block, len(f.Blocks))
|
||||
var offset int64
|
||||
@@ -95,17 +87,32 @@ func compareClusterConfig(local, remote protocol.ClusterConfigMessage) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return ClusterConfigMismatch(fmt.Errorf("remote is missing repository %q", repo))
|
||||
}
|
||||
}
|
||||
|
||||
for repo := range rm {
|
||||
if _, ok := lm[repo]; !ok {
|
||||
return ClusterConfigMismatch(fmt.Errorf("remote has extra repository %q", repo))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func deadlockDetect(mut sync.Locker, timeout time.Duration) {
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(timeout / 4)
|
||||
ok := make(chan bool, 2)
|
||||
|
||||
go func() {
|
||||
mut.Lock()
|
||||
mut.Unlock()
|
||||
ok <- true
|
||||
}()
|
||||
|
||||
go func() {
|
||||
time.Sleep(timeout)
|
||||
ok <- false
|
||||
}()
|
||||
|
||||
if r := <-ok; !r {
|
||||
panic("deadlock detected")
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
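NewModel, earlier in this diff, arms this watchdog on rmut, smut and pmut with a timeout that STDEADLOCKTIMEOUT can override. On its own the call looks like the short sketch below, shown in package model context with "sync" and "time" imported; note that because the probe takes a full Lock, a reader holding RLock past the timeout trips it as well.

// Illustration only; assumes this sits inside package model next to deadlockDetect.
var stateMut sync.RWMutex

func init() {
	// Panic if stateMut cannot be acquired within 20 minutes, the same
	// default timeout NewModel uses above.
	deadlockDetect(&stateMut, 20*time.Minute)
}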
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
@@ -20,24 +24,6 @@ var testcases = []struct {
|
||||
remote: protocol.ClusterConfigMessage{ClientName: "c", ClientVersion: "d"},
|
||||
err: "",
|
||||
},
|
||||
{
|
||||
local: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
{ID: "foo"},
|
||||
},
|
||||
},
|
||||
remote: protocol.ClusterConfigMessage{ClientName: "c", ClientVersion: "d"},
|
||||
err: `remote is missing repository "foo"`,
|
||||
},
|
||||
{
|
||||
local: protocol.ClusterConfigMessage{ClientName: "c", ClientVersion: "d"},
|
||||
remote: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
{ID: "foo"},
|
||||
},
|
||||
},
|
||||
err: `remote has extra repository "foo"`,
|
||||
},
|
||||
{
|
||||
local: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
@@ -53,38 +39,6 @@ var testcases = []struct {
|
||||
},
|
||||
err: "",
|
||||
},
|
||||
{
|
||||
local: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
{ID: "quux"},
|
||||
{ID: "foo"},
|
||||
{ID: "bar"},
|
||||
},
|
||||
},
|
||||
remote: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
{ID: "bar"},
|
||||
{ID: "quux"},
|
||||
},
|
||||
},
|
||||
err: `remote is missing repository "foo"`,
|
||||
},
|
||||
{
|
||||
local: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
{ID: "quux"},
|
||||
{ID: "bar"},
|
||||
},
|
||||
},
|
||||
remote: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
{ID: "bar"},
|
||||
{ID: "foo"},
|
||||
{ID: "quux"},
|
||||
},
|
||||
},
|
||||
err: `remote has extra repository "foo"`,
|
||||
},
|
||||
{
|
||||
local: protocol.ClusterConfigMessage{
|
||||
Repositories: []protocol.Repository{
|
||||
|
||||
osutil/hidden_unix.go (new file)
@@ -0,0 +1,15 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package osutil
|
||||
|
||||
func HideFile(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func ShowFile(path string) error {
|
||||
return nil
|
||||
}
|
||||
osutil/hidden_windows.go (new file)
@@ -0,0 +1,39 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package osutil
|
||||
|
||||
import "syscall"
|
||||
|
||||
func HideFile(path string) error {
|
||||
p, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs, err := syscall.GetFileAttributes(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs |= syscall.FILE_ATTRIBUTE_HIDDEN
|
||||
return syscall.SetFileAttributes(p, attrs)
|
||||
}
|
||||
|
||||
func ShowFile(path string) error {
|
||||
p, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs, err := syscall.GetFileAttributes(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attrs &^= syscall.FILE_ATTRIBUTE_HIDDEN
|
||||
return syscall.SetFileAttributes(p, attrs)
|
||||
}
|
||||
osutil/osutil.go (new file)
@@ -0,0 +1,22 @@
|
||||
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
||||
// Use of this source code is governed by an MIT-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package osutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func Rename(from, to string) error {
|
||||
if runtime.GOOS == "windows" {
|
||||
os.Chmod(to, 0666) // Make sure the file is user writeable
|
||||
err := os.Remove(to)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer os.Remove(from) // Don't leave a dangling temp file in case of rename error
|
||||
return os.Rename(from, to)
|
||||
}
|
||||
@@ -59,10 +59,11 @@ or certificate pinning combined with some out of band first
verification. The reference implementation uses preshared certificate
fingerprints (SHA-256) referred to as "Node IDs".

There is no required order or synchronization among BEP messages - any
message type may be sent at any time and the sender need not await a
response to one message before sending another. Responses MUST however
be sent in the same order as the requests are received.
There is no required order or synchronization among BEP messages except
as noted per message type - any message type may be sent at any time and
the sender need not await a response to one message before sending
another. Responses MUST however be sent in the same order as the
requests are received.

The underlying transport protocol MUST be TCP.
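The ordering rule above can be satisfied while still serving requests concurrently by reserving a reply slot per request and draining the slots in arrival order. A minimal hedged sketch of that idea, with invented names that do not come from the syncthing source:

package main

import "fmt"

type request struct {
	id   int
	work func() string
}

// serveInOrder processes requests concurrently but emits the replies in the
// order the requests arrived, as BEP demands.
func serveInOrder(reqs []request) {
	replies := make([]chan string, len(reqs))
	for i, r := range reqs {
		replies[i] = make(chan string, 1)
		go func(r request, out chan<- string) {
			out <- r.work() // may finish out of order
		}(r, replies[i])
	}
	for i := range replies {
		fmt.Printf("reply to #%d: %s\n", reqs[i].id, <-replies[i]) // emitted in arrival order
	}
}

func main() {
	serveInOrder([]request{
		{1, func() string { return "data for block 1" }},
		{2, func() string { return "data for block 2" }},
	})
}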
@@ -70,12 +71,13 @@ Messages
--------

Every message starts with one 32 bit word indicating the message
version, type and ID.
version, type and ID. The header is in network byte order, i.e. big
endian.

0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Ver | Type | Message ID | Reply To |
| Ver | Message ID | Type | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

For BEP v1 the Version field is set to zero. Future versions with
@@ -84,19 +86,19 @@ with an unknown version is a protocol error and MUST result in the
connection being terminated. A client supporting multiple versions MAY
retry with a different protocol version upon disconnection.

The Message ID is set to a unique value for each transmitted request
message. In response messages it is set to the Message ID of the
corresponding request message. The uniqueness requirement implies that
no more than 4096 messages may be outstanding at any given moment. The
ordering requirement implies that a response to a given message ID also
means that all preceding messages have been received, specifically those
which do not otherwise demand a response. Hence their message ID:s may
be reused.

The Type field indicates the type of data following the message header
and is one of the integers defined below. A message of an unknown type
is a protocol error and MUST result in the connection being terminated.

The Message ID is set to a unique value for each transmitted message. In
request messages the Reply To is set to zero. In response messages it is
set to the message ID of the corresponding request. The uniqueness
requirement implies that no more than 4096 messages may be outstanding
at any given moment. The ordering requirement implies that a response to
a given message ID also means that all preceding messages have been
received, specifically those which do not otherwise demand a response.
Hence their message ID:s may be reused.
|
||||
|
||||
All data following the message header MUST be in XDR (RFC 1014)
|
||||
encoding. All fields shorter than 32 bits and all variable length data
|
||||
MUST be padded to a multiple of 32 bits. The actual data types in use by
|
||||
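The 32-bit alignment rule can be illustrated with a small generic helper; this is not the API of the xdr package the implementation uses:

    package example

    // pad32 appends data followed by however many zero bytes are needed to
    // bring its length up to a multiple of 32 bits (4 bytes), as required for
    // all variable length data in BEP messages. The XDR length prefix that
    // precedes variable length fields is omitted here for brevity.
    func pad32(dst, data []byte) []byte {
        dst = append(dst, data...)
        if n := len(data) % 4; n != 0 {
            dst = append(dst, make([]byte, 4-n)...)
        }
        return dst
    }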
@@ -117,8 +119,9 @@ normalization form C.
 ### Cluster Config (Type = 0)
 
 This informational message provides information about the cluster
-configuration, as it pertains to the current connection. It is sent by
-both sides after connection establishment.
+configuration as it pertains to the current connection. A Cluster Config
+message MUST be the first message sent on a BEP connection. Additional
+Cluster Config messages MUST NOT be sent after the initial exchange.
 
 #### Graphical Representation
 
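A hedged receive-side sketch of the new ordering rule; the state struct and error strings are illustrative and not taken from the implementation:

    package example

    import "errors"

    const messageTypeClusterConfig = 0 // Type = 0 per the spec

    // connState remembers whether the initial Cluster Config has been seen,
    // so both MUST rules above can be enforced as messages arrive.
    type connState struct {
        gotClusterConfig bool
    }

    func (s *connState) checkOrder(msgType int) error {
        switch {
        case !s.gotClusterConfig && msgType != messageTypeClusterConfig:
            return errors.New("protocol error: first message must be Cluster Config")
        case s.gotClusterConfig && msgType == messageTypeClusterConfig:
            return errors.New("protocol error: repeated Cluster Config")
        }
        if msgType == messageTypeClusterConfig {
            s.gotClusterConfig = true
        }
        return nil
    }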
@@ -294,11 +297,12 @@ peers acting in a specific manner as a result of sent options.
 ### Index (Type = 1)
 
 The Index message defines the contents of the senders repository. An
-Index message MUST be sent by each node immediately upon connection. A
-node with no data to advertise MUST send an empty Index message (a file
-list of zero length). If the repository contents change from non-empty
-to empty, an empty Index message MUST be sent. There is no response to
-the Index message.
+Index message MUST be sent for each repository mentioned in the Cluster
+Config message. An Index message for a repository MUST be sent before
+any other message referring to that repository. A node with no data to
+advertise MUST send an empty Index message (a file list of zero length).
+If the repository contents change from non-empty to empty, an empty
+Index message MUST be sent. There is no response to the Index message.
 
 #### Graphical Representation
 
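A hedged sender-side sketch of the per-repository rule; the Indexer interface, the trimmed FileInfo and the repoFiles lookup are assumptions for illustration, not the implementation's connection API:

    package example

    type FileInfo struct{ Name string } // trimmed down for this sketch

    // Indexer is the single method assumed here for sending Index messages.
    type Indexer interface {
        Index(repo string, files []FileInfo)
    }

    // sendInitialIndexes sends one Index message per repository announced in
    // the Cluster Config, before anything else refers to that repository; a
    // repository with nothing to advertise gets an empty (zero length) list.
    func sendInitialIndexes(conn Indexer, repos []string, repoFiles map[string][]FileInfo) {
        for _, repo := range repos {
            conn.Index(repo, repoFiles[repo]) // nil slice encodes as an empty file list
        }
    }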
@@ -388,7 +392,7 @@ The Flags field is made up of the following single bit flags:
  0 1 2 3
  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-| Reserved |I|D| Unix Perm. & Mode |
+| Reserved |P|I|D| Unix Perm. & Mode |
 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 
 - The lower 12 bits hold the common Unix permission and mode bits. An
@@ -404,7 +408,13 @@ The Flags field is made up of the following single bit flags:
   synchronization. A peer MAY set this bit to indicate that it can
   temporarily not serve data for the file.
 
-- Bit 0 through 17 are reserved for future use and SHALL be set to
+- Bit 17 ("P") is set when there is no permission information for the
+  file. This is the case when it originates on a non-permission-
+  supporting file system. Changes to only permission bits SHOULD be
+  disregarded on files with this bit set. The permissions bits MUST be
+  set to the octal value 0666.
+
+- Bit 0 through 16 are reserved for future use and SHALL be set to
   zero.
 
 The hash algorithm is implied by the Hash length. Currently, the hash
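Read together with the diagram, the three single-bit flags sit directly above the twelve permission and mode bits. The constant names and values below are an interpretation of that layout (counting bit 0 as the most significant bit of the 32-bit Flags word), not definitions quoted from the spec:

    package example

    // Flag bits implied by the diagram: "D" at bit 19, "I" at bit 18 and the
    // new "P" at bit 17, i.e. just above the low 12 permission/mode bits.
    const (
        FlagDeleted    = 1 << 12 // "D": the file has been deleted
        FlagInvalid    = 1 << 13 // "I": the file is invalid and cannot be served
        FlagNoPermBits = 1 << 14 // "P": no permission information; perm bits read 0666
    )

    // permissionsChanged reports whether a permission-only difference between
    // two Flags words should be acted on; per the "P" rule it is ignored when
    // either side lacks permission information.
    func permissionsChanged(a, b uint32) bool {
        if (a|b)&FlagNoPermBits != 0 {
            return false
        }
        return a&0xfff != b&0xfff
    }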
@@ -569,7 +579,7 @@ Message Limits
 
 An implementation MAY impose reasonable limits on the length of message
 fields to aid robustness in the face of corruption or broken
-implementations. These limits, if imposed, SHOULD not be more
+implementations. These limits, if imposed, SHOULD NOT be more
 restrictive than the following:
 
 ### Index and Index Update Messages
 
@@ -1,3 +1,7 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 package protocol
 
 import (
@@ -1,3 +1,7 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 package protocol
 
 import (
@@ -10,9 +14,15 @@ type countingReader struct {
 	tot uint64
 }
 
+var (
+	totalIncoming uint64
+	totalOutgoing uint64
+)
+
 func (c *countingReader) Read(bs []byte) (int, error) {
 	n, err := c.Reader.Read(bs)
 	atomic.AddUint64(&c.tot, uint64(n))
+	atomic.AddUint64(&totalIncoming, uint64(n))
 	return n, err
 }
 
@@ -28,9 +38,14 @@ type countingWriter struct {
 func (c *countingWriter) Write(bs []byte) (int, error) {
 	n, err := c.Writer.Write(bs)
 	atomic.AddUint64(&c.tot, uint64(n))
+	atomic.AddUint64(&totalOutgoing, uint64(n))
 	return n, err
 }
 
 func (c *countingWriter) Tot() uint64 {
 	return atomic.LoadUint64(&c.tot)
 }
+
+func TotalInOut() (uint64, uint64) {
+	return atomic.LoadUint64(&totalIncoming), atomic.LoadUint64(&totalOutgoing)
+}
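The package-level counters and the new TotalInOut function make connection-wide transfer rates easy to derive; the polling loop below is illustrative and not part of this change:

    package example

    import (
        "fmt"
        "time"

        "github.com/calmh/syncthing/protocol"
    )

    // reportRates prints the aggregate incoming and outgoing byte rates once
    // per second, based on the counters fed by countingReader and
    // countingWriter across all connections.
    func reportRates() {
        prevIn, prevOut := protocol.TotalInOut()
        for {
            time.Sleep(time.Second)
            in, out := protocol.TotalInOut()
            fmt.Printf("in: %d B/s, out: %d B/s\n", in-prevIn, out-prevOut)
            prevIn, prevOut = in, out
        }
    }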
17 protocol/debug.go Normal file
@@ -0,0 +1,17 @@
// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.

package protocol

import (
	"os"
	"strings"

	"github.com/calmh/syncthing/logger"
)

var (
	debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all"
	l     = logger.DefaultLogger
)
@@ -1,2 +1,6 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 // Package protocol implements the Block Exchange Protocol.
 package protocol
@@ -1,3 +1,7 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 package protocol
 
 import "github.com/calmh/syncthing/xdr"
@@ -1,3 +1,7 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 package protocol
 
 type IndexMessage struct {
@@ -1,3 +1,7 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 package protocol
 
 import (
@@ -1,3 +1,7 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 // +build darwin
 
 package protocol
@@ -1,3 +1,7 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 // +build !windows,!darwin
 
 package protocol
@@ -1,24 +1,48 @@
+// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+// Use of this source code is governed by an MIT-style license that can be
+// found in the LICENSE file.
+
 // +build windows
 
 package protocol
 
-// Windows uses backslashes as file separator
+// Windows uses backslashes as file separator and disallows a bunch of
+// characters in the filename
 
-import "path/filepath"
+import (
+	"path/filepath"
+	"strings"
+)
 
+var disallowedCharacters = string([]rune{
+	'<', '>', ':', '"', '|', '?', '*',
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+	11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+	21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+	31,
+})
+
 type nativeModel struct {
 	next Model
 }
 
 func (m nativeModel) Index(nodeID string, repo string, files []FileInfo) {
-	for i := range files {
-		files[i].Name = filepath.FromSlash(files[i].Name)
+	for i, f := range files {
+		if strings.ContainsAny(f.Name, disallowedCharacters) {
+			files[i].Flags |= FlagInvalid
+			l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
+		}
+		files[i].Name = filepath.FromSlash(f.Name)
 	}
 	m.next.Index(nodeID, repo, files)
 }
 
 func (m nativeModel) IndexUpdate(nodeID string, repo string, files []FileInfo) {
-	for i := range files {
+	for i, f := range files {
+		if strings.ContainsAny(f.Name, disallowedCharacters) {
+			files[i].Flags |= FlagInvalid
+			l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
+		}
 		files[i].Name = filepath.FromSlash(files[i].Name)
 	}
 	m.next.IndexUpdate(nodeID, repo, files)
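For illustration, the same check in standalone form; the character set is copied from the diff above, while the helper itself is new and only illustrative:

    package example

    import "strings"

    // windowsDisallowed mirrors disallowedCharacters above: the reserved
    // punctuation plus the ASCII control characters 0 through 31.
    var windowsDisallowed = "<>:\"|?*" + string([]rune{
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
        16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
    })

    // invalidOnWindows reports whether a file name received from a peer would
    // be flagged with FlagInvalid by the Windows nativeModel; for example
    // invalidOnWindows("docs/a:b.txt") is true because of the colon.
    func invalidOnWindows(name string) bool {
        return strings.ContainsAny(name, windowsDisallowed)
    }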
Some files were not shown because too many files have changed in this diff.