Compare commits

...

72 Commits

Author SHA1 Message Date
Jakob Borg
5980952495 Actually load index cache again (fixes #45) 2014-01-29 22:02:38 +01:00
Jakob Borg
618c376e18 Synchronize zero sized files (fixes #44) 2014-01-29 21:52:27 +01:00
Jakob Borg
d31a126408 CONTRIBUTING.md 2014-01-28 19:10:39 +01:00
Jakob Borg
6d3f8a2c06 Parallell -> parallel (ref #13) 2014-01-26 16:48:20 +01:00
Jakob Borg
b1ba976122 Move auto generated source to a package 2014-01-26 15:02:06 +01:00
Jakob Borg
81d5d1d4a6 Rework config/flags (fixes #13) 2014-01-26 14:45:03 +01:00
Jakob Borg
ea5ef28c5a Performance: improve need computation 2014-01-23 22:20:15 +01:00
Jakob Borg
fc2ebc6cad Performance: make filequeue not suck 2014-01-23 16:39:12 +01:00
Jakob Borg
01096fff6c Add version info to GUI (fixes #41) 2014-01-23 13:13:15 +01:00
Jakob Borg
2ea3558283 Add Options message to protocol 2014-01-23 13:12:45 +01:00
Jakob Borg
20a47695fb Create syncthing.ini template (fixes #39) 2014-01-22 14:28:14 +01:00
Jakob Borg
1dde9ec2d8 New file change suppression algorithm (fixes #30) 2014-01-22 12:52:27 +01:00
Jakob Borg
0841a46055 Don't crash on invalid options 2014-01-22 12:52:15 +01:00
Jakob Borg
84c0749d20 Slightly more compact GUI resources 2014-01-20 23:17:57 +01:00
Jakob Borg
6b02f9e44f Fix GUI files modtime (ish...) 2014-01-20 23:08:29 +01:00
Jakob Borg
84d7452f9e Use embed instead of nrsc, enables 'go get' 2014-01-20 23:01:38 +01:00
Jakob Borg
9b449cb527 Fix windows build (fixes #38) 2014-01-20 23:00:49 +01:00
Jakob Borg
d9ffd359e2 Tweak locking and integration test. 2014-01-20 22:22:27 +01:00
Jakob Borg
b67443eb40 Integration test 2014-01-20 07:38:57 +01:00
Jakob Borg
4ac204b604 Fine grained locking 2014-01-20 07:38:48 +01:00
Jakob Borg
fff50b5472 Delete deadlock 2014-01-14 17:47:27 -07:00
Jakob Borg
8d5aed410f Clear availability for disconnected node 2014-01-13 11:22:57 -07:00
Jakob Borg
ba0e4ded65 Deadlock fix and cleanups 2014-01-13 10:29:23 -07:00
Jakob Borg
f0b18685a5 Show 'this node' in GUI 2014-01-12 15:19:03 -07:00
Jakob Borg
fc2b557ae6 Don't print help twice 2014-01-12 14:47:04 -07:00
Jakob Borg
af399ae9f3 Cleanup ignore tests 2014-01-12 11:10:15 -07:00
Jakob Borg
45fcf4bc84 Implement new puller routine (fixes #33) 2014-01-12 11:02:16 -07:00
Jakob Borg
55f61ccb5e Simple send rate limit 2014-01-12 10:19:22 -07:00
Jakob Borg
b601fc5627 Don't build with CPU usage on Solaris 2014-01-10 15:32:30 +01:00
Jakob Borg
832c0ffad0 Report CPU/mem usage in GUI 2014-01-10 00:12:32 +01:00
Jakob Borg
cb33f27f23 Woops: reignore .stignore 2014-01-09 23:00:42 +01:00
Jakob Borg
92dee7c082 Only fetch deps, don't build 2014-01-09 23:00:23 +01:00
Jakob Borg
b9af45bc6b Prepopulate ignore patterns (fixes #21) 2014-01-09 22:46:01 +01:00
Jakob Borg
a18f6c6d90 Do go get as part of build unless fast build requested (fixes #31) 2014-01-09 21:22:05 +01:00
Jakob Borg
6e11e3cda9 Build for Linux on ARM (fixes #32) 2014-01-09 21:17:41 +01:00
Jakob Borg
2935aebe53 Benchmarking 2014-01-09 14:11:55 +01:00
Jakob Borg
71f78f0d62 Future proofing: handle file records with unknown flags 2014-01-09 11:04:42 +01:00
Jakob Borg
3e1194e5ff Show web GUI address on startup (fixes #27) 2014-01-09 10:40:12 +01:00
Jakob Borg
6d64992e64 Display alert on GUI connection error (fixes #26) 2014-01-09 10:31:27 +01:00
Jakob Borg
211180108e Tweak TLS settings (ref #23) 2014-01-09 09:30:22 +01:00
Jakob Borg
17e78d6f7e Option to show version (fixes #24) 2014-01-08 14:37:33 +01:00
Jakob Borg
1ef86379fb Actually send index updates for version bumps 2014-01-08 14:21:47 +01:00
Jakob Borg
884a7d6a1b Default to running GUI on 127.0.0.1:8080 2014-01-08 13:56:29 +01:00
Jakob Borg
334961fe10 Footer with links 2014-01-08 13:52:17 +01:00
Jakob Borg
2cfb24892f Add version and invalid bit to protocol 2014-01-07 22:44:21 +01:00
Jakob Borg
d4fe1400d2 Longer RSA key and stronger node ID hash (ref #23) 2014-01-07 22:04:30 +01:00
Jakob Borg
69ef4d261d Unbreak build script 2014-01-07 17:07:46 +01:00
Jakob Borg
91c102e4fe Syncronize file mode (fixes #20) 2014-01-07 16:38:07 +01:00
Jakob Borg
b4db177045 Allow deletes per default (fixes #19) 2014-01-07 16:15:18 +01:00
Jakob Borg
340c9095dd Suppress frequent changes to files (fixes #12) 2014-01-07 16:10:38 +01:00
Jakob Borg
e3bc33dc88 Move binary to build destination 2014-01-07 12:14:50 +01:00
Jakob Borg
eebc145055 Point to the wiki for documentation (fixes #10) 2014-01-07 12:07:56 +01:00
Jakob Borg
92b01fa48a Build tar file for current OS/architecture 2014-01-07 11:52:42 +01:00
Jakob Borg
2a0d1ab294 Merge pull request #18 from jpjp/patch-1
synch -> sync
2014-01-07 02:33:55 -08:00
jpjp
2bdab426ff synch -> sync 2014-01-06 22:27:57 +01:00
Jakob Borg
e769de9986 Announce read/write/delete status on startup (fixes #14) 2014-01-06 21:41:29 +01:00
Jakob Borg
4b11e66914 Verify requests against model (fixes #15) 2014-01-06 21:31:36 +01:00
Jakob Borg
28d3936a3c Add .stignore to testdata 2014-01-06 21:31:20 +01:00
Jakob Borg
986b15573a Ignore files matching patterns in .stignore (fixes #7) 2014-01-06 21:17:18 +01:00
Jakob Borg
46d828e349 Expand tilde in repository path (fixes #8) 2014-01-06 19:37:26 +01:00
Jakob Borg
48603a1619 Do quick build even on test failure 2014-01-06 15:50:15 +01:00
Jakob Borg
17d5f2bbfc Fix model test 2014-01-06 15:49:51 +01:00
Jakob Borg
b64af73607 Dead code removal 2014-01-06 12:45:43 +01:00
Jakob Borg
c9cce9613e Refactor out the model into a subpackage 2014-01-06 11:11:18 +01:00
Jakob Borg
1392905d63 Link to issues 2014-01-06 08:19:33 +01:00
Jakob Borg
271d7eedc4 Better progress bar calculation 2014-01-06 06:38:01 +01:00
Jakob Borg
ab8482a424 Add --trace-need 2014-01-06 06:12:40 +01:00
Jakob Borg
c8a14d1c3d Refactor how --delete affects things 2014-01-06 06:12:33 +01:00
Jakob Borg
8974c33f2f Move build artefacts dir 2014-01-06 06:11:19 +01:00
Jakob Borg
ed675a61d7 Prettify need table 2014-01-06 06:10:53 +01:00
Jakob Borg
60b00af0bb Workaround for bug in Solaris compiler 2014-01-06 05:57:41 +01:00
Jakob Borg
0ceddc4fa3 Redirect / to index.html 2014-01-06 00:05:07 +01:00
51 changed files with 3789 additions and 10204 deletions

2
.gitignore vendored

@@ -1,3 +1,3 @@
syncthing
*.tar.gz
build
dist

22
CONTRIBUTING.md Normal file

@@ -0,0 +1,22 @@
Please do contribute!
## Building
[See the wiki](https://github.com/calmh/syncthing/wiki/Building)
## Tests
Yes please!
## Style
`go fmt`
## Documentation
[Hack it here](https://github.com/calmh/syncthing/wiki)
## License
MIT

160
README.md

@@ -25,163 +25,11 @@ making sure large swarms of selfish agents behave and somehow work
towards a common goal. Here we have a much smaller swarm of cooperative
agents and a simpler approach will suffice.
Features
--------
The following features are _currently implemented and working_:
* The formation of a cluster of nodes, certificate authenticated and
communicating over TLS over TCP.
* Synchronization of a single directory among the cluster nodes.
* Change detection by periodic scanning of the local repository.
* Static configuration of cluster nodes.
* Automatic discovery of cluster nodes. See [discover.go][discover.go]
for the protocol specification. Discovery on the LAN is performed by
broadcasts; Internet-wide discovery is performed with the assistance
of a global server.
* Handling of deleted files. Deletes can be propagated or ignored per
client.
* Synchronizing multiple unrelated directory trees by following
symlinks directly below the repository level.
* HTTP GUI.
The following features are _not yet implemented but planned_:
* Change detection by listening to file system notifications instead of
periodic scanning.
The following features are _not implemented but may be implemented_ in
the future:
* Syncing multiple directories from the same syncthing instance.
* Automatic NAT handling via UPnP.
* Conflict resolution. Currently whichever file has the newest
modification time "wins". The correct behavior in the face of
conflicts is open for discussion.
[discover.go]: https://github.com/calmh/syncthing/blob/master/discover/discover.go
Security
--------
Security is one of the primary project goals. This means that it should
not be possible for an attacker to join a cluster uninvited, and it
should not be possible to extract private information from intercepted
traffic. Currently this is implemented as follows.
All traffic is protected by TLS. To prevent uninvited nodes from joining
a cluster, the certificate fingerprint of each node is compared to a
preset list of acceptable nodes at connection establishment. The
fingerprint is computed as the SHA-1 hash of the certificate and
displayed in BASE32 encoding to form a compact yet convenient string.
Currently SHA-1 is deemed secure against preimage attacks.
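The derivation is simple enough to sketch. The following Go snippet is a
minimal illustration of the scheme described above, not necessarily the
exact `certId` implementation:
```go
package main

import (
	"crypto/sha1"
	"encoding/base32"
	"fmt"
)

// fingerprint derives a node ID from a certificate's raw DER bytes:
// the SHA-1 hash of the certificate, displayed in BASE32. Twenty hash
// bytes encode to exactly 32 characters, so no padding appears.
func fingerprint(der []byte) string {
	sum := sha1.Sum(der)
	return base32.StdEncoding.EncodeToString(sum[:])
}

func main() {
	// In syncthing this would be cert.Certificate[0]; a stand-in here.
	fmt.Println(fingerprint([]byte("example certificate DER bytes")))
}
```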
Installing
==========
Download the appropriate precompiled binary from the
[releases](https://github.com/calmh/syncthing/releases) page. Untar and
put the `syncthing` binary somewhere convenient in your `$PATH`.
If you are a developer and have Go 1.2 installed you can also install
the latest version from source. `go get` works as expected but builds
a binary without GUI capabilities. Use the included `build.sh` script
without parameters to build a syncthing with GUI.
Usage
=====
Check out the options:
```
$ syncthing --help
Usage:
syncthing [options]
...
```
Run syncthing to let it create its config directory and certificate:
```
$ syncthing
11:34:13 main.go:85: INFO: Version v0.1-40-gbb0fd87
11:34:13 tls.go:61: OK: Created TLS certificate file
11:34:13 tls.go:67: OK: Created TLS key file
11:34:13 main.go:66: INFO: My ID: NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q
11:34:13 main.go:90: FATAL: No config file
```
Take note of the "My ID: ..." line. Perform the same operation on
another computer to create another node. Take note of that ID as well,
and create a config file `~/.syncthing/syncthing.ini` looking something
like this:
```
[repository]
dir = /Users/jb/Synced
[nodes]
NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q = 172.16.32.1:22000 192.23.34.56:22000
CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL = dynamic
```
This assumes that the first node is reachable on either of the two
addresses listed (perhaps one internal and one port-forwarded external)
and that the other node is not normally reachable from the outside. Save
this config file, identically, to both nodes.
If the nodes are running on the same network, or reachable on port 22000
from the outside world, you can set all addresses to "dynamic" and they
will find each other using automatic discovery. (This discovery,
including port numbers, can be tweaked or disabled using command line
options.)
Start syncthing on both nodes. For the cautious, one side can be set to
be read only.
```
$ syncthing --ro
13:30:55 main.go:85: INFO: Version v0.1-40-gbb0fd87
13:30:55 main.go:102: INFO: My ID: NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q
13:30:55 main.go:149: INFO: Initial repository scan in progress
13:30:59 main.go:153: INFO: Listening for incoming connections
13:30:59 main.go:157: INFO: Attempting to connect to other nodes
13:30:59 main.go:247: INFO: Starting local discovery
13:30:59 main.go:165: OK: Ready to synchronize
13:31:04 discover.go:113: INFO: Discovered node CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL at 172.16.32.24:22000
13:31:14 main.go:296: INFO: Connected to node CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL
13:31:19 main.go:345: INFO: Transferred 139 KiB in (14 KiB/s), 139 KiB out (14 KiB/s)
13:32:20 model.go:94: INFO: CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL: 263.4 KB/s in, 69.1 KB/s out
13:32:20 model.go:104: INFO: 18289 files, 24.24 GB in cluster
13:32:20 model.go:111: INFO: 17132 files, 22.39 GB in local repo
13:32:20 model.go:117: INFO: 1157 files, 1.84 GB to synchronize
...
```
You should see the synchronization start and then finish a short while
later. Add nodes to taste.
GUI
---
The web-based GUI is disabled by default. To enable and access it, you
must start syncthing with the `--gui` command line option, giving a
listen address. For example:
```
$ syncthing --gui 127.0.0.1:8080
```
You then point your browser to the given address.
Documentation
=============
The syncthing documentation is kept on the
[GitHub Wiki](https://github.com/calmh/syncthing/wiki).
License
=======

5
auto/gui.files.go Normal file

File diff suppressed because one or more lines are too long


@@ -1,15 +1,27 @@
#!/bin/bash
version=$(git describe --always)
buildDir=dist
if [[ $fast != yes ]] ; then
go get -d
go test ./...
fi
if [[ -z $1 ]] ; then
go build -ldflags "-X main.Version $version"
elif [[ $1 == "embed" ]] ; then
embedder auto gui > auto/gui.files.go \
&& go build -ldflags "-X main.Version $version"
elif [[ $1 == "tar" ]] ; then
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui
else
go test ./... || exit 1
rm -rf build
mkdir -p build || exit 1
&& mkdir syncthing-dist \
&& cp syncthing README.md LICENSE syncthing-dist \
&& tar zcvf syncthing-dist.tar.gz syncthing-dist \
&& rm -rf syncthing-dist
elif [[ $1 == "all" ]] ; then
rm -rf "$buildDir"
mkdir -p "$buildDir" || exit 1
for goos in darwin linux freebsd ; do
for goarch in amd64 386 ; do
@@ -18,16 +30,34 @@ else
export GOARCH="$goarch"
export name="syncthing-$goos-$goarch"
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui \
&& mkdir -p "$name" \
&& cp syncthing "build/$name" \
&& cp syncthing "$buildDir/$name" \
&& cp README.md LICENSE "$name" \
&& mv syncthing "$name" \
&& tar zcf "build/$name.tar.gz" "$name" \
&& tar zcf "$buildDir/$name.tar.gz" "$name" \
&& rm -r "$name"
done
done
for goos in linux ; do
for goarm in 5 6 7 ; do
for goarch in arm ; do
echo "$goos-${goarch}v$goarm"
export GOARM="$goarm"
export GOOS="$goos"
export GOARCH="$goarch"
export name="syncthing-$goos-${goarch}v$goarm"
go build -ldflags "-X main.Version $version" \
&& mkdir -p "$name" \
&& cp syncthing "$buildDir/$name" \
&& cp README.md LICENSE "$name" \
&& mv syncthing "$name" \
&& tar zcf "$buildDir/$name.tar.gz" "$name" \
&& rm -r "$name"
done
done
done
for goos in windows ; do
for goarch in amd64 386 ; do
echo "$goos-$goarch"
@@ -35,11 +65,11 @@ else
export GOARCH="$goarch"
export name="syncthing-$goos-$goarch"
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing.exe gui \
&& mkdir -p "$name" \
&& cp syncthing.exe "build/$name.exe" \
&& cp syncthing.exe "$buildDir/$name.exe" \
&& cp README.md LICENSE "$name" \
&& zip -qr "build/$name.zip" "$name" \
&& mv syncthing.exe "$name" \
&& zip -qr "$buildDir/$name.zip" "$name" \
&& rm -r "$name"
done
done

129
config.go Normal file

@@ -0,0 +1,129 @@
package main
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
"text/template"
"time"
)
type Options struct {
Listen string `ini:"listen-address" default:":22000" description:"ip:port for incoming sync connections"`
ReadOnly bool `ini:"read-only" description:"Do not allow changes to the local repository"`
Delete bool `ini:"allow-delete" default:"true" description:"Allow deletes of files in the local repository"`
Symlinks bool `ini:"follow-symlinks" default:"true" description:"Follow symbolic links at the top level of the repository"`
GUI bool `ini:"gui-enabled" default:"true" description:"Enable the HTTP GUI"`
GUIAddr string `ini:"gui-address" default:"127.0.0.1:8080" description:"ip:port for GUI connections"`
ExternalServer string `ini:"global-announce-server" default:"syncthing.nym.se:22025" description:"Global server for announcements"`
ExternalDiscovery bool `ini:"global-announce-enabled" default:"true" description:"Announce to the global announce server"`
LocalDiscovery bool `ini:"local-announce-enabled" default:"true" description:"Announce to the local network"`
ParallelRequests int `ini:"parallel-requests" default:"16" description:"Maximum number of blocks to request in parallel"`
LimitRate int `ini:"max-send-kbps" description:"Limit outgoing data rate (kbyte/s)"`
ScanInterval time.Duration `ini:"rescan-interval" default:"60s" description:"Scan repository for changes this often"`
ConnInterval time.Duration `ini:"reconnection-interval" default:"60s" description:"Attempt to (re)connect to peers this often"`
MaxChangeBW int `ini:"max-change-bw" default:"1000" description:"Suppress files changing more than this (kbyte/s)"`
}
func loadConfig(m map[string]string, data interface{}) error {
s := reflect.ValueOf(data).Elem()
t := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
tag := t.Field(i).Tag
name := tag.Get("ini")
if len(name) == 0 {
name = strings.ToLower(t.Field(i).Name)
}
v, ok := m[name]
if !ok {
v = tag.Get("default")
}
if len(v) > 0 {
switch f.Interface().(type) {
case time.Duration:
d, err := time.ParseDuration(v)
if err != nil {
return err
}
f.SetInt(int64(d))
case string:
f.SetString(v)
case int:
i, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return err
}
f.SetInt(i)
case bool:
f.SetBool(v == "true")
default:
panic(f.Type())
}
}
}
return nil
}
type cfg struct {
Key string
Value string
Comment string
}
func structToValues(data interface{}) []cfg {
s := reflect.ValueOf(data).Elem()
t := s.Type()
var vals []cfg
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
tag := t.Field(i).Tag
var c cfg
c.Key = tag.Get("ini")
if len(c.Key) == 0 {
c.Key = strings.ToLower(t.Field(i).Name)
}
c.Value = fmt.Sprint(f.Interface())
c.Comment = tag.Get("description")
vals = append(vals, c)
}
return vals
}
var configTemplateStr = `[repository]
{{if .comments}}; The directory to synchronize. Will be created if it does not exist.
{{end}}dir = {{.dir}}
[nodes]
{{if .comments}}; Map of node ID to addresses, or "dynamic" for automatic discovery. Examples:
; J3MZ4G5O4CLHJKB25WX47K5NUJUWDOLO2TTNY3TV3NRU4HVQRKEQ = 172.16.32.24:22000
; ZNJZRXQKYHF56A2VVNESRZ6AY4ZOWGFJCV6FXDZJUTRVR3SNBT6Q = dynamic
{{end}}{{range $n, $a := .nodes}}{{$n}} = {{$a}}
{{end}}
[settings]
{{range $v := .settings}}; {{$v.Comment}}
{{$v.Key}} = {{$v.Value}}
{{end}}
`
var configTemplate = template.Must(template.New("config").Parse(configTemplateStr))
func writeConfig(wr io.Writer, dir string, nodes map[string]string, opts Options, comments bool) {
configTemplate.Execute(wr, map[string]interface{}{
"dir": dir,
"nodes": nodes,
"settings": structToValues(&opts),
"comments": comments,
})
}
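For context, a hypothetical use of `loadConfig` with a settings map as the ini
parser would produce from the `[settings]` section; the keys and values here
are illustrative:
```go
// Sketch: keys follow the `ini:` struct tags on Options; fields absent
// from the map fall back to their `default:` tag values.
var opts Options
err := loadConfig(map[string]string{
	"listen-address":  ":22000",
	"rescan-interval": "30s", // parsed via time.ParseDuration
	"max-send-kbps":   "500", // parsed via strconv.ParseInt
}, &opts)
if err != nil {
	fatalln(err)
}
// opts.GUIAddr now holds its default "127.0.0.1:8080"; opts.LimitRate is 500.
```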


@@ -100,7 +100,6 @@ type Discoverer struct {
MyID string
ListenPort int
BroadcastIntv time.Duration
ExtListenPort int
ExtBroadcastIntv time.Duration
conn *net.UDPConn
@@ -114,7 +113,7 @@ type Discoverer struct {
// When we hit this many errors in succession, we stop.
const maxErrors = 30
func NewDiscoverer(id string, port int, extPort int, extServer string) (*Discoverer, error) {
func NewDiscoverer(id string, port int, extServer string) (*Discoverer, error) {
local4 := &net.UDPAddr{IP: net.IP{0, 0, 0, 0}, Port: AnnouncementPort}
conn, err := net.ListenUDP("udp4", local4)
if err != nil {
@@ -125,7 +124,6 @@ func NewDiscoverer(id string, port int, extPort int, extServer string) (*Discove
MyID: id,
ListenPort: port,
BroadcastIntv: 30 * time.Second,
ExtListenPort: extPort,
ExtBroadcastIntv: 1800 * time.Second,
conn: conn,
@@ -138,7 +136,7 @@ func NewDiscoverer(id string, port int, extPort int, extServer string) (*Discove
if disc.ListenPort > 0 {
disc.sendAnnouncements()
}
if len(disc.extServer) > 0 && disc.ExtListenPort > 0 {
if len(disc.extServer) > 0 {
disc.sendExtAnnouncements()
}
@@ -153,13 +151,13 @@ func (d *Discoverer) sendAnnouncements() {
}
func (d *Discoverer) sendExtAnnouncements() {
extIP, err := net.ResolveUDPAddr("udp", d.extServer+":22025")
extIP, err := net.ResolveUDPAddr("udp", d.extServer)
if err != nil {
log.Printf("discover/external: %v; no external announcements", err)
return
}
buf := EncodePacket(Packet{AnnouncementMagic, uint16(d.ExtListenPort), d.MyID, nil})
buf := EncodePacket(Packet{AnnouncementMagic, uint16(22000), d.MyID, nil})
go d.writeAnnouncements(buf, extIP, d.ExtBroadcastIntv)
}
@@ -213,7 +211,7 @@ func (d *Discoverer) recvAnnouncements() {
}
func (d *Discoverer) externalLookup(node string) (string, bool) {
extIP, err := net.ResolveUDPAddr("udp", d.extServer+":22025")
extIP, err := net.ResolveUDPAddr("udp", d.extServer)
if err != nil {
log.Printf("discover/external: %v; no external lookup", err)
return "", false

113
gui.go

@@ -3,43 +3,69 @@ package main
import (
"encoding/json"
"fmt"
"io"
"log"
"mime"
"net/http"
"path/filepath"
"bitbucket.org/tebeka/nrsc"
"runtime"
"sync"
"time"
"github.com/calmh/syncthing/auto"
"github.com/calmh/syncthing/model"
"github.com/codegangsta/martini"
"github.com/cratonica/embed"
)
func startGUI(addr string, m *Model) {
func startGUI(addr string, m *model.Model) {
router := martini.NewRouter()
router.Get("/", getRoot)
router.Get("/rest/version", restGetVersion)
router.Get("/rest/model", restGetModel)
router.Get("/rest/connections", restGetConnections)
router.Get("/rest/config", restGetConfig)
router.Get("/rest/need", restGetNeed)
router.Get("/rest/system", restGetSystem)
fs, err := embed.Unpack(auto.Resources)
if err != nil {
panic(err)
}
go func() {
mr := martini.New()
mr.Use(nrscStatic("gui"))
mr.Use(embeddedStatic(fs))
mr.Use(martini.Recovery())
mr.Action(router.Handle)
mr.Map(m)
http.ListenAndServe(addr, mr)
err := http.ListenAndServe(addr, mr)
if err != nil {
warnln("GUI not possible:", err)
}
}()
}
func getRoot(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/index.html", 302)
}
func restGetVersion() string {
return Version
}
func restGetModel(m *Model, w http.ResponseWriter) {
func restGetModel(m *model.Model, w http.ResponseWriter) {
var res = make(map[string]interface{})
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = m.GlobalSize()
res["localFiles"], res["localDeleted"], res["localBytes"] = m.LocalSize()
globalFiles, globalDeleted, globalBytes := m.GlobalSize()
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
localFiles, localDeleted, localBytes := m.LocalSize()
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
inSyncFiles, inSyncBytes := m.InSyncSize()
res["inSyncFiles"], res["inSyncBytes"] = inSyncFiles, inSyncBytes
files, total := m.NeedFiles()
res["needFiles"], res["needBytes"] = len(files), total
@@ -47,7 +73,7 @@ func restGetModel(m *Model, w http.ResponseWriter) {
json.NewEncoder(w).Encode(res)
}
func restGetConnections(m *Model, w http.ResponseWriter) {
func restGetConnections(m *model.Model, w http.ResponseWriter) {
var res = m.ConnectionStats()
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
@@ -55,52 +81,77 @@ func restGetConnections(m *Model, w http.ResponseWriter) {
func restGetConfig(w http.ResponseWriter) {
var res = make(map[string]interface{})
res["myID"] = myID
res["repository"] = config.OptionMap("repository")
res["nodes"] = config.OptionMap("nodes")
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}
func restGetNeed(m *Model, w http.ResponseWriter) {
files, _ := m.NeedFiles()
if files == nil {
// We don't want the empty list to serialize as "null\n"
files = make([]FileInfo, 0)
type guiFile model.File
func (f guiFile) MarshalJSON() ([]byte, error) {
type t struct {
Name string
Size int
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(files)
return json.Marshal(t{
Name: f.Name,
Size: model.File(f).Size(),
})
}
func nrscStatic(path string) interface{} {
if err := nrsc.Initialize(); err != nil {
panic("Unable to initialize nrsc: " + err.Error())
func restGetNeed(m *model.Model, w http.ResponseWriter) {
files, _ := m.NeedFiles()
gfs := make([]guiFile, len(files))
for i, f := range files {
gfs[i] = guiFile(f)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(gfs)
}
var cpuUsagePercent float64
var cpuUsageLock sync.RWMutex
func restGetSystem(w http.ResponseWriter) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
res := make(map[string]interface{})
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys
cpuUsageLock.RLock()
res["cpuPercent"] = cpuUsagePercent
cpuUsageLock.RUnlock()
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}
func embeddedStatic(fs map[string][]byte) interface{} {
var modt = time.Now().UTC().Format(http.TimeFormat)
return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
file := req.URL.Path
// nrsc expects there not to be a leading slash
if file[0] == '/' {
file = file[1:]
}
f := nrsc.Get(file)
if f == nil {
bs, ok := fs[file]
if !ok {
return
}
rdr, err := f.Open()
if err != nil {
http.Error(res, "Internal Server Error", http.StatusInternalServerError)
}
defer rdr.Close()
mtype := mime.TypeByExtension(filepath.Ext(req.URL.Path))
if len(mtype) != 0 {
res.Header().Set("Content-Type", mtype)
}
res.Header().Set("Content-Size", fmt.Sprintf("%d", f.Size()))
res.Header().Set("Last-Modified", f.ModTime().UTC().Format(http.TimeFormat))
res.Header().Set("Content-Size", fmt.Sprintf("%d", len(bs)))
res.Header().Set("Last-Modified", modt)
io.Copy(res, rdr)
res.Write(bs)
}
}
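With the `guiFile` marshaller above, a `/rest/need` response is a JSON array
of name/size pairs; the values below are illustrative:
```json
[
  {"Name": "docs/intro.txt", "Size": 4096},
  {"Name": "photos/summer.jpg", "Size": 1048576}
]
```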


@@ -1,18 +1,39 @@
var syncthing = angular.module('syncthing', []);
syncthing.controller('SyncthingCtrl', function ($scope, $http) {
var prevDate = 0;
var modelGetOK = true;
function modelGetSucceeded() {
if (!modelGetOK) {
$('#networkError').modal('hide');
modelGetOK = true;
}
}
function modelGetFailed() {
if (modelGetOK) {
$('#networkError').modal({backdrop: 'static', keyboard: false});
modelGetOK = false;
}
}
$http.get("/rest/version").success(function (data) {
$scope.version = data;
});
$http.get("/rest/config").success(function (data) {
$scope.config = data;
});
var prevDate = 0;
$scope.refresh = function () {
$http.get("/rest/system").success(function (data) {
$scope.system = data;
});
$http.get("/rest/model").success(function (data) {
$scope.model = data;
modelGetSucceeded();
}).error(function () {
modelGetFailed();
});
$http.get("/rest/connections").success(function (data) {
var now = Date.now();
@@ -21,17 +42,30 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
for (var id in data) {
try {
data[id].inbps = 8 * (data[id].InBytesTotal - $scope.connections[id].InBytesTotal) / td;
data[id].outbps = 8 * (data[id].OutBytesTotal - $scope.connections[id].OutBytesTotal) / td;
data[id].inbps = Math.max(0, 8 * (data[id].InBytesTotal - $scope.connections[id].InBytesTotal) / td);
data[id].outbps = Math.max(0, 8 * (data[id].OutBytesTotal - $scope.connections[id].OutBytesTotal) / td);
} catch (e) {
data[id].inbps = 0;
data[id].outbps = 0;
}
}
$scope.connections = data;
});
$http.get("/rest/need").success(function (data) {
var i, name;
for (i = 0; i < data.length; i++) {
name = data[i].Name.split("/");
data[i].ShortName = name[name.length-1];
}
data.sort(function (a, b) {
if (a.ShortName < b.ShortName) {
return -1;
}
if (a.ShortName > b.ShortName) {
return 1;
}
return 0;
});
$scope.need = data;
});
};
@@ -40,16 +74,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
setInterval($scope.refresh, 10000);
});
function decimals(num) {
if (num > 100) {
return 0;
}
if (num > 10) {
return 1;
}
return 2;
function decimals(val, num) {
if (val === 0) { return 0; }
var digits = Math.floor(Math.log(Math.abs(val))/Math.log(10));
var decimals = Math.max(0, num - digits);
return decimals;
}
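// For example, decimals(1.23, 2) returns 2 and decimals(123.4, 2)
// returns 0, so formatted values keep roughly `num` significant digits.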
syncthing.filter('natural', function() {
return function(input, valid) {
return input.toFixed(decimals(input, valid));
}
});
syncthing.filter('binary', function() {
return function(input) {
if (input === undefined) {
@@ -57,15 +94,15 @@ syncthing.filter('binary', function() {
}
if (input > 1024 * 1024 * 1024) {
input /= 1024 * 1024 * 1024;
return input.toFixed(decimals(input)) + ' Gi';
return input.toFixed(decimals(input, 2)) + ' Gi';
}
if (input > 1024 * 1024) {
input /= 1024 * 1024;
return input.toFixed(decimals(input)) + ' Mi';
return input.toFixed(decimals(input, 2)) + ' Mi';
}
if (input > 1024) {
input /= 1024;
return input.toFixed(decimals(input)) + ' Ki';
return input.toFixed(decimals(input, 2)) + ' Ki';
}
return Math.round(input) + ' ';
}
@@ -78,15 +115,15 @@ syncthing.filter('metric', function() {
}
if (input > 1000 * 1000 * 1000) {
input /= 1000 * 1000 * 1000;
return input.toFixed(decimals(input)) + ' G';
return input.toFixed(decimals(input, 2)) + ' G';
}
if (input > 1000 * 1000) {
input /= 1000 * 1000;
return input.toFixed(decimals(input)) + ' M';
return input.toFixed(decimals(input, 2)) + ' M';
}
if (input > 1000) {
input /= 1000;
return input.toFixed(decimals(input)) + ' k';
return input.toFixed(decimals(input, 2)) + ' k';
}
return Math.round(input) + ' ';
}


@@ -1,397 +0,0 @@
/*!
* Bootstrap v3.0.3 (http://getbootstrap.com)
* Copyright 2013 Twitter, Inc.
* Licensed under http://www.apache.org/licenses/LICENSE-2.0
*/
.btn-default,
.btn-primary,
.btn-success,
.btn-info,
.btn-warning,
.btn-danger {
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
}
.btn-default:active,
.btn-primary:active,
.btn-success:active,
.btn-info:active,
.btn-warning:active,
.btn-danger:active,
.btn-default.active,
.btn-primary.active,
.btn-success.active,
.btn-info.active,
.btn-warning.active,
.btn-danger.active {
-webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
}
.btn:active,
.btn.active {
background-image: none;
}
.btn-default {
text-shadow: 0 1px 0 #fff;
background-image: -webkit-linear-gradient(top, #ffffff 0%, #e0e0e0 100%);
background-image: linear-gradient(to bottom, #ffffff 0%, #e0e0e0 100%);
background-repeat: repeat-x;
border-color: #dbdbdb;
border-color: #ccc;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-default:hover,
.btn-default:focus {
background-color: #e0e0e0;
background-position: 0 -15px;
}
.btn-default:active,
.btn-default.active {
background-color: #e0e0e0;
border-color: #dbdbdb;
}
.btn-primary {
background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%);
background-repeat: repeat-x;
border-color: #2b669a;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-primary:hover,
.btn-primary:focus {
background-color: #2d6ca2;
background-position: 0 -15px;
}
.btn-primary:active,
.btn-primary.active {
background-color: #2d6ca2;
border-color: #2b669a;
}
.btn-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);
background-repeat: repeat-x;
border-color: #3e8f3e;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-success:hover,
.btn-success:focus {
background-color: #419641;
background-position: 0 -15px;
}
.btn-success:active,
.btn-success.active {
background-color: #419641;
border-color: #3e8f3e;
}
.btn-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);
background-repeat: repeat-x;
border-color: #e38d13;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-warning:hover,
.btn-warning:focus {
background-color: #eb9316;
background-position: 0 -15px;
}
.btn-warning:active,
.btn-warning.active {
background-color: #eb9316;
border-color: #e38d13;
}
.btn-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);
background-repeat: repeat-x;
border-color: #b92c28;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-danger:hover,
.btn-danger:focus {
background-color: #c12e2a;
background-position: 0 -15px;
}
.btn-danger:active,
.btn-danger.active {
background-color: #c12e2a;
border-color: #b92c28;
}
.btn-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);
background-repeat: repeat-x;
border-color: #28a4c9;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-info:hover,
.btn-info:focus {
background-color: #2aabd2;
background-position: 0 -15px;
}
.btn-info:active,
.btn-info.active {
background-color: #2aabd2;
border-color: #28a4c9;
}
.thumbnail,
.img-thumbnail {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.dropdown-menu > li > a:hover,
.dropdown-menu > li > a:focus {
background-color: #e8e8e8;
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
}
.dropdown-menu > .active > a,
.dropdown-menu > .active > a:hover,
.dropdown-menu > .active > a:focus {
background-color: #357ebd;
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
.navbar-default {
background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);
background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);
background-repeat: repeat-x;
border-radius: 4px;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
}
.navbar-default .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
}
.navbar-brand,
.navbar-nav > li > a {
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);
}
.navbar-inverse {
background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222222 100%);
background-image: linear-gradient(to bottom, #3c3c3c 0%, #222222 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.navbar-inverse .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #222222 0%, #282828 100%);
background-image: linear-gradient(to bottom, #222222 0%, #282828 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
}
.navbar-inverse .navbar-brand,
.navbar-inverse .navbar-nav > li > a {
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
}
.navbar-static-top,
.navbar-fixed-top,
.navbar-fixed-bottom {
border-radius: 0;
}
.alert {
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
}
.alert-success {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
background-repeat: repeat-x;
border-color: #b2dba1;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
}
.alert-info {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
background-repeat: repeat-x;
border-color: #9acfea;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
}
.alert-warning {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
background-repeat: repeat-x;
border-color: #f5e79e;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
}
.alert-danger {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
background-repeat: repeat-x;
border-color: #dca7a7;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
}
.progress {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
}
.progress-bar {
background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
}
.progress-bar-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
}
.progress-bar-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
}
.progress-bar-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
}
.progress-bar-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
}
.list-group {
border-radius: 4px;
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.list-group-item.active,
.list-group-item.active:hover,
.list-group-item.active:focus {
text-shadow: 0 -1px 0 #3071a9;
background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
background-repeat: repeat-x;
border-color: #3278b3;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
}
.panel {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.panel-default > .panel-heading {
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
}
.panel-primary > .panel-heading {
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
.panel-success > .panel-heading {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
}
.panel-info > .panel-heading {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
}
.panel-warning > .panel-heading {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
}
.panel-danger > .panel-heading {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
}
.well {
background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
background-repeat: repeat-x;
border-color: #dcdcdc;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
-webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
}


File diff suppressed because one or more lines are too long


File diff suppressed because it is too large


File diff suppressed because it is too large


@@ -9,11 +9,22 @@
<link rel="shortcut icon" href="../../docs-assets/ico/favicon.png">
<title>syncthing</title>
<link href="bootstrap/css/bootstrap.css" rel="stylesheet">
<link href="bootstrap/css/bootstrap.min.css" rel="stylesheet">
<style type="text/css">
body {
html, body {
height: 100%;
}
#wrap{
padding-top: 20px;
padding-bottom: 20px;
min-height: 100%;
height: auto;
margin: 0 auto -50px;
padding: 20px 0 50px 0;
}
#footer {
height: 50px;
padding: 12px;
background-color: #f5f5f5;
}
.header {
@@ -28,72 +39,131 @@ body {
</head>
<body ng-controller="SyncthingCtrl">
<div class="container">
<div class="header">
<h3 class="text-muted">syncthing&emsp;<small>|</small>&emsp;<small>{{version}}</small></h3>
</div>
<div class="row">
<div class="col-md-12">
<h2>Synchronization</h2>
<div class="progress">
<div class="progress-bar" ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100" style="width: {{100 * model.localFiles / (model.localFiles + model.needFiles) | number:2}}%;"></div>
</div>
<p class="pull-right">{{100 * model.localFiles / (model.localFiles + model.needFiles) | number:2}}%</p>
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
<div id="wrap">
<div class="container">
<div class="header">
<h3 class="text-muted">syncthing</h3>
</div>
</div>
<div class="row">
<div class="col-md-6">
<h1>Repository Status</h1>
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
<div ng-show="model.needFiles > 0">
<h2>Files to Synchronize</h2>
<table class="table table-condesed table-striped">
<tr ng-repeat="file in need track by $index">
<td>{{file.Name}}</td>
<td>{{file.Size | binary}}B</td>
</tr>
</table>
<div class="row">
<div class="col-md-12">
<div class="panel" ng-class="{'panel-success': model.needBytes === 0, 'panel-primary': model.needBytes !== 0}">
<div class="panel-heading"><h3 class="panel-title">Synchronization</h3></div>
<div class="panel-body">
<div class="progress">
<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
{{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
</div>
</div>
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
</div>
</div>
</div>
</div>
<div class="col-md-6">
<h1>Cluster Status</h1>
<table class="table table-condensed">
<tbody>
<tr ng-repeat="(node, address) in config.nodes" ng-class="{'text-primary': !!connections[node]}">
<td><abbr class="text-monospace" title="{{node}}">{{node | short}}</abbr></td>
<td>
<span ng-show="!!connections[node]">
<span class="glyphicon glyphicon-link"></span>
{{connections[node].Address}}
</span>
<span ng-hide="!!connections[node]">
<span class="glyphicon glyphicon-cog"></span>
{{address}}
</span>
</td>
<td class="text-right">
<abbr title="{{connections[node].InBytesTotal | binary}}B">{{connections[node].inbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
</td>
<td class="text-right">
<abbr title="{{connections[node].OutBytesTotal | binary}}B">{{connections[node].outbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
</td>
</tr>
</tbody>
</table>
<div class="row">
<div class="col-md-6">
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">Repository</h3></div>
<div class="panel-body">
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
</div>
</div>
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">System</h3></div>
<div class="panel-body">
<p>{{system.sys | binary}}B RAM allocated, {{system.alloc | binary}}B in use</p>
<p>{{system.cpuPercent | alwaysNumber | natural:1}}% CPU, {{system.goroutines | alwaysNumber}} goroutines</p>
</div>
</div>
<div ng-show="model.needFiles > 0">
<h2>Files to Synchronize</h2>
<table class="table table-condensed table-striped">
<tr ng-repeat="file in need track by $index">
<td><abbr title="{{file.Name}}">{{file.ShortName}}</abbr></td>
<td class="text-right">{{file.Size | binary}}B</td>
</tr>
</table>
</div>
</div>
<div class="col-md-6">
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">Cluster</h3></div>
<table class="table table-condensed">
<tbody>
<tr ng-repeat="(node, address) in config.nodes" ng-class="{'text-primary': !!connections[node], 'text-muted': node == config.myID}">
<td><span class="text-monospace">{{node | short}}</span></td>
<td>
<span ng-show="node != config.myID">{{connections[node].ClientVersion}}</span>
<span ng-show="node == config.myID">{{version}}</span>
</td>
<td>
<span ng-show="node == config.myID">
<span class="glyphicon glyphicon-ok"></span>
(this node)
</span>
<span ng-show="node != config.myID && !!connections[node]">
<span class="glyphicon glyphicon-link"></span>
{{connections[node].Address}}
</span>
<span ng-show="node != config.myID && !connections[node]">
<span class="glyphicon glyphicon-cog"></span>
{{address}}
</span>
</td>
<td class="text-right">
<span ng-show="node != config.myID">
<abbr title="{{connections[node].InBytesTotal | binary}}B">{{connections[node].inbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
</span>
</td>
<td class="text-right">
<span ng-show="node != config.myID">
<abbr title="{{connections[node].OutBytesTotal | binary}}B">{{connections[node].outbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
</span>
</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
</div>
</div>
<div id="footer" class="text-center">
syncthing {{version}}
| <a href="https://github.com/calmh/syncthing/releases">Latest Release</a>
| <a href="https://github.com/calmh/syncthing/wiki">Documentation</a>
| <a href="https://github.com/calmh/syncthing/issues">Bugs</a>
| <a href="https://github.com/calmh/syncthing">Source Code</a>
</div>
<div id="networkError" class="modal fade">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header alert alert-danger">
<h4 class="modal-title">
<span class="glyphicon glyphicon-exclamation-sign"></span>
Connection Error
</h4>
</div>
<div class="modal-body">
<p>
Syncthing seems to be down, or there is a problem with your Internet connection.
Retrying&hellip;
</p>
</div>
</div>
</div>
</div>
<script src="angular.min.js"></script>

31
gui_unix.go Normal file

@@ -0,0 +1,31 @@
//+build !windows,!solaris
package main
import (
"syscall"
"time"
)
func init() {
go trackCPUUsage()
}
func trackCPUUsage() {
var prevUsage int64
var prevTime = time.Now().UnixNano()
var rusage syscall.Rusage
for {
time.Sleep(10 * time.Second)
syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
curTime := time.Now().UnixNano()
timeDiff := curTime - prevTime
curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
usageDiff := curUsage - prevUsage
cpuUsageLock.Lock()
cpuUsagePercent = 100 * float64(usageDiff) / float64(timeDiff)
cpuUsageLock.Unlock()
prevTime = curTime
prevUsage = curUsage
}
}
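In other words, the reported figure is the combined user and system CPU time as a share of wall-clock time over each ten-second window; for example, 1.5 s of CPU time consumed during a 10 s window reports as 15%.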

5
integration/.gitignore vendored Normal file

@@ -0,0 +1,5 @@
files-*
conf-*
md5-*
genfiles
md5r

42
integration/genfiles.go Normal file

@@ -0,0 +1,42 @@
package main
import (
"crypto/rand"
"flag"
"fmt"
"io/ioutil"
mr "math/rand"
"os"
"path"
)
func name() string {
var b [16]byte
rand.Reader.Read(b[:])
return fmt.Sprintf("%x", b[:])
}
func main() {
var files int
var maxexp int
flag.IntVar(&files, "files", 1000, "Number of files")
flag.IntVar(&maxexp, "maxexp", 20, "Maximum file size (max = 2^n + 128*1024 B)")
flag.Parse()
for i := 0; i < files; i++ {
n := name()
p0 := path.Join(string(n[0]), n[0:2])
os.MkdirAll(p0, 0755)
s := 1 << uint(mr.Intn(maxexp))
a := 128 * 1024
if a > s {
a = s
}
s += mr.Intn(a)
b := make([]byte, s)
rand.Reader.Read(b)
p1 := path.Join(p0, n)
ioutil.WriteFile(p1, b, 0644)
}
}
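Each generated file is 2^k bytes for a random k below `maxexp`, plus a random pad of fewer than min(128 KiB, 2^k) bytes.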

59
integration/md5r.go Normal file

@@ -0,0 +1,59 @@
package main
import (
"crypto/md5"
"flag"
"fmt"
"io"
"os"
"path/filepath"
)
func main() {
flag.Parse()
args := flag.Args()
if len(args) == 0 {
args = []string{"."}
}
for _, path := range args {
err := filepath.Walk(path, walker)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
}
func walker(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
sum, err := md5file(path)
if err != nil {
return err
}
fmt.Printf("%s %s\n", sum, path)
}
return nil
}
func md5file(fname string) (hash string, err error) {
f, err := os.Open(fname)
if err != nil {
return
}
defer f.Close()
h := md5.New()
io.Copy(h, f)
hb := h.Sum(nil)
hash = fmt.Sprintf("%x", hb)
return
}

74
integration/test.sh Executable file

@@ -0,0 +1,74 @@
#!/bin/bash
rm -rf files-* conf-* md5-*
extraopts=""
p=$(pwd)
go build genfiles.go
go build md5r.go
echo "Setting up (keys)..."
i1=$(syncthing --home conf-1 2>&1 | awk '/My ID/ {print $7}')
echo $i1
i2=$(syncthing --home conf-2 2>&1 | awk '/My ID/ {print $7}')
echo $i2
i3=$(syncthing --home conf-3 2>&1 | awk '/My ID/ {print $7}')
echo $i3
echo "Setting up (files)..."
for i in 1 2 3 ; do
cat >conf-$i/syncthing.ini <<EOT
[repository]
dir = $p/files-$i
[nodes]
$i1 = 127.0.0.1:22001
$i2 = 127.0.0.1:22002
$i3 = 127.0.0.1:22003
[settings]
gui-enabled = false
listen-address = :2200$i
EOT
mkdir files-$i
pushd files-$i >/dev/null
../genfiles -maxexp 21 -files 400
touch empty-$i
../md5r > ../md5-$i
popd >/dev/null
done
echo "Starting..."
for i in 1 2 3 ; do
sleep 1
syncthing --home conf-$i $extraopts &
done
cat md5-* | sort > md5-tot
while true ; do
read
echo Verifying...
conv=0
for i in 1 2 3 ; do
pushd files-$i >/dev/null
../md5r | sort > ../md5-$i
popd >/dev/null
if ! cmp md5-$i md5-tot >/dev/null ; then
echo $i unconverged
else
conv=$((conv + 1))
echo $i converged
fi
done
if [[ $conv == 3 ]] ; then
kill %1
kill %2
kill %3
exit
fi
done


@@ -6,7 +6,8 @@ import (
"os"
)
var logger = log.New(os.Stderr, "", log.Ltime)
// set in main()
var logger *log.Logger
func debugln(vals ...interface{}) {
s := fmt.Sprintln(vals...)

289
main.go

@@ -2,8 +2,8 @@ package main
import (
"compress/gzip"
"crypto/sha1"
"crypto/tls"
"flag"
"fmt"
"log"
"net"
@@ -11,95 +11,131 @@ import (
_ "net/http/pprof"
"os"
"path"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/calmh/ini"
"github.com/calmh/syncthing/discover"
flags "github.com/calmh/syncthing/github.com/jessevdk/go-flags"
"github.com/calmh/syncthing/model"
"github.com/calmh/syncthing/protocol"
)
type Options struct {
ConfDir string `short:"c" long:"cfg" description:"Configuration directory" default:"~/.syncthing" value-name:"DIR"`
Listen string `short:"l" long:"listen" description:"Listen address" default:":22000" value-name:"ADDR"`
ReadOnly bool `short:"r" long:"ro" description:"Repository is read only"`
Delete bool `short:"d" long:"delete" description:"Delete files deleted from cluster"`
Rehash bool `long:"rehash" description:"Ignore cache and rehash all files in repository"`
NoSymlinks bool `long:"no-symlinks" description:"Don't follow first level symlinks in the repo"`
NoStats bool `long:"no-stats" description:"Don't print model and connection statistics"`
GUIAddr string `long:"gui" description:"GUI listen address" default:"" value-name:"ADDR"`
Discovery DiscoveryOptions `group:"Discovery Options"`
Advanced AdvancedOptions `group:"Advanced Options"`
Debug DebugOptions `group:"Debugging Options"`
}
type DebugOptions struct {
LogSource bool `long:"log-source"`
TraceFile bool `long:"trace-file"`
TraceNet bool `long:"trace-net"`
TraceIdx bool `long:"trace-idx"`
Profiler string `long:"profiler" value-name:"ADDR"`
}
type DiscoveryOptions struct {
ExternalServer string `long:"ext-server" description:"External discovery server" value-name:"NAME" default:"syncthing.nym.se"`
ExternalPort int `short:"e" long:"ext-port" description:"External listen port" value-name:"PORT" default:"22000"`
NoExternalDiscovery bool `short:"n" long:"no-ext-announce" description:"Do not announce presence externally"`
NoLocalDiscovery bool `short:"N" long:"no-local-announce" description:"Do not announce presence locally"`
}
type AdvancedOptions struct {
RequestsInFlight int `long:"reqs-in-flight" description:"Parallell in flight requests per file" default:"4" value-name:"REQS"`
FilesInFlight int `long:"files-in-flight" description:"Parallell in flight file pulls" default:"8" value-name:"FILES"`
ScanInterval time.Duration `long:"scan-intv" description:"Repository scan interval" default:"60s" value-name:"INTV"`
ConnInterval time.Duration `long:"conn-intv" description:"Node reconnect interval" default:"60s" value-name:"INTV"`
}
var opts Options
var Version string = "unknown-dev"
const (
confDirName = ".syncthing"
confFileName = "syncthing.ini"
)
var (
myID string
config ini.Config
nodeAddrs = make(map[string][]string)
)
var (
showVersion bool
showConfig bool
confDir string
trace string
profiler string
)
func main() {
_, err := flags.Parse(&opts)
if err != nil {
log.SetOutput(os.Stderr)
logger = log.New(os.Stderr, "", log.Flags())
flag.StringVar(&confDir, "home", "~/.syncthing", "Set configuration directory")
flag.BoolVar(&showConfig, "config", false, "Print current configuration")
flag.StringVar(&trace, "debug.trace", "", "(connect,net,idx,file,pull)")
flag.StringVar(&profiler, "debug.profiler", "", "(addr)")
flag.BoolVar(&showVersion, "version", false, "Show version")
flag.Usage = usageFor(flag.CommandLine, "syncthing [options]")
flag.Parse()
if showVersion {
fmt.Println(Version)
os.Exit(0)
}
if opts.Debug.TraceFile || opts.Debug.TraceIdx || opts.Debug.TraceNet || opts.Debug.LogSource {
logger = log.New(os.Stderr, "", log.Lshortfile|log.Ldate|log.Ltime|log.Lmicroseconds)
}
if strings.HasPrefix(opts.ConfDir, "~/") {
opts.ConfDir = strings.Replace(opts.ConfDir, "~", getHomeDir(), 1)
if len(os.Getenv("GOGC")) == 0 {
debug.SetGCPercent(25)
}
infoln("Version", Version)
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
if len(trace) > 0 {
log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
logger.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
}
confDir = expandTilde(confDir)
// Ensure that our home directory exists and that we have a certificate and key.
ensureDir(opts.ConfDir, 0700)
cert, err := loadCert(opts.ConfDir)
ensureDir(confDir, 0700)
cert, err := loadCert(confDir)
if err != nil {
newCertificate(opts.ConfDir)
cert, err = loadCert(opts.ConfDir)
newCertificate(confDir)
cert, err = loadCert(confDir)
fatalErr(err)
}
myID := string(certId(cert.Certificate[0]))
myID = string(certId(cert.Certificate[0]))
log.SetPrefix("[" + myID[0:5] + "] ")
logger.SetPrefix("[" + myID[0:5] + "] ")
// Load the configuration file, if it exists.
// If it does not, create a template.
cfgFile := path.Join(confDir, confFileName)
cf, err := os.Open(cfgFile)
if err != nil {
infoln("My ID:", myID)
infoln("No config file; creating a template")
loadConfig(nil, &opts) //loads defaults
fd, err := os.Create(cfgFile)
if err != nil {
fatalln(err)
}
writeConfig(fd, "~/Sync", map[string]string{myID: "dynamic"}, opts, true)
fd.Close()
infof("Edit %s to suit and restart syncthing.", cfgFile)
os.Exit(0)
}
config = ini.Parse(cf)
cf.Close()
loadConfig(config.OptionMap("settings"), &opts)
if showConfig {
writeConfig(os.Stdout,
config.Get("repository", "dir"),
config.OptionMap("nodes"), opts, false)
os.Exit(0)
}
infoln("Version", Version)
infoln("My ID:", myID)
if opts.Debug.Profiler != "" {
var dir = expandTilde(config.Get("repository", "dir"))
if len(dir) == 0 {
fatalln("No repository directory. Set dir under [repository] in syncthing.ini.")
}
if len(profiler) > 0 {
go func() {
err := http.ListenAndServe(opts.Debug.Profiler, nil)
err := http.ListenAndServe(profiler, nil)
if err != nil {
warnln(err)
}
@@ -110,25 +146,15 @@ func main() {
// connections.
cfg := &tls.Config{
ClientAuth: tls.RequestClientCert,
ServerName: "syncthing",
NextProtos: []string{"bep/1.0"},
InsecureSkipVerify: true,
Certificates: []tls.Certificate{cert},
Certificates: []tls.Certificate{cert},
NextProtos: []string{"bep/1.0"},
ServerName: myID,
ClientAuth: tls.RequestClientCert,
SessionTicketsDisabled: true,
InsecureSkipVerify: true,
MinVersion: tls.VersionTLS12,
}
// Load the configuration file, if it exists.
cf, err := os.Open(path.Join(opts.ConfDir, confFileName))
if err != nil {
fatalln("No config file")
config = ini.Config{}
}
config = ini.Parse(cf)
cf.Close()
var dir = config.Get("repository", "dir")
// Create a map of desired node connections based on the configuration file
// directives.
@@ -138,21 +164,34 @@ func main() {
}
ensureDir(dir, -1)
m := NewModel(dir)
m := model.NewModel(dir, opts.MaxChangeBW*1000)
for _, t := range strings.Split(trace, ",") {
m.Trace(t)
}
if opts.LimitRate > 0 {
m.LimitRate(opts.LimitRate)
}
// GUI
if opts.GUIAddr != "" {
startGUI(opts.GUIAddr, m)
if opts.GUI && opts.GUIAddr != "" {
host, port, err := net.SplitHostPort(opts.GUIAddr)
if err != nil {
warnf("Cannot start GUI on %q: %v", opts.GUIAddr, err)
} else {
if len(host) > 0 {
infof("Starting web GUI on http://%s", opts.GUIAddr)
} else {
infof("Starting web GUI on port %s", port)
}
startGUI(opts.GUIAddr, m)
}
}
// Walk the repository and update the local model before establishing any
// connections to other nodes.
if !opts.Rehash {
infoln("Loading index cache")
loadIndex(m)
}
infoln("Populating repository index")
loadIndex(m)
updateLocalModel(m)
// Routine to listen for incoming connections
@@ -166,32 +205,37 @@ func main() {
// Routine to pull blocks from other nodes to synchronize the local
// repository. Does not run when we are in read only (publish only) mode.
if !opts.ReadOnly {
infoln("Cleaning out incomplete synchronizations")
CleanTempFiles(dir)
okln("Ready to synchronize")
m.Start()
if opts.Delete {
infoln("Deletes from peer nodes are allowed")
} else {
infoln("Deletes from peer nodes will be ignored")
}
okln("Ready to synchronize (read-write)")
m.StartRW(opts.Delete, opts.ParallelRequests)
} else {
okln("Ready to synchronize (read only; no external updates accepted)")
}
// Periodically scan the repository and update the local model.
// XXX: Should use some fsnotify mechanism.
go func() {
for {
time.Sleep(opts.Advanced.ScanInterval)
updateLocalModel(m)
time.Sleep(opts.ScanInterval)
if m.LocalAge() > opts.ScanInterval.Seconds()/2 {
updateLocalModel(m)
}
}
}()
if !opts.NoStats {
// Periodically print statistics
go printStatsLoop(m)
}
// Periodically print statistics
go printStatsLoop(m)
select {}
}
func printStatsLoop(m *Model) {
func printStatsLoop(m *model.Model) {
var lastUpdated int64
var lastStats = make(map[string]ConnectionInfo)
var lastStats = make(map[string]model.ConnectionInfo)
for {
time.Sleep(60 * time.Second)
@@ -202,7 +246,7 @@ func printStatsLoop(m *Model) {
outbps := 8 * int(float64(stats.OutBytesTotal-lastStats[node].OutBytesTotal)/secs)
if inbps+outbps > 0 {
infof("%s: %sb/s in, %sb/s out", node, MetricPrefix(inbps), MetricPrefix(outbps))
infof("%s: %sb/s in, %sb/s out", node[0:5], MetricPrefix(inbps), MetricPrefix(outbps))
}
lastStats[node] = stats
@@ -215,15 +259,20 @@ func printStatsLoop(m *Model) {
files, _, bytes = m.LocalSize()
infof("%6d files, %9sB in local repo", files, BinaryPrefix(bytes))
needFiles, bytes := m.NeedFiles()
infof("%6d files, %9sB in to synchronize", len(needFiles), BinaryPrefix(bytes))
infof("%6d files, %9sB to synchronize", len(needFiles), BinaryPrefix(bytes))
}
}
}
func listen(myID string, addr string, m *Model, cfg *tls.Config) {
func listen(myID string, addr string, m *model.Model, cfg *tls.Config) {
l, err := tls.Listen("tcp", addr, cfg)
fatalErr(err)
connOpts := map[string]string{
"clientId": "syncthing",
"clientVersion": Version,
}
listen:
for {
conn, err := l.Accept()
@@ -232,7 +281,7 @@ listen:
continue
}
if opts.Debug.TraceNet {
if strings.Contains(trace, "connect") {
debugln("NET: Connect from", conn.RemoteAddr())
}
@@ -258,7 +307,8 @@ listen:
for nodeID := range nodeAddrs {
if nodeID == remoteID {
m.AddConnection(conn, remoteID)
protoConn := protocol.NewConnection(remoteID, conn, conn, m, connOpts)
m.AddConnection(conn, protoConn)
continue listen
}
}
@@ -266,29 +316,34 @@ listen:
}
}
func connect(myID string, addr string, nodeAddrs map[string][]string, m *Model, cfg *tls.Config) {
func connect(myID string, addr string, nodeAddrs map[string][]string, m *model.Model, cfg *tls.Config) {
_, portstr, err := net.SplitHostPort(addr)
fatalErr(err)
port, _ := strconv.Atoi(portstr)
if opts.Discovery.NoLocalDiscovery {
if !opts.LocalDiscovery {
port = -1
} else {
infoln("Sending local discovery announcements")
}
if opts.Discovery.NoExternalDiscovery {
opts.Discovery.ExternalPort = -1
if !opts.ExternalDiscovery {
opts.ExternalServer = ""
} else {
infoln("Sending external discovery announcements")
}
disc, err := discover.NewDiscoverer(myID, port, opts.Discovery.ExternalPort, opts.Discovery.ExternalServer)
disc, err := discover.NewDiscoverer(myID, port, opts.ExternalServer)
if err != nil {
warnf("No discovery possible (%v)", err)
}
connOpts := map[string]string{
"clientId": "syncthing",
"clientVersion": Version,
}
for {
nextNode:
for nodeID, addrs := range nodeAddrs {
@@ -309,12 +364,12 @@ func connect(myID string, addr string, nodeAddrs map[string][]string, m *Model,
}
}
if opts.Debug.TraceNet {
if strings.Contains(trace, "connect") {
debugln("NET: Dial", nodeID, addr)
}
conn, err := tls.Dial("tcp", addr, cfg)
if err != nil {
if opts.Debug.TraceNet {
if strings.Contains(trace, "connect") {
debugln("NET:", err)
}
continue
@@ -327,24 +382,25 @@ func connect(myID string, addr string, nodeAddrs map[string][]string, m *Model,
continue
}
m.AddConnection(conn, remoteID)
protoConn := protocol.NewConnection(remoteID, conn, conn, m, connOpts)
m.AddConnection(conn, protoConn)
continue nextNode
}
}
time.Sleep(opts.Advanced.ConnInterval)
time.Sleep(opts.ConnInterval)
}
}
func updateLocalModel(m *Model) {
files := Walk(m.Dir(), m, !opts.NoSymlinks)
func updateLocalModel(m *model.Model) {
files, _ := m.Walk(opts.Symlinks)
m.ReplaceLocal(files)
saveIndex(m)
}
func saveIndex(m *Model) {
name := fmt.Sprintf("%x.idx.gz", sha1.Sum([]byte(m.Dir())))
fullName := path.Join(opts.ConfDir, name)
func saveIndex(m *model.Model) {
name := m.RepoID() + ".idx.gz"
fullName := path.Join(confDir, name)
idxf, err := os.Create(fullName + ".tmp")
if err != nil {
return
@@ -358,9 +414,9 @@ func saveIndex(m *Model) {
os.Rename(fullName+".tmp", fullName)
}
func loadIndex(m *Model) {
fname := fmt.Sprintf("%x.idx.gz", sha1.Sum([]byte(m.Dir())))
idxf, err := os.Open(path.Join(opts.ConfDir, fname))
func loadIndex(m *model.Model) {
name := m.RepoID() + ".idx.gz"
idxf, err := os.Open(path.Join(confDir, name))
if err != nil {
return
}
@@ -376,7 +432,7 @@ func loadIndex(m *Model) {
if err != nil {
return
}
m.SeedIndex(idx)
m.SeedLocal(idx)
}
func ensureDir(dir string, mode int) {
@@ -390,6 +446,13 @@ func ensureDir(dir string, mode int) {
}
}
func expandTilde(p string) string {
if strings.HasPrefix(p, "~/") {
return strings.Replace(p, "~", getHomeDir(), 1)
}
return p
}
func getHomeDir() string {
home := os.Getenv("HOME")
if home == "" {

model.go

@@ -1,548 +0,0 @@
package main
/*
Locking
=======
The model has read and write locks. These must be acquired as appropriate by
public methods. To prevent deadlock situations, private methods should never
acquire locks, but document what locks they require.
*/
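// An illustrative sketch of the convention described above (not from the
// source): the exported method acquires the lock, the private helper only
// documents its requirement.
//
//   func (m *Model) HaveCount() int {
//       m.RLock()
//       defer m.RUnlock()
//       return m.haveCount()
//   }
//
//   // haveCount returns the number of local files.
//   // Must be called with the read lock held.
//   func (m *Model) haveCount() int { return len(m.local) }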
import (
"fmt"
"io"
"net"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
)
type Model struct {
sync.RWMutex
dir string
global map[string]File // the latest version of each file as it exists in the cluster
local map[string]File // the files we currently have locally on disk
remote map[string]map[string]File
need map[string]bool // the files we need to update
nodes map[string]*protocol.Connection
rawConn map[string]io.ReadWriteCloser
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
lastIdxBcast time.Time
lastIdxBcastRequest time.Time
}
const (
FlagDeleted = 1 << 12
idxBcastHoldtime = 15 * time.Second // Wait at least this long after the last index modification
idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
)
func NewModel(dir string) *Model {
m := &Model{
dir: dir,
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
need: make(map[string]bool),
nodes: make(map[string]*protocol.Connection),
rawConn: make(map[string]io.ReadWriteCloser),
lastIdxBcast: time.Now(),
}
go m.broadcastIndexLoop()
return m
}
func (m *Model) Start() {
go m.puller()
}
func (m *Model) Generation() int64 {
m.RLock()
defer m.RUnlock()
return m.updatedLocal + m.updateGlobal
}
type ConnectionInfo struct {
protocol.Statistics
Address string
}
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
type remoteAddrer interface {
RemoteAddr() net.Addr
}
m.RLock()
defer m.RUnlock()
var res = make(map[string]ConnectionInfo)
for node, conn := range m.nodes {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
}
if nc, ok := m.rawConn[node].(remoteAddrer); ok {
ci.Address = nc.RemoteAddr().String()
}
res[node] = ci
}
return res
}
func (m *Model) GlobalSize() (files, deleted, bytes int) {
m.RLock()
defer m.RUnlock()
for _, f := range m.global {
if f.Flags&FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
return
}
func (m *Model) LocalSize() (files, deleted, bytes int) {
m.RLock()
defer m.RUnlock()
for _, f := range m.local {
if f.Flags&FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
return
}
type FileInfo struct {
Name string
Size int
}
func (m *Model) NeedFiles() (files []FileInfo, bytes int) {
m.RLock()
defer m.RUnlock()
for n := range m.need {
f := m.global[n]
s := f.Size()
files = append(files, FileInfo{f.Name, s})
bytes += s
}
return
}
// Index is called when a new node is connected and we receive their full index.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
if opts.Debug.TraceNet {
debugf("NET IDX(in): %s: %d files", nodeID, len(fs))
}
m.remote[nodeID] = make(map[string]File)
for _, f := range fs {
if f.Flags&FlagDeleted != 0 && !opts.Delete {
// Files marked as deleted do not even enter the model
continue
}
m.remote[nodeID][f.Name] = fileFromFileInfo(f)
}
m.recomputeGlobal()
m.recomputeNeed()
}
// IndexUpdate is called for incremental updates to connected nodes' indexes.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
if opts.Debug.TraceNet {
debugf("NET IDXUP(in): %s: %d files", nodeID, len(fs))
}
repo, ok := m.remote[nodeID]
if !ok {
return
}
for _, f := range fs {
if f.Flags&FlagDeleted != 0 && !opts.Delete {
// Files marked as deleted do not even enter the model
continue
}
repo[f.Name] = fileFromFileInfo(f)
}
m.recomputeGlobal()
m.recomputeNeed()
}
// SeedIndex is called when our previously cached index is loaded from disk at startup.
func (m *Model) SeedIndex(fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
m.local = make(map[string]File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
m.recomputeGlobal()
m.recomputeNeed()
}
func (m *Model) Close(node string, err error) {
m.Lock()
defer m.Unlock()
conn, ok := m.rawConn[node]
if ok {
conn.Close()
} else {
warnln("Close on unknown connection for node", node)
}
if err != nil {
warnf("Disconnected from node %s: %v", node, err)
} else {
infoln("Disconnected from node", node)
}
delete(m.remote, node)
delete(m.nodes, node)
delete(m.rawConn, node)
m.recomputeGlobal()
m.recomputeNeed()
}
func (m *Model) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
if opts.Debug.TraceNet && nodeID != "<local>" {
debugf("NET REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
fn := path.Join(m.dir, name)
fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
if err != nil {
return nil, err
}
defer fd.Close()
buf := buffers.Get(int(size))
_, err = fd.ReadAt(buf, int64(offset))
if err != nil {
return nil, err
}
return buf, nil
}
func (m *Model) RequestGlobal(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
m.RLock()
nc, ok := m.nodes[nodeID]
m.RUnlock()
if !ok {
return nil, fmt.Errorf("RequestGlobal: no such node: %s", nodeID)
}
if opts.Debug.TraceNet {
debugf("NET REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
return nc.Request(name, offset, size, hash)
}
func (m *Model) ReplaceLocal(fs []File) {
m.Lock()
defer m.Unlock()
var updated bool
var newLocal = make(map[string]File)
for _, f := range fs {
newLocal[f.Name] = f
if ef := m.local[f.Name]; ef.Modified != f.Modified {
updated = true
}
}
if m.markDeletedLocals(newLocal) {
updated = true
}
if len(newLocal) != len(m.local) {
updated = true
}
if updated {
m.local = newLocal
m.recomputeGlobal()
m.recomputeNeed()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
}
}
func (m *Model) broadcastIndexLoop() {
for {
m.RLock()
bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast)
holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime
m.RUnlock()
maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay
if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
m.Lock()
var indexWg sync.WaitGroup
indexWg.Add(len(m.nodes))
idx := m.protocolIndex()
m.lastIdxBcast = time.Now()
for _, node := range m.nodes {
node := node
if opts.Debug.TraceNet {
debugf("NET IDX(out/loop): %s: %d files", node.ID, len(idx))
}
go func() {
node.Index(idx)
indexWg.Done()
}()
}
m.Unlock()
indexWg.Wait()
}
time.Sleep(idxBcastHoldtime)
}
}
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
// Must be called with the write lock held.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
// For every file in the existing local table, check if they are also
// present in the new local table. If they are not, check that we already
// had the newest version available according to the global table and if so
// note the file as having been deleted.
var updated bool
for n, f := range m.local {
if _, ok := newLocal[n]; !ok {
if gf := m.global[n]; gf.Modified <= f.Modified {
if f.Flags&FlagDeleted == 0 {
f.Flags = FlagDeleted
f.Modified = f.Modified + 1
f.Blocks = nil
updated = true
}
newLocal[n] = f
}
}
}
return updated
}
func (m *Model) UpdateLocal(f File) {
m.Lock()
defer m.Unlock()
if ef, ok := m.local[f.Name]; !ok || ef.Modified != f.Modified {
m.local[f.Name] = f
m.recomputeGlobal()
m.recomputeNeed()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
}
}
func (m *Model) Dir() string {
m.RLock()
defer m.RUnlock()
return m.dir
}
func (m *Model) HaveFiles() []File {
m.RLock()
defer m.RUnlock()
var files []File
for _, file := range m.local {
files = append(files, file)
}
return files
}
func (m *Model) LocalFile(name string) (File, bool) {
m.RLock()
defer m.RUnlock()
f, ok := m.local[name]
return f, ok
}
func (m *Model) GlobalFile(name string) (File, bool) {
m.RLock()
defer m.RUnlock()
f, ok := m.global[name]
return f, ok
}
// Must be called with the write lock held.
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
for n, f := range m.local {
newGlobal[n] = f
}
for _, fs := range m.remote {
for n, f := range fs {
if cf, ok := newGlobal[n]; !ok || cf.Modified < f.Modified {
newGlobal[n] = f
}
}
}
// Figure out if anything actually changed
var updated bool
if len(newGlobal) != len(m.global) {
updated = true
} else {
for n, f0 := range newGlobal {
if f1, ok := m.global[n]; !ok || f0.Modified != f1.Modified {
updated = true
break
}
}
}
if updated {
m.updateGlobal = time.Now().Unix()
m.global = newGlobal
}
}
// Must be called with the write lock held.
func (m *Model) recomputeNeed() {
m.need = make(map[string]bool)
for n, f := range m.global {
hf, ok := m.local[n]
if !ok || f.Modified > hf.Modified {
m.need[n] = true
}
}
}
// Must be called with the read lock held.
func (m *Model) whoHas(name string) []string {
var remote []string
gf := m.global[name]
for node, files := range m.remote {
if file, ok := files[name]; ok && file.Modified == gf.Modified {
remote = append(remote, node)
}
}
return remote
}
func (m *Model) ConnectedTo(nodeID string) bool {
m.RLock()
defer m.RUnlock()
_, ok := m.nodes[nodeID]
return ok
}
func (m *Model) ProtocolIndex() []protocol.FileInfo {
m.RLock()
defer m.RUnlock()
return m.protocolIndex()
}
// Must be called with the read lock held.
func (m *Model) protocolIndex() []protocol.FileInfo {
var index []protocol.FileInfo
for _, f := range m.local {
mf := fileInfoFromFile(f)
if opts.Debug.TraceIdx {
var flagComment string
if mf.Flags&FlagDeleted != 0 {
flagComment = " (deleted)"
}
debugf("IDX: %q m=%d f=%o%s (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, len(mf.Blocks))
}
index = append(index, mf)
}
return index
}
func (m *Model) AddConnection(conn io.ReadWriteCloser, nodeID string) {
node := protocol.NewConnection(nodeID, conn, conn, m)
m.Lock()
m.nodes[nodeID] = node
m.rawConn[nodeID] = conn
m.Unlock()
infoln("Connected to node", nodeID)
m.RLock()
idx := m.protocolIndex()
m.RUnlock()
go func() {
node.Index(idx)
infoln("Sent initial index to node", nodeID)
}()
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks []Block
var offset uint64
for _, b := range f.Blocks {
blocks = append(blocks, Block{
Offset: offset,
Length: b.Length,
Hash: b.Hash,
})
offset += uint64(b.Length)
}
return File{
Name: f.Name,
Flags: f.Flags,
Modified: int64(f.Modified),
Blocks: blocks,
}
}
func fileInfoFromFile(f File) protocol.FileInfo {
var blocks []protocol.BlockInfo
for _, b := range f.Blocks {
blocks = append(blocks, protocol.BlockInfo{
Length: b.Length,
Hash: b.Hash,
})
}
return protocol.FileInfo{
Name: f.Name,
Flags: f.Flags,
Modified: int64(f.Modified),
Blocks: blocks,
}
}


@@ -1,4 +1,4 @@
package main
package model
import (
"bytes"
@@ -6,18 +6,16 @@ import (
"io"
)
type BlockList []Block
type Block struct {
Offset uint64
Length uint32
Offset int64
Size uint32
Hash []byte
}
// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int) (BlockList, error) {
var blocks BlockList
var offset uint64
func Blocks(r io.Reader, blocksize int) ([]Block, error) {
var blocks []Block
var offset int64
for {
lr := &io.LimitedReader{r, int64(blocksize)}
hf := sha256.New()
@@ -32,19 +30,28 @@ func Blocks(r io.Reader, blocksize int) (BlockList, error) {
b := Block{
Offset: offset,
Length: uint32(n),
Size: uint32(n),
Hash: hf.Sum(nil),
}
blocks = append(blocks, b)
offset += uint64(n)
offset += int64(n)
}
if len(blocks) == 0 {
// Empty file
blocks = append(blocks, Block{
Offset: 0,
Size: 0,
Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
})
}
return blocks, nil
}
// To returns the list of blocks necessary to transform src into dst.
// Both block lists must have been created with the same block size.
func (src BlockList) To(tgt BlockList) (have, need BlockList) {
// BlockDiff returns lists of common and missing (to transform src into tgt)
// blocks. Both block lists must have been created with the same block size.
func BlockDiff(src, tgt []Block) (have, need []Block) {
if len(tgt) == 0 && len(src) != 0 {
return nil, nil
}
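A self-contained usage sketch of the renamed API (hypothetical driver program, mirroring the "contents" to "cantents" case from the test table further down):

// blockdiff_example.go (hypothetical)
package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/syncthing/model"
)

func main() {
	// Hash both strings in 3-byte blocks, then diff the block lists.
	src, _ := model.Blocks(bytes.NewBufferString("contents"), 3)
	tgt, _ := model.Blocks(bytes.NewBufferString("cantents"), 3)
	have, need := model.BlockDiff(src, tgt)
	// have: blocks already present locally; need: blocks to pull from peers.
	fmt.Printf("have %d blocks, need %d blocks\n", len(have), len(need))
}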


@@ -1,4 +1,4 @@
package main
package model
import (
"bytes"
@@ -11,7 +11,8 @@ var blocksTestData = []struct {
blocksize int
hash []string
}{
{[]byte(""), 1024, []string{}},
{[]byte(""), 1024, []string{
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
{[]byte("contents"), 1024, []string{
"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
{[]byte("contents"), 9, []string{
@@ -52,7 +53,7 @@ func TestBlocks(t *testing.T) {
t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
} else {
i := 0
for off := uint64(0); off < uint64(len(test.data)); off += uint64(test.blocksize) {
for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
if blocks[i].Offset != off {
t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
}
@@ -61,8 +62,8 @@ func TestBlocks(t *testing.T) {
if rem := len(test.data) - int(off); bs > rem {
bs = rem
}
if int(blocks[i].Length) != bs {
t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Length, bs)
if int(blocks[i].Size) != bs {
t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
}
if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
@@ -86,7 +87,7 @@ var diffTestData = []struct {
{"contents", "cantents", 3, []Block{{0, 3, nil}}},
{"contents", "contants", 3, []Block{{3, 3, nil}}},
{"contents", "cantants", 3, []Block{{0, 3, nil}, {3, 3, nil}}},
{"contents", "", 3, nil},
{"contents", "", 3, []Block{{0, 0, nil}}},
{"", "contents", 3, []Block{{0, 3, nil}, {3, 3, nil}, {6, 2, nil}}},
{"con", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
{"contents", "con", 3, nil},
@@ -98,7 +99,7 @@ func TestDiff(t *testing.T) {
for i, test := range diffTestData {
a, _ := Blocks(bytes.NewBufferString(test.a), test.s)
b, _ := Blocks(bytes.NewBufferString(test.b), test.s)
_, d := a.To(b)
_, d := BlockDiff(a, b)
if len(d) != len(test.d) {
t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
} else {
@@ -106,8 +107,8 @@ func TestDiff(t *testing.T) {
if d[j].Offset != test.d[j].Offset {
t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
}
if d[j].Length != test.d[j].Length {
t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Length, test.d[j].Length)
if d[j].Size != test.d[j].Size {
t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
}
}
}

model/filemonitor.go

@@ -0,0 +1,173 @@
package model
import (
"bytes"
"errors"
"fmt"
"log"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
)
type fileMonitor struct {
name string // in-repo name
path string // full path
writeDone sync.WaitGroup
model *Model
global File
localBlocks []Block
copyError error
writeError error
}
func (m *fileMonitor) FileBegins(cc <-chan content) error {
if m.model.trace["file"] {
log.Printf("FILE: FileBegins: " + m.name)
}
tmp := tempName(m.path, m.global.Modified)
dir := path.Dir(tmp)
_, err := os.Stat(dir)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(dir, 0777)
if err != nil {
return err
}
}
outFile, err := os.Create(tmp)
if err != nil {
return err
}
m.writeDone.Add(1)
var writeWg sync.WaitGroup
if len(m.localBlocks) > 0 {
writeWg.Add(1)
inFile, err := os.Open(m.path)
if err != nil {
return err
}
// Copy local blocks, close infile when done
go m.copyLocalBlocks(inFile, outFile, &writeWg)
}
// Write remote blocks,
writeWg.Add(1)
go m.copyRemoteBlocks(cc, outFile, &writeWg)
// Wait for both writing routines, then close the outfile
go func() {
writeWg.Wait()
outFile.Close()
m.writeDone.Done()
}()
return nil
}
func (m *fileMonitor) copyLocalBlocks(inFile, outFile *os.File, writeWg *sync.WaitGroup) {
defer inFile.Close()
defer writeWg.Done()
var buf = buffers.Get(BlockSize)
defer buffers.Put(buf)
for _, lb := range m.localBlocks {
buf = buf[:lb.Size]
_, err := inFile.ReadAt(buf, lb.Offset)
if err != nil {
m.copyError = err
return
}
_, err = outFile.WriteAt(buf, lb.Offset)
if err != nil {
m.copyError = err
return
}
}
}
func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writeWg *sync.WaitGroup) {
defer writeWg.Done()
for content := range cc {
_, err := outFile.WriteAt(content.data, content.offset)
buffers.Put(content.data)
if err != nil {
m.writeError = err
return
}
}
}
func (m *fileMonitor) FileDone() error {
if m.model.trace["file"] {
log.Printf("FILE: FileDone: " + m.name)
}
m.writeDone.Wait()
tmp := tempName(m.path, m.global.Modified)
defer os.Remove(tmp)
if m.copyError != nil {
return m.copyError
}
if m.writeError != nil {
return m.writeError
}
err := hashCheck(tmp, m.global.Blocks)
if err != nil {
return err
}
err = os.Chtimes(tmp, time.Unix(m.global.Modified, 0), time.Unix(m.global.Modified, 0))
if err != nil {
return err
}
err = os.Chmod(tmp, os.FileMode(m.global.Flags&0777))
if err != nil {
return err
}
err = os.Rename(tmp, m.path)
if err != nil {
return err
}
m.model.updateLocal(m.global)
return nil
}
func hashCheck(name string, correct []Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
if err != nil {
return err
}
if len(current) != len(correct) {
return errors.New("incorrect number of blocks")
}
for i := range current {
if !bytes.Equal(current[i].Hash, correct[i].Hash) {
return fmt.Errorf("hash mismatch: %x != %x", current[i].Hash, correct[i].Hash)
}
}
return nil
}

model/filequeue.go

@@ -0,0 +1,236 @@
package model
import (
"log"
"sort"
"sync"
"time"
)
type Monitor interface {
FileBegins(<-chan content) error
FileDone() error
}
type FileQueue struct {
files queuedFileList
sorted bool
fmut sync.Mutex // protects files and sorted
availability map[string][]string
amut sync.Mutex // protects availability
queued map[string]bool
}
type queuedFile struct {
name string
blocks []Block
activeBlocks []bool
given int
remaining int
channel chan content
nodes []string
nodesChecked time.Time
monitor Monitor
}
type content struct {
offset int64
data []byte
}
type queuedFileList []queuedFile
func (l queuedFileList) Len() int { return len(l) }
func (l queuedFileList) Swap(a, b int) { l[a], l[b] = l[b], l[a] }
func (l queuedFileList) Less(a, b int) bool {
// Sort by most blocks already given out, then alphabetically
if l[a].given != l[b].given {
return l[a].given > l[b].given
}
return l[a].name < l[b].name
}
type queuedBlock struct {
name string
block Block
index int
}
func NewFileQueue() *FileQueue {
return &FileQueue{
availability: make(map[string][]string),
queued: make(map[string]bool),
}
}
func (q *FileQueue) Add(name string, blocks []Block, monitor Monitor) {
q.fmut.Lock()
defer q.fmut.Unlock()
if q.queued[name] {
return
}
q.files = append(q.files, queuedFile{
name: name,
blocks: blocks,
activeBlocks: make([]bool, len(blocks)),
remaining: len(blocks),
channel: make(chan content),
monitor: monitor,
})
q.queued[name] = true
q.sorted = false
}
func (q *FileQueue) Len() int {
q.fmut.Lock()
defer q.fmut.Unlock()
return len(q.files)
}
func (q *FileQueue) Get(nodeID string) (queuedBlock, bool) {
q.fmut.Lock()
defer q.fmut.Unlock()
if !q.sorted {
sort.Sort(q.files)
q.sorted = true
}
for i := range q.files {
qf := &q.files[i]
q.amut.Lock()
av := q.availability[qf.name]
q.amut.Unlock()
if len(av) == 0 {
// No one has the file we want; abort.
if qf.remaining != len(qf.blocks) {
// We have already started on this file; close it down
close(qf.channel)
if mon := qf.monitor; mon != nil {
mon.FileDone()
}
}
delete(q.queued, qf.name)
q.deleteAt(i)
return queuedBlock{}, false
}
for _, ni := range av {
// Find and return the next block in the queue
if ni == nodeID {
for j, b := range qf.blocks {
if !qf.activeBlocks[j] {
qf.activeBlocks[j] = true
qf.given++
return queuedBlock{
name: qf.name,
block: b,
index: j,
}, true
}
}
break
}
}
}
// We found nothing to do
return queuedBlock{}, false
}
func (q *FileQueue) Done(file string, offset int64, data []byte) {
q.fmut.Lock()
defer q.fmut.Unlock()
c := content{
offset: offset,
data: data,
}
for i := range q.files {
qf := &q.files[i]
if qf.name == file {
if qf.monitor != nil && qf.remaining == len(qf.blocks) {
err := qf.monitor.FileBegins(qf.channel)
if err != nil {
log.Printf("WARNING: %s: %v (not synced)", qf.name, err)
delete(q.queued, qf.name)
q.deleteAt(i)
return
}
}
qf.channel <- c
qf.remaining--
if qf.remaining == 0 {
close(qf.channel)
if qf.monitor != nil {
err := qf.monitor.FileDone()
if err != nil {
log.Printf("WARNING: %s: %v", qf.name, err)
}
}
delete(q.queued, qf.name)
q.deleteAt(i)
}
return
}
}
panic("unreachable")
}
func (q *FileQueue) QueuedFiles() (files []string) {
q.fmut.Lock()
defer q.fmut.Unlock()
for _, qf := range q.files {
files = append(files, qf.name)
}
return
}
func (q *FileQueue) deleteAt(i int) {
q.files = q.files[:i+copy(q.files[i:], q.files[i+1:])]
}
func (q *FileQueue) deleteFile(n string) {
for i, file := range q.files {
if n == file.name {
q.deleteAt(i)
delete(q.queued, file.name)
return
}
}
}
func (q *FileQueue) SetAvailable(file string, nodes []string) {
q.amut.Lock()
defer q.amut.Unlock()
q.availability[file] = nodes
}
func (q *FileQueue) RemoveAvailable(toRemove string) {
q.amut.Lock()
defer q.amut.Unlock()
for file, nodes := range q.availability {
for i, node := range nodes {
if node == toRemove {
q.availability[file] = nodes[:i+copy(nodes[i:], nodes[i+1:])]
if len(q.availability[file]) == 0 {
q.deleteFile(file)
}
}
break
}
}
}

model/filequeue_test.go

@@ -0,0 +1,277 @@
package model
import (
"reflect"
"sync"
"sync/atomic"
"testing"
)
func TestFileQueueAdd(t *testing.T) {
q := NewFileQueue()
q.Add("foo", nil, nil)
}
func TestFileQueueAddSorting(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ := q.Get("nodeID")
if b.name != "aaa" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q = NewFileQueue()
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID") // Start on zzz
if b.name != "zzz" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID")
if b.name != "zzz" {
// Continue rather than starting a new file
t.Errorf("Incorrectly sorted get: %+v", b)
}
}
func TestFileQueueLen(t *testing.T) {
q := NewFileQueue()
q.Add("foo", nil, nil)
q.Add("bar", nil, nil)
if l := q.Len(); l != 2 {
t.Errorf("Incorrect len %d != 2 after adds", l)
}
}
func TestFileQueueGet(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("foo", []string{"nodeID"})
q.SetAvailable("bar", []string{"nodeID"})
q.Add("foo", []Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("bar", []Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
// First get should return the first block of the first file
expected := queuedBlock{
name: "bar",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
},
}
actual, ok := q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (first)\n E: %+v\n A: %+v", expected, actual)
}
// Second get should return the next block of the first file
expected = queuedBlock{
name: "bar",
block: Block{
Offset: 128,
Size: 128,
Hash: []byte("some other bar hash bytes"),
},
index: 1,
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (second)\n E: %+v\n A: %+v", expected, actual)
}
// Third get should return the first block of the second file
expected = queuedBlock{
name: "foo",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
},
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (third)\n E: %+v\n A: %+v", expected, actual)
}
}
/*
func TestFileQueueDone(t *testing.T) {
ch := make(chan content)
var recv sync.WaitGroup
recv.Add(1)
go func() {
content := <-ch
if bytes.Compare(content.data, []byte("first block bytes")) != 0 {
t.Error("Incorrect data in first content block")
}
content = <-ch
if bytes.Compare(content.data, []byte("second block bytes")) != 0 {
t.Error("Incorrect data in second content block")
}
_, ok := <-ch
if ok {
t.Error("Content channel not closed")
}
recv.Done()
}()
q := FileQueue{resolver: fakeResolver{}}
q.Add("foo", []Block{
{Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
}, ch)
b0, _ := q.Get("nodeID")
b1, _ := q.Get("nodeID")
q.Done(b0.name, b0.block.Offset, []byte("first block bytes"))
q.Done(b1.name, b1.block.Offset, []byte("second block bytes"))
recv.Wait()
// Queue should now have one file less
if l := q.Len(); l != 0 {
t.Error("Queue not empty")
}
_, ok := q.Get("nodeID")
if ok {
t.Error("Unexpected OK Get()")
}
}
*/
func TestFileQueueGetNodeIDs(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("a-foo", []string{"nodeID", "a"})
q.SetAvailable("b-bar", []string{"nodeID", "b"})
q.Add("a-foo", []Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("b-bar", []Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
expected := queuedBlock{
name: "b-bar",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
},
}
actual, ok := q.Get("b")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
expected = queuedBlock{
name: "a-foo",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
},
}
actual, ok = q.Get("a")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
expected = queuedBlock{
name: "a-foo",
block: Block{
Offset: 128,
Size: 128,
Hash: []byte("some other foo hash bytes"),
},
index: 1,
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
}
func TestFileQueueThreadHandling(t *testing.T) {
// This should pass with go test -race
const n = 100
var total int
var blocks []Block
for i := 1; i <= n; i++ {
blocks = append(blocks, Block{Offset: int64(i), Size: 1})
total += i
}
q := NewFileQueue()
q.Add("foo", blocks, nil)
q.SetAvailable("foo", []string{"nodeID"})
var start = make(chan bool)
var gotTot uint32
var wg sync.WaitGroup
wg.Add(n)
for i := 1; i <= n; i++ {
go func() {
<-start
b, _ := q.Get("nodeID")
atomic.AddUint32(&gotTot, uint32(b.block.Offset))
wg.Done()
}()
}
close(start)
wg.Wait()
if int(gotTot) != total {
t.Error("Total mismatch; %d != %d", gotTot, total)
}
}

model/model.go

@@ -0,0 +1,888 @@
package model
import (
"crypto/sha1"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
)
type Model struct {
dir string
global map[string]File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]File
rmut sync.RWMutex // protects remote
protoConn map[string]Connection
rawConn map[string]io.Closer
pmut sync.RWMutex // protects protoConn and rawConn
// Queue for files to fetch. fq can call back into the model, so we must ensure
// to hold no locks when calling methods on fq.
fq *FileQueue
dq chan File // queue for files to delete
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
lastIdxBcast time.Time
lastIdxBcastRequest time.Time
umut sync.RWMutex // provides updated* and lastIdx*
rwRunning bool
delete bool
initmut sync.Mutex // protects rwRunning and delete
trace map[string]bool
sup suppressor
parallelRequests int
limitRequestRate chan struct{}
imut sync.Mutex // protects Index
}
type Connection interface {
ID() string
Index([]protocol.FileInfo)
Request(name string, offset int64, size uint32, hash []byte) ([]byte, error)
Statistics() protocol.Statistics
Option(key string) string
}
const (
idxBcastHoldtime = 15 * time.Second // Wait at least this long after the last index modification
idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
minFileHoldTimeS = 60 // Never allow file changes more often than this
maxFileHoldTimeS = 600 // Always allow file changes at least this often
)
var (
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
)
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
func NewModel(dir string, maxChangeBw int) *Model {
m := &Model{
dir: dir,
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
protoConn: make(map[string]Connection),
rawConn: make(map[string]io.Closer),
lastIdxBcast: time.Now(),
trace: make(map[string]bool),
sup: suppressor{threshold: int64(maxChangeBw)},
fq: NewFileQueue(),
dq: make(chan File),
}
go m.broadcastIndexLoop()
return m
}
func (m *Model) LimitRate(kbps int) {
m.limitRequestRate = make(chan struct{}, kbps)
n := kbps/10 + 1
go func() {
for {
time.Sleep(100 * time.Millisecond)
for i := 0; i < n; i++ {
select {
case m.limitRequestRate <- struct{}{}:
default:
}
}
}
}()
}
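// Worked example (illustrative numbers, derived from the code above): with
// kbps = 100, the refill goroutine adds kbps/10+1 = 11 tokens every 100 ms,
// about 110 tokens per second. Request consumes one token per KiB served, so
// outgoing data is capped at roughly 110 KiB/s, with bursts up to the
// channel capacity of 100 tokens.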
// Trace enables trace logging of the given facility. This is a debugging function; grep for m.trace.
func (m *Model) Trace(t string) {
m.trace[t] = true
}
// StartRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRW(del bool, threads int) {
m.initmut.Lock()
defer m.initmut.Unlock()
if m.rwRunning {
panic("starting started model")
}
m.rwRunning = true
m.delete = del
m.parallelRequests = threads
go m.cleanTempFiles()
if del {
go m.deleteLoop()
}
}
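// Usage sketch (directory and parameters illustrative, not from this diff):
//
//   m := model.NewModel("/home/user/Sync", 10000) // read-only until StartRW
//   m.Trace("net")                                // optional debug tracing
//   m.StartRW(true, 4)                            // allow deletes, 4 pull routines per peer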
// Generation returns an opaque integer that is guaranteed to increment on
// every change to the local repository or global model.
func (m *Model) Generation() int64 {
m.umut.RLock()
defer m.umut.RUnlock()
return m.updatedLocal + m.updateGlobal
}
func (m *Model) LocalAge() float64 {
m.umut.RLock()
defer m.umut.RUnlock()
return time.Since(time.Unix(m.updatedLocal, 0)).Seconds()
}
type ConnectionInfo struct {
protocol.Statistics
Address string
ClientID string
ClientVersion string
}
// ConnectionStats returns a map with connection statistics for each connected node.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
type remoteAddrer interface {
RemoteAddr() net.Addr
}
m.pmut.RLock()
var res = make(map[string]ConnectionInfo)
for node, conn := range m.protoConn {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
ClientID: conn.Option("clientId"),
ClientVersion: conn.Option("clientVersion"),
}
if nc, ok := m.rawConn[node].(remoteAddrer); ok {
ci.Address = nc.RemoteAddr().String()
}
res[node] = ci
}
m.pmut.RUnlock()
return res
}
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize() (files, deleted, bytes int) {
m.gmut.RLock()
for _, f := range m.global {
if f.Flags&protocol.FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
m.gmut.RUnlock()
return
}
// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize() (files, deleted, bytes int) {
m.lmut.RLock()
for _, f := range m.local {
if f.Flags&protocol.FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
m.lmut.RUnlock()
return
}
// InSyncSize returns the number and total byte size of the local files that
// are in sync with the global model.
func (m *Model) InSyncSize() (files, bytes int) {
m.gmut.RLock()
m.lmut.RLock()
for n, f := range m.local {
if gf, ok := m.global[n]; ok && f.Equals(gf) {
files++
bytes += f.Size()
}
}
m.lmut.RUnlock()
m.gmut.RUnlock()
return
}
// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int) {
qf := m.fq.QueuedFiles()
m.gmut.RLock()
for _, n := range qf {
f := m.global[n]
files = append(files, f)
bytes += f.Size()
}
m.gmut.RUnlock()
return
}
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
log.Printf("NET IDX(in): %s: %d files", nodeID, len(fs))
}
repo := make(map[string]File)
for _, f := range files {
m.indexUpdate(repo, f)
}
m.rmut.Lock()
m.remote[nodeID] = repo
m.rmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForFiles(files)
}
// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
log.Printf("NET IDXUP(in): %s: %d files", nodeID, len(files))
}
m.rmut.Lock()
repo, ok := m.remote[nodeID]
if !ok {
log.Printf("WARNING: Index update from node %s that does not have an index", nodeID)
m.rmut.Unlock()
return
}
for _, f := range files {
m.indexUpdate(repo, f)
}
m.rmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForFiles(files)
}
func (m *Model) indexUpdate(repo map[string]File, f File) {
if m.trace["idx"] {
var flagComment string
if f.Flags&protocol.FlagDeleted != 0 {
flagComment = " (deleted)"
}
log.Printf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
}
if extraFlags := f.Flags &^ (protocol.FlagInvalid | protocol.FlagDeleted | 0xfff); extraFlags != 0 {
log.Printf("WARNING: IDX(in): Unknown flags 0x%x in index record %+v", extraFlags, f)
return
}
repo[f.Name] = f
}
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node string, err error) {
m.fq.RemoveAvailable(node)
m.pmut.Lock()
m.rmut.Lock()
conn, ok := m.rawConn[node]
if ok {
conn.Close()
}
delete(m.remote, node)
delete(m.protoConn, node)
delete(m.rawConn, node)
m.rmut.Unlock()
m.pmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForGlobal()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
// Verify that the requested file exists in the local and global model.
m.lmut.RLock()
lf, localOk := m.local[name]
m.lmut.RUnlock()
m.gmut.RLock()
_, globalOk := m.global[name]
m.gmut.RUnlock()
if !localOk || !globalOk {
log.Printf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
return nil, ErrNoSuchFile
}
if lf.Flags&protocol.FlagInvalid != 0 {
return nil, ErrInvalid
}
if m.trace["net"] && nodeID != "<local>" {
log.Printf("NET REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
fn := path.Join(m.dir, name)
fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
if err != nil {
return nil, err
}
defer fd.Close()
buf := buffers.Get(int(size))
_, err = fd.ReadAt(buf, offset)
if err != nil {
return nil, err
}
if m.limitRequestRate != nil {
for s := 0; s < len(buf); s += 1024 {
<-m.limitRequestRate
}
}
return buf, nil
}
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(fs []File) {
var updated bool
var newLocal = make(map[string]File)
m.lmut.RLock()
for _, f := range fs {
newLocal[f.Name] = f
if ef := m.local[f.Name]; !ef.Equals(f) {
updated = true
}
}
m.lmut.RUnlock()
if m.markDeletedLocals(newLocal) {
updated = true
}
m.lmut.RLock()
if len(newLocal) != len(m.local) {
updated = true
}
m.lmut.RUnlock()
if updated {
m.lmut.Lock()
m.local = newLocal
m.lmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForGlobal()
m.umut.Lock()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
m.umut.Unlock()
}
}
// SeedLocal replaces the local repository index with the given list of files,
// in protocol data types. Does not track deletes, should only be used to seed
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
m.lmut.Lock()
m.local = make(map[string]File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
m.lmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForGlobal()
}
// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID string) bool {
m.pmut.RLock()
_, ok := m.protoConn[nodeID]
m.pmut.RUnlock()
return ok
}
// RepoID returns a unique ID representing the current repository location.
func (m *Model) RepoID() string {
return fmt.Sprintf("%x", sha1.Sum([]byte(m.dir)))
}
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
nodeID := protoConn.ID()
m.pmut.Lock()
m.protoConn[nodeID] = protoConn
m.rawConn[nodeID] = rawConn
m.pmut.Unlock()
go func() {
idx := m.ProtocolIndex()
protoConn.Index(idx)
}()
m.initmut.Lock()
rw := m.rwRunning
m.initmut.Unlock()
if !rw {
return
}
for i := 0; i < m.parallelRequests; i++ {
i := i
go func() {
if m.trace["pull"] {
log.Println("PULL: Starting", nodeID, i)
}
for {
m.pmut.RLock()
if _, ok := m.protoConn[nodeID]; !ok {
if m.trace["pull"] {
log.Println("PULL: Exiting", nodeID, i)
}
m.pmut.RUnlock()
return
}
m.pmut.RUnlock()
qb, ok := m.fq.Get(nodeID)
if ok {
if m.trace["pull"] {
log.Println("PULL: Request", nodeID, i, qb.name, qb.block.Offset)
}
data, _ := protoConn.Request(qb.name, qb.block.Offset, qb.block.Size, qb.block.Hash)
m.fq.Done(qb.name, qb.block.Offset, data)
} else {
time.Sleep(1 * time.Second)
}
}
}()
}
}
// ProtocolIndex returns the current local index in protocol data types.
// It takes the necessary read lock itself.
func (m *Model) ProtocolIndex() []protocol.FileInfo {
var index []protocol.FileInfo
m.lmut.RLock()
for _, f := range m.local {
mf := fileInfoFromFile(f)
if m.trace["idx"] {
var flagComment string
if mf.Flags&protocol.FlagDeleted != 0 {
flagComment = " (deleted)"
}
log.Printf("IDX(out): %q m=%d f=%o%s v=%d (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
}
index = append(index, mf)
}
m.lmut.RUnlock()
return index
}
func (m *Model) requestGlobal(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
m.pmut.RLock()
nc, ok := m.protoConn[nodeID]
m.pmut.RUnlock()
if !ok {
return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
}
if m.trace["net"] {
log.Printf("NET REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
return nc.Request(name, offset, size, hash)
}
func (m *Model) broadcastIndexLoop() {
for {
m.umut.RLock()
bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast)
holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime
m.umut.RUnlock()
maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay
if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
idx := m.ProtocolIndex()
var indexWg sync.WaitGroup
indexWg.Add(len(m.protoConn))
m.umut.Lock()
m.lastIdxBcast = time.Now()
m.umut.Unlock()
m.pmut.RLock()
for _, node := range m.protoConn {
node := node
if m.trace["net"] {
log.Printf("NET IDX(out/loop): %s: %d files", node.ID(), len(idx))
}
go func() {
node.Index(idx)
indexWg.Done()
}()
}
m.pmut.RUnlock()
indexWg.Wait()
}
time.Sleep(idxBcastHoldtime)
}
}
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
// For every file in the existing local table, check if they are also
// present in the new local table. If they are not, check that we already
// had the newest version available according to the global table and if so
// note the file as having been deleted.
var updated bool
m.gmut.RLock()
m.lmut.RLock()
for n, f := range m.local {
if _, ok := newLocal[n]; !ok {
if gf := m.global[n]; !gf.NewerThan(f) {
if f.Flags&protocol.FlagDeleted == 0 {
f.Flags = protocol.FlagDeleted
f.Version++
f.Blocks = nil
updated = true
}
newLocal[n] = f
}
}
}
m.lmut.RUnlock()
m.gmut.RUnlock()
return updated
}
func (m *Model) updateLocal(f File) {
var updated bool
m.lmut.Lock()
if ef, ok := m.local[f.Name]; !ok || !ef.Equals(f) {
m.local[f.Name] = f
updated = true
}
m.lmut.Unlock()
if updated {
m.recomputeGlobal()
// We don't recomputeNeed here for two reasons:
// - a need shouldn't have arisen due to having a newer local file
// - recomputeNeed might call into fq.Add but we might have been called by
// fq which would be a deadlock on fq
m.umut.Lock()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
m.umut.Unlock()
}
}
/*
XXX: Not done, needs elegant handling of availability
func (m *Model) recomputeGlobalFor(files []File) bool {
m.gmut.Lock()
defer m.gmut.Unlock()
var updated bool
for _, f := range files {
if gf, ok := m.global[f.Name]; !ok || f.NewerThan(gf) {
m.global[f.Name] = f
updated = true
// Fix availability
}
}
return updated
}
*/
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
m.lmut.RLock()
for n, f := range m.local {
newGlobal[n] = f
}
m.lmut.RUnlock()
var available = make(map[string][]string)
m.rmut.RLock()
var highestMod int64
for nodeID, fs := range m.remote {
for n, nf := range fs {
if lf, ok := newGlobal[n]; !ok || nf.NewerThan(lf) {
newGlobal[n] = nf
available[n] = []string{nodeID}
if nf.Modified > highestMod {
highestMod = nf.Modified
}
} else if lf.Equals(nf) {
available[n] = append(available[n], nodeID)
}
}
}
m.rmut.RUnlock()
for f, ns := range available {
m.fq.SetAvailable(f, ns)
}
// Figure out if anything actually changed
m.gmut.RLock()
var updated bool
if highestMod > m.updateGlobal || len(newGlobal) != len(m.global) {
updated = true
} else {
for n, f0 := range newGlobal {
if f1, ok := m.global[n]; !ok || !f0.Equals(f1) {
updated = true
break
}
}
}
m.gmut.RUnlock()
if updated {
m.gmut.Lock()
m.umut.Lock()
m.global = newGlobal
m.updateGlobal = time.Now().Unix()
m.umut.Unlock()
m.gmut.Unlock()
}
}
type addOrder struct {
n string
remote []Block
fm *fileMonitor
}
func (m *Model) recomputeNeedForGlobal() {
var toDelete []File
var toAdd []addOrder
m.gmut.RLock()
for _, gf := range m.global {
toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
}
m.gmut.RUnlock()
for _, ao := range toAdd {
m.fq.Add(ao.n, ao.remote, ao.fm)
}
for _, gf := range toDelete {
m.dq <- gf
}
}
func (m *Model) recomputeNeedForFiles(files []File) {
var toDelete []File
var toAdd []addOrder
m.gmut.RLock()
for _, gf := range files {
toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
}
m.gmut.RUnlock()
for _, ao := range toAdd {
m.fq.Add(ao.n, ao.remote, ao.fm)
}
for _, gf := range toDelete {
m.dq <- gf
}
}
func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File) ([]addOrder, []File) {
m.lmut.RLock()
lf, ok := m.local[gf.Name]
m.lmut.RUnlock()
if !ok || gf.NewerThan(lf) {
if gf.Flags&protocol.FlagInvalid != 0 {
// Never attempt to sync invalid files
return toAdd, toDelete
}
if gf.Flags&protocol.FlagDeleted != 0 && !m.delete {
// Don't want to delete files, so forget this need
return toAdd, toDelete
}
if gf.Flags&protocol.FlagDeleted != 0 && !ok {
// Don't have the file, so don't need to delete it
return toAdd, toDelete
}
if m.trace["need"] {
log.Printf("NEED: lf:%v gf:%v", lf, gf)
}
if gf.Flags&protocol.FlagDeleted != 0 {
toDelete = append(toDelete, gf)
} else {
local, remote := BlockDiff(lf.Blocks, gf.Blocks)
fm := fileMonitor{
name: gf.Name,
path: path.Clean(path.Join(m.dir, gf.Name)),
global: gf,
model: m,
localBlocks: local,
}
toAdd = append(toAdd, addOrder{gf.Name, remote, &fm})
}
}
return toAdd, toDelete
}
func (m *Model) WhoHas(name string) []string {
var remote []string
m.gmut.RLock()
m.rmut.RLock()
gf := m.global[name]
for node, files := range m.remote {
if file, ok := files[name]; ok && file.Equals(gf) {
remote = append(remote, node)
}
}
m.rmut.RUnlock()
m.gmut.RUnlock()
return remote
}
func (m *Model) deleteLoop() {
for file := range m.dq {
if m.trace["file"] {
log.Println("FILE: Delete", file.Name)
}
path := path.Clean(path.Join(m.dir, file.Name))
err := os.Remove(path)
if err != nil {
log.Printf("WARNING: %s: %v", file.Name, err)
}
m.updateLocal(file)
}
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks = make([]Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = Block{
Offset: offset,
Size: b.Size,
Hash: b.Hash,
}
offset += int64(b.Size)
}
return File{
Name: f.Name,
Flags: f.Flags,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}
}
func fileInfoFromFile(f File) protocol.FileInfo {
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{
Size: b.Size,
Hash: b.Hash,
}
}
return protocol.FileInfo{
Name: f.Name,
Flags: f.Flags,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}
}

model/model_test.go Normal file (537 lines)

@@ -0,0 +1,537 @@
package model
import (
"bytes"
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/calmh/syncthing/protocol"
)
func TestNewModel(t *testing.T) {
m := NewModel("foo", 1e6)
if m == nil {
t.Fatalf("NewModel returned nil")
}
if fs, _ := m.NeedFiles(); len(fs) > 0 {
t.Errorf("New model should have no Need")
}
if len(m.local) > 0 {
t.Errorf("New model should have no Have")
}
}
var testDataExpected = map[string]File{
"foo": File{
Name: "foo",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
},
"empty": File{
Name: "empty",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
},
"bar": File{
Name: "bar",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
}
func init() {
// Fix expected test data to match reality
for n, f := range testDataExpected {
fi, _ := os.Stat("testdata/" + n)
f.Flags = uint32(fi.Mode())
f.Modified = fi.ModTime().Unix()
testDataExpected[n] = f
}
}
func TestUpdateLocal(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
if fs, _ := m.NeedFiles(); len(fs) > 0 {
t.Fatalf("Model with only local data should have no need")
}
if l1, l2 := len(m.local), len(testDataExpected); l1 != l2 {
t.Fatalf("Model len(local) incorrect, %d != %d", l1, l2)
}
if l1, l2 := len(m.global), len(testDataExpected); l1 != l2 {
t.Fatalf("Model len(global) incorrect, %d != %d", l1, l2)
}
for name, file := range testDataExpected {
if f, ok := m.local[name]; ok {
if !reflect.DeepEqual(f, file) {
t.Errorf("Incorrect local\n%v !=\n%v\nfor file %q", f, file, name)
}
} else {
t.Errorf("Missing file %q in local table", name)
}
if f, ok := m.global[name]; ok {
if !reflect.DeepEqual(f, file) {
t.Errorf("Incorrect global\n%v !=\n%v\nfor file %q", f, file, name)
}
} else {
t.Errorf("Missing file %q in global table", name)
}
}
for _, f := range fs {
if hf, ok := m.local[f.Name]; !ok || hf.Modified != f.Modified {
t.Fatalf("Incorrect local for %q", f.Name)
}
if cf, ok := m.global[f.Name]; !ok || cf.Modified != f.Modified {
t.Fatalf("Incorrect global for %q", f.Name)
}
}
}
func TestRemoteUpdateExisting(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
Name: "foo",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model missing Need for one file (%d != 1)", len(fs))
}
}
func TestRemoteAddNew(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
Name: "a new file",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model len(m.need) incorrect (%d != 1)", len(fs))
}
}
func TestRemoteUpdateOld(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
oldTimeStamp := int64(1234)
newFile := protocol.FileInfo{
Name: "foo",
Modified: oldTimeStamp,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
if fs, _ := m.NeedFiles(); len(fs) != 0 {
t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
}
}
func TestRemoteIndexUpdate(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
foo := protocol.FileInfo{
Name: "foo",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
bar := protocol.FileInfo{
Name: "bar",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{foo})
if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
t.Error("Model doesn't need 'foo'")
}
m.IndexUpdate("42", []protocol.FileInfo{bar})
if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
t.Error("Model doesn't need 'foo'")
}
if fs, _ := m.NeedFiles(); fs[1].Name != "bar" {
t.Error("Model doesn't need 'bar'")
}
}
func TestDelete(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs); l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
ot := time.Now().Unix()
newFile := File{
Name: "a new file",
Modified: ot,
Blocks: []Block{{0, 100, []byte("some hash bytes")}},
}
m.updateLocal(newFile)
if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
// The deleted file is kept in the local and global tables and marked as deleted.
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if m.local["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in local table")
}
if len(m.local["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in local")
}
if ft := m.local["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
}
if m.global["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in global table")
}
if len(m.global["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in global")
}
if ft := m.global["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot+1)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
}
// Another update should change nothing
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if m.local["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in local table")
}
if len(m.local["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in local")
}
if ft := m.local["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
}
if m.global["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in global table")
}
if len(m.global["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in global")
}
if ft := m.global["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
}
}
func TestForgetNode(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs); l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if fs, _ := m.NeedFiles(); len(fs) != 0 {
t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
}
newFile := protocol.FileInfo{
Name: "new file",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
newFile = protocol.FileInfo{
Name: "new file 2",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("43", []protocol.FileInfo{newFile})
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+2; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if fs, _ := m.NeedFiles(); len(fs) != 2 {
t.Errorf("Model len(need) incorrect (%d != 2)", len(fs))
}
m.Close("42", nil)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model len(need) incorrect (%d != 1)", len(fs))
}
}
func TestRequest(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
bs, err := m.Request("some node", "foo", 0, 6, nil)
if err != nil {
t.Fatal(err)
}
if bytes.Compare(bs, []byte("foobar")) != 0 {
t.Errorf("Incorrect data from request: %q", string(bs))
}
bs, err = m.Request("some node", "../walk.go", 0, 6, nil)
if err == nil {
t.Error("Unexpected nil error on insecure file read")
}
if bs != nil {
t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs))
}
}
func TestIgnoreWithUnknownFlags(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
valid := protocol.FileInfo{
Name: "valid",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
Flags: protocol.FlagDeleted | 0755,
}
invalid := protocol.FileInfo{
Name: "invalid",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
Flags: 1<<27 | protocol.FlagDeleted | 0755,
}
m.Index("42", []protocol.FileInfo{valid, invalid})
if _, ok := m.global[valid.Name]; !ok {
t.Error("Model should include", valid)
}
if _, ok := m.global[invalid.Name]; ok {
t.Error("Model not should include", invalid)
}
}
func genFiles(n int) []protocol.FileInfo {
files := make([]protocol.FileInfo, n)
t := time.Now().Unix()
for i := 0; i < n; i++ {
files[i] = protocol.FileInfo{
Name: fmt.Sprintf("file%d", i),
Modified: t,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
}
return files
}
func BenchmarkIndex10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index("42", files)
}
}
func BenchmarkIndex00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index("42", files)
}
}
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate("42", files)
}
}
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
ufiles := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate("42", ufiles)
}
}
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
ufiles := genFiles(1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate("42", ufiles)
}
}
type FakeConnection struct {
id string
requestData []byte
}
func (FakeConnection) Close() error {
return nil
}
func (f FakeConnection) ID() string {
return f.id
}
func (f FakeConnection) Option(string) string {
return ""
}
func (FakeConnection) Index([]protocol.FileInfo) {}
func (f FakeConnection) Request(name string, offset int64, size uint32, hash []byte) ([]byte, error) {
return f.requestData, nil
}
func (FakeConnection) Ping() bool {
return true
}
func (FakeConnection) Statistics() protocol.Statistics {
return protocol.Statistics{}
}
func BenchmarkRequest(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
const n = 1000
files := make([]protocol.FileInfo, n)
t := time.Now().Unix()
for i := 0; i < n; i++ {
files[i] = protocol.FileInfo{
Name: fmt.Sprintf("file%d", i),
Modified: t,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
}
fc := FakeConnection{
id: "42",
requestData: []byte("some data to return"),
}
m.AddConnection(fc, fc)
m.Index("42", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
data, err := m.requestGlobal("42", files[i%n].Name, 0, 32, nil)
if err != nil {
b.Error(err)
}
if data == nil {
b.Error("nil data")
}
}
}

model/suppressor.go Normal file (72 lines)

@@ -0,0 +1,72 @@
package model
import (
"sync"
"time"
)
const (
MAX_CHANGE_HISTORY = 4
)
type change struct {
size int64
when time.Time
}
type changeHistory struct {
changes []change
next int64
prevSup bool
}
type suppressor struct {
sync.Mutex
changes map[string]changeHistory
threshold int64 // bytes/s
}
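// bandwidth returns the average rate of change in bytes per second over
// the recorded history, measured from the oldest recorded change to t.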
func (h changeHistory) bandwidth(t time.Time) int64 {
if len(h.changes) == 0 {
return 0
}
var t0 = h.changes[0].when
if t == t0 {
return 0
}
var bw float64
for _, c := range h.changes {
bw += float64(c.size)
}
return int64(bw / t.Sub(t0).Seconds())
}
func (h *changeHistory) append(size int64, t time.Time) {
c := change{size, t}
if len(h.changes) == MAX_CHANGE_HISTORY {
h.changes = h.changes[1:MAX_CHANGE_HISTORY]
}
h.changes = append(h.changes, c)
}
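// suppress reports whether a change of the given size to name at time t
// should be suppressed because the file is changing faster than the
// threshold, and whether the previous change to the same file was
// suppressed.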
func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
s.Lock()
if s.changes == nil {
s.changes = make(map[string]changeHistory)
}
h := s.changes[name]
sup := h.bandwidth(t) > s.threshold
prevSup := h.prevSup
h.prevSup = sup
if !sup {
h.append(size, t)
}
s.changes[name] = h
s.Unlock()
return sup, prevSup
}
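A minimal usage sketch, for illustration only (the threshold, file name and size here are made up), of how a caller such as the directory walker might consult the suppressor:

func exampleSuppress() {
	s := suppressor{threshold: 10000} // bytes per second
	sup, prev := s.suppress("some/file", 1<<20, time.Now())
	if sup && !prev {
		log.Println("changes to some/file are being suppressed")
	}
}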

model/suppressor_test.go Normal file (113 lines)

@@ -0,0 +1,113 @@
package model
import (
"testing"
"time"
)
func TestSuppressor(t *testing.T) {
s := suppressor{threshold: 10000}
t0 := time.Now()
t1 := t0
sup, prev := s.suppress("foo", 10000, t1)
if sup {
t.Fatal("Never suppress first change")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is 10000 / 10 = 1000
t1 = t0.Add(10 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1000 {
t.Error("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000, t1)
if sup {
t.Fatal("Should still be fine")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000) / 11 = 1818
t1 = t0.Add(11 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1818 {
t.Error("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 100500, t1)
if sup {
t.Fatal("Should still be fine")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000 + 100500) / 12 = 10041
t1 = t0.Add(12 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 10041 {
t.Error("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1) // value will be ignored
if !sup {
t.Fatal("Should be over threshold")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000 + 100500) / 15 = 8033
t1 = t0.Add(15 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 8033 {
t.Error("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1)
if sup {
t.Fatal("Should be Ok")
}
if !prev {
t.Fatal("Incorrect prev status")
}
}
func TestHistory(t *testing.T) {
h := changeHistory{}
t0 := time.Now()
h.append(40, t0)
if l := len(h.changes); l != 1 {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 40 {
t.Errorf("Incorrect first record size %d", s)
}
for i := 1; i < MAX_CHANGE_HISTORY; i++ {
h.append(int64(40+i), t0.Add(time.Duration(i)*time.Second))
}
if l := len(h.changes); l != MAX_CHANGE_HISTORY {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 40 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MAX_CHANGE_HISTORY-1].size; s != 40+MAX_CHANGE_HISTORY-1 {
t.Errorf("Incorrect last record size %d", s)
}
h.append(999, t0.Add(time.Duration(999)*time.Second))
if l := len(h.changes); l != MAX_CHANGE_HISTORY {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 41 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MAX_CHANGE_HISTORY-1].size; s != 999 {
t.Errorf("Incorrect last record size %d", s)
}
}

model/testdata/.stignore vendored Normal file (2 lines)

@@ -0,0 +1,2 @@
.*
quux

model/testdata/empty vendored Normal file (0 lines)

model/walk.go Normal file (243 lines)

@@ -0,0 +1,243 @@
package model
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/calmh/syncthing/protocol"
)
const BlockSize = 128 * 1024
type File struct {
Name string
Flags uint32
Modified int64
Version uint32
Blocks []Block
}
func (f File) Size() (bytes int) {
for _, b := range f.Blocks {
bytes += int(b.Size)
}
return
}
func (f File) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, NumBlocks:%d}",
f.Name, f.Flags, f.Modified, f.Version, len(f.Blocks))
}
func (f File) Equals(o File) bool {
return f.Modified == o.Modified && f.Version == o.Version
}
func (f File) NewerThan(o File) bool {
return f.Modified > o.Modified || (f.Modified == o.Modified && f.Version > o.Version)
}
func isTempName(name string) bool {
return strings.HasPrefix(path.Base(name), ".syncthing.")
}
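// tempName returns the name used for the temporary copy of a file while
// it is being synced; for example, "dir/foo" modified at Unix time
// 1234567890 becomes "dir/.syncthing.foo.1234567890".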
func tempName(name string, modified int64) string {
tdir := path.Dir(name)
tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
return path.Join(tdir, tname)
}
func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if pn, sn := path.Split(rn); sn == ".stignore" {
pn := strings.Trim(pn, "/")
bs, _ := ioutil.ReadFile(p)
lines := bytes.Split(bs, []byte("\n"))
var patterns []string
for _, line := range lines {
if len(line) > 0 {
patterns = append(patterns, string(line))
}
}
ign[pn] = patterns
}
return nil
}
}
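// walkAndHashFiles returns a filepath.WalkFunc that records each regular
// file in res, reusing the cached entry when the modification time is
// unchanged and hashing the file block by block otherwise. Temporary
// files, .stignore files and ignored files are skipped, and files whose
// changes are currently suppressed are marked invalid instead of hashed.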
func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
if isTempName(p) {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if _, sn := path.Split(rn); sn == ".stignore" {
// We never sync the .stignore files
return nil
}
if ignoreFile(ign, rn) {
if m.trace["file"] {
log.Println("FILE: IGNORE:", rn)
}
return nil
}
if info.Mode()&os.ModeType == 0 {
modified := info.ModTime().Unix()
m.lmut.RLock()
lf, ok := m.local[rn]
m.lmut.RUnlock()
if ok && lf.Modified == modified {
if nf := uint32(info.Mode()); nf != lf.Flags {
lf.Flags = nf
lf.Version++
}
*res = append(*res, lf)
} else {
if cur, prev := m.sup.suppress(rn, info.Size(), time.Now()); cur {
if m.trace["file"] {
log.Printf("FILE: SUPPRESS: %q change bw over threshold", rn)
}
if !prev {
log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", rn)
}
if ok {
lf.Flags = protocol.FlagInvalid
lf.Version++
*res = append(*res, lf)
}
return nil
} else if prev && !cur {
log.Printf("INFO: Changes to %q are no longer suppressed.", rn)
}
if m.trace["file"] {
log.Printf("FILE: Hash %q", p)
}
fd, err := os.Open(p)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
defer fd.Close()
blocks, err := Blocks(fd, BlockSize)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
f := File{
Name: rn,
Flags: uint32(info.Mode()),
Modified: modified,
Blocks: blocks,
}
*res = append(*res, f)
}
}
return nil
}
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
ignore = make(map[string][]string)
hashFiles := m.walkAndHashFiles(&files, ignore)
filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
filepath.Walk(m.dir, hashFiles)
if followSymlinks {
d, err := os.Open(m.dir)
if err != nil {
return
}
defer d.Close()
fis, err := d.Readdir(-1)
if err != nil {
return
}
for _, info := range fis {
if info.Mode()&os.ModeSymlink != 0 {
dir := path.Join(m.dir, info.Name()) + "/"
filepath.Walk(dir, m.loadIgnoreFiles(ignore))
filepath.Walk(dir, hashFiles)
}
}
}
return
}
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode()&os.ModeType == 0 && isTempName(path) {
if m.trace["file"] {
log.Printf("FILE: Remove %q", path)
}
os.Remove(path)
}
return nil
}
func (m *Model) cleanTempFiles() {
filepath.Walk(m.dir, m.cleanTempFile)
}
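// ignoreFile reports whether the base name of file matches any ignore
// pattern declared for its own directory or for any directory above it.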
func ignoreFile(patterns map[string][]string, file string) bool {
first, last := path.Split(file)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
for _, pattern := range pats {
if match, _ := path.Match(pattern, last); match {
return true
}
}
}
}
return false
}

model/walk_test.go Normal file (83 lines)

@@ -0,0 +1,83 @@
package model
import (
"fmt"
"reflect"
"testing"
"time"
)
var testdata = []struct {
name string
size int
hash string
}{
{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
{"empty", 0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
}
var correctIgnores = map[string][]string{
"": {".*", "quux"},
}
func TestWalk(t *testing.T) {
m := NewModel("testdata", 1e6)
files, ignores := m.Walk(false)
if l1, l2 := len(files), len(testdata); l1 != l2 {
t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)
}
for i := range testdata {
if n1, n2 := testdata[i].name, files[i].Name; n1 != n2 {
t.Errorf("Incorrect file name %q != %q for case #%d", n1, n2, i)
}
if h1, h2 := fmt.Sprintf("%x", files[i].Blocks[0].Hash), testdata[i].hash; h1 != h2 {
t.Errorf("Incorrect hash %q != %q for case #%d", h1, h2, i)
}
t0 := time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
t1 := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
if mt := files[i].Modified; mt < t0 || mt > t1 {
t.Errorf("Unrealistic modtime %d for test %d", mt, i)
}
}
if !reflect.DeepEqual(ignores, correctIgnores) {
t.Errorf("Incorrect ignores\n %v\n %v", correctIgnores, ignores)
}
}
func TestIgnore(t *testing.T) {
var patterns = map[string][]string{
"": {"t2"},
"foo": {"bar", "z*"},
"foo/baz": {"quux", ".*"},
}
var tests = []struct {
f string
r bool
}{
{"foo/bar", true},
{"foo/quux", false},
{"foo/zuux", true},
{"foo/qzuux", false},
{"foo/baz/t1", false},
{"foo/baz/t2", true},
{"foo/baz/bar", true},
{"foo/baz/quuxa", false},
{"foo/baz/aquux", false},
{"foo/baz/.quux", true},
{"foo/baz/zquux", true},
{"foo/baz/quux", true},
{"foo/bazz/quux", false},
}
for i, tc := range tests {
if r := ignoreFile(patterns, tc.f); r != tc.r {
t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, tc.r, r)
}
}
}


@@ -1,248 +0,0 @@
package main
/*
Locking
=======
These methods are never called from the outside so don't follow the locking
policy in model.go.
TODO(jb): Refactor this into smaller and cleaner pieces.
TODO(jb): Increase performance by taking apparent peer bandwidth into account.
*/
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
)
func (m *Model) pullFile(name string) error {
m.RLock()
var localFile = m.local[name]
var globalFile = m.global[name]
var nodeIDs = m.whoHas(name)
m.RUnlock()
if len(nodeIDs) == 0 {
return fmt.Errorf("%s: no connected nodes with file available", name)
}
filename := path.Join(m.dir, name)
sdir := path.Dir(filename)
_, err := os.Stat(sdir)
if err != nil && os.IsNotExist(err) {
os.MkdirAll(sdir, 0777)
}
tmpFilename := tempName(filename, globalFile.Modified)
tmpFile, err := os.Create(tmpFilename)
if err != nil {
return err
}
contentChan := make(chan content, 32)
var applyDone sync.WaitGroup
applyDone.Add(1)
go func() {
applyContent(contentChan, tmpFile)
tmpFile.Close()
applyDone.Done()
}()
local, remote := localFile.Blocks.To(globalFile.Blocks)
var fetchDone sync.WaitGroup
// One local copy routine
fetchDone.Add(1)
go func() {
for _, block := range local {
data, err := m.Request("<local>", name, block.Offset, block.Length, block.Hash)
if err != nil {
break
}
contentChan <- content{
offset: int64(block.Offset),
data: data,
}
}
fetchDone.Done()
}()
// N remote copy routines
var remoteBlocks = blockIterator{blocks: remote}
for i := 0; i < opts.Advanced.RequestsInFlight; i++ {
curNode := nodeIDs[i%len(nodeIDs)]
fetchDone.Add(1)
go func(nodeID string) {
for {
block, ok := remoteBlocks.Next()
if !ok {
break
}
data, err := m.RequestGlobal(nodeID, name, block.Offset, block.Length, block.Hash)
if err != nil {
break
}
contentChan <- content{
offset: int64(block.Offset),
data: data,
}
}
fetchDone.Done()
}(curNode)
}
fetchDone.Wait()
close(contentChan)
applyDone.Wait()
err = hashCheck(tmpFilename, globalFile.Blocks)
if err != nil {
return fmt.Errorf("%s: %s (deleting)", path.Base(name), err.Error())
}
err = os.Chtimes(tmpFilename, time.Unix(globalFile.Modified, 0), time.Unix(globalFile.Modified, 0))
if err != nil {
return err
}
err = os.Rename(tmpFilename, filename)
if err != nil {
return err
}
return nil
}
func (m *Model) puller() {
for {
time.Sleep(time.Second)
var ns []string
m.RLock()
for n := range m.need {
ns = append(ns, n)
}
m.RUnlock()
if len(ns) == 0 {
continue
}
var limiter = make(chan bool, opts.Advanced.FilesInFlight)
var allDone sync.WaitGroup
for _, n := range ns {
limiter <- true
allDone.Add(1)
go func(n string) {
defer func() {
allDone.Done()
<-limiter
}()
f, ok := m.GlobalFile(n)
if !ok {
return
}
var err error
if f.Flags&FlagDeleted == 0 {
if opts.Debug.TraceFile {
debugf("FILE: Pull %q", n)
}
err = m.pullFile(n)
} else {
if opts.Debug.TraceFile {
debugf("FILE: Remove %q", n)
}
// Cheerfully ignore errors here
_ = os.Remove(path.Join(m.dir, n))
}
if err == nil {
m.UpdateLocal(f)
} else {
warnln(err)
}
}(n)
}
allDone.Wait()
}
}
type content struct {
offset int64
data []byte
}
func applyContent(cc <-chan content, dst io.WriterAt) error {
var err error
for c := range cc {
_, err = dst.WriteAt(c.data, c.offset)
buffers.Put(c.data)
if err != nil {
return err
}
}
return nil
}
func hashCheck(name string, correct []Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
if err != nil {
return err
}
if len(current) != len(correct) {
return errors.New("incorrect number of blocks")
}
for i := range current {
if bytes.Compare(current[i].Hash, correct[i].Hash) != 0 {
return fmt.Errorf("hash mismatch: %x != %x", current[i], correct[i])
}
}
return nil
}
type blockIterator struct {
sync.Mutex
blocks []Block
}
func (i *blockIterator) Next() (b Block, ok bool) {
i.Lock()
defer i.Unlock()
if len(i.blocks) == 0 {
return
}
b, i.blocks = i.blocks[0], i.blocks[1:]
ok = true
return
}


@@ -1,308 +0,0 @@
package main
import (
"reflect"
"testing"
"time"
"github.com/calmh/syncthing/protocol"
)
func TestNewModel(t *testing.T) {
m := NewModel("foo")
if m == nil {
t.Fatalf("NewModel returned nil")
}
if len(m.need) > 0 {
t.Errorf("New model should have no Need")
}
if len(m.local) > 0 {
t.Errorf("New model should have no Have")
}
}
var testDataExpected = map[string]File{
"foo": File{
Name: "foo",
Flags: 0644,
Modified: 1384244572,
Blocks: []Block{{Offset: 0x0, Length: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
},
"bar": File{
Name: "bar",
Flags: 0644,
Modified: 1384244579,
Blocks: []Block{{Offset: 0x0, Length: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
"baz/quux": File{
Name: "baz/quux",
Flags: 0644,
Modified: 1384244676,
Blocks: []Block{{Offset: 0x0, Length: 0x9, Hash: []uint8{0xc1, 0x54, 0xd9, 0x4e, 0x94, 0xba, 0x72, 0x98, 0xa6, 0xad, 0xb0, 0x52, 0x3a, 0xfe, 0x34, 0xd1, 0xb6, 0xa5, 0x81, 0xd6, 0xb8, 0x93, 0xa7, 0x63, 0xd4, 0x5d, 0xdc, 0x5e, 0x20, 0x9d, 0xcb, 0x83}}},
},
}
func TestUpdateLocal(t *testing.T) {
m := NewModel("foo")
fs := Walk("testdata", m, false)
m.ReplaceLocal(fs)
if len(m.need) > 0 {
t.Fatalf("Model with only local data should have no need")
}
if l1, l2 := len(m.local), len(testDataExpected); l1 != l2 {
t.Fatalf("Model len(local) incorrect, %d != %d", l1, l2)
}
if l1, l2 := len(m.global), len(testDataExpected); l1 != l2 {
t.Fatalf("Model len(global) incorrect, %d != %d", l1, l2)
}
for name, file := range testDataExpected {
if f, ok := m.local[name]; ok {
if !reflect.DeepEqual(f, file) {
t.Errorf("Incorrect local\n%v !=\n%v\nfor file %q", f, file, name)
}
} else {
t.Errorf("Missing file %q in local table", name)
}
if f, ok := m.global[name]; ok {
if !reflect.DeepEqual(f, file) {
t.Errorf("Incorrect global\n%v !=\n%v\nfor file %q", f, file, name)
}
} else {
t.Errorf("Missing file %q in global table", name)
}
}
for _, f := range fs {
if hf, ok := m.local[f.Name]; !ok || hf.Modified != f.Modified {
t.Fatalf("Incorrect local for %q", f.Name)
}
if cf, ok := m.global[f.Name]; !ok || cf.Modified != f.Modified {
t.Fatalf("Incorrect global for %q", f.Name)
}
}
}
func TestRemoteUpdateExisting(t *testing.T) {
m := NewModel("foo")
fs := Walk("testdata", m, false)
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
Name: "foo",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
if l := len(m.need); l != 1 {
t.Errorf("Model missing Need for one file (%d != 1)", l)
}
}
func TestRemoteAddNew(t *testing.T) {
m := NewModel("foo")
fs := Walk("testdata", m, false)
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
Name: "a new file",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
if l1, l2 := len(m.need), 1; l1 != l2 {
t.Errorf("Model len(m.need) incorrect (%d != %d)", l1, l2)
}
}
func TestRemoteUpdateOld(t *testing.T) {
m := NewModel("foo")
fs := Walk("testdata", m, false)
m.ReplaceLocal(fs)
oldTimeStamp := int64(1234)
newFile := protocol.FileInfo{
Name: "foo",
Modified: oldTimeStamp,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
}
}
func TestRemoteIndexUpdate(t *testing.T) {
m := NewModel("foo")
fs := Walk("testdata", m, false)
m.ReplaceLocal(fs)
foo := protocol.FileInfo{
Name: "foo",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
bar := protocol.FileInfo{
Name: "bar",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{foo})
if _, ok := m.need["foo"]; !ok {
t.Error("Model doesn't need 'foo'")
}
m.IndexUpdate("42", []protocol.FileInfo{bar})
if _, ok := m.need["foo"]; !ok {
t.Error("Model doesn't need 'foo'")
}
if _, ok := m.need["bar"]; !ok {
t.Error("Model doesn't need 'bar'")
}
}
func TestDelete(t *testing.T) {
m := NewModel("foo")
fs := Walk("testdata", m, false)
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs); l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
ot := time.Now().Unix()
newFile := File{
Name: "a new file",
Modified: ot,
Blocks: []Block{{0, 100, []byte("some hash bytes")}},
}
m.UpdateLocal(newFile)
if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
// The deleted file is kept in the local and global tables and marked as deleted.
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if m.local["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in local table")
}
if len(m.local["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in local")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
}
if m.global["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in global table")
}
if len(m.global["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in global")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
}
// Another update should change nothing
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if m.local["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in local table")
}
if len(m.local["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in local")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
}
if m.global["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in global table")
}
if len(m.global["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in global")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
}
}
func TestForgetNode(t *testing.T) {
m := NewModel("foo")
fs := Walk("testdata", m, false)
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs); l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
}
newFile := protocol.FileInfo{
Name: "new file",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("42", []protocol.FileInfo{newFile})
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 1; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
}
m.Close("42", nil)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs); l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
}
}


@@ -9,7 +9,7 @@ Each node has a _repository_ of files described by the _local model_,
containing modifications times and block hashes. The local model is sent
to the other nodes in the cluster. The union of all files in the local
models, with files selected for most recent modification time, forms the
_global model_. Each node strives to get it's repository in synch with
_global model_. Each node strives to get its repository in sync with
the global model by requesting missing blocks from the other nodes.
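As an illustrative sketch (not normative, and not the implementation's code), the global model can be computed from a set of local models like so, with File standing in for a record that carries just the fields needed here:

type File struct {
	Name     string
	Modified int64 // Unix seconds
}

// globalModel returns the union of the given local models, keeping the
// most recently modified record for each file name.
func globalModel(locals ...[]File) map[string]File {
	global := make(map[string]File)
	for _, local := range locals {
		for _, f := range local {
			if g, ok := global[f.Name]; !ok || f.Modified > g.Modified {
				global[f.Name] = f
			}
		}
	}
	return global
}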
Transport and Authentication
@@ -62,11 +62,10 @@ reserved bits must be set to zero.
All data following the message header is in XDR (RFC 1014) encoding.
The actual data types in use by BEP, in XDR naming convention, are:
- unsigned int -- unsigned 32 bit integer
- hyper -- signed 64 bit integer
- unsigned hyper -- signed 64 bit integer
- opaque<> -- variable length opaque data
- string<> -- variable length string
- (unsigned) int -- (unsigned) 32 bit integer
- (unsigned) hyper -- (unsigned) 64 bit integer
- opaque<> -- variable length opaque data
- string<> -- variable length string
The encoding of opaque<> and string<> are identical, the distinction is
solely in interpretation. Opaque data should not be interpreted as such,
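For illustration, a sketch (not the implementation's marshalling code) of how a string<> or opaque<> field is encoded under these rules: a four byte big endian length, the data, then zero padding up to the next multiple of four bytes:

import "encoding/binary"

func xdrString(s string) []byte {
	pad := (4 - len(s)%4) % 4
	buf := make([]byte, 4+len(s)+pad) // the pad bytes stay zero
	binary.BigEndian.PutUint32(buf, uint32(len(s)))
	copy(buf[4:], s)
	return buf
}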
@@ -92,6 +91,7 @@ message.
string Name<>;
unsigned int Flags;
hyper Modified;
unsigned int Version;
BlockInfo Blocks<>;
}
@@ -102,15 +102,19 @@ message.
The file name is the part relative to the repository root. The
modification time is expressed as the number of seconds since the Unix
Epoch. The hash algorithm is implied by the hash length. Currently, the
hash must be 32 bytes long and computed by SHA256.
Epoch. The version field is a counter that increments each time the file
changes but resets to zero each time the modification time is updated. This
is used to signal changes to the file (or file metadata) while the
modification time remains unchanged. The hash algorithm is implied by
the hash length. Currently, the hash must be 32 bytes long and computed
by SHA256.
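A sketch of the intended version semantics, assuming the FileInfo struct above (this illustrates the rule, it is not the scanner's actual code):

func nextVersion(prev, cur FileInfo) uint32 {
	switch {
	case cur.Modified != prev.Modified:
		return 0 // the modification time changed, so the counter resets
	case cur.Flags != prev.Flags:
		return prev.Version + 1 // metadata changed, modification time did not
	default:
		return prev.Version
	}
}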
The flags field is made up of the following single bit flags:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |D| Unix Perm. & Mode |
| Reserved |I|D| Unix Perm. & Mode |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- The lower 12 bits hold the common Unix permission and mode bits.
@@ -118,9 +122,13 @@ The flags field is made up of the following single bit flags:
- Bit 19 ("D") is set when the file has been deleted. The block list
shall contain zero blocks and the modification time indicates the
time of deletion or, if deletion time is not reliably determinable,
one second past the last know modification time.
the last known modification time and a higher version number.
- Bit 0 through 18 are reserved for future use and shall be set to
- Bit 18 ("I") is set when the file is invalid and unavailable for
synchronization. A peer may set this bit to indicate that it temporarily
cannot serve data for the file.
- Bit 0 through 17 are reserved for future use and shall be set to
zero.
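In code, the defined bits correspond to the following masks (bits are numbered from the most significant end of the 32 bit word, so bit 19 is 1<<12 and bit 18 is 1<<13, matching the FlagDeleted and FlagInvalid constants used elsewhere in this change):

const (
	FlagDeleted = 1 << 12 // bit 19, "D"
	FlagInvalid = 1 << 13 // bit 18, "I"
	permMask    = 0xFFF   // the lower 12 bits: Unix permission and mode
)

func isDeleted(flags uint32) bool { return flags&FlagDeleted != 0 }
func isInvalid(flags uint32) bool { return flags&FlagInvalid != 0 }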
### Request (Type = 2)
@@ -185,6 +193,33 @@ model, the Index Update merely amends it with new or updated file
information. Any files not mentioned in an Index Update are left
unchanged.
### Options (Type = 7)
This informational message carries details about the client
configuration, version, etc. It is sent at connection initiation and,
optionally, when any of the sent parameters have changed. The message is
in the form of a list of (key, value) pairs, both of string type.
struct OptionsMessage {
KeyValue Options<>;
}
struct KeyValue {
string Key;
string Value;
}
Keys apart from the well known ones are implementation specific. An
implementation is expected to ignore unknown keys and may impose limits
on key and value size.
Well known keys:
- "clientId" -- The name of the implementation. Example: "syncthing".
- "clientVersion" -- The version of the client. Example: "v1.0.33-47". The
Following the SemVer 2.0 specification for version strings is
encouraged but not enforced.
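For example, a client might announce itself at connection setup roughly as follows (a sketch; nodeID, reader, writer and receiver are assumed to exist in scope):

options := map[string]string{
	"clientId":      "syncthing",
	"clientVersion": "v1.0.33-47",
}
conn := protocol.NewConnection(nodeID, reader, writer, receiver, options)
// The peer's announced options are later available via conn.Option("clientId").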
Example Exchange
----------------


@@ -5,7 +5,7 @@ import "io"
type TestModel struct {
data []byte
name string
offset uint64
offset int64
size uint32
hash []byte
closed bool
@@ -17,7 +17,7 @@ func (t *TestModel) Index(nodeID string, files []FileInfo) {
func (t *TestModel) IndexUpdate(nodeID string, files []FileInfo) {
}
func (t *TestModel) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
func (t *TestModel) Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
t.name = name
t.offset = offset
t.size = size


@@ -4,7 +4,7 @@ import "io"
type request struct {
name string
offset uint64
offset int64
size uint32
hash []byte
}
@@ -39,9 +39,10 @@ func (w *marshalWriter) writeIndex(idx []FileInfo) {
w.writeString(f.Name)
w.writeUint32(f.Flags)
w.writeUint64(uint64(f.Modified))
w.writeUint32(f.Version)
w.writeUint32(uint32(len(f.Blocks)))
for _, b := range f.Blocks {
w.writeUint32(b.Length)
w.writeUint32(b.Size)
w.writeBytes(b.Hash)
}
}
@@ -55,7 +56,7 @@ func WriteIndex(w io.Writer, idx []FileInfo) (int, error) {
func (w *marshalWriter) writeRequest(r request) {
w.writeString(r.name)
w.writeUint64(r.offset)
w.writeUint64(uint64(r.offset))
w.writeUint32(r.size)
w.writeBytes(r.hash)
}
@@ -64,6 +65,14 @@ func (w *marshalWriter) writeResponse(data []byte) {
w.writeBytes(data)
}
func (w *marshalWriter) writeOptions(opts map[string]string) {
w.writeUint32(uint32(len(opts)))
for k, v := range opts {
w.writeString(k)
w.writeString(v)
}
}
func (r *marshalReader) readHeader() header {
return decodeHeader(r.readUint32())
}
@@ -77,10 +86,11 @@ func (r *marshalReader) readIndex() []FileInfo {
files[i].Name = r.readString()
files[i].Flags = r.readUint32()
files[i].Modified = int64(r.readUint64())
files[i].Version = r.readUint32()
nblocks := r.readUint32()
blocks := make([]BlockInfo, nblocks)
for j := range blocks {
blocks[j].Length = r.readUint32()
blocks[j].Size = r.readUint32()
blocks[j].Hash = r.readBytes()
}
files[i].Blocks = blocks
@@ -98,7 +108,7 @@ func ReadIndex(r io.Reader) ([]FileInfo, error) {
func (r *marshalReader) readRequest() request {
var req request
req.name = r.readString()
req.offset = r.readUint64()
req.offset = int64(r.readUint64())
req.size = r.readUint32()
req.hash = r.readBytes()
return req
@@ -107,3 +117,14 @@ func (r *marshalReader) readRequest() request {
func (r *marshalReader) readResponse() []byte {
return r.readBytes()
}
func (r *marshalReader) readOptions() map[string]string {
n := r.readUint32()
opts := make(map[string]string, n)
for i := 0; i < int(n); i++ {
k := r.readString()
v := r.readString()
opts[k] = v
}
return opts
}


@@ -12,8 +12,9 @@ func TestIndex(t *testing.T) {
idx := []FileInfo{
{
"Foo",
0755,
FlagInvalid & FlagDeleted & 0755,
1234567890,
142,
[]BlockInfo{
{12345678, []byte("hash hash hash")},
{23456781, []byte("ash hash hashh")},
@@ -23,6 +24,7 @@ func TestIndex(t *testing.T) {
"Quux/Quux",
0644,
2345678901,
232323232,
[]BlockInfo{
{45678123, []byte("4321 hash hash hash")},
{56781234, []byte("3214 ash hash hashh")},
@@ -44,7 +46,7 @@ func TestIndex(t *testing.T) {
}
func TestRequest(t *testing.T) {
f := func(name string, offset uint64, size uint32, hash []byte) bool {
f := func(name string, offset int64, size uint32, hash []byte) bool {
var buf = new(bytes.Buffer)
var req = request{name, offset, size, hash}
var wr = marshalWriter{w: buf}
@@ -81,6 +83,7 @@ func BenchmarkWriteIndex(b *testing.B) {
"Foo",
0777,
1234567890,
424242,
[]BlockInfo{
{12345678, []byte("hash hash hash")},
{23456781, []byte("ash hash hashh")},
@@ -90,6 +93,7 @@ func BenchmarkWriteIndex(b *testing.B) {
"Quux/Quux",
0644,
2345678901,
323232,
[]BlockInfo{
{45678123, []byte("4321 hash hash hash")},
{56781234, []byte("3214 ash hash hashh")},
@@ -113,3 +117,23 @@ func BenchmarkWriteRequest(b *testing.B) {
wr.writeRequest(req)
}
}
func TestOptions(t *testing.T) {
opts := map[string]string{
"foo": "bar",
"someKey": "otherValue",
"hello": "",
"": "42",
}
var buf = new(bytes.Buffer)
var wr = marshalWriter{w: buf}
wr.writeOptions(opts)
var rd = marshalReader{r: buf}
var ropts = rd.readOptions()
if !reflect.DeepEqual(opts, ropts) {
t.Error("Incorrect options marshal/demarshal")
}
}


@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"log"
"sync"
"time"
@@ -18,18 +19,25 @@ const (
messageTypePing = 4
messageTypePong = 5
messageTypeIndexUpdate = 6
messageTypeOptions = 7
)
const (
FlagDeleted = 1 << 12
FlagInvalid = 1 << 13
)
type FileInfo struct {
Name string
Flags uint32
Modified int64
Version uint32
Blocks []BlockInfo
}
type BlockInfo struct {
Length uint32
Hash []byte
Size uint32
Hash []byte
}
type Model interface {
@@ -38,7 +46,7 @@ type Model interface {
// An index update was received from the peer node
IndexUpdate(nodeID string, files []FileInfo)
// A request was made by the peer node
Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error)
Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error)
// The peer node closed the connection
Close(nodeID string, err error)
}
@@ -46,16 +54,18 @@ type Model interface {
type Connection struct {
sync.RWMutex
ID string
receiver Model
reader io.Reader
mreader *marshalReader
writer io.Writer
mwriter *marshalWriter
closed bool
awaiting map[int]chan asyncResult
nextId int
indexSent map[string]int64
id string
receiver Model
reader io.Reader
mreader *marshalReader
writer io.Writer
mwriter *marshalWriter
closed bool
awaiting map[int]chan asyncResult
nextId int
indexSent map[string][2]int64
options map[string]string
optionsLock sync.Mutex
hasSentIndex bool
hasRecvdIndex bool
@@ -75,7 +85,7 @@ const (
pingIdleTime = 5 * time.Minute
)
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) *Connection {
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model, options map[string]string) *Connection {
flrd := flate.NewReader(reader)
flwr, err := flate.NewWriter(writer, flate.BestSpeed)
if err != nil {
@@ -83,21 +93,39 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
}
c := Connection{
id: nodeID,
receiver: receiver,
reader: flrd,
mreader: &marshalReader{r: flrd},
writer: flwr,
mwriter: &marshalWriter{w: flwr},
awaiting: make(map[int]chan asyncResult),
ID: nodeID,
}
go c.readerLoop()
go c.pingerLoop()
if options != nil {
go func() {
c.Lock()
c.mwriter.writeHeader(header{0, c.nextId, messageTypeOptions})
c.mwriter.writeOptions(options)
err := c.flush()
if err != nil {
log.Printf("Warning:", err)
}
c.nextId++
c.Unlock()
}()
}
return &c
}
func (c *Connection) ID() string {
return c.id
}
// Index writes the list of file information to the connected peer node
func (c *Connection) Index(idx []FileInfo) {
c.Lock()
@@ -106,18 +134,18 @@ func (c *Connection) Index(idx []FileInfo) {
// This is the first time we send an index.
msgType = messageTypeIndex
c.indexSent = make(map[string]int64)
c.indexSent = make(map[string][2]int64)
for _, f := range idx {
c.indexSent[f.Name] = f.Modified
c.indexSent[f.Name] = [2]int64{f.Modified, int64(f.Version)}
}
} else {
// We have sent one full index. Only send updates now.
msgType = messageTypeIndexUpdate
var diff []FileInfo
for _, f := range idx {
if modified, ok := c.indexSent[f.Name]; !ok || f.Modified != modified {
if vs, ok := c.indexSent[f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
diff = append(diff, f)
c.indexSent[f.Name] = f.Modified
c.indexSent[f.Name] = [2]int64{f.Modified, int64(f.Version)}
}
}
idx = diff
@@ -131,16 +159,16 @@ func (c *Connection) Index(idx []FileInfo) {
c.Unlock()
if err != nil {
c.Close(err)
c.close(err)
return
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return
}
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *Connection) Request(name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
func (c *Connection) Request(name string, offset int64, size uint32, hash []byte) ([]byte, error) {
c.Lock()
if c.closed {
c.Unlock()
@@ -152,13 +180,13 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
c.mwriter.writeRequest(request{name, offset, size, hash})
if c.mwriter.err != nil {
c.Unlock()
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return nil, c.mwriter.err
}
err := c.flush()
if err != nil {
c.Unlock()
c.Close(err)
c.close(err)
return nil, err
}
c.nextId = (c.nextId + 1) & 0xfff
@@ -171,7 +199,7 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
return res.val, res.err
}
func (c *Connection) Ping() bool {
func (c *Connection) ping() bool {
c.Lock()
if c.closed {
c.Unlock()
@@ -183,11 +211,11 @@ func (c *Connection) Ping() bool {
err := c.flush()
if err != nil {
c.Unlock()
c.Close(err)
c.close(err)
return false
} else if c.mwriter.err != nil {
c.Unlock()
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return false
}
c.nextId = (c.nextId + 1) & 0xfff
@@ -197,9 +225,6 @@ func (c *Connection) Ping() bool {
return ok && res.err == nil
}
func (c *Connection) Stop() {
}
type flusher interface {
Flush() error
}
@@ -211,7 +236,7 @@ func (c *Connection) flush() error {
return nil
}
func (c *Connection) Close(err error) {
func (c *Connection) close(err error) {
c.Lock()
if c.closed {
c.Unlock()
@@ -224,7 +249,7 @@ func (c *Connection) Close(err error) {
c.awaiting = nil
c.Unlock()
c.receiver.Close(c.ID, err)
c.receiver.Close(c.id, err)
}
func (c *Connection) isClosed() bool {
@@ -238,11 +263,11 @@ loop:
for {
hdr := c.mreader.readHeader()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
}
if hdr.version != 0 {
c.Close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
c.close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
break loop
}
@@ -250,10 +275,10 @@ loop:
case messageTypeIndex:
files := c.mreader.readIndex()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.receiver.Index(c.ID, files)
c.receiver.Index(c.id, files)
}
c.Lock()
c.hasRecvdIndex = true
@@ -262,16 +287,16 @@ loop:
case messageTypeIndexUpdate:
files := c.mreader.readIndex()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.receiver.IndexUpdate(c.ID, files)
c.receiver.IndexUpdate(c.id, files)
}
case messageTypeRequest:
req := c.mreader.readRequest()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
}
go c.processRequest(hdr.msgID, req)
@@ -280,7 +305,7 @@ loop:
data := c.mreader.readResponse()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.Lock()
@@ -300,10 +325,10 @@ loop:
err := c.flush()
c.Unlock()
if err != nil {
c.Close(err)
c.close(err)
break loop
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
break loop
}
@@ -321,27 +346,33 @@ loop:
c.Unlock()
}
case messageTypeOptions:
c.optionsLock.Lock()
c.options = c.mreader.readOptions()
c.optionsLock.Unlock()
default:
c.Close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
c.close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
break loop
}
}
}
func (c *Connection) processRequest(msgID int, req request) {
data, _ := c.receiver.Request(c.ID, req.name, req.offset, req.size, req.hash)
data, _ := c.receiver.Request(c.id, req.name, req.offset, req.size, req.hash)
c.Lock()
c.mwriter.writeUint32(encodeHeader(header{0, msgID, messageTypeResponse}))
c.mwriter.writeResponse(data)
err := c.flush()
err := c.mwriter.err
if err == nil {
err = c.flush()
}
c.Unlock()
buffers.Put(data)
if err != nil {
c.Close(err)
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(err)
}
}
@@ -356,15 +387,15 @@ func (c *Connection) pingerLoop() {
if ready {
go func() {
rc <- c.Ping()
rc <- c.ping()
}()
select {
case ok := <-rc:
if !ok {
c.Close(fmt.Errorf("Ping failure"))
c.close(fmt.Errorf("Ping failure"))
}
case <-time.After(pingTimeout):
c.Close(fmt.Errorf("Ping timeout"))
c.close(fmt.Errorf("Ping timeout"))
}
}
}
@@ -388,3 +419,9 @@ func (c *Connection) Statistics() Statistics {
return stats
}
func (c *Connection) Option(key string) string {
c.optionsLock.Lock()
defer c.optionsLock.Unlock()
return c.options[key]
}


@@ -43,13 +43,13 @@ func TestPing(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection("c0", ar, bw, nil)
c1 := NewConnection("c1", br, aw, nil)
c0 := NewConnection("c0", ar, bw, nil, nil)
c1 := NewConnection("c1", br, aw, nil, nil)
if ok := c0.Ping(); !ok {
if ok := c0.ping(); !ok {
t.Error("c0 ping failed")
}
if ok := c1.Ping(); !ok {
if ok := c1.ping(); !ok {
t.Error("c1 ping failed")
}
}
@@ -67,10 +67,10 @@ func TestPingErr(t *testing.T) {
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
c0 := NewConnection("c0", ar, ebw, m0)
NewConnection("c1", br, eaw, m1)
c0 := NewConnection("c0", ar, ebw, m0, nil)
NewConnection("c1", br, eaw, m1, nil)
res := c0.Ping()
res := c0.ping()
if (i < 4 || j < 4) && res {
t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
} else if (i >= 8 && j >= 8) && !res {
@@ -94,8 +94,8 @@ func TestRequestResponseErr(t *testing.T) {
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
NewConnection("c0", ar, ebw, m0)
c1 := NewConnection("c1", br, eaw, m1)
NewConnection("c0", ar, ebw, m0, nil)
c1 := NewConnection("c1", br, eaw, m1, nil)
d, err := c1.Request("tn", 1234, 3456, []byte("hashbytes"))
if err == e || err == ErrClosed {
@@ -143,8 +143,8 @@ func TestVersionErr(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection("c0", ar, bw, m0)
NewConnection("c1", br, aw, m1)
c0 := NewConnection("c0", ar, bw, m0, nil)
NewConnection("c1", br, aw, m1, nil)
c0.mwriter.writeHeader(header{
version: 2,
@@ -165,8 +165,8 @@ func TestTypeErr(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection("c0", ar, bw, m0)
NewConnection("c1", br, aw, m1)
c0 := NewConnection("c0", ar, bw, m0, nil)
NewConnection("c1", br, aw, m1, nil)
c0.mwriter.writeHeader(header{
version: 0,
@@ -187,10 +187,10 @@ func TestClose(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection("c0", ar, bw, m0)
NewConnection("c1", br, aw, m1)
c0 := NewConnection("c0", ar, bw, m0, nil)
NewConnection("c1", br, aw, m1, nil)
c0.Close(nil)
c0.close(nil)
ok := c0.isClosed()
if !ok {
@@ -199,7 +199,7 @@ func TestClose(t *testing.T) {
// None of these should panic, some should return an error
ok = c0.Ping()
ok = c0.ping()
if ok {
t.Error("Ping should not return true")
}

tls.go (17 lines changed)

@@ -3,7 +3,7 @@ package main
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
@@ -12,11 +12,12 @@ import (
"math/big"
"os"
"path"
"strings"
"time"
)
const (
tlsRSABits = 2048
tlsRSABits = 3072
tlsName = "syncthing"
)
@@ -25,13 +26,15 @@ func loadCert(dir string) (tls.Certificate, error) {
}
func certId(bs []byte) string {
hf := sha1.New()
hf := sha256.New()
hf.Write(bs)
id := hf.Sum(nil)
return base32.StdEncoding.EncodeToString(id)
return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
}
func newCertificate(dir string) {
infoln("Generating RSA certificate and key...")
priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
fatalErr(err)
@@ -47,7 +50,7 @@ func newCertificate(dir string) {
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
BasicConstraintsValid: true,
}
@@ -58,11 +61,11 @@ func newCertificate(dir string) {
fatalErr(err)
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
okln("Created TLS certificate file")
okln("Created RSA certificate file")
keyOut, err := os.OpenFile(path.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
fatalErr(err)
pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
keyOut.Close()
okln("Created TLS key file")
okln("Created RSA key file")
}

usage.go Normal file (52 lines)

@@ -0,0 +1,52 @@
package main
import (
"bytes"
"flag"
"fmt"
"io"
"text/tabwriter"
)
func optionTable(w io.Writer, rows [][]string) {
tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0)
for _, row := range rows {
for i, cell := range row {
if i > 0 {
tw.Write([]byte("\t"))
}
tw.Write([]byte(cell))
}
tw.Write([]byte("\n"))
}
tw.Flush()
}
func usageFor(fs *flag.FlagSet, usage string) func() {
return func() {
var b bytes.Buffer
b.WriteString("Usage:\n " + usage + "\n")
var options [][]string
fs.VisitAll(func(f *flag.Flag) {
var dash = "-"
if len(f.Name) > 1 {
dash = "--"
}
var opt = " " + dash + f.Name
if f.DefValue != "false" {
opt += "=" + f.DefValue
}
options = append(options, []string{opt, f.Usage})
})
if len(options) > 0 {
b.WriteString("\nOptions:\n")
optionTable(&b, options)
}
fmt.Println(b.String())
}
}
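usageFor builds a help printer from a FlagSet, so presumably it gets installed as the set's Usage hook somewhere in main. A short example of that wiring, meant to run in the same package as usageFor and optionTable above; the flag names and defaults are made up for illustration.

package main

import (
	"flag"
	"os"
)

func main() {
	fs := flag.NewFlagSet("syncthing", flag.ExitOnError)
	fs.Bool("v", false, "Be more verbose")
	fs.String("home", "~/.syncthing", "Set configuration directory")

	// Install the generated help text; -h/--help and parse errors now print
	// the two-column option table from usageFor instead of flag's default output.
	fs.Usage = usageFor(fs, "syncthing [options]")
	fs.Parse(os.Args[1:])
}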

util.go

@@ -1,13 +1,6 @@
package main
import (
"fmt"
"time"
)
func timing(name string, t0 time.Time) {
debugf("%s: %.02f ms", name, time.Since(t0).Seconds()*1000)
}
import "fmt"
func MetricPrefix(n int) string {
if n > 1e9 {

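The timing helper removed here is the usual defer-based micro-profiler. A self-contained sketch of how it is meant to be called; log.Printf stands in for the package's debugf, which is not shown in this diff.

package main

import (
	"log"
	"time"
)

// Mirrors the removed helper: capture t0 when the function starts, log the
// elapsed time in milliseconds when it returns.
func timing(name string, t0 time.Time) {
	log.Printf("%s: %.02f ms", name, time.Since(t0).Seconds()*1000)
}

func walkRepo() {
	defer timing("walkRepo", time.Now()) // t0 is evaluated here, timing runs at return
	time.Sleep(25 * time.Millisecond)    // stand-in for real work
}

func main() {
	walkRepo()
}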
walk.go (154 lines removed)

@@ -1,154 +0,0 @@
package main
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
)
const BlockSize = 128 * 1024
type File struct {
Name string
Flags uint32
Modified int64
Blocks BlockList
}
func (f File) Dump() {
fmt.Printf("%s\n", f.Name)
for _, b := range f.Blocks {
fmt.Printf(" %dB @ %d: %x\n", b.Length, b.Offset, b.Hash)
}
fmt.Println()
}
func (f File) Size() (bytes int) {
for _, b := range f.Blocks {
bytes += int(b.Length)
}
return
}
func isTempName(name string) bool {
return strings.HasPrefix(path.Base(name), ".syncthing.")
}
func tempName(name string, modified int64) string {
tdir := path.Dir(name)
tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
return path.Join(tdir, tname)
}
func genWalker(base string, res *[]File, model *Model) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
warnln(err)
return nil
}
if isTempName(p) {
return nil
}
if info.Mode()&os.ModeType == 0 {
rn, err := filepath.Rel(base, p)
if err != nil {
warnln(err)
return nil
}
fi, err := os.Stat(p)
if err != nil {
warnln(err)
return nil
}
modified := fi.ModTime().Unix()
hf, ok := model.LocalFile(rn)
if ok && hf.Modified == modified {
// No change
*res = append(*res, hf)
} else {
if opts.Debug.TraceFile {
debugf("FILE: Hash %q", p)
}
fd, err := os.Open(p)
if err != nil {
warnln(err)
return nil
}
defer fd.Close()
blocks, err := Blocks(fd, BlockSize)
if err != nil {
warnln(err)
return nil
}
f := File{
Name: rn,
Flags: uint32(info.Mode()),
Modified: modified,
Blocks: blocks,
}
*res = append(*res, f)
}
}
return nil
}
}
func Walk(dir string, model *Model, followSymlinks bool) []File {
var files []File
fn := genWalker(dir, &files, model)
err := filepath.Walk(dir, fn)
if err != nil {
warnln(err)
}
if !opts.NoSymlinks {
d, err := os.Open(dir)
if err != nil {
warnln(err)
return files
}
defer d.Close()
fis, err := d.Readdir(-1)
if err != nil {
warnln(err)
return files
}
for _, fi := range fis {
if fi.Mode()&os.ModeSymlink != 0 {
err := filepath.Walk(path.Join(dir, fi.Name())+"/", fn)
if err != nil {
warnln(err)
}
}
}
}
return files
}
func cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode()&os.ModeType == 0 && isTempName(path) {
if opts.Debug.TraceFile {
debugf("FILE: Remove %q", path)
}
os.Remove(path)
}
return nil
}
func CleanTempFiles(dir string) {
filepath.Walk(dir, cleanTempFile)
}
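A quick round-trip of the temp-file convention from the removed walker, using the tempName and isTempName helpers exactly as shown above, copied here so the snippet runs on its own.

package main

import (
	"fmt"
	"path"
	"strings"
)

// Copied from the removed walk.go: in-progress files sit next to the target,
// named ".syncthing.<basename>.<modified timestamp>".
func tempName(name string, modified int64) string {
	tdir := path.Dir(name)
	tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
	return path.Join(tdir, tname)
}

func isTempName(name string) bool {
	return strings.HasPrefix(path.Base(name), ".syncthing.")
}

func main() {
	t := tempName("docs/report.txt", 1390000000)
	fmt.Println(t)                             // docs/.syncthing.report.txt.1390000000
	fmt.Println(isTempName(t))                 // true
	fmt.Println(isTempName("docs/report.txt")) // false
}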

walk_test.go

@@ -1,42 +0,0 @@
package main
import (
"fmt"
"testing"
"time"
)
var testdata = []struct {
name string
size int
hash string
}{
{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
{"baz/quux", 9, "c154d94e94ba7298a6adb0523afe34d1b6a581d6b893a763d45ddc5e209dcb83"},
{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
}
func TestWalk(t *testing.T) {
m := new(Model)
files := Walk("testdata", m, false)
if l1, l2 := len(files), len(testdata); l1 != l2 {
t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)
}
for i := range testdata {
if n1, n2 := testdata[i].name, files[i].Name; n1 != n2 {
t.Errorf("Incorrect file name %q != %q for case #%d", n1, n2, i)
}
if h1, h2 := fmt.Sprintf("%x", files[i].Blocks[0].Hash), testdata[i].hash; h1 != h2 {
t.Errorf("Incorrect hash %q != %q for case #%d", h1, h2, i)
}
t0 := time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
t1 := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
if mt := files[i].Modified; mt < t0 || mt > t1 {
t.Errorf("Unrealistic modtime %d for test %d", mt, i)
}
}
}
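The hashes in the removed test table are 64 hex digits, i.e. 32-byte digests, which points at SHA-256 per block. A rough sketch of splitting a reader into BlockSize chunks and hashing each one; the block fields mirror their usage in walk.go above, but the hash choice and the loop itself are assumptions, not the project's actual Blocks implementation.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

const blockSize = 128 * 1024 // same value as the BlockSize constant in the removed walk.go

type block struct {
	Offset int64
	Length uint32
	Hash   []byte
}

// blocksSketch reads r in fixed-size chunks and hashes each with SHA-256.
// Assumed behaviour only; the real Blocks function is not part of this diff.
func blocksSketch(r io.Reader, size int) ([]block, error) {
	var bs []block
	var offset int64
	buf := make([]byte, size)
	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			h := sha256.Sum256(buf[:n])
			bs = append(bs, block{Offset: offset, Length: uint32(n), Hash: h[:]})
			offset += int64(n)
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return bs, nil
		}
		if err != nil {
			return bs, err
		}
	}
}

func main() {
	bs, _ := blocksSketch(bytes.NewReader([]byte("hello, blocks")), blockSize)
	for _, b := range bs {
		fmt.Printf(" %dB @ %d: %x\n", b.Length, b.Offset, b.Hash) // same format File.Dump uses above
	}
}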