Compare commits

...

77 Commits

Author SHA1 Message Date
Jakob Borg 725f748b17 Find syncthing binary in $PATH when restarting (fixes #68) 2014-02-17 08:50:55 +01:00
Jakob Borg f3a793ce91 Add peer node sync status in GUI (fixes #46) 2014-02-16 08:30:32 +01:00
Jakob Borg c171780c0d Reorder locking to avoid deadlock (fixes #64) 2014-02-13 12:51:51 +01:00
Jakob Borg 5daf6ecf70 Actually embed GUI changes from 91d5c4a... 2014-02-13 09:27:06 +01:00
Jakob Borg 6c8135126d Initialize logging earlier (fix panic in tests) 2014-02-13 08:59:27 +01:00
Jakob Borg 91d5c4a1ae Show warnings in GUI (fixes #66) 2014-02-12 23:18:41 +01:00
Jakob Borg 2cbe81f1c7 Restart from web gui (fixes #50) 2014-02-12 12:10:44 +01:00
Jakob Borg a26ce61d92 (Re)Fix locking around deleteFile (fixes #64) 2014-02-11 16:04:55 +01:00
Jakob Borg 478300f6d8 Only show address when connected (fixes #58) 2014-02-11 14:34:47 +01:00
    The configured address is visible in the config dialog.
Jakob Borg 3a5b816125 Allow setting a friendly name for the local node (fixes #65) 2014-02-10 20:54:57 +01:00
Jakob Borg b6814241cc Fix locking around deleteFile (fixes #64) 2014-02-09 23:24:55 +01:00
Jakob Borg fc6eabea28 Enforce identical member configuration among nodes (fixes #63) 2014-02-09 23:13:06 +01:00
Jakob Borg 14b3791b2b Don't panic for legitimate file errors (fixes #55) 2014-02-09 22:41:30 +01:00
Jakob Borg e6b29988e5 Logo 2014-02-07 22:33:58 +01:00
Jakob Borg 3cb7b8f22b Allow multiple listenAddresses (fixes #52) 2014-02-05 23:17:17 +01:00
Jakob Borg 2297e29502 Give friendly names to nodes (fixes #54) 2014-02-05 22:49:26 +01:00
Jakob Borg ea41acfff5 Clarify status badges and fix column widths (fixes #53) 2014-02-05 22:42:23 +01:00
Jakob Borg 1aefc50e35 Always show local node, and summarize traffic stats (fixes #43) 2014-02-05 21:30:04 +01:00
Jakob Borg 9bd4fa5008 Make immediate write error only slightly less cryptic (fixes #51) 2014-02-05 20:58:39 +01:00
Jakob Borg 89c2f61b30 Reduce default verbosity now that the GUI is there 2014-02-03 16:22:15 +01:00
Jakob Borg a1d575894a Edit configuration in GUI; use XML configuration 2014-02-03 15:42:59 +01:00
Jakob Borg 71def3a970 Don't include resource fork crap in builds (fixes #48) 2014-02-01 20:23:02 +01:00
Jakob Borg 13854250b3 Always show self in cluster list (fixes #43) 2014-02-01 11:22:41 +01:00
Jakob Borg e6078f9449 Streamline build script 2014-02-01 10:10:07 +01:00
Jakob Borg 5980952495 Actually load index cache again (fixes #45) 2014-01-29 22:02:38 +01:00
Jakob Borg 618c376e18 Synchronize zero sized files (fixes #44) 2014-01-29 21:52:27 +01:00
Jakob Borg d31a126408 CONTRIBUTING.md 2014-01-28 19:10:39 +01:00
Jakob Borg 6d3f8a2c06 Parallell -> parallel (ref #13) 2014-01-26 16:48:20 +01:00
Jakob Borg b1ba976122 Move auto generated source to a package 2014-01-26 15:02:06 +01:00
Jakob Borg 81d5d1d4a6 Rework config/flags (fixes #13) 2014-01-26 14:45:03 +01:00
Jakob Borg ea5ef28c5a Performance: improve need computation 2014-01-23 22:20:15 +01:00
Jakob Borg fc2ebc6cad Performance: make filequeue not suck 2014-01-23 16:39:12 +01:00
Jakob Borg 01096fff6c Add version info to GUI (fixes #41) 2014-01-23 13:13:15 +01:00
Jakob Borg 2ea3558283 Add Options message to protocol 2014-01-23 13:12:45 +01:00
Jakob Borg 20a47695fb Create syncthing.ini template (fixes #39) 2014-01-22 14:28:14 +01:00
Jakob Borg 1dde9ec2d8 New file change suppression algorithm (fixes #30) 2014-01-22 12:52:27 +01:00
Jakob Borg 0841a46055 Don't crash on invalid options 2014-01-22 12:52:15 +01:00
Jakob Borg 84c0749d20 Slightly more compact GUI resources 2014-01-20 23:17:57 +01:00
Jakob Borg 6b02f9e44f Fix GUI files modtime (ish...) 2014-01-20 23:08:29 +01:00
Jakob Borg 84d7452f9e Use embed instead of nrsc, enables 'go get' 2014-01-20 23:01:38 +01:00
Jakob Borg 9b449cb527 Fix windows build (fixes #38) 2014-01-20 23:00:49 +01:00
Jakob Borg d9ffd359e2 Tweak locking and integration test. 2014-01-20 22:22:27 +01:00
Jakob Borg b67443eb40 Integration test 2014-01-20 07:38:57 +01:00
Jakob Borg 4ac204b604 Fine grained locking 2014-01-20 07:38:48 +01:00
Jakob Borg fff50b5472 Delete deadlock 2014-01-14 17:47:27 -07:00
Jakob Borg 8d5aed410f Clear availability for disconnected node 2014-01-13 11:22:57 -07:00
Jakob Borg ba0e4ded65 Deadlock fix and cleanups 2014-01-13 10:29:23 -07:00
Jakob Borg f0b18685a5 Show 'this node' in GUI 2014-01-12 15:19:03 -07:00
Jakob Borg fc2b557ae6 Don't print help twice 2014-01-12 14:47:04 -07:00
Jakob Borg af399ae9f3 Cleanup ignore tests 2014-01-12 11:10:15 -07:00
Jakob Borg 45fcf4bc84 Implement new puller routine (fixes #33) 2014-01-12 11:02:16 -07:00
Jakob Borg 55f61ccb5e Simple send rate limit 2014-01-12 10:19:22 -07:00
Jakob Borg b601fc5627 Don't build with CPU usage on Solaris 2014-01-10 15:32:30 +01:00
Jakob Borg 832c0ffad0 Report CPU/mem usage in GUI 2014-01-10 00:12:32 +01:00
Jakob Borg cb33f27f23 Woops: reignore .stignore 2014-01-09 23:00:42 +01:00
Jakob Borg 92dee7c082 Only fetch deps, don't build 2014-01-09 23:00:23 +01:00
Jakob Borg b9af45bc6b Prepopulate ignore patterns (fixes #21) 2014-01-09 22:46:01 +01:00
Jakob Borg a18f6c6d90 Do go get as part of build unless fast build requested (fixes #31) 2014-01-09 21:22:05 +01:00
Jakob Borg 6e11e3cda9 Build for Linux on ARM (fixes #32) 2014-01-09 21:17:41 +01:00
Jakob Borg 2935aebe53 Benchmarking 2014-01-09 14:11:55 +01:00
Jakob Borg 71f78f0d62 Future proofing: handle file records with unknown flags 2014-01-09 11:04:42 +01:00
Jakob Borg 3e1194e5ff Show web GUI address on startup (fixes #27) 2014-01-09 10:40:12 +01:00
Jakob Borg 6d64992e64 Display alert on GUI connection error (fixes #26) 2014-01-09 10:31:27 +01:00
Jakob Borg 211180108e Tweak TLS settings (ref #23) 2014-01-09 09:30:22 +01:00
Jakob Borg 17e78d6f7e Option to show version (fixes #24) 2014-01-08 14:37:33 +01:00
Jakob Borg 1ef86379fb Actually send index updates for version bumps 2014-01-08 14:21:47 +01:00
Jakob Borg 884a7d6a1b Default to running GUI on 127.0.0.1:8080 2014-01-08 13:56:29 +01:00
Jakob Borg 334961fe10 Footer with links 2014-01-08 13:52:17 +01:00
Jakob Borg 2cfb24892f Add version and invalid bit to protocol 2014-01-07 22:44:21 +01:00
Jakob Borg d4fe1400d2 Longer RSA key and stronger node ID hash (ref #23) 2014-01-07 22:04:30 +01:00
Jakob Borg 69ef4d261d Unbreak build script 2014-01-07 17:07:46 +01:00
Jakob Borg 91c102e4fe Syncronize file mode (fixes #20) 2014-01-07 16:38:07 +01:00
Jakob Borg b4db177045 Allow deletes per default (fixes #19) 2014-01-07 16:15:18 +01:00
Jakob Borg 340c9095dd Suppress frequent changes to files (fixes #12) 2014-01-07 16:10:38 +01:00
Jakob Borg e3bc33dc88 Move binary to build destination 2014-01-07 12:14:50 +01:00
Jakob Borg eebc145055 Point to the wiki for documentation (fixes #10) 2014-01-07 12:07:56 +01:00
Jakob Borg 92b01fa48a Build tar file for current OS/architecture 2014-01-07 11:52:42 +01:00
58 changed files with 4135 additions and 10123 deletions

22
CONTRIBUTING.md Normal file

@@ -0,0 +1,22 @@
Please do contribute!
## Building
[See the wiki](https://github.com/calmh/syncthing/wiki/Building)
## Tests
Yes please!
## Style
`go fmt`
## Documentation
[Hack it here](https://github.com/calmh/syncthing/wiki)
## License
MIT

175
README.md

@@ -25,178 +25,11 @@ making sure large swarms of selfish agents behave and somehow work
towards a common goal. Here we have a much smaller swarm of cooperative
agents and a simpler approach will suffice.
Features
--------
Documentation
=============
> To request features and file bugs, see [the issue tracker][issues].
The following features are _currently implemented and working_:
* The formation of a cluster of nodes, certificate authenticated and
communicating over TLS over TCP.
* Synchronization of a single directory among the cluster nodes.
* Change detection by periodic scanning of the local repository.
* Static configuration of cluster nodes.
* Automatic discovery of cluster nodes. See [discover.go][discover.go]
for the protocol specification. Discovery on the LAN is performed by
broadcasts, Internet wide discovery is performed with the assistance
of a global server.
* Handling of deleted files. Deletes can be propagated or ignored per
client.
* Synchronizing multiple unrelated directory trees by following
symlinks directly below the repository level.
* HTTP GUI.
The following features are _not yet implemented but planned_:
* Change detection by listening to file system notifications instead of
periodic scanning.
The following features are _not implemented but may be implemented_ in
the future:
* Syncing multiple directories from the same syncthing instance.
* Automatic NAT handling via UPNP.
* Conflict resolution. Currently whichever file has the newest
modification time "wins". The correct behavior in the face of
conflicts is open for discussion.
[discover.go]: https://github.com/calmh/syncthing/blob/master/discover/discover.go
[issues]: https://github.com/calmh/syncthing/issues
Security
--------
Security is one of the primary project goals. This means that it should
not be possible for an attacker to join a cluster uninvited, and it
should not be possible to extract private information from intercepted
traffic. Currently this is implemented as follows.
All traffic is protected by TLS. To prevent uninvited nodes from joining
a cluster, the certificate fingerprint of each node is compared to a
preset list of acceptable nodes at connection establishment. The
fingerprint is computed as the SHA-1 hash of the certificate and
displayed in BASE32 encoding to form a compact yet convenient string.
Currently SHA-1 is deemed secure against preimage attacks.
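In Go terms, the fingerprint described above amounts to the following sketch (assuming the DER bytes of the node certificate, e.g. `tls.Certificate.Certificate[0]`; the helper name is illustrative):
```
package main

import (
	"crypto/sha1"
	"encoding/base32"
	"fmt"
)

// nodeID sketches the fingerprint computation: the SHA-1 hash of the
// DER-encoded certificate, rendered in BASE32. The 20 hash bytes encode
// to exactly 32 characters, so no padding appears.
func nodeID(derCert []byte) string {
	sum := sha1.Sum(derCert)
	return base32.StdEncoding.EncodeToString(sum[:])
}

func main() {
	fmt.Println(nodeID([]byte("not a real certificate")))
}
```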
Incoming requests for file data are verified to the extent that the
requested file name must exist in the local index and the global model.
Installing
==========
Download the appropriate precompiled binary from the
[releases](https://github.com/calmh/syncthing/releases) page. Untar and
put the `syncthing` binary somewhere convenient in your `$PATH`.
If you are a developer and have Go 1.2 installed you can also install
the latest version from source. `go get` works as expected but builds
a binary without GUI capabilities. Use the included `build.sh` script
without parameters to build a syncthing with GUI.
Usage
=====
Check out the options:
```
$ syncthing --help
Usage:
syncthing [options]
...
```
Run syncthing to let it create its config directory and certificate:
```
$ syncthing
11:34:13 main.go:85: INFO: Version v0.1-40-gbb0fd87
11:34:13 tls.go:61: OK: Created TLS certificate file
11:34:13 tls.go:67: OK: Created TLS key file
11:34:13 main.go:66: INFO: My ID: NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q
11:34:13 main.go:90: FATAL: No config file
```
Take note of the "My ID: ..." line. Perform the same operation on
another computer to create another node. Take note of that ID as well,
and create a config file `~/.syncthing/syncthing.ini` looking something
like this:
```
[repository]
dir = /Users/jb/Synced
[nodes]
NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q = 172.16.32.1:22000 192.23.34.56:22000
CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL = dynamic
```
This assumes that the first node is reachable on either of the two
addresses listed (perhaps one internal and one port-forwarded external)
and that the other node is not normally reachable from the outside. Save
this config file, identically, to both nodes.
If the nodes are running on the same network, or reachable on port 22000
from the outside world, you can set all addresses to "dynamic" and they
will find each other using automatic discovery. (This discovery,
including port numbers, can be tweaked or disabled using command line
options.)
Start syncthing on both nodes. For the cautious, one side can be set to
be read only.
```
$ syncthing --ro
13:30:55 main.go:85: INFO: Version v0.1-40-gbb0fd87
13:30:55 main.go:102: INFO: My ID: NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q
13:30:55 main.go:149: INFO: Initial repository scan in progress
13:30:59 main.go:153: INFO: Listening for incoming connections
13:30:59 main.go:157: INFO: Attempting to connect to other nodes
13:30:59 main.go:247: INFO: Starting local discovery
13:30:59 main.go:165: OK: Ready to synchronize
13:31:04 discover.go:113: INFO: Discovered node CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL at 172.16.32.24:22000
13:31:14 main.go:296: INFO: Connected to node CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL
13:31:19 main.go:345: INFO: Transferred 139 KiB in (14 KiB/s), 139 KiB out (14 KiB/s)
13:32:20 model.go:94: INFO: CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL: 263.4 KB/s in, 69.1 KB/s out
13:32:20 model.go:104: INFO: 18289 files, 24.24 GB in cluster
13:32:20 model.go:111: INFO: 17132 files, 22.39 GB in local repo
13:32:20 model.go:117: INFO: 1157 files, 1.84 GB to synchronize
...
```
You should see the synchronization start and then finish a short while
later. Add nodes to taste.
GUI
---
The web based GUI is disabled by default. To enable and access it you
must start syncthing with the `--gui` command line option, giving a
listen address. For example:
```
$ syncthing --gui 127.0.0.1:8080
```
You then point your browser to the given address.
Excluding Files
---------------
syncthing looks for files named `.stignore` while walking the
repository. The file is expected to contain glob patterns of file names
to ignore. Patterns are matched on file name only and apply to files in
the same directory as the `.stignore` file and in directories lower down
in the hierarchy.
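A minimal sketch of that matching rule in Go (the helper and patterns are illustrative, not the actual implementation):
```
package main

import (
	"fmt"
	"path"
)

// ignored reports whether any glob pattern matches the file name part of
// relPath. Patterns apply to the name only, so they cover files in the
// .stignore directory and everything below it.
func ignored(patterns []string, relPath string) bool {
	name := path.Base(relPath)
	for _, p := range patterns {
		if ok, err := path.Match(p, name); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	patterns := []string{"*.tmp", ".DS_Store"}
	fmt.Println(ignored(patterns, "photos/2014/IMG_0001.tmp")) // true
	fmt.Println(ignored(patterns, "photos/2014/IMG_0001.jpg")) // false
}
```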
The syncthing documentation is kept on the
[GitHub Wiki](https://github.com/calmh/syncthing/wiki).
License
=======

BIN
assets/st-logo.pxm Normal file

Binary file not shown.

6
auto/gui.files.go Normal file

File diff suppressed because one or more lines are too long

blocks.go

@@ -1,4 +1,4 @@
package model
package main
import (
"bytes"
@@ -7,15 +7,15 @@ import (
)
type Block struct {
Offset uint64
Length uint32
Offset int64
Size uint32
Hash []byte
}
// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int) ([]Block, error) {
var blocks []Block
var offset uint64
var offset int64
for {
lr := &io.LimitedReader{r, int64(blocksize)}
hf := sha256.New()
@@ -30,11 +30,20 @@ func Blocks(r io.Reader, blocksize int) ([]Block, error) {
b := Block{
Offset: offset,
Length: uint32(n),
Size: uint32(n),
Hash: hf.Sum(nil),
}
blocks = append(blocks, b)
offset += uint64(n)
offset += int64(n)
}
if len(blocks) == 0 {
// Empty file
blocks = append(blocks, Block{
Offset: 0,
Size: 0,
Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
})
}
return blocks, nil
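For reference, the hard-coded digest in the empty-file case above is simply the SHA-256 of zero bytes of input, which a one-liner verifies:
```
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// SHA-256 over no input; matches the literal used for empty files.
	fmt.Printf("%x\n", sha256.Sum256(nil))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```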

blocks_test.go

@@ -1,4 +1,4 @@
package model
package main
import (
"bytes"
@@ -11,7 +11,8 @@ var blocksTestData = []struct {
blocksize int
hash []string
}{
{[]byte(""), 1024, []string{}},
{[]byte(""), 1024, []string{
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
{[]byte("contents"), 1024, []string{
"d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
{[]byte("contents"), 9, []string{
@@ -52,7 +53,7 @@ func TestBlocks(t *testing.T) {
t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
} else {
i := 0
for off := uint64(0); off < uint64(len(test.data)); off += uint64(test.blocksize) {
for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
if blocks[i].Offset != off {
t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
}
@@ -61,8 +62,8 @@ func TestBlocks(t *testing.T) {
if rem := len(test.data) - int(off); bs > rem {
bs = rem
}
if int(blocks[i].Length) != bs {
t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Length, bs)
if int(blocks[i].Size) != bs {
t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
}
if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
@@ -86,7 +87,7 @@ var diffTestData = []struct {
{"contents", "cantents", 3, []Block{{0, 3, nil}}},
{"contents", "contants", 3, []Block{{3, 3, nil}}},
{"contents", "cantants", 3, []Block{{0, 3, nil}, {3, 3, nil}}},
{"contents", "", 3, nil},
{"contents", "", 3, []Block{{0, 0, nil}}},
{"", "contents", 3, []Block{{0, 3, nil}, {3, 3, nil}, {6, 2, nil}}},
{"con", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
{"contents", "con", 3, nil},
@@ -106,8 +107,8 @@ func TestDiff(t *testing.T) {
if d[j].Offset != test.d[j].Offset {
t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
}
if d[j].Length != test.d[j].Length {
t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Length, test.d[j].Length)
if d[j].Size != test.d[j].Size {
t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
}
}
}

build.sh

@@ -1,48 +1,51 @@
#!/bin/bash
export COPYFILE_DISABLE=true
version=$(git describe --always)
buildDir=dist
if [[ -z $1 ]] ; then
if [[ $fast != yes ]] ; then
go get -d
go test ./...
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui
else
go test ./... || exit 1
fi
if [[ -z $1 ]] ; then
go build -ldflags "-X main.Version $version"
elif [[ $1 == "embed" ]] ; then
embedder auto gui > auto/gui.files.go \
&& go build -ldflags "-X main.Version $version"
elif [[ $1 == "tar" ]] ; then
go build -ldflags "-X main.Version $version" \
&& mkdir syncthing-dist \
&& cp syncthing README.md LICENSE syncthing-dist \
&& tar zcvf syncthing-dist.tar.gz syncthing-dist \
&& rm -rf syncthing-dist
elif [[ $1 == "all" ]] ; then
rm -rf "$buildDir"
mkdir -p "$buildDir" || exit 1
for goos in darwin linux freebsd ; do
for goarch in amd64 386 ; do
echo "$goos-$goarch"
export GOOS="$goos"
export GOARCH="$goarch"
export name="syncthing-$goos-$goarch"
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui \
&& mkdir -p "$name" \
&& cp syncthing "$buildDir/$name" \
&& cp README.md LICENSE "$name" \
&& mv syncthing "$name" \
&& tar zcf "$buildDir/$name.tar.gz" "$name" \
&& rm -r "$name"
done
done
for goos in windows ; do
for goarch in amd64 386 ; do
echo "$goos-$goarch"
export GOOS="$goos"
export GOARCH="$goarch"
export name="syncthing-$goos-$goarch"
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing.exe gui \
&& mkdir -p "$name" \
&& cp syncthing.exe "$buildDir/$name.exe" \
&& cp README.md LICENSE "$name" \
&& zip -qr "$buildDir/$name.zip" "$name" \
&& rm -r "$name"
done
export GOARM=7
for os in darwin-amd64 linux-386 linux-amd64 linux-arm freebsd-386 freebsd-amd64 windows-386 windows-amd64 ; do
echo "$os"
export name="syncthing-$os"
export GOOS=${os%-*}
export GOARCH=${os#*-}
go build -ldflags "-X main.Version $version"
mkdir -p "$name"
cp README.md LICENSE "$name"
case $GOOS in
windows)
cp syncthing.exe "$buildDir/$name.exe"
mv syncthing.exe "$name"
zip -qr "$buildDir/$name.zip" "$name"
;;
*)
cp syncthing "$buildDir/$name"
mv syncthing "$name"
tar zcf "$buildDir/$name.tar.gz" "$name"
;;
esac
rm -r "$name"
done
fi

202
config.go Normal file

@@ -0,0 +1,202 @@
package main
import (
"crypto/sha256"
"encoding/xml"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
)
type Configuration struct {
Version int `xml:"version,attr" default:"1"`
Repositories []RepositoryConfiguration `xml:"repository"`
Options OptionsConfiguration `xml:"options"`
XMLName xml.Name `xml:"configuration" json:"-"`
}
type RepositoryConfiguration struct {
Directory string `xml:"directory,attr"`
Nodes []NodeConfiguration `xml:"node"`
}
type NodeConfiguration struct {
NodeID string `xml:"id,attr"`
Name string `xml:"name,attr"`
Addresses []string `xml:"address"`
}
type OptionsConfiguration struct {
ListenAddress []string `xml:"listenAddress" default:":22000" ini:"listen-address"`
ReadOnly bool `xml:"readOnly" ini:"read-only"`
AllowDelete bool `xml:"allowDelete" default:"true" ini:"allow-delete"`
FollowSymlinks bool `xml:"followSymlinks" default:"true" ini:"follow-symlinks"`
GUIEnabled bool `xml:"guiEnabled" default:"true" ini:"gui-enabled"`
GUIAddress string `xml:"guiAddress" default:"127.0.0.1:8080" ini:"gui-address"`
GlobalAnnServer string `xml:"globalAnnounceServer" default:"syncthing.nym.se:22025" ini:"global-announce-server"`
GlobalAnnEnabled bool `xml:"globalAnnounceEnabled" default:"true" ini:"global-announce-enabled"`
LocalAnnEnabled bool `xml:"localAnnounceEnabled" default:"true" ini:"local-announce-enabled"`
ParallelRequests int `xml:"parallelRequests" default:"16" ini:"parallel-requests"`
MaxSendKbps int `xml:"maxSendKbps" ini:"max-send-kbps"`
RescanIntervalS int `xml:"rescanIntervalS" default:"60" ini:"rescan-interval"`
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60" ini:"reconnection-interval"`
MaxChangeKbps int `xml:"maxChangeKbps" default:"1000" ini:"max-change-bw"`
}
func setDefaults(data interface{}) error {
s := reflect.ValueOf(data).Elem()
t := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
tag := t.Field(i).Tag
v := tag.Get("default")
if len(v) > 0 {
switch f.Interface().(type) {
case string:
f.SetString(v)
case []string:
rv := reflect.MakeSlice(reflect.TypeOf([]string{}), 1, 1)
rv.Index(0).SetString(v)
f.Set(rv)
case int:
i, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return err
}
f.SetInt(i)
case bool:
f.SetBool(v == "true")
default:
panic(f.Type())
}
}
}
return nil
}
func readConfigINI(m map[string]string, data interface{}) error {
s := reflect.ValueOf(data).Elem()
t := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
tag := t.Field(i).Tag
name := tag.Get("ini")
if len(name) == 0 {
name = strings.ToLower(t.Field(i).Name)
}
if v, ok := m[name]; ok {
switch f.Interface().(type) {
case string:
f.SetString(v)
case int:
i, err := strconv.ParseInt(v, 10, 64)
if err == nil {
f.SetInt(i)
}
case bool:
f.SetBool(v == "true")
default:
panic(f.Type())
}
}
}
return nil
}
func writeConfigXML(wr io.Writer, cfg Configuration) error {
e := xml.NewEncoder(wr)
e.Indent("", " ")
err := e.Encode(cfg)
if err != nil {
return err
}
_, err = wr.Write([]byte("\n"))
return err
}
func uniqueStrings(ss []string) []string {
var m = make(map[string]bool, len(ss))
for _, s := range ss {
m[s] = true
}
var us = make([]string, 0, len(m))
for k := range m {
us = append(us, k)
}
return us
}
func readConfigXML(rd io.Reader) (Configuration, error) {
var cfg Configuration
setDefaults(&cfg)
setDefaults(&cfg.Options)
var err error
if rd != nil {
err = xml.NewDecoder(rd).Decode(&cfg)
}
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
return cfg, err
}
type NodeConfigurationList []NodeConfiguration
func (l NodeConfigurationList) Less(a, b int) bool {
return l[a].NodeID < l[b].NodeID
}
func (l NodeConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l NodeConfigurationList) Len() int {
return len(l)
}
func clusterHash(nodes []NodeConfiguration) string {
sort.Sort(NodeConfigurationList(nodes))
h := sha256.New()
for _, n := range nodes {
h.Write([]byte(n.NodeID))
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func cleanNodeList(nodes []NodeConfiguration, myID string) []NodeConfiguration {
var myIDExists bool
for _, node := range nodes {
if node.NodeID == myID {
myIDExists = true
break
}
}
if !myIDExists {
nodes = append(nodes, NodeConfiguration{
NodeID: myID,
Addresses: []string{"dynamic"},
Name: "",
})
}
sort.Sort(NodeConfigurationList(nodes))
return nodes
}
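As a usage sketch (assuming it lives alongside config.go in package main): reading a configuration with no file present yields exactly the struct-tag defaults that setDefaults fills in.
```
// exampleDefaults is a sketch, not part of the file above.
func exampleDefaults() {
	cfg, err := readConfigXML(nil) // no config file: defaults only
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Options.ListenAddress) // [:22000]
	fmt.Println(cfg.Options.GUIAddress)    // 127.0.0.1:8080
}
```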

discover/discover.go

@@ -100,7 +100,6 @@ type Discoverer struct {
MyID string
ListenPort int
BroadcastIntv time.Duration
ExtListenPort int
ExtBroadcastIntv time.Duration
conn *net.UDPConn
@@ -114,7 +113,7 @@ type Discoverer struct {
// When we hit this many errors in succession, we stop.
const maxErrors = 30
func NewDiscoverer(id string, port int, extPort int, extServer string) (*Discoverer, error) {
func NewDiscoverer(id string, port int, extServer string) (*Discoverer, error) {
local4 := &net.UDPAddr{IP: net.IP{0, 0, 0, 0}, Port: AnnouncementPort}
conn, err := net.ListenUDP("udp4", local4)
if err != nil {
@@ -125,7 +124,6 @@ func NewDiscoverer(id string, port int, extPort int, extServer string) (*Discove
MyID: id,
ListenPort: port,
BroadcastIntv: 30 * time.Second,
ExtListenPort: extPort,
ExtBroadcastIntv: 1800 * time.Second,
conn: conn,
@@ -138,7 +136,7 @@ func NewDiscoverer(id string, port int, extPort int, extServer string) (*Discove
if disc.ListenPort > 0 {
disc.sendAnnouncements()
}
if len(disc.extServer) > 0 && disc.ExtListenPort > 0 {
if len(disc.extServer) > 0 {
disc.sendExtAnnouncements()
}
@@ -153,13 +151,13 @@ func (d *Discoverer) sendAnnouncements() {
}
func (d *Discoverer) sendExtAnnouncements() {
extIP, err := net.ResolveUDPAddr("udp", d.extServer+":22025")
extIP, err := net.ResolveUDPAddr("udp", d.extServer)
if err != nil {
log.Printf("discover/external: %v; no external announcements", err)
return
}
buf := EncodePacket(Packet{AnnouncementMagic, uint16(d.ExtListenPort), d.MyID, nil})
buf := EncodePacket(Packet{AnnouncementMagic, uint16(22000), d.MyID, nil})
go d.writeAnnouncements(buf, extIP, d.ExtBroadcastIntv)
}
@@ -213,7 +211,7 @@ func (d *Discoverer) recvAnnouncements() {
}
func (d *Discoverer) externalLookup(node string) (string, bool) {
extIP, err := net.ResolveUDPAddr("udp", d.extServer+":22025")
extIP, err := net.ResolveUDPAddr("udp", d.extServer)
if err != nil {
log.Printf("discover/external: %v; no external lookup", err)
return "", false

173
filemonitor.go Normal file

@@ -0,0 +1,173 @@
package main
import (
"bytes"
"errors"
"fmt"
"log"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
)
type fileMonitor struct {
name string // in-repo name
path string // full path
writeDone sync.WaitGroup
model *Model
global File
localBlocks []Block
copyError error
writeError error
}
func (m *fileMonitor) FileBegins(cc <-chan content) error {
if m.model.trace["file"] {
log.Printf("FILE: FileBegins: " + m.name)
}
tmp := tempName(m.path, m.global.Modified)
dir := path.Dir(tmp)
_, err := os.Stat(dir)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(dir, 0777)
if err != nil {
return err
}
}
outFile, err := os.Create(tmp)
if err != nil {
return err
}
m.writeDone.Add(1)
var writeWg sync.WaitGroup
if len(m.localBlocks) > 0 {
writeWg.Add(1)
inFile, err := os.Open(m.path)
if err != nil {
return err
}
// Copy local blocks, close infile when done
go m.copyLocalBlocks(inFile, outFile, &writeWg)
}
// Write remote blocks,
writeWg.Add(1)
go m.copyRemoteBlocks(cc, outFile, &writeWg)
// Wait for both writing routines, then close the outfile
go func() {
writeWg.Wait()
outFile.Close()
m.writeDone.Done()
}()
return nil
}
func (m *fileMonitor) copyLocalBlocks(inFile, outFile *os.File, writeWg *sync.WaitGroup) {
defer inFile.Close()
defer writeWg.Done()
var buf = buffers.Get(BlockSize)
defer buffers.Put(buf)
for _, lb := range m.localBlocks {
buf = buf[:lb.Size]
_, err := inFile.ReadAt(buf, lb.Offset)
if err != nil {
m.copyError = err
return
}
_, err = outFile.WriteAt(buf, lb.Offset)
if err != nil {
m.copyError = err
return
}
}
}
func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writeWg *sync.WaitGroup) {
defer writeWg.Done()
for content := range cc {
_, err := outFile.WriteAt(content.data, content.offset)
buffers.Put(content.data)
if err != nil {
m.writeError = err
return
}
}
}
func (m *fileMonitor) FileDone() error {
if m.model.trace["file"] {
log.Printf("FILE: FileDone: " + m.name)
}
m.writeDone.Wait()
tmp := tempName(m.path, m.global.Modified)
defer os.Remove(tmp)
if m.copyError != nil {
return m.copyError
}
if m.writeError != nil {
return m.writeError
}
err := hashCheck(tmp, m.global.Blocks)
if err != nil {
return err
}
err = os.Chtimes(tmp, time.Unix(m.global.Modified, 0), time.Unix(m.global.Modified, 0))
if err != nil {
return err
}
err = os.Chmod(tmp, os.FileMode(m.global.Flags&0777))
if err != nil {
return err
}
err = os.Rename(tmp, m.path)
if err != nil {
return err
}
m.model.updateLocal(m.global)
return nil
}
func hashCheck(name string, correct []Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
if err != nil {
return err
}
if len(current) != len(correct) {
return errors.New("incorrect number of blocks")
}
for i := range current {
if bytes.Compare(current[i].Hash, correct[i].Hash) != 0 {
return fmt.Errorf("hash mismatch: %x != %x", current[i].Hash, correct[i].Hash)
}
}
return nil
}

239
filequeue.go Normal file

@@ -0,0 +1,239 @@
package main
import (
"log"
"sort"
"sync"
"time"
)
type Monitor interface {
FileBegins(<-chan content) error
FileDone() error
}
type FileQueue struct {
files queuedFileList
sorted bool
fmut sync.Mutex // protects files and sorted
availability map[string][]string
amut sync.Mutex // protects availability
queued map[string]bool
}
type queuedFile struct {
name string
blocks []Block
activeBlocks []bool
given int
remaining int
channel chan content
nodes []string
nodesChecked time.Time
monitor Monitor
}
type content struct {
offset int64
data []byte
}
type queuedFileList []queuedFile
func (l queuedFileList) Len() int { return len(l) }
func (l queuedFileList) Swap(a, b int) { l[a], l[b] = l[b], l[a] }
func (l queuedFileList) Less(a, b int) bool {
// Sort by most blocks already given out, then alphabetically
if l[a].given != l[b].given {
return l[a].given > l[b].given
}
return l[a].name < l[b].name
}
type queuedBlock struct {
name string
block Block
index int
}
func NewFileQueue() *FileQueue {
return &FileQueue{
availability: make(map[string][]string),
queued: make(map[string]bool),
}
}
func (q *FileQueue) Add(name string, blocks []Block, monitor Monitor) {
q.fmut.Lock()
defer q.fmut.Unlock()
if q.queued[name] {
return
}
q.files = append(q.files, queuedFile{
name: name,
blocks: blocks,
activeBlocks: make([]bool, len(blocks)),
remaining: len(blocks),
channel: make(chan content),
monitor: monitor,
})
q.queued[name] = true
q.sorted = false
}
func (q *FileQueue) Len() int {
q.fmut.Lock()
defer q.fmut.Unlock()
return len(q.files)
}
func (q *FileQueue) Get(nodeID string) (queuedBlock, bool) {
q.fmut.Lock()
defer q.fmut.Unlock()
if !q.sorted {
sort.Sort(q.files)
q.sorted = true
}
for i := range q.files {
qf := &q.files[i]
q.amut.Lock()
av := q.availability[qf.name]
q.amut.Unlock()
if len(av) == 0 {
// No one has the file we want; abort.
if qf.remaining != len(qf.blocks) {
// We have already started on this file; close it down
close(qf.channel)
if mon := qf.monitor; mon != nil {
mon.FileDone()
}
}
delete(q.queued, qf.name)
q.deleteAt(i)
return queuedBlock{}, false
}
for _, ni := range av {
// Find and return the next block in the queue
if ni == nodeID {
for j, b := range qf.blocks {
if !qf.activeBlocks[j] {
qf.activeBlocks[j] = true
qf.given++
return queuedBlock{
name: qf.name,
block: b,
index: j,
}, true
}
}
break
}
}
}
// We found nothing to do
return queuedBlock{}, false
}
func (q *FileQueue) Done(file string, offset int64, data []byte) {
q.fmut.Lock()
defer q.fmut.Unlock()
c := content{
offset: offset,
data: data,
}
for i := range q.files {
qf := &q.files[i]
if qf.name == file {
if qf.monitor != nil && qf.remaining == len(qf.blocks) {
err := qf.monitor.FileBegins(qf.channel)
if err != nil {
log.Printf("WARNING: %s: %v (not synced)", qf.name, err)
delete(q.queued, qf.name)
q.deleteAt(i)
return
}
}
qf.channel <- c
qf.remaining--
if qf.remaining == 0 {
close(qf.channel)
if qf.monitor != nil {
err := qf.monitor.FileDone()
if err != nil {
log.Printf("WARNING: %s: %v", qf.name, err)
}
}
delete(q.queued, qf.name)
q.deleteAt(i)
}
return
}
}
// We found nothing, might have errored out already
}
func (q *FileQueue) QueuedFiles() (files []string) {
q.fmut.Lock()
defer q.fmut.Unlock()
for _, qf := range q.files {
files = append(files, qf.name)
}
return
}
func (q *FileQueue) deleteAt(i int) {
q.files = append(q.files[:i], q.files[i+1:]...)
}
func (q *FileQueue) deleteFile(n string) {
for i, file := range q.files {
if n == file.name {
q.deleteAt(i)
delete(q.queued, file.name)
return
}
}
}
func (q *FileQueue) SetAvailable(file string, nodes []string) {
q.amut.Lock()
defer q.amut.Unlock()
q.availability[file] = nodes
}
func (q *FileQueue) RemoveAvailable(toRemove string) {
q.fmut.Lock()
q.amut.Lock()
defer q.amut.Unlock()
defer q.fmut.Unlock()
for file, nodes := range q.availability {
for i, node := range nodes {
if node == toRemove {
q.availability[file] = nodes[:i+copy(nodes[i:], nodes[i+1:])]
if len(q.availability[file]) == 0 {
q.deleteFile(file)
}
}
break
}
}
}
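A minimal usage sketch of the queue above (assuming package main alongside filequeue.go and blocks.go; names as defined there): a file's blocks only become fetchable once some node is registered as having the file.
```
// exampleQueue is a sketch, not part of the file above.
func exampleQueue() {
	q := NewFileQueue()
	q.SetAvailable("docs/readme", []string{"node1"})
	q.Add("docs/readme", []Block{{Offset: 0, Size: 128}}, nil)

	// Blocks are handed out only to nodes listed as having the file.
	if qb, ok := q.Get("node1"); ok {
		fmt.Println(qb.name, qb.block.Offset) // docs/readme 0
	}
	if _, ok := q.Get("node2"); !ok {
		fmt.Println("node2 has nothing to do")
	}
}
```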

295
filequeue_test.go Normal file

@@ -0,0 +1,295 @@
package main
import (
"reflect"
"sync"
"sync/atomic"
"testing"
)
func TestFileQueueAdd(t *testing.T) {
q := NewFileQueue()
q.Add("foo", nil, nil)
}
func TestFileQueueAddSorting(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ := q.Get("nodeID")
if b.name != "aaa" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q = NewFileQueue()
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID") // Start on zzzz
if b.name != "zzz" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID")
if b.name != "zzz" {
// Continue rather than starting a new file
t.Errorf("Incorrectly sorted get: %+v", b)
}
}
func TestFileQueueLen(t *testing.T) {
q := NewFileQueue()
q.Add("foo", nil, nil)
q.Add("bar", nil, nil)
if l := q.Len(); l != 2 {
t.Errorf("Incorrect len %d != 2 after adds", l)
}
}
func TestFileQueueGet(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("foo", []string{"nodeID"})
q.SetAvailable("bar", []string{"nodeID"})
q.Add("foo", []Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("bar", []Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
// First get should return the first block of the first file
expected := queuedBlock{
name: "bar",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
},
}
actual, ok := q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (first)\n E: %+v\n A: %+v", expected, actual)
}
// Second get should return the next block of the first file
expected = queuedBlock{
name: "bar",
block: Block{
Offset: 128,
Size: 128,
Hash: []byte("some other bar hash bytes"),
},
index: 1,
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (second)\n E: %+v\n A: %+v", expected, actual)
}
// Third get should return the first block of the second file
expected = queuedBlock{
name: "foo",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
},
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (third)\n E: %+v\n A: %+v", expected, actual)
}
}
/*
func TestFileQueueDone(t *testing.T) {
ch := make(chan content)
var recv sync.WaitGroup
recv.Add(1)
go func() {
content := <-ch
if bytes.Compare(content.data, []byte("first block bytes")) != 0 {
t.Error("Incorrect data in first content block")
}
content = <-ch
if bytes.Compare(content.data, []byte("second block bytes")) != 0 {
t.Error("Incorrect data in second content block")
}
_, ok := <-ch
if ok {
t.Error("Content channel not closed")
}
recv.Done()
}()
q := FileQueue{resolver: fakeResolver{}}
q.Add("foo", []Block{
{Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
}, ch)
b0, _ := q.Get("nodeID")
b1, _ := q.Get("nodeID")
q.Done(b0.name, b0.block.Offset, []byte("first block bytes"))
q.Done(b1.name, b1.block.Offset, []byte("second block bytes"))
recv.Wait()
// Queue should now have one file less
if l := q.Len(); l != 0 {
t.Error("Queue not empty")
}
_, ok := q.Get("nodeID")
if ok {
t.Error("Unexpected OK Get()")
}
}
*/
func TestFileQueueGetNodeIDs(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("a-foo", []string{"nodeID", "a"})
q.SetAvailable("b-bar", []string{"nodeID", "b"})
q.Add("a-foo", []Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("b-bar", []Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
expected := queuedBlock{
name: "b-bar",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
},
}
actual, ok := q.Get("b")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
expected = queuedBlock{
name: "a-foo",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
},
}
actual, ok = q.Get("a")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
expected = queuedBlock{
name: "a-foo",
block: Block{
Offset: 128,
Size: 128,
Hash: []byte("some other foo hash bytes"),
},
index: 1,
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
}
func TestFileQueueThreadHandling(t *testing.T) {
// This should pass with go test -race
const n = 100
var total int
var blocks []Block
for i := 1; i <= n; i++ {
blocks = append(blocks, Block{Offset: int64(i), Size: 1})
total += i
}
q := NewFileQueue()
q.Add("foo", blocks, nil)
q.SetAvailable("foo", []string{"nodeID"})
var start = make(chan bool)
var gotTot uint32
var wg sync.WaitGroup
wg.Add(n)
for i := 1; i <= n; i++ {
go func() {
<-start
b, _ := q.Get("nodeID")
atomic.AddUint32(&gotTot, uint32(b.block.Offset))
wg.Done()
}()
}
close(start)
wg.Wait()
if int(gotTot) != total {
t.Errorf("Total mismatch; %d != %d", gotTot, total)
}
}
func TestDeleteAt(t *testing.T) {
q := FileQueue{}
for i := 0; i < 4; i++ {
q.files = queuedFileList{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}
q.deleteAt(i)
if l := len(q.files); l != 3 {
t.Fatalf("deleteAt(%d) failed; %d != 3", i, l)
}
}
q.files = queuedFileList{{name: "a"}}
q.deleteAt(0)
if l := len(q.files); l != 0 {
t.Fatalf("deleteAt(only) failed; %d != 0", l)
}
}

136
gui.go

@@ -2,34 +2,53 @@ package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"path/filepath"
"runtime"
"sync"
"time"
"bitbucket.org/tebeka/nrsc"
"github.com/calmh/syncthing/model"
"github.com/codegangsta/martini"
)
func startGUI(addr string, m *model.Model) {
type guiError struct {
Time time.Time
Error string
}
var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
)
func startGUI(addr string, m *Model) {
router := martini.NewRouter()
router.Get("/", getRoot)
router.Get("/rest/version", restGetVersion)
router.Get("/rest/model", restGetModel)
router.Get("/rest/connections", restGetConnections)
router.Get("/rest/config", restGetConfig)
router.Get("/rest/config/sync", restGetConfigInSync)
router.Get("/rest/need", restGetNeed)
router.Get("/rest/system", restGetSystem)
router.Get("/rest/errors", restGetErrors)
router.Post("/rest/config", restPostConfig)
router.Post("/rest/restart", restPostRestart)
router.Post("/rest/error", restPostError)
go func() {
mr := martini.New()
mr.Use(nrscStatic("gui"))
mr.Use(embeddedStatic())
mr.Use(martini.Recovery())
mr.Action(router.Handle)
mr.Map(m)
http.ListenAndServe(addr, mr)
err := http.ListenAndServe(addr, mr)
if err != nil {
warnln("GUI not possible:", err)
}
}()
}
@@ -41,7 +60,7 @@ func restGetVersion() string {
return Version
}
func restGetModel(m *model.Model, w http.ResponseWriter) {
func restGetModel(m *Model, w http.ResponseWriter) {
var res = make(map[string]interface{})
globalFiles, globalDeleted, globalBytes := m.GlobalSize()
@@ -60,21 +79,35 @@ func restGetModel(m *model.Model, w http.ResponseWriter) {
json.NewEncoder(w).Encode(res)
}
func restGetConnections(m *model.Model, w http.ResponseWriter) {
func restGetConnections(m *Model, w http.ResponseWriter) {
var res = m.ConnectionStats()
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}
func restGetConfig(w http.ResponseWriter) {
var res = make(map[string]interface{})
res["repository"] = config.OptionMap("repository")
res["nodes"] = config.OptionMap("nodes")
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
json.NewEncoder(w).Encode(cfg)
}
type guiFile model.File
func restPostConfig(req *http.Request) {
err := json.NewDecoder(req.Body).Decode(&cfg)
if err != nil {
log.Println(err)
} else {
saveConfig()
configInSync = false
}
}
func restGetConfigInSync(w http.ResponseWriter) {
json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
}
func restPostRestart(req *http.Request) {
restart()
}
type guiFile File
func (f guiFile) MarshalJSON() ([]byte, error) {
type t struct {
@@ -83,11 +116,11 @@ func (f guiFile) MarshalJSON() ([]byte, error) {
}
return json.Marshal(t{
Name: f.Name,
Size: model.File(f).Size(),
Size: File(f).Size(),
})
}
func restGetNeed(m *model.Model, w http.ResponseWriter) {
func restGetNeed(m *Model, w http.ResponseWriter) {
files, _ := m.NeedFiles()
gfs := make([]guiFile, len(files))
for i, f := range files {
@@ -97,36 +130,43 @@ func restGetNeed(m *model.Model, w http.ResponseWriter) {
json.NewEncoder(w).Encode(gfs)
}
func nrscStatic(path string) interface{} {
if err := nrsc.Initialize(); err != nil {
panic("Unable to initialize nrsc: " + err.Error())
}
return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
file := req.URL.Path
var cpuUsagePercent float64
var cpuUsageLock sync.RWMutex
// nrsc expects there not to be a leading slash
if file[0] == '/' {
file = file[1:]
}
func restGetSystem(w http.ResponseWriter) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
f := nrsc.Get(file)
if f == nil {
return
}
res := make(map[string]interface{})
res["myID"] = myID
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys
cpuUsageLock.RLock()
res["cpuPercent"] = cpuUsagePercent
cpuUsageLock.RUnlock()
rdr, err := f.Open()
if err != nil {
http.Error(res, "Internal Server Error", http.StatusInternalServerError)
}
defer rdr.Close()
mtype := mime.TypeByExtension(filepath.Ext(req.URL.Path))
if len(mtype) != 0 {
res.Header().Set("Content-Type", mtype)
}
res.Header().Set("Content-Size", fmt.Sprintf("%d", f.Size()))
res.Header().Set("Last-Modified", f.ModTime().UTC().Format(http.TimeFormat))
io.Copy(res, rdr)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}
func restGetErrors(w http.ResponseWriter) {
guiErrorsMut.Lock()
json.NewEncoder(w).Encode(guiErrors)
guiErrorsMut.Unlock()
}
func restPostError(req *http.Request) {
bs, _ := ioutil.ReadAll(req.Body)
req.Body.Close()
showGuiError(string(bs))
}
func showGuiError(err string) {
guiErrorsMut.Lock()
guiErrors = append(guiErrors, guiError{time.Now(), err})
if len(guiErrors) > 5 {
guiErrors = guiErrors[len(guiErrors)-5:]
}
guiErrorsMut.Unlock()
}

gui/app.js

@@ -1,40 +1,126 @@
/*jslint browser: true, continue: true, plusplus: true */
/*global $: false, angular: false */
'use strict';
var syncthing = angular.module('syncthing', []);
syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$http.get("/rest/version").success(function (data) {
var prevDate = 0,
modelGetOK = true;
$scope.connections = {};
$scope.config = {};
$scope.myID = '';
$scope.nodes = [];
$scope.configInSync = true;
$scope.errors = [];
$scope.seenError = '';
// Strings before bools look better
$scope.settings = [
{id: 'ListenStr', descr: 'Sync Protocol Listen Addresses', type: 'text', restart: true},
{id: 'GUIAddress', descr: 'GUI Listen Address', type: 'text', restart: true},
{id: 'MaxSendKbps', descr: 'Outgoing Rate Limit (KBps)', type: 'number', restart: true},
{id: 'RescanIntervalS', descr: 'Rescan Interval (s)', type: 'number', restart: true},
{id: 'ReconnectIntervalS', descr: 'Reconnect Interval (s)', type: 'number', restart: true},
{id: 'ParallelRequests', descr: 'Max Outstanding Requests', type: 'number', restart: true},
{id: 'MaxChangeKbps', descr: 'Max File Change Rate (KBps)', type: 'number', restart: true},
{id: 'ReadOnly', descr: 'Read Only', type: 'bool', restart: true},
{id: 'AllowDelete', descr: 'Allow Delete', type: 'bool', restart: true},
{id: 'FollowSymlinks', descr: 'Follow Symlinks', type: 'bool', restart: true},
{id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true},
{id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true},
];
function modelGetSucceeded() {
if (!modelGetOK) {
$('#networkError').modal('hide');
modelGetOK = true;
}
}
function modelGetFailed() {
if (modelGetOK) {
$('#networkError').modal({backdrop: 'static', keyboard: false});
modelGetOK = false;
}
}
function nodeCompare(a, b) {
if (a.NodeID === $scope.myID) {
return -1;
}
if (b.NodeID === $scope.myID) {
return 1;
}
if (a.NodeID < b.NodeID) {
return -1;
}
return a.NodeID > b.NodeID;
}
$http.get('/rest/version').success(function (data) {
$scope.version = data;
});
$http.get("/rest/config").success(function (data) {
$scope.config = data;
$http.get('/rest/system').success(function (data) {
$scope.system = data;
$scope.myID = data.myID;
$http.get('/rest/config').success(function (data) {
$scope.config = data;
$scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');
var nodes = $scope.config.Repositories[0].Nodes;
nodes.sort(nodeCompare);
$scope.nodes = nodes;
});
$http.get('/rest/config/sync').success(function (data) {
$scope.configInSync = data.configInSync;
});
});
var prevDate = 0;
$scope.refresh = function () {
$http.get("/rest/model").success(function (data) {
$scope.model = data;
$http.get('/rest/system').success(function (data) {
$scope.system = data;
});
$http.get("/rest/connections").success(function (data) {
var now = Date.now();
var td = (now - prevDate) / 1000;
prevDate = now;
$http.get('/rest/model').success(function (data) {
$scope.model = data;
modelGetSucceeded();
}).error(function () {
modelGetFailed();
});
$http.get('/rest/connections').success(function (data) {
var now = Date.now(),
td = (now - prevDate) / 1000,
id;
for (var id in data) {
prevDate = now;
$scope.inbps = 0;
$scope.outbps = 0;
for (id in data) {
if (!data.hasOwnProperty(id)) {
continue;
}
try {
data[id].inbps = 8 * (data[id].InBytesTotal - $scope.connections[id].InBytesTotal) / td;
data[id].outbps = 8 * (data[id].OutBytesTotal - $scope.connections[id].OutBytesTotal) / td;
data[id].inbps = Math.max(0, 8 * (data[id].InBytesTotal - $scope.connections[id].InBytesTotal) / td);
data[id].outbps = Math.max(0, 8 * (data[id].OutBytesTotal - $scope.connections[id].OutBytesTotal) / td);
} catch (e) {
data[id].inbps = 0;
data[id].outbps = 0;
}
$scope.inbps += data[id].inbps;
$scope.outbps += data[id].outbps;
}
$scope.connections = data;
});
$http.get("/rest/need").success(function (data) {
$http.get('/rest/need').success(function (data) {
var i, name;
for (i = 0; i < data.length; i++) {
name = data[i].Name.split("/");
data[i].ShortName = name[name.length-1];
name = data[i].Name.split('/');
data[i].ShortName = name[name.length - 1];
}
data.sort(function (a, b) {
if (a.ShortName < b.ShortName) {
@@ -47,75 +133,290 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
});
$scope.need = data;
});
$http.get('/rest/errors').success(function (data) {
$scope.errors = data;
});
};
$scope.nodeStatus = function (nodeCfg) {
var conn = $scope.connections[nodeCfg.NodeID];
if (conn) {
if (conn.Completion === 100) {
return 'In Sync';
} else {
return 'Syncing (' + conn.Completion + '%)';
}
}
return 'Disconnected';
};
$scope.nodeIcon = function (nodeCfg) {
var conn = $scope.connections[nodeCfg.NodeID];
if (conn) {
if (conn.Completion === 100) {
return 'ok';
} else {
return 'refresh';
}
}
return 'minus';
};
$scope.nodeClass = function (nodeCfg) {
var conn = $scope.connections[nodeCfg.NodeID];
if (conn) {
if (conn.Completion === 100) {
return 'success';
} else {
return 'primary';
}
}
return 'info';
};
$scope.nodeAddr = function (nodeCfg) {
var conn = $scope.connections[nodeCfg.NodeID];
if (conn) {
return conn.Address;
}
return '(unknown address)';
};
$scope.nodeCompletion = function (nodeCfg) {
var conn = $scope.connections[nodeCfg.NodeID];
if (conn) {
return conn.Completion + '%';
}
return '';
};
$scope.nodeVer = function (nodeCfg) {
if (nodeCfg.NodeID === $scope.myID) {
return $scope.version;
}
var conn = $scope.connections[nodeCfg.NodeID];
if (conn) {
return conn.ClientVersion;
}
return '(unknown version)';
};
$scope.nodeName = function (nodeCfg) {
if (nodeCfg.Name) {
return nodeCfg.Name;
}
return nodeCfg.NodeID.substr(0, 6);
};
$scope.saveSettings = function () {
$scope.configInSync = false;
$scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) { return x.trim(); });
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$('#settingsTable').collapse('hide');
};
$scope.restart = function () {
$http.post('/rest/restart');
$scope.configInSync = true;
};
$scope.editNode = function (nodeCfg) {
$scope.currentNode = nodeCfg;
$scope.editingExisting = true;
$scope.currentNode.AddressesStr = nodeCfg.Addresses.join(', ');
$('#editNode').modal({backdrop: 'static', keyboard: false});
};
$scope.addNode = function () {
$scope.currentNode = {NodeID: '', AddressesStr: 'dynamic'};
$scope.editingExisting = false;
$('#editNode').modal({backdrop: 'static', keyboard: false});
};
$scope.deleteNode = function () {
var newNodes = [], i;
$('#editNode').modal('hide');
if (!$scope.editingExisting) {
return;
}
for (i = 0; i < $scope.nodes.length; i++) {
if ($scope.nodes[i].NodeID !== $scope.currentNode.NodeID) {
newNodes.push($scope.nodes[i]);
}
}
$scope.nodes = newNodes;
$scope.config.Repositories[0].Nodes = newNodes;
$scope.configInSync = false;
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
};
$scope.saveNode = function () {
var nodeCfg, done, i;
$scope.configInSync = false;
$('#editNode').modal('hide');
nodeCfg = $scope.currentNode;
nodeCfg.Addresses = nodeCfg.AddressesStr.split(',').map(function (x) { return x.trim(); });
done = false;
for (i = 0; i < $scope.nodes.length; i++) {
if ($scope.nodes[i].NodeID === nodeCfg.NodeID) {
$scope.nodes[i] = nodeCfg;
done = true;
break;
}
}
if (!done) {
$scope.nodes.push(nodeCfg);
}
$scope.nodes.sort(nodeCompare);
$scope.config.Repositories[0].Nodes = $scope.nodes;
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
};
$scope.otherNodes = function () {
var nodes = [], i, n;
for (i = 0; i < $scope.nodes.length; i++) {
n = $scope.nodes[i];
if (n.NodeID !== $scope.myID) {
nodes.push(n);
}
}
return nodes;
};
$scope.thisNode = function () {
var i, n;
for (i = 0; i < $scope.nodes.length; i++) {
n = $scope.nodes[i];
if (n.NodeID === $scope.myID) {
return [n];
}
}
};
$scope.errorList = function () {
var errors = [];
for (var i = 0; i < $scope.errors.length; i++) {
var e = $scope.errors[i];
if (e.Time > $scope.seenError) {
errors.push(e);
}
}
return errors;
};
$scope.clearErrors = function () {
$scope.seenError = $scope.errors[$scope.errors.length - 1].Time;
};
$scope.friendlyNodes = function (str) {
for (var i = 0; i < $scope.nodes.length; i++) {
var cfg = $scope.nodes[i];
str = str.replace(cfg.NodeID, $scope.nodeName(cfg));
}
return str;
};
$scope.refresh();
setInterval($scope.refresh, 10000);
});
function decimals(num) {
if (num > 100) {
function decimals(val, num) {
var digits, decs;
if (val === 0) {
return 0;
}
if (num > 10) {
return 1;
}
return 2;
digits = Math.floor(Math.log(Math.abs(val)) / Math.log(10));
decs = Math.max(0, num - digits);
return decs;
}
syncthing.filter('binary', function() {
return function(input) {
syncthing.filter('natural', function () {
return function (input, valid) {
return input.toFixed(decimals(input, valid));
};
});
syncthing.filter('binary', function () {
return function (input) {
if (input === undefined) {
return '- '
return '0 ';
}
if (input > 1024 * 1024 * 1024) {
input /= 1024 * 1024 * 1024;
return input.toFixed(decimals(input)) + ' Gi';
return input.toFixed(decimals(input, 2)) + ' Gi';
}
if (input > 1024 * 1024) {
input /= 1024 * 1024;
return input.toFixed(decimals(input)) + ' Mi';
return input.toFixed(decimals(input, 2)) + ' Mi';
}
if (input > 1024) {
input /= 1024;
return input.toFixed(decimals(input)) + ' Ki';
return input.toFixed(decimals(input, 2)) + ' Ki';
}
return Math.round(input) + ' ';
}
};
});
syncthing.filter('metric', function() {
return function(input) {
syncthing.filter('metric', function () {
return function (input) {
if (input === undefined) {
return '- '
return '0 ';
}
if (input > 1000 * 1000 * 1000) {
input /= 1000 * 1000 * 1000;
return input.toFixed(decimals(input)) + ' G';
return input.toFixed(decimals(input, 2)) + ' G';
}
if (input > 1000 * 1000) {
input /= 1000 * 1000;
return input.toFixed(decimals(input)) + ' M';
return input.toFixed(decimals(input, 2)) + ' M';
}
if (input > 1000) {
input /= 1000;
return input.toFixed(decimals(input)) + ' k';
return input.toFixed(decimals(input, 2)) + ' k';
}
return Math.round(input) + ' ';
}
};
});
syncthing.filter('short', function() {
return function(input) {
syncthing.filter('short', function () {
return function (input) {
return input.substr(0, 6);
}
};
});
syncthing.filter('alwaysNumber', function() {
return function(input) {
syncthing.filter('alwaysNumber', function () {
return function (input) {
if (input === undefined) {
return 0;
}
return input;
}
};
});
syncthing.directive('optionEditor', function () {
return {
restrict: 'C',
replace: true,
transclude: true,
scope: {
setting: '=setting',
},
template: '<input type="text" ng-model="config.Options[setting.id]"></input>',
};
});


@@ -1,397 +0,0 @@
/*!
* Bootstrap v3.0.3 (http://getbootstrap.com)
* Copyright 2013 Twitter, Inc.
* Licensed under http://www.apache.org/licenses/LICENSE-2.0
*/
.btn-default,
.btn-primary,
.btn-success,
.btn-info,
.btn-warning,
.btn-danger {
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
}
.btn-default:active,
.btn-primary:active,
.btn-success:active,
.btn-info:active,
.btn-warning:active,
.btn-danger:active,
.btn-default.active,
.btn-primary.active,
.btn-success.active,
.btn-info.active,
.btn-warning.active,
.btn-danger.active {
-webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
}
.btn:active,
.btn.active {
background-image: none;
}
.btn-default {
text-shadow: 0 1px 0 #fff;
background-image: -webkit-linear-gradient(top, #ffffff 0%, #e0e0e0 100%);
background-image: linear-gradient(to bottom, #ffffff 0%, #e0e0e0 100%);
background-repeat: repeat-x;
border-color: #dbdbdb;
border-color: #ccc;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-default:hover,
.btn-default:focus {
background-color: #e0e0e0;
background-position: 0 -15px;
}
.btn-default:active,
.btn-default.active {
background-color: #e0e0e0;
border-color: #dbdbdb;
}
.btn-primary {
background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%);
background-repeat: repeat-x;
border-color: #2b669a;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-primary:hover,
.btn-primary:focus {
background-color: #2d6ca2;
background-position: 0 -15px;
}
.btn-primary:active,
.btn-primary.active {
background-color: #2d6ca2;
border-color: #2b669a;
}
.btn-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);
background-repeat: repeat-x;
border-color: #3e8f3e;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-success:hover,
.btn-success:focus {
background-color: #419641;
background-position: 0 -15px;
}
.btn-success:active,
.btn-success.active {
background-color: #419641;
border-color: #3e8f3e;
}
.btn-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);
background-repeat: repeat-x;
border-color: #e38d13;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-warning:hover,
.btn-warning:focus {
background-color: #eb9316;
background-position: 0 -15px;
}
.btn-warning:active,
.btn-warning.active {
background-color: #eb9316;
border-color: #e38d13;
}
.btn-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);
background-repeat: repeat-x;
border-color: #b92c28;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-danger:hover,
.btn-danger:focus {
background-color: #c12e2a;
background-position: 0 -15px;
}
.btn-danger:active,
.btn-danger.active {
background-color: #c12e2a;
border-color: #b92c28;
}
.btn-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);
background-repeat: repeat-x;
border-color: #28a4c9;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-info:hover,
.btn-info:focus {
background-color: #2aabd2;
background-position: 0 -15px;
}
.btn-info:active,
.btn-info.active {
background-color: #2aabd2;
border-color: #28a4c9;
}
.thumbnail,
.img-thumbnail {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.dropdown-menu > li > a:hover,
.dropdown-menu > li > a:focus {
background-color: #e8e8e8;
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
}
.dropdown-menu > .active > a,
.dropdown-menu > .active > a:hover,
.dropdown-menu > .active > a:focus {
background-color: #357ebd;
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
.navbar-default {
background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);
background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);
background-repeat: repeat-x;
border-radius: 4px;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
}
.navbar-default .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
}
.navbar-brand,
.navbar-nav > li > a {
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);
}
.navbar-inverse {
background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222222 100%);
background-image: linear-gradient(to bottom, #3c3c3c 0%, #222222 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.navbar-inverse .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #222222 0%, #282828 100%);
background-image: linear-gradient(to bottom, #222222 0%, #282828 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
}
.navbar-inverse .navbar-brand,
.navbar-inverse .navbar-nav > li > a {
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
}
.navbar-static-top,
.navbar-fixed-top,
.navbar-fixed-bottom {
border-radius: 0;
}
.alert {
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
}
.alert-success {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
background-repeat: repeat-x;
border-color: #b2dba1;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
}
.alert-info {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
background-repeat: repeat-x;
border-color: #9acfea;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
}
.alert-warning {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
background-repeat: repeat-x;
border-color: #f5e79e;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
}
.alert-danger {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
background-repeat: repeat-x;
border-color: #dca7a7;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
}
.progress {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
}
.progress-bar {
background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
}
.progress-bar-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
}
.progress-bar-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
}
.progress-bar-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
}
.progress-bar-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
}
.list-group {
border-radius: 4px;
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.list-group-item.active,
.list-group-item.active:hover,
.list-group-item.active:focus {
text-shadow: 0 -1px 0 #3071a9;
background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
background-repeat: repeat-x;
border-color: #3278b3;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
}
.panel {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.panel-default > .panel-heading {
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
}
.panel-primary > .panel-heading {
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
.panel-success > .panel-heading {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
}
.panel-info > .panel-heading {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
}
.panel-warning > .panel-heading {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
}
.panel-danger > .panel-heading {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
}
.well {
background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
background-repeat: repeat-x;
border-color: #dcdcdc;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
-webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
}

8
gui/bootstrap/css/bootstrap-theme.min.css vendored Normal file → Executable file

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large


File diff suppressed because one or more lines are too long

0
gui/bootstrap/fonts/glyphicons-halflings-regular.eot Normal file → Executable file

0
gui/bootstrap/fonts/glyphicons-halflings-regular.svg Normal file → Executable file

Before: 61 KiB → After: 61 KiB

0
gui/bootstrap/fonts/glyphicons-halflings-regular.ttf Normal file → Executable file

0
gui/bootstrap/fonts/glyphicons-halflings-regular.woff Normal file → Executable file

File diff suppressed because it is too large

8
gui/bootstrap/js/bootstrap.min.js vendored Normal file → Executable file

File diff suppressed because one or more lines are too long

BIN
gui/favicon.png Normal file

Binary file not shown.

After: 6.8 KiB


@@ -6,97 +6,270 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="">
<link rel="shortcut icon" href="../../docs-assets/ico/favicon.png">
<link rel="shortcut icon" href="favicon.png">
<title>syncthing</title>
<link href="bootstrap/css/bootstrap.css" rel="stylesheet">
<link href="bootstrap/css/bootstrap.min.css" rel="stylesheet">
<style type="text/css">
body {
padding-top: 20px;
padding-bottom: 20px;
}
.header {
border-bottom: 1px solid #e5e5e5;
padding-bottom: 10px;
body {
padding-top: 70px;
padding-bottom: 70px;
}
.text-monospace {
font-family: monospace;
}
.table-condensed>thead>tr>th, .table-condensed>tbody>tr>th, .table-condensed>tfoot>tr>th, .table-condensed>thead>tr>td, .table-condensed>tbody>tr>td, .table-condensed>tfoot>tr>td {
border-top: none;
}
thead tr th {
text-align: center;
}
.logo {
margin: 0;
padding: 0;
top: -5px;
position: relative;
}
</style>
</head>
<body ng-controller="SyncthingCtrl">
<div class="container">
<div class="header">
<h3 class="text-muted">syncthing&emsp;<small>|</small>&emsp;<small>{{version}}</small></h3>
</div>
<div class="row">
<div class="col-md-12">
<h2>Synchronization</h2>
<div class="progress">
<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
{{100 * model.inSyncBytes / model.globalBytes | number:0}}%
</div>
</div>
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
<div class="navbar navbar-fixed-top navbar-default">
<div class="container">
<a class="navbar-brand"><img class="logo" src="st-logo-128.png" width="32" height="32"> Syncthing</a>
<div ng-if="!configInSync">
<form class="navbar-form navbar-right">
<button type="button" class="btn btn-primary" ng-click="restart()">Restart Now</button>
</form>
<p class="navbar-text navbar-right">The configuration has been changed but not activated. Syncthing must restart to activate the new configuration.</p>
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="col-md-6">
<h1>Repository Status</h1>
<div class="col-md-12">
<div ng-if="errorList().length > 0" class="alert alert-warning">
<p ng-repeat="err in errorList()"><small>{{err.Time | date:"hh:mm:ss.sss"}}:</small> {{friendlyNodes(err.Error)}}</p>
<button type="button" class="pull-right btn btn-warning" ng-click="clearErrors()">OK</button>
<div class="clearfix"></div>
</div>
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
<div ng-show="model.needFiles > 0">
<h2>Files to Synchronize</h2>
<table class="table table-condensed table-striped">
<tr ng-repeat="file in need track by $index">
<td><abbr title="{{file.Name}}">{{file.ShortName}}</abbr></td>
<td class="text-right">{{file.Size | binary}}B</td>
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">Cluster</h3></div>
<table class="table table-condensed">
<tbody>
<!-- myself -->
<tr class="text-muted" ng-repeat="nodeCfg in thisNode()">
<td style="width:12%">
<span class="label label-default">
<span class="glyphicon glyphicon-ok"></span> This node
</span>
</td>
<td style="width:10%">
<span class="text-monospace">{{nodeName(nodeCfg)}}</span>
</td>
<td style="width:20%">{{version}}</td>
<td style="width:25%">(this node)</td>
<td style="width:9%" class="text-right">
{{inbps | metric}}bps
<span class="text-muted glyphicon glyphicon-chevron-down"></span>
</td>
<td style="width:9%" class="text-right">
{{outbps | metric}}bps
<span class="text-muted glyphicon glyphicon-chevron-up"></span>
</td>
<td style="width:7%" class="text-right">
<button type="button" ng-click="editNode(nodeCfg)" class="btn btn-default btn-xs"><span class="glyphicon glyphicon-pencil"></span> Edit</button>
</td>
</tr>
<!-- all other nodes -->
<tr ng-repeat="nodeCfg in otherNodes()">
<td>
<span class="label label-{{nodeClass(nodeCfg)}}">
<span class="glyphicon glyphicon-{{nodeIcon(nodeCfg)}}"></span> {{nodeStatus(nodeCfg)}}
</span>
</td>
<td>
<span class="text-monospace">{{nodeName(nodeCfg)}}</span>
</td>
<td>{{nodeVer(nodeCfg)}}</td>
<td>{{nodeAddr(nodeCfg)}}</td>
<td class="text-right">
<abbr title="{{connections[nodeCfg.NodeID].InBytesTotal | binary}}B">{{connections[nodeCfg.NodeID].inbps | metric}}bps</abbr>
<span class="text-muted glyphicon glyphicon-chevron-down"></span>
</td>
<td class="text-right">
<abbr title="{{connections[nodeCfg.NodeID].OutBytesTotal | binary}}B">{{connections[nodeCfg.NodeID].outbps | metric}}bps</abbr>
<span class="text-muted glyphicon glyphicon-chevron-up"></span>
</td>
<td class="text-right">
<button type="button" ng-click="editNode(nodeCfg)" class="btn btn-default btn-xs"><span class="glyphicon glyphicon-pencil"></span> Edit</button>
</td>
</tr>
<tr>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td class="text-right">
<button type="button" class="btn btn-default btn-xs" ng-click="addNode()"><span class="glyphicon glyphicon-plus"></span> Add</button>
</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<h1>Cluster Status</h1>
<table class="table table-condensed">
<tbody>
<tr ng-repeat="(node, address) in config.nodes" ng-class="{'text-primary': !!connections[node]}">
<td><abbr class="text-monospace" title="{{node}}">{{node | short}}</abbr></td>
<td>
<span ng-show="!!connections[node]">
<span class="glyphicon glyphicon-link"></span>
{{connections[node].Address}}
</span>
<span ng-hide="!!connections[node]">
<span class="glyphicon glyphicon-cog"></span>
{{address}}
</span>
</td>
<td class="text-right">
<abbr title="{{connections[node].InBytesTotal | binary}}B">{{connections[node].inbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
</td>
<td class="text-right">
<abbr title="{{connections[node].OutBytesTotal | binary}}B">{{connections[node].outbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
</td>
</tr>
</tbody>
</table>
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">Repository</h3></div>
<div class="panel-body">
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
</div>
</div>
</div>
<div class="col-md-6">
<div class="panel" ng-class="{'panel-success': model.needBytes === 0, 'panel-primary': model.needBytes !== 0}">
<div class="panel-heading"><h3 class="panel-title">Synchronization</h3></div>
<div class="panel-body">
<div class="progress">
<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
{{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
</div>
</div>
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title"><a href="" data-toggle="collapse" data-target="#system">System</a></h3></div>
<div id="system" class="panel-collapse collapse">
<div class="panel-body">
<p>{{system.sys | binary}}B RAM allocated, {{system.alloc | binary}}B in use</p>
<p>{{system.cpuPercent | alwaysNumber | natural:1}}% CPU, {{system.goroutines | alwaysNumber}} goroutines</p>
</div>
</div>
</div>
</div>
<div class="col-md-6">
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title"><a href="" data-toggle="collapse" data-target="#settingsTable">Settings</a></h3></div>
<div id="settingsTable" class="panel-collapse collapse">
<div class="panel-body">
<form role="form">
<div class="form-group" ng-repeat="setting in settings">
<div ng-if="setting.type == 'text' || setting.type == 'number'">
<label for="{{setting.id}}">{{setting.descr}}</label>
<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="config.Options[setting.id]"></input>
</div>
<div class="checkbox" ng-if="setting.type == 'bool'">
<label>
{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="config.Options[setting.id]"></input>
</label>
</div>
</div>
</form>
</div>
<div class="panel-footer">
<button type="button" class="btn btn-sm btn-default" ng-click="saveSettings()">Save</button>
<small><span class="text-muted">Changes take effect when restarting syncthing.</span></small>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="navbar navbar-default navbar-fixed-bottom">
<div class="container">
<p class="navbar-text">{{version}}</p>
<ul class="nav navbar-nav navbar-right">
<li><a class="navbar-link" href="https://github.com/calmh/syncthing/releases">Latest Release</a></li>
<li><a class="navbar-link" href="https://github.com/calmh/syncthing/wiki">Documentation</a></li>
<li><a class="navbar-link" href="https://github.com/calmh/syncthing/issues">Bugs</a></li>
<li><a class="navbar-link" href="https://github.com/calmh/syncthing">Source Code</a></li>
</ul>
</div>
</div>
<div id="networkError" class="modal fade">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header alert alert-danger">
<h4 class="modal-title">
<span class="glyphicon glyphicon-exclamation-sign"></span>
Connection Error
</h4>
</div>
<div class="modal-body">
<p>
Syncthing seems to be down, or there is a problem with your Internet connection.
Retrying&hellip;
</p>
</div>
</div>
</div>
</div>
<div id="editNode" class="modal fade">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
<h4 class="modal-title">Edit Node</h4>
</div>
<div class="modal-body">
<form role="form">
<div class="form-group">
<label for="nodeID">Node ID</label>
<input placeholder="YUFJOUDPORCMA..." ng-disabled="editingExisting" id="nodeID" class="form-control" type="text" ng-model="currentNode.NodeID"></input>
<p class="help-block">The node ID can be found in the logs or in the "Add Node" dialog on the other node.</p>
</div>
<div class="form-group">
<label for="name">Name</label>
<input placeholder="Home Server" id="name" class="form-control" type="text" ng-model="currentNode.Name"></input>
<p class="help-block">Shown instead of Node ID in the cluster status.</p>
</div>
<div class="form-group">
<label for="addresses">Addresses</label>
<input placeholder="dynamic" ng-disabled="currentNode.NodeID == myID" id="addresses" class="form-control" type="text" ng-model="currentNode.AddressesStr"></input>
<p class="help-block">Enter comma separated <span class="text-monospace">ip:port</span> addresses or <span class="text-monospace">dynamic</span> to perform automatic discovery of the address.</p>
</div>
</form>
<div ng-show="!editingExisting">
When adding a new node, keep in mind that <em>this node</em> must be added on the other side too. The Node ID of this node is:
<pre>{{myID}}</pre>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-primary" ng-click="saveNode()">Save</button>
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
<button ng-if="editingExisting" type="button" class="btn btn-danger pull-left" ng-click="deleteNode()">Delete</button>
</div>
</div>
</div>
</div>
<script src="angular.min.js"></script>

BIN
gui/st-logo-128.png Normal file

Binary file not shown.

After: 17 KiB

9
gui_development.go Normal file

@@ -0,0 +1,9 @@
//+build guidev
package main
import "github.com/codegangsta/martini"
func embeddedStatic() interface{} {
return martini.Static("gui")
}

46
gui_embedded.go Normal file

@@ -0,0 +1,46 @@
//+build !guidev
package main
import (
"fmt"
"log"
"mime"
"net/http"
"path/filepath"
"time"
"github.com/calmh/syncthing/auto"
"github.com/cratonica/embed"
)
func embeddedStatic() interface{} {
fs, err := embed.Unpack(auto.Resources)
if err != nil {
panic(err)
}
var modt = time.Now().UTC().Format(http.TimeFormat)
return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
file := req.URL.Path
if file[0] == '/' {
file = file[1:]
}
bs, ok := fs[file]
if !ok {
return
}
mtype := mime.TypeByExtension(filepath.Ext(req.URL.Path))
if len(mtype) != 0 {
res.Header().Set("Content-Type", mtype)
}
res.Header().Set("Content-Size", fmt.Sprintf("%d", len(bs)))
res.Header().Set("Last-Modified", modt)
res.Write(bs)
}
}
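
gui_development.go and gui_embedded.go are two build-tag variants of the same embeddedStatic function: building with -tags guidev serves the GUI live from the gui/ directory, while the default build serves resources embedded via cratonica/embed. The call site is not part of this diff; a plausible sketch of the wiring, given that martini handlers are plain interface{} values:

package main

import "github.com/codegangsta/martini"

// serveGUI is a hypothetical call site; the real wiring lives in the GUI
// code, which this compare does not show.
func serveGUI() {
	m := martini.Classic()
	m.Use(embeddedStatic()) // either martini.Static("gui") or the in-memory handler
	m.Run()
}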

31
gui_unix.go Normal file

@@ -0,0 +1,31 @@
//+build !windows,!solaris
package main
import (
"syscall"
"time"
)
func init() {
go trackCPUUsage()
}
func trackCPUUsage() {
var prevUsage int64
var prevTime = time.Now().UnixNano()
var rusage syscall.Rusage
for {
time.Sleep(10 * time.Second)
syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
curTime := time.Now().UnixNano()
timeDiff := curTime - prevTime
curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
usageDiff := curUsage - prevUsage
cpuUsageLock.Lock()
cpuUsagePercent = 100 * float64(usageDiff) / float64(timeDiff)
cpuUsageLock.Unlock()
prevTime = curTime
prevUsage = curUsage
}
}
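
trackCPUUsage reads and writes cpuUsageLock and cpuUsagePercent, neither of which is declared in this diff (they presumably live in the GUI code that reports system.cpuPercent). The minimal declarations the file assumes look like this:

package main

import "sync"

// Assumed shared state; the real declarations are not part of this compare.
var (
	cpuUsageLock    sync.Mutex
	cpuUsagePercent float64
)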

5
integration/.gitignore vendored Normal file

@@ -0,0 +1,5 @@
files-*
conf-*
md5-*
genfiles
md5r

42
integration/genfiles.go Normal file

@@ -0,0 +1,42 @@
package main
import (
"crypto/rand"
"flag"
"fmt"
"io/ioutil"
mr "math/rand"
"os"
"path"
)
func name() string {
var b [16]byte
rand.Reader.Read(b[:])
return fmt.Sprintf("%x", b[:])
}
func main() {
var files int
var maxexp int
flag.IntVar(&files, "files", 1000, "Number of files")
flag.IntVar(&maxexp, "maxexp", 20, "Maximum file size (max = 2^n + 128*1024 B)")
flag.Parse()
for i := 0; i < files; i++ {
n := name()
p0 := path.Join(string(n[0]), n[0:2])
os.MkdirAll(p0, 0755)
s := 1 << uint(mr.Intn(maxexp))
a := 128 * 1024
if a > s {
a = s
}
s += mr.Intn(a)
b := make([]byte, s)
rand.Reader.Read(b)
p1 := path.Join(p0, n)
ioutil.WriteFile(p1, b, 0644)
}
}

59
integration/md5r.go Normal file

@@ -0,0 +1,59 @@
package main
import (
"crypto/md5"
"flag"
"fmt"
"io"
"os"
"path/filepath"
)
func main() {
flag.Parse()
args := flag.Args()
if len(args) == 0 {
args = []string{"."}
}
for _, path := range args {
err := filepath.Walk(path, walker)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
}
func walker(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
sum, err := md5file(path)
if err != nil {
return err
}
fmt.Printf("%s %s\n", sum, path)
}
return nil
}
func md5file(fname string) (hash string, err error) {
f, err := os.Open(fname)
if err != nil {
return
}
defer f.Close()
h := md5.New()
io.Copy(h, f)
hb := h.Sum(nil)
hash = fmt.Sprintf("%x", hb)
return
}

74
integration/test.sh Executable file

@@ -0,0 +1,74 @@
#!/bin/bash
rm -rf files-* conf-* md5-*
extraopts=""
p=$(pwd)
go build genfiles.go
go build md5r.go
echo "Setting up (keys)..."
i1=$(syncthing --home conf-1 2>&1 | awk '/My ID/ {print $7}')
echo $i1
i2=$(syncthing --home conf-2 2>&1 | awk '/My ID/ {print $7}')
echo $i2
i3=$(syncthing --home conf-3 2>&1 | awk '/My ID/ {print $7}')
echo $i3
echo "Setting up (files)..."
for i in 1 2 3 ; do
cat >conf-$i/syncthing.ini <<EOT
[repository]
dir = $p/files-$i
[nodes]
$i1 = 127.0.0.1:22001
$i2 = 127.0.0.1:22002
$i3 = 127.0.0.1:22003
[settings]
gui-enabled = false
listen-address = :2200$i
EOT
mkdir files-$i
pushd files-$i >/dev/null
../genfiles -maxexp 21 -files 400
touch empty-$i
../md5r > ../md5-$i
popd >/dev/null
done
echo "Starting..."
for i in 1 2 3 ; do
sleep 1
syncthing --home conf-$i $extraopts &
done
cat md5-* | sort > md5-tot
while true ; do
read
echo Verifying...
conv=0
for i in 1 2 3 ; do
pushd files-$i >/dev/null
../md5r | sort > ../md5-$i
popd >/dev/null
if ! cmp md5-$i md5-tot >/dev/null ; then
echo $i unconverged
else
conv=$((conv + 1))
echo $i converged
fi
done
if [[ $conv == 3 ]] ; then
kill %1
kill %2
kill %3
exit
fi
done


@@ -6,7 +6,12 @@ import (
"os"
)
var logger = log.New(os.Stderr, "", log.Ltime)
var logger *log.Logger
func init() {
log.SetOutput(os.Stderr)
logger = log.New(os.Stderr, "", log.Flags())
}
func debugln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
@@ -40,11 +45,13 @@ func okf(format string, vals ...interface{}) {
func warnln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
showGuiError(s)
logger.Output(2, "WARNING: "+s)
}
func warnf(format string, vals ...interface{}) {
s := fmt.Sprintf(format, vals...)
showGuiError(s)
logger.Output(2, "WARNING: "+s)
}

429
main.go

@@ -3,98 +3,161 @@ package main
import (
"compress/gzip"
"crypto/tls"
"flag"
"fmt"
"log"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/exec"
"path"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/calmh/ini"
"github.com/calmh/syncthing/discover"
flags "github.com/calmh/syncthing/github.com/jessevdk/go-flags"
"github.com/calmh/syncthing/model"
"github.com/calmh/syncthing/protocol"
)
type Options struct {
ConfDir string `short:"c" long:"cfg" description:"Configuration directory" default:"~/.syncthing" value-name:"DIR"`
Listen string `short:"l" long:"listen" description:"Listen address" default:":22000" value-name:"ADDR"`
ReadOnly bool `short:"r" long:"ro" description:"Repository is read only"`
Delete bool `short:"d" long:"delete" description:"Delete files deleted from cluster"`
Rehash bool `long:"rehash" description:"Ignore cache and rehash all files in repository"`
NoSymlinks bool `long:"no-symlinks" description:"Don't follow first level symlinks in the repo"`
NoStats bool `long:"no-stats" description:"Don't print model and connection statistics"`
GUIAddr string `long:"gui" description:"GUI listen address" default:"" value-name:"ADDR"`
Discovery DiscoveryOptions `group:"Discovery Options"`
Advanced AdvancedOptions `group:"Advanced Options"`
Debug DebugOptions `group:"Debugging Options"`
}
type DebugOptions struct {
LogSource bool `long:"log-source"`
TraceModel []string `long:"trace-model" value-name:"TRACE" description:"idx, net, file, need"`
TraceConnect bool `long:"trace-connect"`
Profiler string `long:"profiler" value-name:"ADDR"`
}
type DiscoveryOptions struct {
ExternalServer string `long:"ext-server" description:"External discovery server" value-name:"NAME" default:"syncthing.nym.se"`
ExternalPort int `short:"e" long:"ext-port" description:"External listen port" value-name:"PORT" default:"22000"`
NoExternalDiscovery bool `short:"n" long:"no-ext-announce" description:"Do not announce presence externally"`
NoLocalDiscovery bool `short:"N" long:"no-local-announce" description:"Do not announce presence locally"`
}
type AdvancedOptions struct {
RequestsInFlight int `long:"reqs-in-flight" description:"Parallell in flight requests per file" default:"4" value-name:"REQS"`
FilesInFlight int `long:"files-in-flight" description:"Parallell in flight file pulls" default:"8" value-name:"FILES"`
ScanInterval time.Duration `long:"scan-intv" description:"Repository scan interval" default:"60s" value-name:"INTV"`
ConnInterval time.Duration `long:"conn-intv" description:"Node reconnect interval" default:"60s" value-name:"INTV"`
}
var opts Options
var cfg Configuration
var Version string = "unknown-dev"
const (
confFileName = "syncthing.ini"
var (
myID string
config ini.Config
)
var (
config ini.Config
nodeAddrs = make(map[string][]string)
showVersion bool
confDir string
trace string
profiler string
verbose bool
startupDelay int
)
func main() {
_, err := flags.Parse(&opts)
if err != nil {
flag.StringVar(&confDir, "home", "~/.syncthing", "Set configuration directory")
flag.StringVar(&trace, "debug.trace", "", "(connect,net,idx,file,pull)")
flag.StringVar(&profiler, "debug.profiler", "", "(addr)")
flag.BoolVar(&showVersion, "version", false, "Show version")
flag.BoolVar(&verbose, "v", false, "Be more verbose")
flag.IntVar(&startupDelay, "delay", 0, "Startup delay (s)")
flag.Usage = usageFor(flag.CommandLine, "syncthing [options]")
flag.Parse()
if startupDelay > 0 {
time.Sleep(time.Duration(startupDelay) * time.Second)
}
if showVersion {
fmt.Println(Version)
os.Exit(0)
}
if len(opts.Debug.TraceModel) > 0 || opts.Debug.LogSource {
logger = log.New(os.Stderr, "", log.Lshortfile|log.Ldate|log.Ltime|log.Lmicroseconds)
}
opts.ConfDir = expandTilde(opts.ConfDir)
infoln("Version", Version)
if len(os.Getenv("GOGC")) == 0 {
debug.SetGCPercent(25)
}
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
if len(trace) > 0 {
log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
logger.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
}
confDir = expandTilde(confDir)
// Ensure that our home directory exists and that we have a certificate and key.
ensureDir(opts.ConfDir, 0700)
cert, err := loadCert(opts.ConfDir)
ensureDir(confDir, 0700)
cert, err := loadCert(confDir)
if err != nil {
newCertificate(opts.ConfDir)
cert, err = loadCert(opts.ConfDir)
newCertificate(confDir)
cert, err = loadCert(confDir)
fatalErr(err)
}
myID := string(certId(cert.Certificate[0]))
myID = string(certId(cert.Certificate[0]))
log.SetPrefix("[" + myID[0:5] + "] ")
logger.SetPrefix("[" + myID[0:5] + "] ")
infoln("Version", Version)
infoln("My ID:", myID)
if opts.Debug.Profiler != "" {
// Prepare to be able to save configuration
cfgFile := path.Join(confDir, "config.xml")
go saveConfigLoop(cfgFile)
// Load the configuration file, if it exists.
// If it does not, create a template.
cf, err := os.Open(cfgFile)
if err == nil {
// Read config.xml
cfg, err = readConfigXML(cf)
if err != nil {
fatalln(err)
}
cf.Close()
} else {
// No config.xml, let's try the old syncthing.ini
iniFile := path.Join(confDir, "syncthing.ini")
cf, err := os.Open(iniFile)
if err == nil {
infoln("Migrating syncthing.ini to config.xml")
iniCfg := ini.Parse(cf)
cf.Close()
os.Rename(iniFile, path.Join(confDir, "migrated_syncthing.ini"))
cfg, _ = readConfigXML(nil)
cfg.Repositories = []RepositoryConfiguration{
{Directory: iniCfg.Get("repository", "dir")},
}
readConfigINI(iniCfg.OptionMap("settings"), &cfg.Options)
for name, addrs := range iniCfg.OptionMap("nodes") {
n := NodeConfiguration{
NodeID: name,
Addresses: strings.Fields(addrs),
}
cfg.Repositories[0].Nodes = append(cfg.Repositories[0].Nodes, n)
}
saveConfig()
}
}
if len(cfg.Repositories) == 0 {
infoln("No config file; starting with empty defaults")
cfg, err = readConfigXML(nil)
cfg.Repositories = []RepositoryConfiguration{
{
Directory: "~/Sync",
Nodes: []NodeConfiguration{
{NodeID: myID, Addresses: []string{"dynamic"}},
},
},
}
saveConfig()
infof("Edit %s to taste or use the GUI\n", cfgFile)
}
// Make sure the local node is in the node list.
cfg.Repositories[0].Nodes = cleanNodeList(cfg.Repositories[0].Nodes, myID)
var dir = expandTilde(cfg.Repositories[0].Directory)
if len(profiler) > 0 {
go func() {
err := http.ListenAndServe(opts.Debug.Profiler, nil)
err := http.ListenAndServe(profiler, nil)
if err != nil {
warnln(err)
}
@@ -104,87 +167,99 @@ func main() {
// The TLS configuration is used for both the listening socket and outgoing
// connections.
cfg := &tls.Config{
ClientAuth: tls.RequestClientCert,
ServerName: "syncthing",
NextProtos: []string{"bep/1.0"},
InsecureSkipVerify: true,
Certificates: []tls.Certificate{cert},
}
// Load the configuration file, if it exists.
cf, err := os.Open(path.Join(opts.ConfDir, confFileName))
if err != nil {
fatalln("No config file")
config = ini.Config{}
}
config = ini.Parse(cf)
cf.Close()
var dir = expandTilde(config.Get("repository", "dir"))
// Create a map of desired node connections based on the configuration file
// directives.
for nodeID, addrs := range config.OptionMap("nodes") {
addrs := strings.Fields(addrs)
nodeAddrs[nodeID] = addrs
tlsCfg := &tls.Config{
Certificates: []tls.Certificate{cert},
NextProtos: []string{"bep/1.0"},
ServerName: myID,
ClientAuth: tls.RequestClientCert,
SessionTicketsDisabled: true,
InsecureSkipVerify: true,
MinVersion: tls.VersionTLS12,
}
ensureDir(dir, -1)
m := model.NewModel(dir)
for _, t := range opts.Debug.TraceModel {
m := NewModel(dir, cfg.Options.MaxChangeKbps*1000)
for _, t := range strings.Split(trace, ",") {
m.Trace(t)
}
if cfg.Options.MaxSendKbps > 0 {
m.LimitRate(cfg.Options.MaxSendKbps)
}
// GUI
if opts.GUIAddr != "" {
startGUI(opts.GUIAddr, m)
if cfg.Options.GUIEnabled && cfg.Options.GUIAddress != "" {
host, port, err := net.SplitHostPort(cfg.Options.GUIAddress)
if err != nil {
warnf("Cannot start GUI on %q: %v", cfg.Options.GUIAddress, err)
} else {
if len(host) > 0 {
infof("Starting web GUI on http://%s", cfg.Options.GUIAddress)
} else {
infof("Starting web GUI on port %s", port)
}
startGUI(cfg.Options.GUIAddress, m)
}
}
// Walk the repository and update the local model before establishing any
// connections to other nodes.
if !opts.Rehash {
infoln("Loading index cache")
loadIndex(m)
if verbose {
infoln("Populating repository index")
}
infoln("Populating repository index")
loadIndex(m)
updateLocalModel(m)
connOpts := map[string]string{
"clientId": "syncthing",
"clientVersion": Version,
"clusterHash": clusterHash(cfg.Repositories[0].Nodes),
}
// Routine to listen for incoming connections
infoln("Listening for incoming connections")
go listen(myID, opts.Listen, m, cfg)
if verbose {
infoln("Listening for incoming connections")
}
for _, addr := range cfg.Options.ListenAddress {
go listen(myID, addr, m, tlsCfg, connOpts)
}
// Routine to connect out to configured nodes
infoln("Attempting to connect to other nodes")
go connect(myID, opts.Listen, nodeAddrs, m, cfg)
if verbose {
infoln("Attempting to connect to other nodes")
}
disc := discovery(cfg.Options.ListenAddress[0])
go connect(myID, disc, m, tlsCfg, connOpts)
// Routine to pull blocks from other nodes to synchronize the local
// repository. Does not run when we are in read only (publish only) mode.
if !opts.ReadOnly {
if opts.Delete {
infoln("Deletes from peer nodes are allowed")
} else {
infoln("Deletes from peer nodes will be ignored")
if !cfg.Options.ReadOnly {
if verbose {
if cfg.Options.AllowDelete {
infoln("Deletes from peer nodes are allowed")
} else {
infoln("Deletes from peer nodes will be ignored")
}
okln("Ready to synchronize (read-write)")
}
okln("Ready to synchronize (read-write)")
m.StartRW(opts.Delete, opts.Advanced.FilesInFlight, opts.Advanced.RequestsInFlight)
} else {
m.StartRW(cfg.Options.AllowDelete, cfg.Options.ParallelRequests)
} else if verbose {
okln("Ready to synchronize (read only; no external updates accepted)")
}
// Periodically scan the repository and update the local model.
// XXX: Should use some fsnotify mechanism.
go func() {
td := time.Duration(cfg.Options.RescanIntervalS) * time.Second
for {
time.Sleep(opts.Advanced.ScanInterval)
updateLocalModel(m)
time.Sleep(td)
if m.LocalAge() > (td / 2).Seconds() {
updateLocalModel(m)
}
}
}()
if !opts.NoStats {
if verbose {
// Periodically print statistics
go printStatsLoop(m)
}
@@ -192,9 +267,72 @@ func main() {
select {}
}
func printStatsLoop(m *model.Model) {
func restart() {
infoln("Restarting")
args := os.Args
doAppend := true
for _, arg := range args {
if arg == "-delay" {
doAppend = false
break
}
}
if doAppend {
args = append(args, "-delay", "2")
}
pgm, err := exec.LookPath(os.Args[0])
if err != nil {
warnln(err)
return
}
proc, err := os.StartProcess(pgm, args, &os.ProcAttr{
Env: os.Environ(),
Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
})
if err != nil {
fatalln(err)
}
proc.Release()
os.Exit(0)
}
var saveConfigCh = make(chan struct{})
func saveConfigLoop(cfgFile string) {
for _ = range saveConfigCh {
fd, err := os.Create(cfgFile + ".tmp")
if err != nil {
warnln(err)
continue
}
err = writeConfigXML(fd, cfg)
if err != nil {
warnln(err)
fd.Close()
continue
}
err = fd.Close()
if err != nil {
warnln(err)
continue
}
err = os.Rename(cfgFile+".tmp", cfgFile)
if err != nil {
warnln(err)
}
}
}
func saveConfig() {
saveConfigCh <- struct{}{}
}
func printStatsLoop(m *Model) {
var lastUpdated int64
var lastStats = make(map[string]model.ConnectionInfo)
var lastStats = make(map[string]ConnectionInfo)
for {
time.Sleep(60 * time.Second)
@@ -205,7 +343,7 @@ func printStatsLoop(m *model.Model) {
outbps := 8 * int(float64(stats.OutBytesTotal-lastStats[node].OutBytesTotal)/secs)
if inbps+outbps > 0 {
infof("%s: %sb/s in, %sb/s out", node, MetricPrefix(inbps), MetricPrefix(outbps))
infof("%s: %sb/s in, %sb/s out", node[0:5], MetricPrefix(inbps), MetricPrefix(outbps))
}
lastStats[node] = stats
@@ -223,8 +361,11 @@ func printStatsLoop(m *model.Model) {
}
}
func listen(myID string, addr string, m *model.Model, cfg *tls.Config) {
l, err := tls.Listen("tcp", addr, cfg)
func listen(myID string, addr string, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
if strings.Contains(trace, "connect") {
debugln("NET: Listening on", addr)
}
l, err := tls.Listen("tcp", addr, tlsCfg)
fatalErr(err)
listen:
@@ -235,7 +376,7 @@ listen:
continue
}
if opts.Debug.TraceConnect {
if strings.Contains(trace, "connect") {
debugln("NET: Connect from", conn.RemoteAddr())
}
@@ -259,9 +400,10 @@ listen:
warnf("Connect from connected node (%s)", remoteID)
}
for nodeID := range nodeAddrs {
if nodeID == remoteID {
m.AddConnection(conn, remoteID)
for _, nodeCfg := range cfg.Repositories[0].Nodes {
if nodeCfg.NodeID == remoteID {
protoConn := protocol.NewConnection(remoteID, conn, conn, m, connOpts)
m.AddConnection(conn, protoConn)
continue listen
}
}
@@ -269,85 +411,90 @@ listen:
}
}
func connect(myID string, addr string, nodeAddrs map[string][]string, m *model.Model, cfg *tls.Config) {
func discovery(addr string) *discover.Discoverer {
_, portstr, err := net.SplitHostPort(addr)
fatalErr(err)
port, _ := strconv.Atoi(portstr)
if opts.Discovery.NoLocalDiscovery {
if !cfg.Options.LocalAnnEnabled {
port = -1
} else {
} else if verbose {
infoln("Sending local discovery announcements")
}
if opts.Discovery.NoExternalDiscovery {
opts.Discovery.ExternalPort = -1
} else {
if !cfg.Options.GlobalAnnEnabled {
cfg.Options.GlobalAnnServer = ""
} else if verbose {
infoln("Sending external discovery announcements")
}
disc, err := discover.NewDiscoverer(myID, port, opts.Discovery.ExternalPort, opts.Discovery.ExternalServer)
disc, err := discover.NewDiscoverer(myID, port, cfg.Options.GlobalAnnServer)
if err != nil {
warnf("No discovery possible (%v)", err)
}
return disc
}
func connect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
for {
nextNode:
for nodeID, addrs := range nodeAddrs {
if nodeID == myID {
for _, nodeCfg := range cfg.Repositories[0].Nodes {
if nodeCfg.NodeID == myID {
continue
}
if m.ConnectedTo(nodeID) {
if m.ConnectedTo(nodeCfg.NodeID) {
continue
}
for _, addr := range addrs {
for _, addr := range nodeCfg.Addresses {
if addr == "dynamic" {
var ok bool
if disc != nil {
addr, ok = disc.Lookup(nodeID)
addr, ok = disc.Lookup(nodeCfg.NodeID)
}
if !ok {
continue
}
}
if opts.Debug.TraceConnect {
debugln("NET: Dial", nodeID, addr)
if strings.Contains(trace, "connect") {
debugln("NET: Dial", nodeCfg.NodeID, addr)
}
conn, err := tls.Dial("tcp", addr, cfg)
conn, err := tls.Dial("tcp", addr, tlsCfg)
if err != nil {
if opts.Debug.TraceConnect {
if strings.Contains(trace, "connect") {
debugln("NET:", err)
}
continue
}
remoteID := certId(conn.ConnectionState().PeerCertificates[0].Raw)
if remoteID != nodeID {
warnln("Unexpected nodeID", remoteID, "!=", nodeID)
if remoteID != nodeCfg.NodeID {
warnln("Unexpected nodeID", remoteID, "!=", nodeCfg.NodeID)
conn.Close()
continue
}
m.AddConnection(conn, remoteID)
protoConn := protocol.NewConnection(remoteID, conn, conn, m, connOpts)
m.AddConnection(conn, protoConn)
continue nextNode
}
}
time.Sleep(opts.Advanced.ConnInterval)
time.Sleep(time.Duration(cfg.Options.ReconnectIntervalS) * time.Second)
}
}
func updateLocalModel(m *model.Model) {
files := m.FilteredWalk(!opts.NoSymlinks)
func updateLocalModel(m *Model) {
files, _ := m.Walk(cfg.Options.FollowSymlinks)
m.ReplaceLocal(files)
saveIndex(m)
}
func saveIndex(m *model.Model) {
func saveIndex(m *Model) {
name := m.RepoID() + ".idx.gz"
fullName := path.Join(opts.ConfDir, name)
fullName := path.Join(confDir, name)
idxf, err := os.Create(fullName + ".tmp")
if err != nil {
return
@@ -361,9 +508,9 @@ func saveIndex(m *model.Model) {
os.Rename(fullName+".tmp", fullName)
}
func loadIndex(m *model.Model) {
func loadIndex(m *Model) {
name := m.RepoID() + ".idx.gz"
idxf, err := os.Open(path.Join(opts.ConfDir, name))
idxf, err := os.Open(path.Join(confDir, name))
if err != nil {
return
}

914
model.go Normal file

@@ -0,0 +1,914 @@
package main
import (
"crypto/sha1"
"errors"
"fmt"
"io"
"net"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
)
type Model struct {
dir string
global map[string]File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]File
rmut sync.RWMutex // protects remote
protoConn map[string]Connection
rawConn map[string]io.Closer
pmut sync.RWMutex // protects protoConn and rawConn
// Queue for files to fetch. fq can call back into the model, so we must ensure
// to hold no locks when calling methods on fq.
fq *FileQueue
dq chan File // queue for files to delete
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
lastIdxBcast time.Time
lastIdxBcastRequest time.Time
umut sync.RWMutex // provides updated* and lastIdx*
rwRunning bool
delete bool
initmut sync.Mutex // protects rwRunning and delete
trace map[string]bool
sup suppressor
parallelRequests int
limitRequestRate chan struct{}
imut sync.Mutex // protects Index
}
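// Connection is the subset of the protocol connection API that the model
// uses; as an interface it also allows a fake implementation in tests.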
type Connection interface {
ID() string
Index([]protocol.FileInfo)
Request(name string, offset int64, size uint32, hash []byte) ([]byte, error)
Statistics() protocol.Statistics
Option(key string) string
}
const (
idxBcastHoldtime = 15 * time.Second // Wait at least this long after the last index modification
idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
minFileHoldTimeS = 60 // Never allow file changes more often than this
maxFileHoldTimeS = 600 // Always allow file changes at least this often
)
var (
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
)
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
func NewModel(dir string, maxChangeBw int) *Model {
m := &Model{
dir: dir,
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
protoConn: make(map[string]Connection),
rawConn: make(map[string]io.Closer),
lastIdxBcast: time.Now(),
trace: make(map[string]bool),
sup: suppressor{threshold: int64(maxChangeBw)},
fq: NewFileQueue(),
dq: make(chan File),
}
go m.broadcastIndexLoop()
return m
}
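// LimitRate throttles outgoing Request traffic: the channel holds one token
// per KiB and the refill loop tops it up roughly kbps times per second, so
// Request (below) serves at most on the order of kbps KiB/s.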
func (m *Model) LimitRate(kbps int) {
m.limitRequestRate = make(chan struct{}, kbps)
n := kbps/10 + 1
go func() {
for {
time.Sleep(100 * time.Millisecond)
for i := 0; i < n; i++ {
select {
case m.limitRequestRate <- struct{}{}:
}
}
}
}()
}
// Trace enables trace logging of the given facility. This is a debugging function; grep for m.trace.
func (m *Model) Trace(t string) {
m.trace[t] = true
}
// StartRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRW(del bool, threads int) {
m.initmut.Lock()
defer m.initmut.Unlock()
if m.rwRunning {
panic("starting started model")
}
m.rwRunning = true
m.delete = del
m.parallelRequests = threads
go m.cleanTempFiles()
if del {
go m.deleteLoop()
}
}
// Generation returns an opaque integer that is guaranteed to increment on
// every change to the local repository or global model.
func (m *Model) Generation() int64 {
m.umut.RLock()
defer m.umut.RUnlock()
return m.updatedLocal + m.updateGlobal
}
func (m *Model) LocalAge() float64 {
m.umut.RLock()
defer m.umut.RUnlock()
return time.Since(time.Unix(m.updatedLocal, 0)).Seconds()
}
type ConnectionInfo struct {
protocol.Statistics
Address string
ClientID string
ClientVersion string
Completion int
}
// ConnectionStats returns a map with connection statistics for each connected node.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
type remoteAddrer interface {
RemoteAddr() net.Addr
}
m.gmut.RLock()
m.pmut.RLock()
m.rmut.RLock()
var tot int
for _, f := range m.global {
tot += f.Size()
}
var res = make(map[string]ConnectionInfo)
for node, conn := range m.protoConn {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
ClientID: conn.Option("clientId"),
ClientVersion: conn.Option("clientVersion"),
}
if nc, ok := m.rawConn[node].(remoteAddrer); ok {
ci.Address = nc.RemoteAddr().String()
}
var have int
for _, f := range m.remote[node] {
if f.Equals(m.global[f.Name]) {
have += f.Size()
}
}
ci.Completion = 100 * have / tot
res[node] = ci
}
m.rmut.RUnlock()
m.pmut.RUnlock()
m.gmut.RUnlock()
return res
}
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize() (files, deleted, bytes int) {
m.gmut.RLock()
for _, f := range m.global {
if f.Flags&protocol.FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
m.gmut.RUnlock()
return
}
// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize() (files, deleted, bytes int) {
m.lmut.RLock()
for _, f := range m.local {
if f.Flags&protocol.FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
m.lmut.RUnlock()
return
}
// InSyncSize returns the number and total byte size of the local files that
// are in sync with the global model.
func (m *Model) InSyncSize() (files, bytes int) {
m.gmut.RLock()
m.lmut.RLock()
for n, f := range m.local {
if gf, ok := m.global[n]; ok && f.Equals(gf) {
files++
bytes += f.Size()
}
}
m.lmut.RUnlock()
m.gmut.RUnlock()
return
}
// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int) {
qf := m.fq.QueuedFiles()
m.gmut.RLock()
for _, n := range qf {
f := m.global[n]
files = append(files, f)
bytes += f.Size()
}
m.gmut.RUnlock()
return
}
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
debugf("NET IDX(in): %s: %d files", nodeID, len(fs))
}
repo := make(map[string]File)
for _, f := range files {
m.indexUpdate(repo, f)
}
m.rmut.Lock()
m.remote[nodeID] = repo
m.rmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForFiles(files)
}
// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
debugf("NET IDXUP(in): %s: %d files", nodeID, len(files))
}
m.rmut.Lock()
repo, ok := m.remote[nodeID]
if !ok {
warnf("Index update from node %s that does not have an index", nodeID)
m.rmut.Unlock()
return
}
for _, f := range files {
m.indexUpdate(repo, f)
}
m.rmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForFiles(files)
}
func (m *Model) indexUpdate(repo map[string]File, f File) {
if m.trace["idx"] {
var flagComment string
if f.Flags&protocol.FlagDeleted != 0 {
flagComment = " (deleted)"
}
debugf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
}
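// Mask off the permission bits (0xfff) and the flags we understand; anything
// left over must come from a newer protocol version, so reject the record
// rather than misinterpret it.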
if extraFlags := f.Flags &^ (protocol.FlagInvalid | protocol.FlagDeleted | 0xfff); extraFlags != 0 {
warnf("IDX(in): Unknown flags 0x%x in index record %+v", extraFlags, f)
return
}
repo[f.Name] = f
}
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node string, err error) {
if m.trace["net"] {
debugf("NET: %s: %v", node, err)
}
if err == protocol.ErrClusterHash {
warnf("Connection to %s closed due to mismatched cluster hash. Ensure that the configured cluster members are identical on both nodes.", node)
}
m.fq.RemoveAvailable(node)
m.pmut.Lock()
m.rmut.Lock()
conn, ok := m.rawConn[node]
if ok {
conn.Close()
}
delete(m.remote, node)
delete(m.protoConn, node)
delete(m.rawConn, node)
m.rmut.Unlock()
m.pmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForGlobal()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
// Verify that the requested file exists in the local and global model.
m.lmut.RLock()
lf, localOk := m.local[name]
m.lmut.RUnlock()
m.gmut.RLock()
_, globalOk := m.global[name]
m.gmut.RUnlock()
if !localOk || !globalOk {
warnf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
return nil, ErrNoSuchFile
}
if lf.Flags&protocol.FlagInvalid != 0 {
return nil, ErrInvalid
}
if m.trace["net"] && nodeID != "<local>" {
debugf("NET REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
fn := path.Join(m.dir, name)
fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
if err != nil {
return nil, err
}
defer fd.Close()
buf := buffers.Get(int(size))
_, err = fd.ReadAt(buf, offset)
if err != nil {
return nil, err
}
if m.limitRequestRate != nil {
for s := 0; s < len(buf); s += 1024 {
<-m.limitRequestRate
}
}
return buf, nil
}
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(fs []File) {
var updated bool
var newLocal = make(map[string]File)
m.lmut.RLock()
for _, f := range fs {
newLocal[f.Name] = f
if ef := m.local[f.Name]; !ef.Equals(f) {
updated = true
}
}
m.lmut.RUnlock()
if m.markDeletedLocals(newLocal) {
updated = true
}
m.lmut.RLock()
if len(newLocal) != len(m.local) {
updated = true
}
m.lmut.RUnlock()
if updated {
m.lmut.Lock()
m.local = newLocal
m.lmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForGlobal()
m.umut.Lock()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
m.umut.Unlock()
}
}
// SeedLocal replaces the local repository index with the given list of files,
// in protocol data types. Does not track deletes, should only be used to seed
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
m.lmut.Lock()
m.local = make(map[string]File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
m.lmut.Unlock()
m.recomputeGlobal()
m.recomputeNeedForGlobal()
}
// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID string) bool {
m.pmut.RLock()
_, ok := m.protoConn[nodeID]
m.pmut.RUnlock()
return ok
}
// RepoID returns a unique ID representing the current repository location.
func (m *Model) RepoID() string {
return fmt.Sprintf("%x", sha1.Sum([]byte(m.dir)))
}
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
nodeID := protoConn.ID()
m.pmut.Lock()
m.protoConn[nodeID] = protoConn
m.rawConn[nodeID] = rawConn
m.pmut.Unlock()
go func() {
idx := m.ProtocolIndex()
protoConn.Index(idx)
}()
m.initmut.Lock()
rw := m.rwRunning
m.initmut.Unlock()
if !rw {
return
}
for i := 0; i < m.parallelRequests; i++ {
i := i
go func() {
if m.trace["pull"] {
debugln("PULL: Starting", nodeID, i)
}
for {
m.pmut.RLock()
if _, ok := m.protoConn[nodeID]; !ok {
if m.trace["pull"] {
debugln("PULL: Exiting", nodeID, i)
}
m.pmut.RUnlock()
return
}
m.pmut.RUnlock()
qb, ok := m.fq.Get(nodeID)
if ok {
if m.trace["pull"] {
debugln("PULL: Request", nodeID, i, qb.name, qb.block.Offset)
}
data, _ := protoConn.Request(qb.name, qb.block.Offset, qb.block.Size, qb.block.Hash)
m.fq.Done(qb.name, qb.block.Offset, data)
} else {
time.Sleep(1 * time.Second)
}
}
}()
}
}
// ProtocolIndex returns the current local index in protocol data types.
// The necessary read lock is acquired internally.
func (m *Model) ProtocolIndex() []protocol.FileInfo {
var index []protocol.FileInfo
m.lmut.RLock()
for _, f := range m.local {
mf := fileInfoFromFile(f)
if m.trace["idx"] {
var flagComment string
if mf.Flags&protocol.FlagDeleted != 0 {
flagComment = " (deleted)"
}
debugf("IDX(out): %q m=%d f=%o%s v=%d (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
}
index = append(index, mf)
}
m.lmut.RUnlock()
return index
}
func (m *Model) requestGlobal(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
m.pmut.RLock()
nc, ok := m.protoConn[nodeID]
m.pmut.RUnlock()
if !ok {
return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
}
if m.trace["net"] {
debugf("NET REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
return nc.Request(name, offset, size, hash)
}
func (m *Model) broadcastIndexLoop() {
for {
m.umut.RLock()
bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast)
holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime
maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay
m.umut.RUnlock()
if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
idx := m.ProtocolIndex()
var indexWg sync.WaitGroup
m.umut.Lock()
m.lastIdxBcast = time.Now()
m.umut.Unlock()
m.pmut.RLock()
indexWg.Add(len(m.protoConn)) // count the connections under the lock that guards them
for _, node := range m.protoConn {
node := node
if m.trace["net"] {
debugf("NET IDX(out/loop): %s: %d files", node.ID(), len(idx))
}
go func() {
node.Index(idx)
indexWg.Done()
}()
}
m.pmut.RUnlock()
indexWg.Wait()
}
time.Sleep(idxBcastHoldtime)
}
}
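// A note on the timing above: a broadcast goes out only once one has been
// requested and either the request has been quiet for idxBcastHoldtime or
// idxBcastMaxDelay has passed since the previous broadcast. Illustrative
// timeline with holdtime 15s and max delay 120s: a burst of local changes at
// t=0..10s produces a single broadcast around t=25s, while a continuous
// stream of changes is still flushed at least once every 120s.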
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
// For every file in the existing local table, check whether it is also
// present in the new local table. If it is not, and we already had the
// newest version available according to the global table, note the file
// as having been deleted.
var updated bool
m.gmut.RLock()
m.lmut.RLock()
for n, f := range m.local {
if _, ok := newLocal[n]; !ok {
if gf := m.global[n]; !gf.NewerThan(f) {
if f.Flags&protocol.FlagDeleted == 0 {
f.Flags = protocol.FlagDeleted
f.Version++
f.Blocks = nil
updated = true
}
newLocal[n] = f
}
}
}
m.lmut.RUnlock()
m.gmut.RUnlock()
return updated
}
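// Example (illustrative): a file tracked locally as {Modified: t, Version: 3}
// that has vanished from disk becomes {Modified: t, Version: 4,
// Flags: protocol.FlagDeleted, Blocks: nil}. The bumped version, not the
// unchanged modification time, is what propagates the delete to peers.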
func (m *Model) updateLocal(f File) {
var updated bool
m.lmut.Lock()
if ef, ok := m.local[f.Name]; !ok || !ef.Equals(f) {
m.local[f.Name] = f
updated = true
}
m.lmut.Unlock()
if updated {
m.recomputeGlobal()
// We don't recomputeNeed here, for two reasons:
// - no new need can arise from us having gotten a newer local file
// - recomputeNeed might call into fq.Add, but we might have been called
//   from fq, which would deadlock on fq
m.umut.Lock()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
m.umut.Unlock()
}
}
/*
XXX: Not done, needs elegant handling of availability
func (m *Model) recomputeGlobalFor(files []File) bool {
m.gmut.Lock()
defer m.gmut.Unlock()
var updated bool
for _, f := range files {
if gf, ok := m.global[f.Name]; !ok || f.NewerThan(gf) {
m.global[f.Name] = f
updated = true
// Fix availability
}
}
return updated
}
*/
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
m.lmut.RLock()
for n, f := range m.local {
newGlobal[n] = f
}
m.lmut.RUnlock()
var available = make(map[string][]string)
m.rmut.RLock()
var highestMod int64
for nodeID, fs := range m.remote {
for n, nf := range fs {
if lf, ok := newGlobal[n]; !ok || nf.NewerThan(lf) {
newGlobal[n] = nf
available[n] = []string{nodeID}
if nf.Modified > highestMod {
highestMod = nf.Modified
}
} else if lf.Equals(nf) {
available[n] = append(available[n], nodeID)
}
}
}
m.rmut.RUnlock()
for f, ns := range available {
m.fq.SetAvailable(f, ns)
}
// Figure out if anything actually changed
m.gmut.RLock()
var updated bool
if highestMod > m.updateGlobal || len(newGlobal) != len(m.global) {
updated = true
} else {
for n, f0 := range newGlobal {
if f1, ok := m.global[n]; !ok || !f0.Equals(f1) {
updated = true
break
}
}
}
m.gmut.RUnlock()
if updated {
m.gmut.Lock()
m.umut.Lock()
m.global = newGlobal
m.updateGlobal = time.Now().Unix()
m.umut.Unlock()
m.gmut.Unlock()
}
}
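// Note: available maps each file name to the set of nodes whose copy equals
// the new global version; handing it to fq.SetAvailable is what lets the pull
// goroutines started in AddConnection request blocks only from nodes that can
// actually serve them.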
type addOrder struct {
n string
remote []Block
fm *fileMonitor
}
func (m *Model) recomputeNeedForGlobal() {
var toDelete []File
var toAdd []addOrder
m.gmut.RLock()
for _, gf := range m.global {
toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
}
m.gmut.RUnlock()
for _, ao := range toAdd {
m.fq.Add(ao.n, ao.remote, ao.fm)
}
for _, gf := range toDelete {
m.dq <- gf
}
}
func (m *Model) recomputeNeedForFiles(files []File) {
var toDelete []File
var toAdd []addOrder
m.gmut.RLock()
for _, gf := range files {
toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
}
m.gmut.RUnlock()
for _, ao := range toAdd {
m.fq.Add(ao.n, ao.remote, ao.fm)
}
for _, gf := range toDelete {
m.dq <- gf
}
}
func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File) ([]addOrder, []File) {
m.lmut.RLock()
lf, ok := m.local[gf.Name]
m.lmut.RUnlock()
if !ok || gf.NewerThan(lf) {
if gf.Flags&protocol.FlagInvalid != 0 {
// Never attempt to sync invalid files
return toAdd, toDelete
}
if gf.Flags&protocol.FlagDeleted != 0 && !m.delete {
// Don't want to delete files, so forget this need
return toAdd, toDelete
}
if gf.Flags&protocol.FlagDeleted != 0 && !ok {
// Don't have the file, so don't need to delete it
return toAdd, toDelete
}
if m.trace["need"] {
debugf("NEED: lf:%v gf:%v", lf, gf)
}
if gf.Flags&protocol.FlagDeleted != 0 {
toDelete = append(toDelete, gf)
} else {
local, remote := BlockDiff(lf.Blocks, gf.Blocks)
fm := fileMonitor{
name: gf.Name,
path: path.Clean(path.Join(m.dir, gf.Name)),
global: gf,
model: m,
localBlocks: local,
}
toAdd = append(toAdd, addOrder{gf.Name, remote, &fm})
}
}
return toAdd, toDelete
}
func (m *Model) WhoHas(name string) []string {
var remote []string
m.gmut.RLock()
m.rmut.RLock()
gf := m.global[name]
for node, files := range m.remote {
if file, ok := files[name]; ok && file.Equals(gf) {
remote = append(remote, node)
}
}
m.rmut.RUnlock()
m.gmut.RUnlock()
return remote
}
func (m *Model) deleteLoop() {
for file := range m.dq {
if m.trace["file"] {
debugln("FILE: Delete", file.Name)
}
path := path.Clean(path.Join(m.dir, file.Name))
err := os.Remove(path)
if err != nil {
warnf("%s: %v", file.Name, err)
}
m.updateLocal(file)
}
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks = make([]Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = Block{
Offset: offset,
Size: b.Size,
Hash: b.Hash,
}
offset += int64(b.Size)
}
return File{
Name: f.Name,
Flags: f.Flags,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}
}
func fileInfoFromFile(f File) protocol.FileInfo {
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{
Size: b.Size,
Hash: b.Hash,
}
}
return protocol.FileInfo{
Name: f.Name,
Flags: f.Flags,
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}
}
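The two converters above encode a wire-format detail: protocol.BlockInfo carries only block sizes, so fileFromFileInfo reconstructs each block's offset cumulatively on receipt. A minimal illustration with hypothetical sizes (two full 128 KiB blocks and a tail):
	sizes := []uint32{131072, 131072, 4711}
	var offset int64
	for _, s := range sizes {
		// each block starts where the previous one ended:
		// offsets come out as 0, 131072, 262144
		offset += int64(s)
	}
	// offset now equals the total file size, 266855 bytes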

View File

@@ -1,597 +0,0 @@
package model
/*
Locking
=======
The model has read and write locks. These must be acquired as appropriate by
public methods. To prevent deadlock situations, private methods should never
acquire locks, but document what locks they require.
*/
import (
"crypto/sha1"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
)
type Model struct {
sync.RWMutex
dir string
global map[string]File // the latest version of each file as it exists in the cluster
local map[string]File // the files we currently have locally on disk
remote map[string]map[string]File
need map[string]bool // the files we need to update
nodes map[string]*protocol.Connection
rawConn map[string]io.ReadWriteCloser
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
lastIdxBcast time.Time
lastIdxBcastRequest time.Time
rwRunning bool
parallellFiles int
paralllelReqs int
delete bool
trace map[string]bool
}
const (
FlagDeleted = 1 << 12
idxBcastHoldtime = 15 * time.Second // Wait at least this long after the last index modification
idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
)
var ErrNoSuchFile = errors.New("no such file")
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
func NewModel(dir string) *Model {
m := &Model{
dir: dir,
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
need: make(map[string]bool),
nodes: make(map[string]*protocol.Connection),
rawConn: make(map[string]io.ReadWriteCloser),
lastIdxBcast: time.Now(),
trace: make(map[string]bool),
}
go m.broadcastIndexLoop()
return m
}
// Trace enables trace logging of the given facility. This is a debugging function; grep for m.trace.
func (m *Model) Trace(t string) {
m.Lock()
defer m.Unlock()
m.trace[t] = true
}
// StartRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRW(del bool, pfiles, preqs int) {
m.Lock()
defer m.Unlock()
if m.rwRunning {
panic("starting started model")
}
m.rwRunning = true
m.delete = del
m.parallellFiles = pfiles
m.paralllelReqs = preqs
go m.cleanTempFiles()
go m.puller()
}
// Generation returns an opaque integer that is guaranteed to increment on
// every change to the local repository or global model.
func (m *Model) Generation() int64 {
m.RLock()
defer m.RUnlock()
return m.updatedLocal + m.updateGlobal
}
type ConnectionInfo struct {
protocol.Statistics
Address string
}
// ConnectionStats returns a map with connection statistics for each connected node.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
type remoteAddrer interface {
RemoteAddr() net.Addr
}
m.RLock()
defer m.RUnlock()
var res = make(map[string]ConnectionInfo)
for node, conn := range m.nodes {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
}
if nc, ok := m.rawConn[node].(remoteAddrer); ok {
ci.Address = nc.RemoteAddr().String()
}
res[node] = ci
}
return res
}
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize() (files, deleted, bytes int) {
m.RLock()
defer m.RUnlock()
for _, f := range m.global {
if f.Flags&FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
return
}
// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize() (files, deleted, bytes int) {
m.RLock()
defer m.RUnlock()
for _, f := range m.local {
if f.Flags&FlagDeleted == 0 {
files++
bytes += f.Size()
} else {
deleted++
}
}
return
}
// InSyncSize returns the number and total byte size of the local files that
// are in sync with the global model.
func (m *Model) InSyncSize() (files, bytes int) {
m.RLock()
defer m.RUnlock()
for n, f := range m.local {
if gf, ok := m.global[n]; ok && f.Modified == gf.Modified {
files++
bytes += f.Size()
}
}
return
}
// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int) {
m.RLock()
defer m.RUnlock()
for n := range m.need {
f := m.global[n]
files = append(files, f)
bytes += f.Size()
}
return
}
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
if m.trace["net"] {
log.Printf("NET IDX(in): %s: %d files", nodeID, len(fs))
}
m.remote[nodeID] = make(map[string]File)
for _, f := range fs {
m.remote[nodeID][f.Name] = fileFromFileInfo(f)
}
m.recomputeGlobal()
m.recomputeNeed()
}
// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
if m.trace["net"] {
log.Printf("NET IDXUP(in): %s: %d files", nodeID, len(fs))
}
repo, ok := m.remote[nodeID]
if !ok {
return
}
for _, f := range fs {
if f.Flags&FlagDeleted != 0 && !m.delete {
// Files marked as deleted do not even enter the model
continue
}
repo[f.Name] = fileFromFileInfo(f)
}
m.recomputeGlobal()
m.recomputeNeed()
}
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node string, err error) {
m.Lock()
defer m.Unlock()
conn, ok := m.rawConn[node]
if ok {
conn.Close()
}
delete(m.remote, node)
delete(m.nodes, node)
delete(m.rawConn, node)
m.recomputeGlobal()
m.recomputeNeed()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
// Verify that the requested file exists in the local and global model.
m.RLock()
_, localOk := m.local[name]
_, globalOk := m.global[name]
m.RUnlock()
if !localOk || !globalOk {
log.Printf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
return nil, ErrNoSuchFile
}
if m.trace["net"] && nodeID != "<local>" {
log.Printf("NET REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
fn := path.Join(m.dir, name)
fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
if err != nil {
return nil, err
}
defer fd.Close()
buf := buffers.Get(int(size))
_, err = fd.ReadAt(buf, int64(offset))
if err != nil {
return nil, err
}
return buf, nil
}
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(fs []File) {
m.Lock()
defer m.Unlock()
var updated bool
var newLocal = make(map[string]File)
for _, f := range fs {
newLocal[f.Name] = f
if ef := m.local[f.Name]; ef.Modified != f.Modified {
updated = true
}
}
if m.markDeletedLocals(newLocal) {
updated = true
}
if len(newLocal) != len(m.local) {
updated = true
}
if updated {
m.local = newLocal
m.recomputeGlobal()
m.recomputeNeed()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
}
}
// SeedLocal replaces the local repository index with the given list of files,
// in protocol data types. Does not track deletes, should only be used to seed
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
m.local = make(map[string]File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
m.recomputeGlobal()
m.recomputeNeed()
}
// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID string) bool {
m.RLock()
defer m.RUnlock()
_, ok := m.nodes[nodeID]
return ok
}
// ProtocolIndex returns the current local index in protocol data types.
func (m *Model) ProtocolIndex() []protocol.FileInfo {
m.RLock()
defer m.RUnlock()
return m.protocolIndex()
}
// RepoID returns a unique ID representing the current repository location.
func (m *Model) RepoID() string {
return fmt.Sprintf("%x", sha1.Sum([]byte(m.dir)))
}
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(conn io.ReadWriteCloser, nodeID string) {
node := protocol.NewConnection(nodeID, conn, conn, m)
m.Lock()
m.nodes[nodeID] = node
m.rawConn[nodeID] = conn
m.Unlock()
m.RLock()
idx := m.protocolIndex()
m.RUnlock()
go func() {
node.Index(idx)
}()
}
// protocolIndex returns the current local index in protocol data types.
// Must be called with the read lock held.
func (m *Model) protocolIndex() []protocol.FileInfo {
var index []protocol.FileInfo
for _, f := range m.local {
mf := fileInfoFromFile(f)
if m.trace["idx"] {
var flagComment string
if mf.Flags&FlagDeleted != 0 {
flagComment = " (deleted)"
}
log.Printf("IDX: %q m=%d f=%o%s (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, len(mf.Blocks))
}
index = append(index, mf)
}
return index
}
func (m *Model) requestGlobal(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
m.RLock()
nc, ok := m.nodes[nodeID]
m.RUnlock()
if !ok {
return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
}
if m.trace["net"] {
log.Printf("NET REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
return nc.Request(name, offset, size, hash)
}
func (m *Model) broadcastIndexLoop() {
for {
m.RLock()
bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast)
holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime
m.RUnlock()
maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay
if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
m.Lock()
var indexWg sync.WaitGroup
indexWg.Add(len(m.nodes))
idx := m.protocolIndex()
m.lastIdxBcast = time.Now()
for _, node := range m.nodes {
node := node
if m.trace["net"] {
log.Printf("NET IDX(out/loop): %s: %d files", node.ID, len(idx))
}
go func() {
node.Index(idx)
indexWg.Done()
}()
}
m.Unlock()
indexWg.Wait()
}
time.Sleep(idxBcastHoldtime)
}
}
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
// Must be called with the write lock held.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
// For every file in the existing local table, check whether it is also
// present in the new local table. If it is not, and we already had the
// newest version available according to the global table, note the file
// as having been deleted.
var updated bool
for n, f := range m.local {
if _, ok := newLocal[n]; !ok {
if gf := m.global[n]; gf.Modified <= f.Modified {
if f.Flags&FlagDeleted == 0 {
f.Flags = FlagDeleted
f.Modified = f.Modified + 1
f.Blocks = nil
updated = true
}
newLocal[n] = f
}
}
}
return updated
}
func (m *Model) updateLocal(f File) {
if ef, ok := m.local[f.Name]; !ok || ef.Modified != f.Modified {
m.local[f.Name] = f
m.recomputeGlobal()
m.recomputeNeed()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
}
}
// Must be called with the write lock held.
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
for n, f := range m.local {
newGlobal[n] = f
}
for _, fs := range m.remote {
for n, f := range fs {
if cf, ok := newGlobal[n]; !ok || cf.Modified < f.Modified {
newGlobal[n] = f
}
}
}
// Figure out if anything actually changed
var updated bool
if len(newGlobal) != len(m.global) {
updated = true
} else {
for n, f0 := range newGlobal {
if f1, ok := m.global[n]; !ok || f0.Modified != f1.Modified {
updated = true
break
}
}
}
if updated {
m.updateGlobal = time.Now().Unix()
m.global = newGlobal
}
}
// Must be called with the write lock held.
func (m *Model) recomputeNeed() {
m.need = make(map[string]bool)
for n, f := range m.global {
hf, ok := m.local[n]
if !ok || f.Modified > hf.Modified {
if f.Flags&FlagDeleted != 0 && !m.delete {
// Don't want to delete files, so forget this need
continue
}
if f.Flags&FlagDeleted != 0 && !ok {
// Don't have the file, so don't need to delete it
continue
}
if m.trace["need"] {
log.Println("NEED:", ok, hf, f)
}
m.need[n] = true
}
}
}
// Must be called with the read lock held.
func (m *Model) whoHas(name string) []string {
var remote []string
gf := m.global[name]
for node, files := range m.remote {
if file, ok := files[name]; ok && file.Modified == gf.Modified {
remote = append(remote, node)
}
}
return remote
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks []Block
var offset uint64
for _, b := range f.Blocks {
blocks = append(blocks, Block{
Offset: offset,
Length: b.Length,
Hash: b.Hash,
})
offset += uint64(b.Length)
}
return File{
Name: f.Name,
Flags: f.Flags,
Modified: int64(f.Modified),
Blocks: blocks,
}
}
func fileInfoFromFile(f File) protocol.FileInfo {
var blocks []protocol.BlockInfo
for _, b := range f.Blocks {
blocks = append(blocks, protocol.BlockInfo{
Length: b.Length,
Hash: b.Hash,
})
}
return protocol.FileInfo{
Name: f.Name,
Flags: f.Flags,
Modified: int64(f.Modified),
Blocks: blocks,
}
}

View File

@@ -1,252 +0,0 @@
package model
/*
Locking
=======
These methods are never called from the outside so don't follow the locking
policy in model.go.
TODO(jb): Refactor this into smaller and cleaner pieces.
TODO(jb): Increase performance by taking apparent peer bandwidth into account.
*/
import (
"bytes"
"errors"
"fmt"
"io"
"log"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
)
func (m *Model) pullFile(name string) error {
m.RLock()
var localFile = m.local[name]
var globalFile = m.global[name]
var nodeIDs = m.whoHas(name)
m.RUnlock()
if len(nodeIDs) == 0 {
return fmt.Errorf("%s: no connected nodes with file available", name)
}
filename := path.Join(m.dir, name)
sdir := path.Dir(filename)
_, err := os.Stat(sdir)
if err != nil && os.IsNotExist(err) {
os.MkdirAll(sdir, 0777)
}
tmpFilename := tempName(filename, globalFile.Modified)
tmpFile, err := os.Create(tmpFilename)
if err != nil {
return err
}
contentChan := make(chan content, 32)
var applyDone sync.WaitGroup
applyDone.Add(1)
go func() {
applyContent(contentChan, tmpFile)
tmpFile.Close()
applyDone.Done()
}()
local, remote := BlockDiff(localFile.Blocks, globalFile.Blocks)
var fetchDone sync.WaitGroup
// One local copy routine
fetchDone.Add(1)
go func() {
for _, block := range local {
data, err := m.Request("<local>", name, block.Offset, block.Length, block.Hash)
if err != nil {
break
}
contentChan <- content{
offset: int64(block.Offset),
data: data,
}
}
fetchDone.Done()
}()
// N remote copy routines
var remoteBlocks = blockIterator{blocks: remote}
for i := 0; i < m.paralllelReqs; i++ {
curNode := nodeIDs[i%len(nodeIDs)]
fetchDone.Add(1)
go func(nodeID string) {
for {
block, ok := remoteBlocks.Next()
if !ok {
break
}
data, err := m.requestGlobal(nodeID, name, block.Offset, block.Length, block.Hash)
if err != nil {
break
}
contentChan <- content{
offset: int64(block.Offset),
data: data,
}
}
fetchDone.Done()
}(curNode)
}
fetchDone.Wait()
close(contentChan)
applyDone.Wait()
err = hashCheck(tmpFilename, globalFile.Blocks)
if err != nil {
return fmt.Errorf("%s: %s (deleting)", path.Base(name), err.Error())
}
err = os.Chtimes(tmpFilename, time.Unix(globalFile.Modified, 0), time.Unix(globalFile.Modified, 0))
if err != nil {
return err
}
err = os.Rename(tmpFilename, filename)
if err != nil {
return err
}
return nil
}
func (m *Model) puller() {
for {
time.Sleep(time.Second)
var ns []string
m.RLock()
for n := range m.need {
ns = append(ns, n)
}
m.RUnlock()
if len(ns) == 0 {
continue
}
var limiter = make(chan bool, m.parallellFiles)
var allDone sync.WaitGroup
for _, n := range ns {
limiter <- true
allDone.Add(1)
go func(n string) {
defer func() {
allDone.Done()
<-limiter
}()
m.RLock()
f, ok := m.global[n]
m.RUnlock()
if !ok {
return
}
var err error
if f.Flags&FlagDeleted == 0 {
if m.trace["file"] {
log.Printf("FILE: Pull %q", n)
}
err = m.pullFile(n)
} else {
if m.trace["file"] {
log.Printf("FILE: Remove %q", n)
}
// Cheerfully ignore errors here
_ = os.Remove(path.Join(m.dir, n))
}
if err == nil {
m.Lock()
m.updateLocal(f)
m.Unlock()
}
}(n)
}
allDone.Wait()
}
}
type content struct {
offset int64
data []byte
}
func applyContent(cc <-chan content, dst io.WriterAt) error {
var err error
for c := range cc {
_, err = dst.WriteAt(c.data, c.offset)
buffers.Put(c.data)
if err != nil {
return err
}
}
return nil
}
func hashCheck(name string, correct []Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
if err != nil {
return err
}
if len(current) != len(correct) {
return errors.New("incorrect number of blocks")
}
for i := range current {
if !bytes.Equal(current[i].Hash, correct[i].Hash) {
return fmt.Errorf("hash mismatch: %x != %x", current[i].Hash, correct[i].Hash)
}
}
return nil
}
type blockIterator struct {
sync.Mutex
blocks []Block
}
func (i *blockIterator) Next() (b Block, ok bool) {
i.Lock()
defer i.Unlock()
if len(i.blocks) == 0 {
return
}
b, i.blocks = i.blocks[0], i.blocks[1:]
ok = true
return
}
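blockIterator above is a minimal concurrency-safe work queue: the remote-copy goroutines in pullFile share one instance and call Next until the block list is drained. A usage sketch under the same semantics (fetch stands in for the request-and-write logic and is hypothetical):
	it := blockIterator{blocks: remote}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				b, ok := it.Next()
				if !ok {
					return
				}
				fetch(b) // hypothetical: request this block and write it out
			}
		}()
	}
	wg.Wait()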

View File

@@ -1,181 +0,0 @@
package model
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
)
const BlockSize = 128 * 1024
type File struct {
Name string
Flags uint32
Modified int64
Blocks []Block
}
func (f File) Size() (bytes int) {
for _, b := range f.Blocks {
bytes += int(b.Length)
}
return
}
func isTempName(name string) bool {
return strings.HasPrefix(path.Base(name), ".syncthing.")
}
func tempName(name string, modified int64) string {
tdir := path.Dir(name)
tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
return path.Join(tdir, tname)
}
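// Example (illustrative): for name "docs/report.txt" modified at 1391000000,
// tempName returns "docs/.syncthing.report.txt.1391000000". isTempName
// recognizes such names, which lets the walker skip them during scans and
// cleanTempFiles sweep leftovers at startup.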
func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
if isTempName(p) {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if pn, sn := path.Split(rn); sn == ".stignore" {
pn := strings.Trim(pn, "/")
bs, _ := ioutil.ReadFile(p)
lines := bytes.Split(bs, []byte("\n"))
var patterns []string
for _, line := range lines {
if len(line) > 0 {
patterns = append(patterns, string(line))
}
}
ign[pn] = patterns
return nil
}
if info.Mode()&os.ModeType == 0 {
fi, err := os.Stat(p)
if err != nil {
return nil
}
modified := fi.ModTime().Unix()
m.RLock()
hf, ok := m.local[rn]
m.RUnlock()
if ok && hf.Modified == modified {
// No change
*res = append(*res, hf)
} else {
if m.trace["file"] {
log.Printf("FILE: Hash %q", p)
}
fd, err := os.Open(p)
if err != nil {
return nil
}
defer fd.Close()
blocks, err := Blocks(fd, BlockSize)
if err != nil {
return nil
}
f := File{
Name: rn,
Flags: uint32(info.Mode()),
Modified: modified,
Blocks: blocks,
}
*res = append(*res, f)
}
}
return nil
}
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
ignore = make(map[string][]string)
fn := m.genWalker(&files, ignore)
filepath.Walk(m.dir, fn)
if followSymlinks {
d, err := os.Open(m.dir)
if err != nil {
return
}
defer d.Close()
fis, err := d.Readdir(-1)
if err != nil {
return
}
for _, fi := range fis {
if fi.Mode()&os.ModeSymlink != 0 {
filepath.Walk(path.Join(m.dir, fi.Name())+"/", fn)
}
}
}
return
}
// FilteredWalk returns the list of files found in the local repository by
// scanning the file system. Files are blockwise hashed. Files matching
// patterns listed in .stignore files are removed from the results.
func (m *Model) FilteredWalk(followSymlinks bool) []File {
var files, ignored = m.Walk(followSymlinks)
return ignoreFilter(ignored, files)
}
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode()&os.ModeType == 0 && isTempName(path) {
if m.trace["file"] {
log.Printf("FILE: Remove %q", path)
}
os.Remove(path)
}
return nil
}
func (m *Model) cleanTempFiles() {
filepath.Walk(m.dir, m.cleanTempFile)
}
func ignoreFilter(patterns map[string][]string, files []File) (filtered []File) {
nextFile:
for _, f := range files {
first, last := path.Split(f.Name)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
for _, pattern := range pats {
if match, _ := path.Match(pattern, last); match {
continue nextFile
}
}
}
}
filtered = append(filtered, f)
}
return filtered
}
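ignoreFilter applies the per-directory patterns collected by the walker: a pattern listed in a directory's .stignore matches files in that directory and its subdirectories by base name, and a .stignore at the repository root (empty prefix) applies everywhere. An illustration with hypothetical data:
	patterns := map[string][]string{"docs": {"*.tmp"}}
	files := []File{{Name: "docs/a.tmp"}, {Name: "docs/a.txt"}, {Name: "src/b.tmp"}}
	kept := ignoreFilter(patterns, files)
	// kept is [docs/a.txt src/b.tmp]; docs/a.tmp matched "*.tmp" under "docs"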

View File

@@ -1,7 +1,8 @@
package model
package main
import (
"bytes"
"fmt"
"os"
"reflect"
"testing"
@@ -11,13 +12,13 @@ import (
)
func TestNewModel(t *testing.T) {
m := NewModel("foo")
m := NewModel("foo", 1e6)
if m == nil {
t.Fatalf("NewModel returned nil")
}
if len(m.need) > 0 {
if fs, _ := m.NeedFiles(); len(fs) > 0 {
t.Errorf("New model should have no Need")
}
@@ -31,19 +32,19 @@ var testDataExpected = map[string]File{
Name: "foo",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Length: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
Blocks: []Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
},
"empty": File{
Name: "empty",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
},
"bar": File{
Name: "bar",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Length: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
"baz/quux": File{
Name: "baz/quux",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Length: 0x9, Hash: []uint8{0xc1, 0x54, 0xd9, 0x4e, 0x94, 0xba, 0x72, 0x98, 0xa6, 0xad, 0xb0, 0x52, 0x3a, 0xfe, 0x34, 0xd1, 0xb6, 0xa5, 0x81, 0xd6, 0xb8, 0x93, 0xa7, 0x63, 0xd4, 0x5d, 0xdc, 0x5e, 0x20, 0x9d, 0xcb, 0x83}}},
Blocks: []Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
}
@@ -58,11 +59,11 @@ func init() {
}
func TestUpdateLocal(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
if len(m.need) > 0 {
if fs, _ := m.NeedFiles(); len(fs) > 0 {
t.Fatalf("Model with only local data should have no need")
}
@@ -100,7 +101,7 @@ func TestUpdateLocal(t *testing.T) {
}
func TestRemoteUpdateExisting(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
@@ -111,13 +112,13 @@ func TestRemoteUpdateExisting(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
if l := len(m.need); l != 1 {
t.Errorf("Model missing Need for one file (%d != 1)", l)
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model missing Need for one file (%d != 1)", len(fs))
}
}
func TestRemoteAddNew(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
@@ -128,13 +129,13 @@ func TestRemoteAddNew(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
if l1, l2 := len(m.need), 1; l1 != l2 {
t.Errorf("Model len(m.need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model len(m.need) incorrect (%d != 1)", len(fs))
}
}
func TestRemoteUpdateOld(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
@@ -146,13 +147,13 @@ func TestRemoteUpdateOld(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 0 {
t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
}
}
func TestRemoteIndexUpdate(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
@@ -170,22 +171,22 @@ func TestRemoteIndexUpdate(t *testing.T) {
m.Index("42", []protocol.FileInfo{foo})
if _, ok := m.need["foo"]; !ok {
if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
t.Error("Model doesn't need 'foo'")
}
m.IndexUpdate("42", []protocol.FileInfo{bar})
if _, ok := m.need["foo"]; !ok {
if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
t.Error("Model doesn't need 'foo'")
}
if _, ok := m.need["bar"]; !ok {
if fs, _ := m.NeedFiles(); fs[1].Name != "bar" {
t.Error("Model doesn't need 'bar'")
}
}
func TestDelete(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
@@ -228,9 +229,12 @@ func TestDelete(t *testing.T) {
if len(m.local["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in local")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
if ft := m.local["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
}
if m.global["a new file"].Flags&(1<<12) == 0 {
t.Error("Unexpected deleted flag = 0 in global table")
@@ -238,8 +242,11 @@ func TestDelete(t *testing.T) {
if len(m.global["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in global")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
if ft := m.global["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot+1)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
}
// Another update should change nothing
@@ -259,8 +266,11 @@ func TestDelete(t *testing.T) {
if len(m.local["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in local")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
if ft := m.local["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
}
if m.global["a new file"].Flags&(1<<12) == 0 {
@@ -269,13 +279,16 @@ func TestDelete(t *testing.T) {
if len(m.global["a new file"].Blocks) != 0 {
t.Error("Unexpected non-zero blocks for deleted file in global")
}
if ft := m.local["a new file"].Modified; ft != ot+1 {
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
if ft := m.global["a new file"].Modified; ft != ot {
t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot)
}
if fv := m.local["a new file"].Version; fv != 1 {
t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
}
}
func TestForgetNode(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
@@ -285,8 +298,8 @@ func TestForgetNode(t *testing.T) {
if l1, l2 := len(m.global), len(fs); l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 0 {
t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
}
newFile := protocol.FileInfo{
@@ -296,14 +309,21 @@ func TestForgetNode(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
newFile = protocol.FileInfo{
Name: "new file 2",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("43", []protocol.FileInfo{newFile})
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
if l1, l2 := len(m.global), len(fs)+2; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 1; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 2 {
t.Errorf("Model len(need) incorrect (%d != 2)", len(fs))
}
m.Close("42", nil)
@@ -311,16 +331,17 @@ func TestForgetNode(t *testing.T) {
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs); l1 != l2 {
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model len(need) incorrect (%d != 1)", len(fs))
}
}
func TestRequest(t *testing.T) {
m := NewModel("testdata")
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
@@ -340,3 +361,177 @@ func TestRequest(t *testing.T) {
t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs))
}
}
func TestIgnoreWithUnknownFlags(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
valid := protocol.FileInfo{
Name: "valid",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
Flags: protocol.FlagDeleted | 0755,
}
invalid := protocol.FileInfo{
Name: "invalid",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
Flags: 1<<27 | protocol.FlagDeleted | 0755,
}
m.Index("42", []protocol.FileInfo{valid, invalid})
if _, ok := m.global[valid.Name]; !ok {
t.Error("Model should include", valid)
}
if _, ok := m.global[invalid.Name]; ok {
t.Error("Model not should include", invalid)
}
}
func genFiles(n int) []protocol.FileInfo {
files := make([]protocol.FileInfo, n)
t := time.Now().Unix()
for i := 0; i < n; i++ {
files[i] = protocol.FileInfo{
Name: fmt.Sprintf("file%d", i),
Modified: t,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
}
return files
}
func BenchmarkIndex10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index("42", files)
}
}
func BenchmarkIndex00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Index("42", files)
}
}
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate("42", files)
}
}
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
ufiles := genFiles(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate("42", ufiles)
}
}
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
ufiles := genFiles(1)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate("42", ufiles)
}
}
type FakeConnection struct {
id string
requestData []byte
}
func (FakeConnection) Close() error {
return nil
}
func (f FakeConnection) ID() string {
return string(f.id)
}
func (f FakeConnection) Option(string) string {
return ""
}
func (FakeConnection) Index([]protocol.FileInfo) {}
func (f FakeConnection) Request(name string, offset int64, size uint32, hash []byte) ([]byte, error) {
return f.requestData, nil
}
func (FakeConnection) Ping() bool {
return true
}
func (FakeConnection) Statistics() protocol.Statistics {
return protocol.Statistics{}
}
func BenchmarkRequest(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
const n = 1000
files := make([]protocol.FileInfo, n)
t := time.Now().Unix()
for i := 0; i < n; i++ {
files[i] = protocol.FileInfo{
Name: fmt.Sprintf("file%d", i),
Modified: t,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
}
fc := FakeConnection{
id: "42",
requestData: []byte("some data to return"),
}
m.AddConnection(fc, fc)
m.Index("42", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
data, err := m.requestGlobal("42", files[i%n].Name, 0, 32, nil)
if err != nil {
b.Error(err)
}
if data == nil {
b.Error("nil data")
}
}
}

View File

@@ -62,11 +62,10 @@ reserved bits must be set to zero.
All data following the message header is in XDR (RFC 1014) encoding.
The actual data types in use by BEP, in XDR naming convention, are:
- unsigned int -- unsigned 32 bit integer
- hyper -- signed 64 bit integer
- unsigned hyper -- signed 64 bit integer
- opaque<> -- variable length opaque data
- string<> -- variable length string
- (unsigned) int -- (unsigned) 32 bit integer
- (unsigned) hyper -- (unsigned) 64 bit integer
- opaque<> -- variable length opaque data
- string<> -- variable length string
The encoding of opaque<> and string<> is identical; the distinction is
solely in interpretation. Opaque data should not be interpreted as such,
@@ -92,6 +91,7 @@ message.
string Name<>;
unsigned int Flags;
hyper Modified;
unsigned int Version;
BlockInfo Blocks<>;
}
@@ -102,15 +102,19 @@ message.
The file name is the part relative to the repository root. The
modification time is expressed as the number of seconds since the Unix
Epoch. The hash algorithm is implied by the hash length. Currently, the
hash must be 32 bytes long and computed by SHA256.
Epoch. The version field is a counter that increments each time the file
changes but resets to zero each time the modification time is updated. This
is used to signal changes to the file (or file metadata) while the
modification time remains unchanged. The hash algorithm is implied by
the hash length. Currently, the hash must be 32 bytes long and computed
by SHA256.
The flags field is made up of the following single bit flags:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |D| Unix Perm. & Mode |
| Reserved |I|D| Unix Perm. & Mode |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- The lower 12 bits hold the common Unix permission and mode bits.
@@ -118,9 +122,13 @@ The flags field is made up of the following single bit flags:
- Bit 19 ("D") is set when the file has been deleted. The block list
shall contain zero blocks and the modification time indicates the
time of deletion or, if deletion time is not reliably determinable,
one second past the last known modification time.
the last known modification time and a higher version number.
- Bit 0 through 18 are reserved for future use and shall be set to
- Bit 18 ("I") is set when the file is invalid and unavailable for
synchronization. A peer may set this bit to indicate that it can
temporarily not serve data for the file.
- Bit 0 through 17 are reserved for future use and shall be set to
zero.
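As a concrete reading of the layout above (an illustrative sketch; the
constant values follow the Go implementation in this changeset, where
diagram bit 19 corresponds to the value 1<<12):
    deleted := flags&(1<<12) != 0 // the "D" bit
    invalid := flags&(1<<13) != 0 // the "I" bit
    perm := flags & 0xfff         // Unix permission & mode bits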
### Request (Type = 2)
@@ -185,6 +193,33 @@ model, the Index Update merely amends it with new or updated file
information. Any files not mentioned in an Index Update are left
unchanged.
### Options (Type = 7)
This informational message provides information about the client
configuration, version, etc. It is sent at connection initiation and,
optionally, when any of the sent parameters have changed. The message is
in the form of a list of (key, value) pairs, both of string type.
struct OptionsMessage {
KeyValue Options<>;
}
struct KeyValue {
string Key;
string Value;
}
Key IDs apart from the well-known ones are implementation
specific. An implementation is expected to ignore unknown keys. An
implementation may impose limits on key and value size.
Well known keys:
- "clientId" -- The name of the implementation. Example: "syncthing".
- "clientVersion" -- The version of the client. Example: "v1.0.33-47". The
Following the SemVer 2.0 specification for version strings is
encouraged but not enforced.
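One concrete use of these options appears elsewhere in this changeset: each
side sends a "clusterHash" option when the connection is established, and the
connection is torn down with a mismatch error when both sides supplied a
value and the values differ. A sketch of the check, following protocol.go:
    if mh, rh := myOptions["clusterHash"], peerOptions["clusterHash"]; len(mh) > 0 && len(rh) > 0 && mh != rh {
        // close the connection with ErrClusterHash
    }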
Example Exchange
----------------

View File

@@ -5,7 +5,7 @@ import "io"
type TestModel struct {
data []byte
name string
offset uint64
offset int64
size uint32
hash []byte
closed bool
@@ -17,7 +17,7 @@ func (t *TestModel) Index(nodeID string, files []FileInfo) {
func (t *TestModel) IndexUpdate(nodeID string, files []FileInfo) {
}
func (t *TestModel) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
func (t *TestModel) Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
t.name = name
t.offset = offset
t.size = size

View File

@@ -4,7 +4,7 @@ import "io"
type request struct {
name string
offset uint64
offset int64
size uint32
hash []byte
}
@@ -39,9 +39,10 @@ func (w *marshalWriter) writeIndex(idx []FileInfo) {
w.writeString(f.Name)
w.writeUint32(f.Flags)
w.writeUint64(uint64(f.Modified))
w.writeUint32(f.Version)
w.writeUint32(uint32(len(f.Blocks)))
for _, b := range f.Blocks {
w.writeUint32(b.Length)
w.writeUint32(b.Size)
w.writeBytes(b.Hash)
}
}
@@ -55,7 +56,7 @@ func WriteIndex(w io.Writer, idx []FileInfo) (int, error) {
func (w *marshalWriter) writeRequest(r request) {
w.writeString(r.name)
w.writeUint64(r.offset)
w.writeUint64(uint64(r.offset))
w.writeUint32(r.size)
w.writeBytes(r.hash)
}
@@ -64,6 +65,14 @@ func (w *marshalWriter) writeResponse(data []byte) {
w.writeBytes(data)
}
func (w *marshalWriter) writeOptions(opts map[string]string) {
w.writeUint32(uint32(len(opts)))
for k, v := range opts {
w.writeString(k)
w.writeString(v)
}
}
func (r *marshalReader) readHeader() header {
return decodeHeader(r.readUint32())
}
@@ -77,10 +86,11 @@ func (r *marshalReader) readIndex() []FileInfo {
files[i].Name = r.readString()
files[i].Flags = r.readUint32()
files[i].Modified = int64(r.readUint64())
files[i].Version = r.readUint32()
nblocks := r.readUint32()
blocks := make([]BlockInfo, nblocks)
for j := range blocks {
blocks[j].Length = r.readUint32()
blocks[j].Size = r.readUint32()
blocks[j].Hash = r.readBytes()
}
files[i].Blocks = blocks
@@ -98,7 +108,7 @@ func ReadIndex(r io.Reader) ([]FileInfo, error) {
func (r *marshalReader) readRequest() request {
var req request
req.name = r.readString()
req.offset = r.readUint64()
req.offset = int64(r.readUint64())
req.size = r.readUint32()
req.hash = r.readBytes()
return req
@@ -107,3 +117,14 @@ func (r *marshalReader) readRequest() request {
func (r *marshalReader) readResponse() []byte {
return r.readBytes()
}
func (r *marshalReader) readOptions() map[string]string {
n := r.readUint32()
opts := make(map[string]string, n)
for i := 0; i < int(n); i++ {
k := r.readString()
v := r.readString()
opts[k] = v
}
return opts
}

View File

@@ -12,8 +12,9 @@ func TestIndex(t *testing.T) {
idx := []FileInfo{
{
"Foo",
0755,
FlagInvalid | FlagDeleted | 0755,
1234567890,
142,
[]BlockInfo{
{12345678, []byte("hash hash hash")},
{23456781, []byte("ash hash hashh")},
@@ -23,6 +24,7 @@ func TestIndex(t *testing.T) {
"Quux/Quux",
0644,
2345678901,
232323232,
[]BlockInfo{
{45678123, []byte("4321 hash hash hash")},
{56781234, []byte("3214 ash hash hashh")},
@@ -44,7 +46,7 @@ func TestIndex(t *testing.T) {
}
func TestRequest(t *testing.T) {
f := func(name string, offset uint64, size uint32, hash []byte) bool {
f := func(name string, offset int64, size uint32, hash []byte) bool {
var buf = new(bytes.Buffer)
var req = request{name, offset, size, hash}
var wr = marshalWriter{w: buf}
@@ -81,6 +83,7 @@ func BenchmarkWriteIndex(b *testing.B) {
"Foo",
0777,
1234567890,
424242,
[]BlockInfo{
{12345678, []byte("hash hash hash")},
{23456781, []byte("ash hash hashh")},
@@ -90,6 +93,7 @@ func BenchmarkWriteIndex(b *testing.B) {
"Quux/Quux",
0644,
2345678901,
323232,
[]BlockInfo{
{45678123, []byte("4321 hash hash hash")},
{56781234, []byte("3214 ash hash hashh")},
@@ -113,3 +117,23 @@ func BenchmarkWriteRequest(b *testing.B) {
wr.writeRequest(req)
}
}
func TestOptions(t *testing.T) {
opts := map[string]string{
"foo": "bar",
"someKey": "otherValue",
"hello": "",
"": "42",
}
var buf = new(bytes.Buffer)
var wr = marshalWriter{w: buf}
wr.writeOptions(opts)
var rd = marshalReader{r: buf}
var ropts = rd.readOptions()
if !reflect.DeepEqual(opts, ropts) {
t.Error("Incorrect options marshal/demarshal")
}
}

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"log"
"sync"
"time"
@@ -18,18 +19,29 @@ const (
messageTypePing = 4
messageTypePong = 5
messageTypeIndexUpdate = 6
messageTypeOptions = 7
)
const (
FlagDeleted = 1 << 12
FlagInvalid = 1 << 13
)
var (
ErrClusterHash = fmt.Errorf("Configuration error: mismatched cluster hash")
)
type FileInfo struct {
Name string
Flags uint32
Modified int64
Version uint32
Blocks []BlockInfo
}
type BlockInfo struct {
Length uint32
Hash []byte
Size uint32
Hash []byte
}
type Model interface {
@@ -38,7 +50,7 @@ type Model interface {
// An index update was received from the peer node
IndexUpdate(nodeID string, files []FileInfo)
// A request was made by the peer node
Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error)
Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error)
// The peer node closed the connection
Close(nodeID string, err error)
}
@@ -46,16 +58,19 @@ type Model interface {
type Connection struct {
sync.RWMutex
ID string
receiver Model
reader io.Reader
mreader *marshalReader
writer io.Writer
mwriter *marshalWriter
closed bool
awaiting map[int]chan asyncResult
nextId int
indexSent map[string]int64
id string
receiver Model
reader io.Reader
mreader *marshalReader
writer io.Writer
mwriter *marshalWriter
closed bool
awaiting map[int]chan asyncResult
nextId int
indexSent map[string][2]int64
peerOptions map[string]string
myOptions map[string]string
optionsLock sync.Mutex
hasSentIndex bool
hasRecvdIndex bool
@@ -75,7 +90,7 @@ const (
pingIdleTime = 5 * time.Minute
)
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) *Connection {
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model, options map[string]string) *Connection {
flrd := flate.NewReader(reader)
flwr, err := flate.NewWriter(writer, flate.BestSpeed)
if err != nil {
@@ -83,21 +98,40 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
}
c := Connection{
id: nodeID,
receiver: receiver,
reader: flrd,
mreader: &marshalReader{r: flrd},
writer: flwr,
mwriter: &marshalWriter{w: flwr},
awaiting: make(map[int]chan asyncResult),
ID: nodeID,
}
go c.readerLoop()
go c.pingerLoop()
if options != nil {
c.myOptions = options
go func() {
c.Lock()
c.mwriter.writeHeader(header{0, c.nextId, messageTypeOptions})
c.mwriter.writeOptions(options)
err := c.flush()
if err != nil {
log.Println("Warning: Write error during initial handshake:", err)
}
c.nextId++
c.Unlock()
}()
}
return &c
}
func (c *Connection) ID() string {
return c.id
}
// Index writes the list of file information to the connected peer node
func (c *Connection) Index(idx []FileInfo) {
c.Lock()
@@ -106,18 +140,18 @@ func (c *Connection) Index(idx []FileInfo) {
// This is the first time we send an index.
msgType = messageTypeIndex
c.indexSent = make(map[string]int64)
c.indexSent = make(map[string][2]int64)
for _, f := range idx {
c.indexSent[f.Name] = f.Modified
c.indexSent[f.Name] = [2]int64{f.Modified, int64(f.Version)}
}
} else {
// We have sent one full index. Only send updates now.
msgType = messageTypeIndexUpdate
var diff []FileInfo
for _, f := range idx {
if modified, ok := c.indexSent[f.Name]; !ok || f.Modified != modified {
if vs, ok := c.indexSent[f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
diff = append(diff, f)
c.indexSent[f.Name] = f.Modified
c.indexSent[f.Name] = [2]int64{f.Modified, int64(f.Version)}
}
}
idx = diff
@@ -131,16 +165,16 @@ func (c *Connection) Index(idx []FileInfo) {
c.Unlock()
if err != nil {
c.Close(err)
c.close(err)
return
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return
}
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *Connection) Request(name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
func (c *Connection) Request(name string, offset int64, size uint32, hash []byte) ([]byte, error) {
c.Lock()
if c.closed {
c.Unlock()
@@ -152,13 +186,13 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
c.mwriter.writeRequest(request{name, offset, size, hash})
if c.mwriter.err != nil {
c.Unlock()
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return nil, c.mwriter.err
}
err := c.flush()
if err != nil {
c.Unlock()
c.Close(err)
c.close(err)
return nil, err
}
c.nextId = (c.nextId + 1) & 0xfff
@@ -171,7 +205,7 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
return res.val, res.err
}
func (c *Connection) Ping() bool {
func (c *Connection) ping() bool {
c.Lock()
if c.closed {
c.Unlock()
@@ -183,11 +217,11 @@ func (c *Connection) Ping() bool {
err := c.flush()
if err != nil {
c.Unlock()
c.Close(err)
c.close(err)
return false
} else if c.mwriter.err != nil {
c.Unlock()
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return false
}
c.nextId = (c.nextId + 1) & 0xfff
@@ -197,9 +231,6 @@ func (c *Connection) Ping() bool {
return ok && res.err == nil
}
func (c *Connection) Stop() {
}
type flusher interface {
Flush() error
}
@@ -211,7 +242,7 @@ func (c *Connection) flush() error {
return nil
}
func (c *Connection) Close(err error) {
func (c *Connection) close(err error) {
c.Lock()
if c.closed {
c.Unlock()
@@ -224,7 +255,7 @@ func (c *Connection) Close(err error) {
c.awaiting = nil
c.Unlock()
c.receiver.Close(c.ID, err)
c.receiver.Close(c.id, err)
}
func (c *Connection) isClosed() bool {
@@ -238,11 +269,11 @@ loop:
for {
hdr := c.mreader.readHeader()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
}
if hdr.version != 0 {
c.Close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
c.close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
break loop
}
@@ -250,10 +281,10 @@ loop:
case messageTypeIndex:
files := c.mreader.readIndex()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.receiver.Index(c.ID, files)
c.receiver.Index(c.id, files)
}
c.Lock()
c.hasRecvdIndex = true
@@ -262,16 +293,16 @@ loop:
case messageTypeIndexUpdate:
files := c.mreader.readIndex()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.receiver.IndexUpdate(c.ID, files)
c.receiver.IndexUpdate(c.id, files)
}
case messageTypeRequest:
req := c.mreader.readRequest()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
}
go c.processRequest(hdr.msgID, req)
@@ -280,7 +311,7 @@ loop:
data := c.mreader.readResponse()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.Lock()
@@ -300,10 +331,10 @@ loop:
err := c.flush()
c.Unlock()
if err != nil {
c.Close(err)
c.close(err)
break loop
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
break loop
}
@@ -321,27 +352,38 @@ loop:
c.Unlock()
}
case messageTypeOptions:
c.optionsLock.Lock()
c.peerOptions = c.mreader.readOptions()
c.optionsLock.Unlock()
if mh, rh := c.myOptions["clusterHash"], c.peerOptions["clusterHash"]; len(mh) > 0 && len(rh) > 0 && mh != rh {
c.close(ErrClusterHash)
break loop
}
default:
- c.Close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
+ c.close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
break loop
}
}
}
func (c *Connection) processRequest(msgID int, req request) {
- data, _ := c.receiver.Request(c.ID, req.name, req.offset, req.size, req.hash)
+ data, _ := c.receiver.Request(c.id, req.name, req.offset, req.size, req.hash)
c.Lock()
c.mwriter.writeUint32(encodeHeader(header{0, msgID, messageTypeResponse}))
c.mwriter.writeResponse(data)
- err := c.flush()
+ err := c.mwriter.err
+ if err == nil {
+ err = c.flush()
+ }
c.Unlock()
buffers.Put(data)
if err != nil {
- c.Close(err)
- } else if c.mwriter.err != nil {
- c.Close(c.mwriter.err)
+ c.close(err)
}
}
@@ -356,15 +398,15 @@ func (c *Connection) pingerLoop() {
if ready {
go func() {
- rc <- c.Ping()
+ rc <- c.ping()
}()
select {
case ok := <-rc:
if !ok {
- c.Close(fmt.Errorf("Ping failure"))
+ c.close(fmt.Errorf("Ping failure"))
}
case <-time.After(pingTimeout):
- c.Close(fmt.Errorf("Ping timeout"))
+ c.close(fmt.Errorf("Ping timeout"))
}
}
}
@@ -388,3 +430,9 @@ func (c *Connection) Statistics() Statistics {
return stats
}
+ func (c *Connection) Option(key string) string {
+ c.optionsLock.Lock()
+ defer c.optionsLock.Unlock()
+ return c.peerOptions[key]
+ }
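Taken together with the test changes below, the options flow works like this: the caller passes a map to the new fifth NewConnection parameter, the peer's map arrives via the Options message, and mismatched non-empty clusterHash values close the connection with ErrClusterHash. A hedged sketch of a caller, assuming it lives in the same package as the code above, that the receiver argument is the package's Model interface (passed as m0/m1 in the tests), and that crypto/sha256, encoding/base32, io, and strings are imported; the member-hash derivation here is illustrative only, not the project's actual scheme:

func dialWithClusterCheck(id string, r io.Reader, w io.Writer, receiver Model, members []string) *Connection {
	// Hash the member list; assumes members are sorted identically on all
	// nodes, so equal clusters yield equal hashes (illustrative scheme).
	h := sha256.New()
	for _, m := range members {
		h.Write([]byte(m))
	}
	hash := strings.Trim(base32.StdEncoding.EncodeToString(h.Sum(nil)), "=")

	c := NewConnection(id, r, w, receiver, map[string]string{"clusterHash": hash})
	// The peer's Options message is handled asynchronously by the reader
	// loop above: once both sides have advertised non-empty, differing
	// clusterHash values, the connection is closed with ErrClusterHash.
	// Until then, c.Option("clusterHash") returns the empty string.
	return c
}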

protocol_test.go

@@ -43,13 +43,13 @@ func TestPing(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
- c0 := NewConnection("c0", ar, bw, nil)
- c1 := NewConnection("c1", br, aw, nil)
+ c0 := NewConnection("c0", ar, bw, nil, nil)
+ c1 := NewConnection("c1", br, aw, nil, nil)
- if ok := c0.Ping(); !ok {
+ if ok := c0.ping(); !ok {
t.Error("c0 ping failed")
}
- if ok := c1.Ping(); !ok {
+ if ok := c1.ping(); !ok {
t.Error("c1 ping failed")
}
}
@@ -67,10 +67,10 @@ func TestPingErr(t *testing.T) {
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
- c0 := NewConnection("c0", ar, ebw, m0)
- NewConnection("c1", br, eaw, m1)
+ c0 := NewConnection("c0", ar, ebw, m0, nil)
+ NewConnection("c1", br, eaw, m1, nil)
- res := c0.Ping()
+ res := c0.ping()
if (i < 4 || j < 4) && res {
t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
} else if (i >= 8 && j >= 8) && !res {
@@ -94,8 +94,8 @@ func TestRequestResponseErr(t *testing.T) {
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
- NewConnection("c0", ar, ebw, m0)
- c1 := NewConnection("c1", br, eaw, m1)
+ NewConnection("c0", ar, ebw, m0, nil)
+ c1 := NewConnection("c1", br, eaw, m1, nil)
d, err := c1.Request("tn", 1234, 3456, []byte("hashbytes"))
if err == e || err == ErrClosed {
@@ -143,8 +143,8 @@ func TestVersionErr(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
- c0 := NewConnection("c0", ar, bw, m0)
- NewConnection("c1", br, aw, m1)
+ c0 := NewConnection("c0", ar, bw, m0, nil)
+ NewConnection("c1", br, aw, m1, nil)
c0.mwriter.writeHeader(header{
version: 2,
@@ -165,8 +165,8 @@ func TestTypeErr(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
- c0 := NewConnection("c0", ar, bw, m0)
- NewConnection("c1", br, aw, m1)
+ c0 := NewConnection("c0", ar, bw, m0, nil)
+ NewConnection("c1", br, aw, m1, nil)
c0.mwriter.writeHeader(header{
version: 0,
@@ -187,10 +187,10 @@ func TestClose(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
- c0 := NewConnection("c0", ar, bw, m0)
- NewConnection("c1", br, aw, m1)
+ c0 := NewConnection("c0", ar, bw, m0, nil)
+ NewConnection("c1", br, aw, m1, nil)
- c0.Close(nil)
+ c0.close(nil)
ok := c0.isClosed()
if !ok {
@@ -199,7 +199,7 @@ func TestClose(t *testing.T) {
// None of these should panic, some should return an error
- ok = c0.Ping()
+ ok = c0.ping()
if ok {
t.Error("Ping should not return true")
}

suppressor.go (new file, 72 lines)

@@ -0,0 +1,72 @@
package main
import (
"sync"
"time"
)
const (
MAX_CHANGE_HISTORY = 4
)
type change struct {
size int64
when time.Time
}
type changeHistory struct {
changes []change
next int64
prevSup bool
}
type suppressor struct {
sync.Mutex
changes map[string]changeHistory
threshold int64 // bytes/s
}
func (h changeHistory) bandwidth(t time.Time) int64 {
if len(h.changes) == 0 {
return 0
}
var t0 = h.changes[0].when
if t == t0 {
return 0
}
var bw float64
for _, c := range h.changes {
bw += float64(c.size)
}
return int64(bw / t.Sub(t0).Seconds())
}
func (h *changeHistory) append(size int64, t time.Time) {
c := change{size, t}
if len(h.changes) == MAX_CHANGE_HISTORY {
h.changes = h.changes[1:MAX_CHANGE_HISTORY]
}
h.changes = append(h.changes, c)
}
func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
s.Lock()
if s.changes == nil {
s.changes = make(map[string]changeHistory)
}
h := s.changes[name]
sup := h.bandwidth(t) > s.threshold
prevSup := h.prevSup
h.prevSup = sup
if !sup {
h.append(size, t)
}
s.changes[name] = h
s.Unlock()
return sup, prevSup
}
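The suppression decision is a simple sliding-window rate check: bandwidth() divides the sum of the recorded change sizes by the time since the oldest recorded change, and suppress() compares that rate against the threshold before (not after) appending the new change, so a file's first change is never suppressed and a suppressed change never inflates the window. A compact walkthrough with illustrative numbers, in the same package as the code above, condensing the scenario exercised in suppressor_test.go below:

// Sketch only; threshold is in bytes/s as noted in the struct above.
func suppressorWalkthrough() {
	s := suppressor{threshold: 10000}
	t0 := time.Now()

	s.suppress("big.db", 10000, t0)                       // empty history: rate 0, accepted
	s.suppress("big.db", 10000, t0.Add(10*time.Second))   // 10000 B / 10 s = 1000 B/s, accepted
	s.suppress("big.db", 100500, t0.Add(11*time.Second))  // 20000 B / 11 s ≈ 1818 B/s, accepted

	// 120500 B / 12 s ≈ 10041 B/s > 10000: suppressed, and the new size is
	// NOT appended, so the measured rate decays and the file recovers later.
	sup, _ := s.suppress("big.db", 1, t0.Add(12*time.Second))
	_ = sup // true
}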

suppressor_test.go (new file, 113 lines)

@@ -0,0 +1,113 @@
package main
import (
"testing"
"time"
)
func TestSuppressor(t *testing.T) {
s := suppressor{threshold: 10000}
t0 := time.Now()
t1 := t0
sup, prev := s.suppress("foo", 10000, t1)
if sup {
t.Fatal("Never suppress first change")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is 10000 / 10 = 1000
t1 = t0.Add(10 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1000 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000, t1)
if sup {
t.Fatal("Should still be fine")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000) / 11 = 1818
t1 = t0.Add(11 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1818 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 100500, t1)
if sup {
t.Fatal("Should still be fine")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000 + 100500) / 12 = 10041
t1 = t0.Add(12 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 10041 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1) // value will be ignored
if !sup {
t.Fatal("Should be over threshold")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000 + 100500) / 15 = 8033
t1 = t0.Add(15 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 8033 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1)
if sup {
t.Fatal("Should be Ok")
}
if !prev {
t.Fatal("Incorrect prev status")
}
}
func TestHistory(t *testing.T) {
h := changeHistory{}
t0 := time.Now()
h.append(40, t0)
if l := len(h.changes); l != 1 {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 40 {
t.Errorf("Incorrect first record size %d", s)
}
for i := 1; i < MAX_CHANGE_HISTORY; i++ {
h.append(int64(40+i), t0.Add(time.Duration(i)*time.Second))
}
if l := len(h.changes); l != MAX_CHANGE_HISTORY {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 40 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MAX_CHANGE_HISTORY-1].size; s != 40+MAX_CHANGE_HISTORY-1 {
t.Errorf("Incorrect last record size %d", s)
}
h.append(999, t0.Add(time.Duration(999)*time.Second))
if l := len(h.changes); l != MAX_CHANGE_HISTORY {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 41 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MAX_CHANGE_HISTORY-1].size; s != 999 {
t.Errorf("Incorrect last record size %d", s)
}
}

testdata/empty (new empty file)

tls.go (17 changed lines)

@@ -3,7 +3,7 @@ package main
import (
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
@@ -12,11 +12,12 @@ import (
"math/big"
"os"
"path"
"strings"
"time"
)
const (
- tlsRSABits = 2048
+ tlsRSABits = 3072
tlsName = "syncthing"
)
@@ -25,13 +26,15 @@ func loadCert(dir string) (tls.Certificate, error) {
}
func certId(bs []byte) string {
- hf := sha1.New()
+ hf := sha256.New()
hf.Write(bs)
id := hf.Sum(nil)
- return base32.StdEncoding.EncodeToString(id)
+ return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
}
func newCertificate(dir string) {
infoln("Generating RSA certificate and key...")
priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
fatalErr(err)
@@ -47,7 +50,7 @@ func newCertificate(dir string) {
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
BasicConstraintsValid: true,
}
@@ -58,11 +61,11 @@ func newCertificate(dir string) {
fatalErr(err)
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
- okln("Created TLS certificate file")
+ okln("Created RSA certificate file")
keyOut, err := os.OpenFile(path.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
fatalErr(err)
pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
keyOut.Close()
- okln("Created TLS key file")
+ okln("Created RSA key file")
}
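The net effect of the certId changes: node IDs become the SHA-256 digest of the certificate bytes, base32-encoded with the trailing "=" padding stripped. A self-contained rendering of the new logic for reference (the sample input is arbitrary; in the program the bytes come from the node's certificate):

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strings"
)

func certId(bs []byte) string {
	hf := sha256.New()
	hf.Write(bs)
	id := hf.Sum(nil)
	return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
}

func main() {
	// 32 digest bytes encode to 56 base32 characters, 4 of which are "="
	// padding, leaving a 52-character ID. (SHA-1's 20 bytes encoded to
	// exactly 32 characters with no padding, which is why the Trim is new.)
	fmt.Println(len(certId([]byte("arbitrary certificate bytes")))) // 52
}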

usage.go (new file, 52 lines)

@@ -0,0 +1,52 @@
package main
import (
"bytes"
"flag"
"fmt"
"io"
"text/tabwriter"
)
func optionTable(w io.Writer, rows [][]string) {
tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0)
for _, row := range rows {
for i, cell := range row {
if i > 0 {
tw.Write([]byte("\t"))
}
tw.Write([]byte(cell))
}
tw.Write([]byte("\n"))
}
tw.Flush()
}
func usageFor(fs *flag.FlagSet, usage string) func() {
return func() {
var b bytes.Buffer
b.WriteString("Usage:\n " + usage + "\n")
var options [][]string
fs.VisitAll(func(f *flag.Flag) {
var dash = "-"
if len(f.Name) > 1 {
dash = "--"
}
var opt = " " + dash + f.Name
if f.DefValue != "false" {
opt += "=" + f.DefValue
}
options = append(options, []string{opt, f.Usage})
})
if len(options) > 0 {
b.WriteString("\nOptions:\n")
optionTable(&b, options)
}
fmt.Println(b.String())
}
}
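A hedged example of hooking the helper up to a FlagSet, in the same package; the flag names and usage string are illustrative, not taken from the repository. Note how the code above drives the rendering: boolean flags defaulting to "false" print bare, everything else prints with "=default", and single-letter names get a single dash:

func exampleUsage() {
	fs := flag.NewFlagSet("syncthing", flag.ExitOnError)
	fs.Bool("v", false, "Be more verbose")                     // rendered as "-v"
	fs.String("home", "~/.syncthing", "Set config directory") // rendered as "--home=~/.syncthing"
	fs.Usage = usageFor(fs, "syncthing [options]")
	fs.Usage() // prints the tabwriter-aligned option table built above
}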

walk.go (new file, 243 lines)

@@ -0,0 +1,243 @@
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/calmh/syncthing/protocol"
)
const BlockSize = 128 * 1024
type File struct {
Name string
Flags uint32
Modified int64
Version uint32
Blocks []Block
}
func (f File) Size() (bytes int) {
for _, b := range f.Blocks {
bytes += int(b.Size)
}
return
}
func (f File) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, NumBlocks:%d}",
f.Name, f.Flags, f.Modified, f.Version, len(f.Blocks))
}
func (f File) Equals(o File) bool {
return f.Modified == o.Modified && f.Version == o.Version
}
func (f File) NewerThan(o File) bool {
return f.Modified > o.Modified || (f.Modified == o.Modified && f.Version > o.Version)
}
func isTempName(name string) bool {
return strings.HasPrefix(path.Base(name), ".syncthing.")
}
func tempName(name string, modified int64) string {
tdir := path.Dir(name)
tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
return path.Join(tdir, tname)
}
func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if pn, sn := path.Split(rn); sn == ".stignore" {
pn := strings.Trim(pn, "/")
bs, _ := ioutil.ReadFile(p)
lines := bytes.Split(bs, []byte("\n"))
var patterns []string
for _, line := range lines {
if len(line) > 0 {
patterns = append(patterns, string(line))
}
}
ign[pn] = patterns
}
return nil
}
}
func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
if isTempName(p) {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if _, sn := path.Split(rn); sn == ".stignore" {
// We never sync the .stignore files
return nil
}
if ignoreFile(ign, rn) {
if m.trace["file"] {
log.Println("FILE: IGNORE:", rn)
}
return nil
}
if info.Mode()&os.ModeType == 0 {
modified := info.ModTime().Unix()
m.lmut.RLock()
lf, ok := m.local[rn]
m.lmut.RUnlock()
if ok && lf.Modified == modified {
if nf := uint32(info.Mode()); nf != lf.Flags {
lf.Flags = nf
lf.Version++
}
*res = append(*res, lf)
} else {
if cur, prev := m.sup.suppress(rn, info.Size(), time.Now()); cur {
if m.trace["file"] {
log.Printf("FILE: SUPPRESS: %q change bw over threshold", rn)
}
if !prev {
log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", rn)
}
if ok {
lf.Flags = protocol.FlagInvalid
lf.Version++
*res = append(*res, lf)
}
return nil
} else if prev && !cur {
log.Printf("INFO: Changes to %q are no longer suppressed.", rn)
}
if m.trace["file"] {
log.Printf("FILE: Hash %q", p)
}
fd, err := os.Open(p)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
defer fd.Close()
blocks, err := Blocks(fd, BlockSize)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
f := File{
Name: rn,
Flags: uint32(info.Mode()),
Modified: modified,
Blocks: blocks,
}
*res = append(*res, f)
}
}
return nil
}
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
ignore = make(map[string][]string)
hashFiles := m.walkAndHashFiles(&files, ignore)
filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
filepath.Walk(m.dir, hashFiles)
if followSymlinks {
d, err := os.Open(m.dir)
if err != nil {
return
}
defer d.Close()
fis, err := d.Readdir(-1)
if err != nil {
return
}
for _, info := range fis {
if info.Mode()&os.ModeSymlink != 0 {
dir := path.Join(m.dir, info.Name()) + "/"
filepath.Walk(dir, m.loadIgnoreFiles(ignore))
filepath.Walk(dir, hashFiles)
}
}
}
return
}
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode()&os.ModeType == 0 && isTempName(path) {
if m.trace["file"] {
log.Printf("FILE: Remove %q", path)
}
os.Remove(path)
}
return nil
}
func (m *Model) cleanTempFiles() {
filepath.Walk(m.dir, m.cleanTempFile)
}
func ignoreFile(patterns map[string][]string, file string) bool {
first, last := path.Split(file)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
for _, pattern := range pats {
if match, _ := path.Match(pattern, last); match {
return true
}
}
}
}
return false
}
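Driving a scan end to end, as the tests below do: construct a model with a suppression threshold, clear stale temporaries, and walk. A minimal sketch in the same package; the directory and threshold are illustrative, and NewModel's two-argument signature is taken from walk_test.go below:

func scanOnce() {
	m := NewModel("/some/sync/dir", 1e6) // 1 MB/s change-suppression threshold
	m.cleanTempFiles()                   // remove leftover ".syncthing." partials
	files, ignores := m.Walk(false)      // false: don't follow symlinks in the repo root
	log.Printf("scanned %d files, %d .stignore pattern sets", len(files), len(ignores))
}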

walk_test.go

@@ -1,4 +1,4 @@
- package model
+ package main
import (
"fmt"
@@ -13,7 +13,7 @@ var testdata = []struct {
hash string
}{
{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
{"baz/quux", 9, "c154d94e94ba7298a6adb0523afe34d1b6a581d6b893a763d45ddc5e209dcb83"},
{"empty", 0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
}
@@ -22,7 +22,7 @@ var correctIgnores = map[string][]string{
}
func TestWalk(t *testing.T) {
- m := NewModel("testdata")
+ m := NewModel("testdata", 1e6)
files, ignores := m.Walk(false)
if l1, l2 := len(files), len(testdata); l1 != l2 {
@@ -50,53 +50,34 @@ func TestWalk(t *testing.T) {
}
}
- func TestFilteredWalk(t *testing.T) {
- m := NewModel("testdata")
- files := m.FilteredWalk(false)
- if len(files) != 2 {
- t.Fatalf("Incorrect number of walked filtered files %d != 2", len(files))
- }
- if files[0].Name != "bar" {
- t.Error("Incorrect first file", files[0])
- }
- if files[1].Name != "foo" {
- t.Error("Incorrect second file", files[1])
- }
- }
func TestIgnore(t *testing.T) {
var patterns = map[string][]string{
"": {"t2"},
"foo": {"bar", "z*"},
"foo/baz": {"quux", ".*"},
}
- var files = []File{
- {Name: "foo/bar"},
- {Name: "foo/quux"},
- {Name: "foo/zuux"},
- {Name: "foo/qzuux"},
- {Name: "foo/baz/t1"},
- {Name: "foo/baz/t2"},
- {Name: "foo/baz/bar"},
- {Name: "foo/baz/quuxa"},
- {Name: "foo/baz/aquux"},
- {Name: "foo/baz/.quux"},
- {Name: "foo/baz/zquux"},
- {Name: "foo/baz/quux"},
- {Name: "foo/bazz/quux"},
- }
- var remaining = []File{
- {Name: "foo/quux"},
- {Name: "foo/qzuux"},
- {Name: "foo/baz/t1"},
- {Name: "foo/baz/quuxa"},
- {Name: "foo/baz/aquux"},
- {Name: "foo/bazz/quux"},
- }
+ var tests = []struct {
+ f string
+ r bool
+ }{
+ {"foo/bar", true},
+ {"foo/quux", false},
+ {"foo/zuux", true},
+ {"foo/qzuux", false},
+ {"foo/baz/t1", false},
+ {"foo/baz/t2", true},
+ {"foo/baz/bar", true},
+ {"foo/baz/quuxa", false},
+ {"foo/baz/aquux", false},
+ {"foo/baz/.quux", true},
+ {"foo/baz/zquux", true},
+ {"foo/baz/quux", true},
+ {"foo/bazz/quux", false},
+ }
- var filtered = ignoreFilter(patterns, files)
- if !reflect.DeepEqual(filtered, remaining) {
- t.Errorf("Filtering mismatch\n %v\n %v", remaining, filtered)
+ for i, tc := range tests {
+ if r := ignoreFile(patterns, tc.f); r != tc.r {
+ t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, tc.r, r)
+ }
}
}