Mirror of https://github.com/syncthing/syncthing.git
Synced 2025-12-24 06:28:10 -05:00

Compare commits

197 Commits
| SHA1 |
|---|
| 2d0600de38 |
| 6a1c055288 |
| b9ec30ebdb |
| 428164f395 |
| ba59e0d3f0 |
| 5d8f0f835e |
| b4a1aadd1b |
| 8f41d90ab1 |
| 9743386166 |
| 0afcb5b7e7 |
| 043dea760f |
| 0618e2b9b4 |
| 3c171d281c |
| c217b7cd22 |
| 23593c3d20 |
| 192117dc11 |
| 24b8f9211a |
| 51788d6f0e |
| ea0bed2238 |
| e2fe57c440 |
| 434a0ccf2a |
| e7bf3ac108 |
| c5bdaebf2b |
| 645233e7dc |
| c6e396e8fb |
| a57e2b358f |
| d0863d495c |
| 5837277f8d |
| 87d473dc8f |
| 9744629c4b |
| 8f0a015abf |
| f89fa6caed |
| 21a7f3960a |
| 9f63feef30 |
| c171780c0d |
| 5daf6ecf70 |
| 6c8135126d |
| 91d5c4a1ae |
| 2cbe81f1c7 |
| a26ce61d92 |
| 478300f6d8 |
| 3a5b816125 |
| b6814241cc |
| fc6eabea28 |
| 14b3791b2b |
| e6b29988e5 |
| 3cb7b8f22b |
| 2297e29502 |
| ea41acfff5 |
| 1aefc50e35 |
| 9bd4fa5008 |
| 89c2f61b30 |
| a1d575894a |
| 71def3a970 |
| 13854250b3 |
| e6078f9449 |
| 5980952495 |
| 618c376e18 |
| d31a126408 |
| 6d3f8a2c06 |
| b1ba976122 |
| 81d5d1d4a6 |
| ea5ef28c5a |
| fc2ebc6cad |
| 01096fff6c |
| 2ea3558283 |
| 20a47695fb |
| 1dde9ec2d8 |
| 0841a46055 |
| 84c0749d20 |
| 6b02f9e44f |
| 84d7452f9e |
| 9b449cb527 |
| d9ffd359e2 |
| b67443eb40 |
| 4ac204b604 |
| fff50b5472 |
| 8d5aed410f |
| ba0e4ded65 |
| f0b18685a5 |
| fc2b557ae6 |
| af399ae9f3 |
| 45fcf4bc84 |
| 55f61ccb5e |
| b601fc5627 |
| 832c0ffad0 |
| cb33f27f23 |
| 92dee7c082 |
| b9af45bc6b |
| a18f6c6d90 |
| 6e11e3cda9 |
| 2935aebe53 |
| 71f78f0d62 |
| 3e1194e5ff |
| 6d64992e64 |
| 211180108e |
| 17e78d6f7e |
| 1ef86379fb |
| 884a7d6a1b |
| 334961fe10 |
| 2cfb24892f |
| d4fe1400d2 |
| 69ef4d261d |
| 91c102e4fe |
| b4db177045 |
| 340c9095dd |
| e3bc33dc88 |
| eebc145055 |
| 92b01fa48a |
| 2a0d1ab294 |
| 2bdab426ff |
| e769de9986 |
| 4b11e66914 |
| 28d3936a3c |
| 986b15573a |
| 46d828e349 |
| 48603a1619 |
| 17d5f2bbfc |
| b64af73607 |
| c9cce9613e |
| 1392905d63 |
| 271d7eedc4 |
| ab8482a424 |
| c8a14d1c3d |
| 8974c33f2f |
| ed675a61d7 |
| 60b00af0bb |
| 0ceddc4fa3 |
| 8c1996f7e5 |
| 6679c84cfb |
| 7b6f43cbb5 |
| c124989163 |
| c549e413a2 |
| 63a05ff6fa |
| 89a5aac6ea |
| 232d715c37 |
| 1c4e710adc |
| 7fdea0dd93 |
| 5b84b72d15 |
| 7e0be89052 |
| 632bcae856 |
| fd56123acf |
| a2a2e1d466 |
| d4c5786a14 |
| 42ad9f8b02 |
| 0f6b34160c |
| 7e3b29e3e0 |
| 2f660aff7a |
| af3e64a5a7 |
| 9560265adc |
| 4097528aa2 |
| 71d50a50f4 |
| ec0489a8ea |
| 7948d046d1 |
| 223bdbb9aa |
| 726afc915a |
| 86c0a527fd |
| bb0fd87550 |
| 673ab42c3c |
| 4543bfb837 |
| 005b207737 |
| bceacf04ca |
| 707e992f19 |
| 1c757db153 |
| 001a6724ec |
| 976baff44f |
| 469e96126a |
| 24efbe7d33 |
| 704e0fa6b8 |
| c70fef1208 |
| 454e672d42 |
| 647fdcf6a5 |
| e75e68faa0 |
| 74c27ad4e2 |
| cf04e101b9 |
| 4151972d3e |
| 3dc199d8df |
| fc4b23fbc6 |
| 064bfd366f |
| f5ea00b297 |
| 746d52930d |
| cd2040a7d2 |
| f2d8b68278 |
| 31ea72dbb3 |
| e48222ada0 |
| 8e65d36691 |
| 7d235a454d |
| 5c1db4f0f4 |
| 8d3aa97047 |
| f5987fba32 |
| eba1c9e649 |
| f774b0a5dc |
| 251b109d14 |
| bef9ccfa71 |
| 768a7d5052 |
| e86296884a |
| 8589a0fb40 |
.gitignore (vendored, 3 lines changed)

@@ -1 +1,4 @@
 syncthing
+syncthing.exe
+*.tar.gz
+*.zip
CONTRIBUTING.md (new file, 22 lines)

@@ -0,0 +1,22 @@
Please do contribute!

## Building

[See the wiki](https://github.com/calmh/syncthing/wiki/Building)

## Tests

Yes please!

## Style

`go fmt`

## Documentation

[Hack it here](https://github.com/calmh/syncthing/wiki)

## License

MIT
LICENSE (2 lines changed)

@@ -1,4 +1,4 @@
-Copyright (C) 2013 Jakob Borg
+Copyright (C) 2013-2014 Jakob Borg
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of
 this software and associated documentation files (the "Software"), to deal in
README.md (157 lines changed)

@@ -1,18 +1,18 @@
-syncthing
+syncthing [](https://drone.io/github.com/calmh/syncthing/latest)
 =========
 
-This is `syncthing`, an open BitTorrent Sync alternative. It is
-currently far from ready for mass consumption, but it is a usable proof
-of concept and tech demo. The following are the project goals:
+This is the `syncthing` project. The following are the project goals:
 
-1. Define an open, secure, language neutral protocol usable for
-   efficient synchronization of a file repository between an arbitrary
-   number of nodes. This is the [Block Exchange
-   Protocol](https://github.com/calmh/syncthing/blob/master/protocol/PROTOCOL.md)
-   (BEP).
+1. Define a protocol for synchronization of a file repository between a
+   number of collaborating nodes. The protocol should be well defined,
+   unambigous, easily understood, free to use, efficient, secure and
+   languange neutral. This is the [Block Exchange
+   Protocol](https://github.com/calmh/syncthing/blob/master/protocol/PROTOCOL.md).
 
 2. Provide the reference implementation to demonstrate the usability of
-   said protocol. This is the `syncthing` utility.
+   said protocol. This is the `syncthing` utility. It is the hope that
+   alternative, compatible implementations of the protocol will come to
+   exist.
 
 The two are evolving together; the protocol is not to be considered
 stable until syncthing 1.0 is released, at which point it is locked down
@@ -25,137 +25,18 @@ making sure large swarms of selfish agents behave and somehow work
 towards a common goal. Here we have a much smaller swarm of cooperative
 agents and a simpler approach will suffice.
 
-Features
---------
+Documentation
+=============
 
-The following features are _currently implemented and working_:
-
-* The formation of a cluster of nodes, certificate authenticated and
-  communicating over TLS over TCP.
-
-* Synchronization of a single directory among the cluster nodes.
-
-* Change detection by periodic scanning of the local repository.
-
-* Static configuration of cluster nodes.
-
-* Automatic discovery of cluster nodes on the local network. See
-  [discover.go](https://github.com/calmh/syncthing/blob/master/discover/discover.go)
-  for the protocol specification.
-
-* Handling of deleted files. Deletes can be propagated or ignored per
-  client.
-
-The following features are _not yet implemented but planned_:
-
-* Syncing multiple directories from the same syncthing instance.
-
-* Change detection by listening to file system notifications instead of
-  periodic scanning.
-
-* HTTP GUI.
-
-The following features are _not implemented but may be implemented_ in
-the future:
-
-* Automatic remote node discovery using a DHT. This is not technically
-  very difficult but requires one or more globally reachable root
-  nodes. This is open for discussion -- perhaps we can piggyback on an
-  existing DHT, or root nodes need to be established in some other
-  manner.
-
-* Automatic NAT handling via UPNP. Required for the above, not very
-  useful without it.
-
-* Conflict resolution. Currently whichever file has the newest
-  modification time "wins". The correct behavior in the face of
-  conflicts is open for discussion.
-
-Security
---------
-
-Security is one of the primary project goals. This means that it should
-not be possible for an attacker to join a cluster uninvited, and it
-should not be possible to extract private information from intercepted
-traffic. Currently this is implemented as follows.
-
-All traffic is protected by TLS. To prevent uninvited nodes from joining
-a cluster, the certificate fingerprint of each node is compared to a
-preset list of acceptable nodes at connection establishment. The
-fingerprint is computed as the SHA-1 hash of the certificate and
-displayed in BASE32 encoding to form a compact yet convenient string.
-Currently SHA-1 is deemed secure against preimage attacks.
-
-Usage
-=====
-
-`go get github.com/calmh/syncthing`
-
-Check out the options:
-
-```
-$ syncthing --help
-Usage:
-  syncthing [options]
-
-...
-```
-
-Run syncthing to let it create it's config directory and certificate:
-
-```
-$ syncthing
-11:34:13 tls.go:61: OK: wrote cert.pem
-11:34:13 tls.go:67: OK: wrote key.pem
-11:34:13 main.go:66: INFO: My ID: NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q
-11:34:13 main.go:90: FATAL: No config file
-```
-
-Take note of the "My ID: ..." line. Perform the same operation on
-another computer (or the same computer but with a different `--home` for
-testing) to create another node. Take note of that ID as well, and
-create a config file `~/.syncthing/syncthing.ini` looking something like
-this:
-
-```
-[repository]
-dir = /Users/jb/Synced
-
-[nodes]
-NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q = 172.16.32.1:22000 192.23.34.56:22000
-CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL = dynamic
-```
-
-This assumes that the first node is reachable on either of the two
-addresses listed (perhaps one internal and one port-forwarded external)
-and that the other node is not normally reachable from the outside. Save
-this config file, identically, to both nodes. If both nodes are running
-on the same network, you can set all addresses to 'dynamic' and they
-will find each other by local node discovery.
-
-Start syncthing on both nodes. If you're running both on the same
-computer, one needs a different repository directory (in the config
-file) and listening port (set as a command line paramter). For the
-cautious, one side can be set to be read only.
-
-```
-$ syncthing --ro
-13:30:55 main.go:102: INFO: My ID: NCTBZAAHXR6ZZP3D7SL3DLYFFQERMW4Q
-13:30:55 main.go:149: INFO: Initial repository scan in progress
-13:30:59 main.go:153: INFO: Listening for incoming connections
-13:30:59 main.go:157: INFO: Attempting to connect to other nodes
-13:30:59 main.go:247: INFO: Starting local discovery
-13:30:59 main.go:165: OK: Ready to synchronize
-13:31:04 discover.go:113: INFO: Discovered node CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL at 172.16.32.24:23456
-13:31:14 main.go:296: OK: Connected to node CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL
-13:31:19 main.go:345: INFO: Transferred 139 KiB in (14 KiB/s), 139 KiB out (14 KiB/s)
-...
-```
-You should see the synchronization start and then finish a short while
-later. Add nodes to taste.
+The syncthing documentation is kept on the
+[GitHub Wiki](https://github.com/calmh/syncthing/wiki).
 
 License
 =======
 
-MIT
+All documentation and protocol specifications are licensed
+under the [Creative Commons Attribution 4.0 International
+License](http://creativecommons.org/licenses/by/4.0/).
+
+All code is licensed under the [MIT
+License](https://github.com/calmh/syncthing/blob/master/LICENSE).
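The fingerprint scheme from the removed Security section above (a SHA-1 digest of the certificate, shown in BASE32) survives in the implementation and is easy to reproduce. A minimal sketch, assuming the standard BASE32 alphabet with padding stripped; the exact encoding detail in syncthing's own `certID` may differ:

```go
package main

import (
    "crypto/sha1"
    "encoding/base32"
    "fmt"
)

// fingerprint hashes a DER-encoded certificate with SHA-1 and renders
// the 20-byte digest as 32 BASE32 characters, the same length as the
// node IDs in the removed usage examples.
func fingerprint(der []byte) string {
    sum := sha1.Sum(der)
    return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(sum[:])
}

func main() {
    fmt.Println(fingerprint([]byte("not a real certificate")))
}
```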
assets.sh (new executable file, 27 lines)

@@ -0,0 +1,27 @@
#!/bin/bash

cat <<EOT
package auto

import "compress/gzip"
import "bytes"
import "io/ioutil"

var Assets = make(map[string][]byte)

func init() {
    var data []byte
    var gr *gzip.Reader
EOT

cd gui
for f in $(find . -type f) ; do
    f="${f#./}"
    echo "gr, _ = gzip.NewReader(bytes.NewBuffer([]byte{"
    gzip -n -c $f | od -vt x1 | sed 's/^[0-9a-f]*//' | sed 's/\([0-9a-f][0-9a-f]\)/0x\1,/g'
    echo "}))"
    echo "data, _ = ioutil.ReadAll(gr)"
    echo "Assets[\"$f\"] = data"
done
echo "}"
assets/st-logo.pxm (new binary file; binary file not shown)

auto/gui.files.go (new file, 12926 lines; file diff suppressed because it is too large)
@@ -1,13 +1,26 @@
 package buffers
 
-var buffers = make(chan []byte, 32)
+const (
+    largeMin = 1024
+)
+
+var (
+    smallBuffers = make(chan []byte, 32)
+    largeBuffers = make(chan []byte, 32)
+)
 
 func Get(size int) []byte {
+    var ch = largeBuffers
+    if size < largeMin {
+        ch = smallBuffers
+    }
+
     var buf []byte
     select {
-    case buf = <-buffers:
+    case buf = <-ch:
     default:
     }
 
     if len(buf) < size {
         return make([]byte, size)
     }
@@ -15,12 +28,18 @@ func Get(size int) []byte {
 }
 
 func Put(buf []byte) {
-    if cap(buf) == 0 {
+    buf = buf[:cap(buf)]
+    if len(buf) == 0 {
         return
     }
-    buf = buf[:cap(buf)]
+
+    var ch = largeBuffers
+    if len(buf) < largeMin {
+        ch = smallBuffers
+    }
+
     select {
-    case buffers <- buf:
+    case ch <- buf:
     default:
     }
 }
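The point of the two-pool split is that a caller asking for a small buffer no longer pins a block-sized allocation, and vice versa. A hypothetical caller, sketched under the assumption that it sits alongside code importing this package:

```go
package example

import (
    "io"

    "github.com/calmh/syncthing/buffers"
)

// readChunk reads up to size bytes from r into a pooled buffer and
// returns a private copy, so the pooled buffer can be recycled at once.
func readChunk(r io.Reader, size int) ([]byte, error) {
    buf := buffers.Get(size)
    defer buffers.Put(buf)

    n, err := io.ReadFull(r, buf)
    if err != nil && err != io.ErrUnexpectedEOF {
        return nil, err
    }
    out := make([]byte, n)
    copy(out, buf)
    return out, nil
}
```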
build.sh (new executable file, 91 lines)

@@ -0,0 +1,91 @@
#!/bin/bash

export COPYFILE_DISABLE=true

distFiles=(README.md LICENSE) # apart from the binary itself
version=$(git describe --always)

build() {
    go build -ldflags "-w -X main.Version $version" ./cmd/syncthing
}

prepare() {
    ./assets.sh | gofmt > auto/gui.files.go
    go get -d
}

test() {
    go test ./...
}

tarDist() {
    name="$1"
    mkdir -p "$name"
    cp syncthing "${distFiles[@]}" "$name"
    tar zcvf "$name.tar.gz" "$name"
    rm -rf "$name"
}

zipDist() {
    name="$1"
    mkdir -p "$name"
    cp syncthing.exe "${distFiles[@]}" "$name"
    zip -r "$name.zip" "$name"
    rm -rf "$name"
}

case "$1" in
    "")
        build
        ;;

    tar)
        rm -f *.tar.gz *.zip
        prepare
        test || exit 1
        build

        eval $(go env)
        name="syncthing-$GOOS-$GOARCH-$version"

        tarDist "$name"
        ;;

    all)
        rm -f *.tar.gz *.zip
        prepare
        test || exit 1

        export GOARM=7
        for os in darwin-amd64 linux-amd64 linux-arm freebsd-amd64 windows-amd64 ; do
            export GOOS=${os%-*}
            export GOARCH=${os#*-}

            build

            name="syncthing-$os-$version"
            case $GOOS in
                windows)
                    zipDist "$name"
                    rm -f syncthing.exe
                    ;;
                *)
                    tarDist "$name"
                    rm -f syncthing
                    ;;
            esac
        done
        ;;

    upload)
        tag=$(git describe)
        shopt -s nullglob
        for f in *gz *zip ; do
            relup calmh/syncthing "$tag" "$f"
        done
        ;;

    *)
        echo "Unknown build parameter $1"
        ;;
esac
cid/cid.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package cid

type Map struct {
    toCid  map[string]int
    toName []string
}

func NewMap() *Map {
    return &Map{
        toCid: make(map[string]int),
    }
}

func (m *Map) Get(name string) int {
    cid, ok := m.toCid[name]
    if ok {
        return cid
    }

    // Find a free slot to get a new ID
    for i, n := range m.toName {
        if n == "" {
            m.toName[i] = name
            m.toCid[name] = i
            return i
        }
    }

    // Add it to the end since we didn't find a free slot
    m.toName = append(m.toName, name)
    cid = len(m.toName) - 1
    m.toCid[name] = cid
    return cid
}

func (m *Map) Clear(name string) {
    cid, ok := m.toCid[name]
    if ok {
        m.toName[cid] = ""
        delete(m.toCid, name)
    }
}
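The slot-reuse behaviour of this map is the whole point: the integer IDs stay small and dense even as peers come and go, which makes them suitable as array indices elsewhere. For example:

```go
package main

import (
    "fmt"

    "github.com/calmh/syncthing/cid"
)

func main() {
    m := cid.NewMap()
    fmt.Println(m.Get("node-a")) // 0: appended at the end
    fmt.Println(m.Get("node-b")) // 1
    fmt.Println(m.Get("node-a")) // 0: stable for a known name

    m.Clear("node-a")            // frees slot 0
    fmt.Println(m.Get("node-c")) // 0: the freed slot is reused
}
```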
cmd/.gitignore (new vendored file, 1 line)

@@ -0,0 +1 @@
!syncthing
@@ -6,20 +6,18 @@ import (
     "io"
 )
 
-type BlockList []Block
-
 type Block struct {
-    Offset uint64
-    Length uint32
+    Offset int64
+    Size   uint32
     Hash   []byte
 }
 
 // Blocks returns the blockwise hash of the reader.
-func Blocks(r io.Reader, blocksize int) (BlockList, error) {
-    var blocks BlockList
-    var offset uint64
+func Blocks(r io.Reader, blocksize int) ([]Block, error) {
+    var blocks []Block
+    var offset int64
     for {
-        lr := &io.LimitedReader{r, int64(blocksize)}
+        lr := &io.LimitedReader{R: r, N: int64(blocksize)}
         hf := sha256.New()
         n, err := io.Copy(hf, lr)
         if err != nil {
@@ -32,19 +30,28 @@ func Blocks(r io.Reader, blocksize int) ([]Block, error) {
 
         b := Block{
             Offset: offset,
-            Length: uint32(n),
+            Size:   uint32(n),
             Hash:   hf.Sum(nil),
         }
         blocks = append(blocks, b)
-        offset += uint64(n)
+        offset += int64(n)
     }
 
+    if len(blocks) == 0 {
+        // Empty file
+        blocks = append(blocks, Block{
+            Offset: 0,
+            Size:   0,
+            Hash:   []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
+        })
+    }
+
     return blocks, nil
 }
 
-// To returns the list of blocks necessary to transform src into dst.
-// Both block lists must have been created with the same block size.
-func (src BlockList) To(tgt BlockList) (have, need BlockList) {
+// BlockDiff returns lists of common and missing (to transform src into tgt)
+// blocks. Both block lists must have been created with the same block size.
+func BlockDiff(src, tgt []Block) (have, need []Block) {
     if len(tgt) == 0 && len(src) != 0 {
         return nil, nil
     }

@@ -11,7 +11,8 @@ var blocksTestData = []struct {
     blocksize int
     hash      []string
 }{
-    {[]byte(""), 1024, []string{}},
+    {[]byte(""), 1024, []string{
+        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
     {[]byte("contents"), 1024, []string{
         "d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8"}},
     {[]byte("contents"), 9, []string{
@@ -52,7 +53,7 @@ func TestBlocks(t *testing.T) {
         t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
     } else {
         i := 0
-        for off := uint64(0); off < uint64(len(test.data)); off += uint64(test.blocksize) {
+        for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
             if blocks[i].Offset != off {
                 t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
             }
@@ -61,8 +62,8 @@ func TestBlocks(t *testing.T) {
             if rem := len(test.data) - int(off); bs > rem {
                 bs = rem
             }
-            if int(blocks[i].Length) != bs {
-                t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Length, bs)
+            if int(blocks[i].Size) != bs {
+                t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
             }
             if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
                 t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
@@ -86,7 +87,7 @@ var diffTestData = []struct {
     {"contents", "cantents", 3, []Block{{0, 3, nil}}},
     {"contents", "contants", 3, []Block{{3, 3, nil}}},
     {"contents", "cantants", 3, []Block{{0, 3, nil}, {3, 3, nil}}},
-    {"contents", "", 3, nil},
+    {"contents", "", 3, []Block{{0, 0, nil}}},
     {"", "contents", 3, []Block{{0, 3, nil}, {3, 3, nil}, {6, 2, nil}}},
     {"con", "contents", 3, []Block{{3, 3, nil}, {6, 2, nil}}},
     {"contents", "con", 3, nil},
@@ -98,7 +99,7 @@ func TestDiff(t *testing.T) {
     for i, test := range diffTestData {
         a, _ := Blocks(bytes.NewBufferString(test.a), test.s)
         b, _ := Blocks(bytes.NewBufferString(test.b), test.s)
-        _, d := a.To(b)
+        _, d := BlockDiff(a, b)
         if len(d) != len(test.d) {
             t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
         } else {
@@ -106,8 +107,8 @@ func TestDiff(t *testing.T) {
             if d[j].Offset != test.d[j].Offset {
                 t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
             }
-            if d[j].Length != test.d[j].Length {
-                t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Length, test.d[j].Length)
+            if d[j].Size != test.d[j].Size {
+                t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
             }
         }
     }
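BlockDiff's contract is easiest to see on the test data above: with block size 3, "contents" and "cantents" differ only in their first block, so two blocks can be reused locally and one must be fetched. A sketch, assumed to sit in the same package as Blocks and BlockDiff (which already imports bytes and fmt):

```go
// diffDemo: two of three blocks are already present locally ("have"),
// one must be requested from a peer ("need").
func diffDemo() {
    src, _ := Blocks(bytes.NewBufferString("contents"), 3)
    tgt, _ := Blocks(bytes.NewBufferString("cantents"), 3)
    have, need := BlockDiff(src, tgt)
    fmt.Println(len(have), len(need)) // 2 1
}
```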
cmd/syncthing/config.go (new file, 210 lines)

@@ -0,0 +1,210 @@
package main

import (
    "crypto/sha256"
    "encoding/xml"
    "fmt"
    "io"
    "reflect"
    "sort"
    "strconv"
    "strings"
)

type Configuration struct {
    Version      int                       `xml:"version,attr" default:"1"`
    Repositories []RepositoryConfiguration `xml:"repository"`
    Options      OptionsConfiguration      `xml:"options"`
    XMLName      xml.Name                  `xml:"configuration" json:"-"`
}

type RepositoryConfiguration struct {
    Directory string              `xml:"directory,attr"`
    Nodes     []NodeConfiguration `xml:"node"`
}

type NodeConfiguration struct {
    NodeID    string   `xml:"id,attr"`
    Name      string   `xml:"name,attr"`
    Addresses []string `xml:"address"`
}

type OptionsConfiguration struct {
    ListenAddress      []string `xml:"listenAddress" default:":22000" ini:"listen-address"`
    ReadOnly           bool     `xml:"readOnly" ini:"read-only"`
    AllowDelete        bool     `xml:"allowDelete" default:"true" ini:"allow-delete"`
    FollowSymlinks     bool     `xml:"followSymlinks" default:"true" ini:"follow-symlinks"`
    GUIEnabled         bool     `xml:"guiEnabled" default:"true" ini:"gui-enabled"`
    GUIAddress         string   `xml:"guiAddress" default:"127.0.0.1:8080" ini:"gui-address"`
    GlobalAnnServer    string   `xml:"globalAnnounceServer" default:"announce.syncthing.net:22025" ini:"global-announce-server"`
    GlobalAnnEnabled   bool     `xml:"globalAnnounceEnabled" default:"true" ini:"global-announce-enabled"`
    LocalAnnEnabled    bool     `xml:"localAnnounceEnabled" default:"true" ini:"local-announce-enabled"`
    ParallelRequests   int      `xml:"parallelRequests" default:"16" ini:"parallel-requests"`
    MaxSendKbps        int      `xml:"maxSendKbps" ini:"max-send-kbps"`
    RescanIntervalS    int      `xml:"rescanIntervalS" default:"60" ini:"rescan-interval"`
    ReconnectIntervalS int      `xml:"reconnectionIntervalS" default:"60" ini:"reconnection-interval"`
    MaxChangeKbps      int      `xml:"maxChangeKbps" default:"1000" ini:"max-change-bw"`
}

func setDefaults(data interface{}, setEmptySlices bool) error {
    s := reflect.ValueOf(data).Elem()
    t := s.Type()

    for i := 0; i < s.NumField(); i++ {
        f := s.Field(i)
        tag := t.Field(i).Tag

        v := tag.Get("default")
        if len(v) > 0 {
            if f.Kind().String() == "slice" && f.Len() != 0 {
                continue
            }

            switch f.Interface().(type) {
            case string:
                f.SetString(v)

            case []string:
                if setEmptySlices {
                    rv := reflect.MakeSlice(reflect.TypeOf([]string{}), 1, 1)
                    rv.Index(0).SetString(v)
                    f.Set(rv)
                }

            case int:
                i, err := strconv.ParseInt(v, 10, 64)
                if err != nil {
                    return err
                }
                f.SetInt(i)

            case bool:
                f.SetBool(v == "true")

            default:
                panic(f.Type())
            }
        }
    }
    return nil
}

func readConfigINI(m map[string]string, data interface{}) error {
    s := reflect.ValueOf(data).Elem()
    t := s.Type()

    for i := 0; i < s.NumField(); i++ {
        f := s.Field(i)
        tag := t.Field(i).Tag

        name := tag.Get("ini")
        if len(name) == 0 {
            name = strings.ToLower(t.Field(i).Name)
        }

        if v, ok := m[name]; ok {
            switch f.Interface().(type) {
            case string:
                f.SetString(v)

            case int:
                i, err := strconv.ParseInt(v, 10, 64)
                if err == nil {
                    f.SetInt(i)
                }

            case bool:
                f.SetBool(v == "true")

            default:
                panic(f.Type())
            }
        }
    }
    return nil
}

func writeConfigXML(wr io.Writer, cfg Configuration) error {
    e := xml.NewEncoder(wr)
    e.Indent("", " ")
    err := e.Encode(cfg)
    if err != nil {
        return err
    }
    _, err = wr.Write([]byte("\n"))
    return err
}

func uniqueStrings(ss []string) []string {
    var m = make(map[string]bool, len(ss))
    for _, s := range ss {
        m[s] = true
    }

    var us = make([]string, 0, len(m))
    for k := range m {
        us = append(us, k)
    }

    return us
}

func readConfigXML(rd io.Reader) (Configuration, error) {
    var cfg Configuration

    setDefaults(&cfg, false)
    setDefaults(&cfg.Options, false)

    var err error
    if rd != nil {
        err = xml.NewDecoder(rd).Decode(&cfg)
    }

    setDefaults(&cfg.Options, true)

    cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
    return cfg, err
}

type NodeConfigurationList []NodeConfiguration

func (l NodeConfigurationList) Less(a, b int) bool {
    return l[a].NodeID < l[b].NodeID
}
func (l NodeConfigurationList) Swap(a, b int) {
    l[a], l[b] = l[b], l[a]
}
func (l NodeConfigurationList) Len() int {
    return len(l)
}

func clusterHash(nodes []NodeConfiguration) string {
    sort.Sort(NodeConfigurationList(nodes))
    h := sha256.New()
    for _, n := range nodes {
        h.Write([]byte(n.NodeID))
    }
    return fmt.Sprintf("%x", h.Sum(nil))
}

func cleanNodeList(nodes []NodeConfiguration, myID string) []NodeConfiguration {
    var myIDExists bool
    for _, node := range nodes {
        if node.NodeID == myID {
            myIDExists = true
            break
        }
    }

    if !myIDExists {
        nodes = append(nodes, NodeConfiguration{
            NodeID:    myID,
            Addresses: []string{"dynamic"},
            Name:      "",
        })
    }

    sort.Sort(NodeConfigurationList(nodes))

    return nodes
}
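The two-pass setDefaults dance in readConfigXML (once before decoding, once after with setEmptySlices enabled) means a missing or empty config file still comes out fully populated from the struct tags. A quick sketch, assumed to live in the same package as the code above (fmt is already imported):

```go
// defaultsDemo shows that decoding no file at all still yields the
// struct-tag defaults, including the one-element ListenAddress slice.
func defaultsDemo() {
    cfg, _ := readConfigXML(nil)
    fmt.Println(cfg.Options.GUIAddress)      // 127.0.0.1:8080
    fmt.Println(cfg.Options.ListenAddress)   // [:22000]
    fmt.Println(cfg.Options.RescanIntervalS) // 60
}
```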
cmd/syncthing/filemonitor.go (new file, 173 lines)

@@ -0,0 +1,173 @@
package main

import (
    "bytes"
    "errors"
    "fmt"
    "log"
    "os"
    "path"
    "sync"
    "time"

    "github.com/calmh/syncthing/buffers"
)

type fileMonitor struct {
    name        string // in-repo name
    path        string // full path
    writeDone   sync.WaitGroup
    model       *Model
    global      File
    localBlocks []Block
    copyError   error
    writeError  error
}

func (m *fileMonitor) FileBegins(cc <-chan content) error {
    if m.model.trace["file"] {
        log.Printf("FILE: FileBegins: " + m.name)
    }

    tmp := tempName(m.path, m.global.Modified)

    dir := path.Dir(tmp)
    _, err := os.Stat(dir)
    if err != nil && os.IsNotExist(err) {
        err = os.MkdirAll(dir, 0777)
        if err != nil {
            return err
        }
    }

    outFile, err := os.Create(tmp)
    if err != nil {
        return err
    }

    m.writeDone.Add(1)

    var writeWg sync.WaitGroup
    if len(m.localBlocks) > 0 {
        writeWg.Add(1)
        inFile, err := os.Open(m.path)
        if err != nil {
            return err
        }

        // Copy local blocks, close infile when done
        go m.copyLocalBlocks(inFile, outFile, &writeWg)
    }

    // Write remote blocks,
    writeWg.Add(1)
    go m.copyRemoteBlocks(cc, outFile, &writeWg)

    // Wait for both writing routines, then close the outfile
    go func() {
        writeWg.Wait()
        outFile.Close()
        m.writeDone.Done()
    }()

    return nil
}

func (m *fileMonitor) copyLocalBlocks(inFile, outFile *os.File, writeWg *sync.WaitGroup) {
    defer inFile.Close()
    defer writeWg.Done()

    var buf = buffers.Get(BlockSize)
    defer buffers.Put(buf)

    for _, lb := range m.localBlocks {
        buf = buf[:lb.Size]
        _, err := inFile.ReadAt(buf, lb.Offset)
        if err != nil {
            m.copyError = err
            return
        }
        _, err = outFile.WriteAt(buf, lb.Offset)
        if err != nil {
            m.copyError = err
            return
        }
    }
}

func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writeWg *sync.WaitGroup) {
    defer writeWg.Done()

    for content := range cc {
        _, err := outFile.WriteAt(content.data, content.offset)
        buffers.Put(content.data)
        if err != nil {
            m.writeError = err
            return
        }
    }
}

func (m *fileMonitor) FileDone() error {
    if m.model.trace["file"] {
        log.Printf("FILE: FileDone: " + m.name)
    }

    m.writeDone.Wait()

    tmp := tempName(m.path, m.global.Modified)
    defer os.Remove(tmp)

    if m.copyError != nil {
        return m.copyError
    }
    if m.writeError != nil {
        return m.writeError
    }

    err := hashCheck(tmp, m.global.Blocks)
    if err != nil {
        return err
    }

    err = os.Chtimes(tmp, time.Unix(m.global.Modified, 0), time.Unix(m.global.Modified, 0))
    if err != nil {
        return err
    }

    err = os.Chmod(tmp, os.FileMode(m.global.Flags&0777))
    if err != nil {
        return err
    }

    err = os.Rename(tmp, m.path)
    if err != nil {
        return err
    }

    m.model.updateLocal(m.global)
    return nil
}

func hashCheck(name string, correct []Block) error {
    rf, err := os.Open(name)
    if err != nil {
        return err
    }
    defer rf.Close()

    current, err := Blocks(rf, BlockSize)
    if err != nil {
        return err
    }
    if len(current) != len(correct) {
        return errors.New("incorrect number of blocks")
    }
    for i := range current {
        if bytes.Compare(current[i].Hash, correct[i].Hash) != 0 {
            return fmt.Errorf("hash mismatch: %x != %x", current[i], correct[i])
        }
    }

    return nil
}
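FileBegins and FileDone together implement the classic safe-replace pattern: write into a temporary name, verify, fix up metadata, then rename over the target. Distilled into a standalone sketch, where the comment stands in for the hashCheck, Chtimes and Chmod steps above:

```go
package example

import (
    "io"
    "os"
)

// safeReplace writes r to path so that readers only ever see the old
// file or the complete new one, never a partial write.
func safeReplace(path string, r io.Reader) error {
    tmp := path + ".tmp" // the code above derives this via tempName()
    f, err := os.Create(tmp)
    if err != nil {
        return err
    }
    if _, err := io.Copy(f, r); err != nil {
        f.Close()
        os.Remove(tmp)
        return err
    }
    if err := f.Close(); err != nil {
        os.Remove(tmp)
        return err
    }
    // Verification and metadata (hashCheck, Chtimes, Chmod) go here.
    return os.Rename(tmp, path) // atomic on POSIX filesystems
}
```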
cmd/syncthing/filequeue.go (new file, 239 lines)

@@ -0,0 +1,239 @@
package main

import (
    "log"
    "sort"
    "sync"
    "time"
)

type Monitor interface {
    FileBegins(<-chan content) error
    FileDone() error
}

type FileQueue struct {
    files        queuedFileList
    sorted       bool
    fmut         sync.Mutex // protects files and sorted
    availability map[string][]string
    amut         sync.Mutex // protects availability
    queued       map[string]bool
}

type queuedFile struct {
    name         string
    blocks       []Block
    activeBlocks []bool
    given        int
    remaining    int
    channel      chan content
    nodes        []string
    nodesChecked time.Time
    monitor      Monitor
}

type content struct {
    offset int64
    data   []byte
}

type queuedFileList []queuedFile

func (l queuedFileList) Len() int { return len(l) }

func (l queuedFileList) Swap(a, b int) { l[a], l[b] = l[b], l[a] }

func (l queuedFileList) Less(a, b int) bool {
    // Sort by most blocks already given out, then alphabetically
    if l[a].given != l[b].given {
        return l[a].given > l[b].given
    }
    return l[a].name < l[b].name
}

type queuedBlock struct {
    name  string
    block Block
    index int
}

func NewFileQueue() *FileQueue {
    return &FileQueue{
        availability: make(map[string][]string),
        queued:       make(map[string]bool),
    }
}

func (q *FileQueue) Add(name string, blocks []Block, monitor Monitor) {
    q.fmut.Lock()
    defer q.fmut.Unlock()

    if q.queued[name] {
        return
    }

    q.files = append(q.files, queuedFile{
        name:         name,
        blocks:       blocks,
        activeBlocks: make([]bool, len(blocks)),
        remaining:    len(blocks),
        channel:      make(chan content),
        monitor:      monitor,
    })
    q.queued[name] = true
    q.sorted = false
}

func (q *FileQueue) Len() int {
    q.fmut.Lock()
    defer q.fmut.Unlock()

    return len(q.files)
}

func (q *FileQueue) Get(nodeID string) (queuedBlock, bool) {
    q.fmut.Lock()
    defer q.fmut.Unlock()

    if !q.sorted {
        sort.Sort(q.files)
        q.sorted = true
    }

    for i := range q.files {
        qf := &q.files[i]

        q.amut.Lock()
        av := q.availability[qf.name]
        q.amut.Unlock()

        if len(av) == 0 {
            // Noone has the file we want; abort.
            if qf.remaining != len(qf.blocks) {
                // We have already started on this file; close it down
                close(qf.channel)
                if mon := qf.monitor; mon != nil {
                    mon.FileDone()
                }
            }
            delete(q.queued, qf.name)
            q.deleteAt(i)
            return queuedBlock{}, false
        }

        for _, ni := range av {
            // Find and return the next block in the queue
            if ni == nodeID {
                for j, b := range qf.blocks {
                    if !qf.activeBlocks[j] {
                        qf.activeBlocks[j] = true
                        qf.given++
                        return queuedBlock{
                            name:  qf.name,
                            block: b,
                            index: j,
                        }, true
                    }
                }
                break
            }
        }
    }

    // We found nothing to do
    return queuedBlock{}, false
}

func (q *FileQueue) Done(file string, offset int64, data []byte) {
    q.fmut.Lock()
    defer q.fmut.Unlock()

    c := content{
        offset: offset,
        data:   data,
    }
    for i := range q.files {
        qf := &q.files[i]

        if qf.name == file {
            if qf.monitor != nil && qf.remaining == len(qf.blocks) {
                err := qf.monitor.FileBegins(qf.channel)
                if err != nil {
                    log.Printf("WARNING: %s: %v (not synced)", qf.name, err)
                    delete(q.queued, qf.name)
                    q.deleteAt(i)
                    return
                }
            }

            qf.channel <- c
            qf.remaining--

            if qf.remaining == 0 {
                close(qf.channel)
                if qf.monitor != nil {
                    err := qf.monitor.FileDone()
                    if err != nil {
                        log.Printf("WARNING: %s: %v", qf.name, err)
                    }
                }
                delete(q.queued, qf.name)
                q.deleteAt(i)
            }
            return
        }
    }

    // We found nothing, might have errored out already
}

func (q *FileQueue) QueuedFiles() (files []string) {
    q.fmut.Lock()
    defer q.fmut.Unlock()

    for _, qf := range q.files {
        files = append(files, qf.name)
    }
    return
}

func (q *FileQueue) deleteAt(i int) {
    q.files = append(q.files[:i], q.files[i+1:]...)
}

func (q *FileQueue) deleteFile(n string) {
    for i, file := range q.files {
        if n == file.name {
            q.deleteAt(i)
            delete(q.queued, file.name)
            return
        }
    }
}

func (q *FileQueue) SetAvailable(file string, nodes []string) {
    q.amut.Lock()
    defer q.amut.Unlock()

    q.availability[file] = nodes
}

func (q *FileQueue) RemoveAvailable(toRemove string) {
    q.fmut.Lock()
    q.amut.Lock()
    defer q.amut.Unlock()
    defer q.fmut.Unlock()

    for file, nodes := range q.availability {
        for i, node := range nodes {
            if node == toRemove {
                q.availability[file] = nodes[:i+copy(nodes[i:], nodes[i+1:])]
                if len(q.availability[file]) == 0 {
                    q.deleteFile(file)
                }
            }
            break
        }
    }
}
cmd/syncthing/filequeue_test.go (new file, 295 lines)

@@ -0,0 +1,295 @@
package main

import (
    "reflect"
    "sync"
    "sync/atomic"
    "testing"
)

func TestFileQueueAdd(t *testing.T) {
    q := NewFileQueue()
    q.Add("foo", nil, nil)
}

func TestFileQueueAddSorting(t *testing.T) {
    q := NewFileQueue()
    q.SetAvailable("zzz", []string{"nodeID"})
    q.SetAvailable("aaa", []string{"nodeID"})

    q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
    q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
    b, _ := q.Get("nodeID")
    if b.name != "aaa" {
        t.Errorf("Incorrectly sorted get: %+v", b)
    }

    q = NewFileQueue()
    q.SetAvailable("zzz", []string{"nodeID"})
    q.SetAvailable("aaa", []string{"nodeID"})

    q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
    b, _ = q.Get("nodeID") // Start on zzzz
    if b.name != "zzz" {
        t.Errorf("Incorrectly sorted get: %+v", b)
    }
    q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
    b, _ = q.Get("nodeID")
    if b.name != "zzz" {
        // Continue rather than starting a new file
        t.Errorf("Incorrectly sorted get: %+v", b)
    }
}

func TestFileQueueLen(t *testing.T) {
    q := NewFileQueue()
    q.Add("foo", nil, nil)
    q.Add("bar", nil, nil)

    if l := q.Len(); l != 2 {
        t.Errorf("Incorrect len %d != 2 after adds", l)
    }
}

func TestFileQueueGet(t *testing.T) {
    q := NewFileQueue()
    q.SetAvailable("foo", []string{"nodeID"})
    q.SetAvailable("bar", []string{"nodeID"})

    q.Add("foo", []Block{
        {Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
        {Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
        {Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
    }, nil)
    q.Add("bar", []Block{
        {Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
        {Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
    }, nil)

    // First get should return the first block of the first file

    expected := queuedBlock{
        name: "bar",
        block: Block{
            Offset: 0,
            Size:   128,
            Hash:   []byte("some bar hash bytes"),
        },
    }
    actual, ok := q.Get("nodeID")

    if !ok {
        t.Error("Unexpected non-OK Get()")
    }
    if !reflect.DeepEqual(expected, actual) {
        t.Errorf("Incorrect block returned (first)\n E: %+v\n A: %+v", expected, actual)
    }

    // Second get should return the next block of the first file

    expected = queuedBlock{
        name: "bar",
        block: Block{
            Offset: 128,
            Size:   128,
            Hash:   []byte("some other bar hash bytes"),
        },
        index: 1,
    }
    actual, ok = q.Get("nodeID")

    if !ok {
        t.Error("Unexpected non-OK Get()")
    }
    if !reflect.DeepEqual(expected, actual) {
        t.Errorf("Incorrect block returned (second)\n E: %+v\n A: %+v", expected, actual)
    }

    // Third get should return the first block of the second file

    expected = queuedBlock{
        name: "foo",
        block: Block{
            Offset: 0,
            Size:   128,
            Hash:   []byte("some foo hash bytes"),
        },
    }
    actual, ok = q.Get("nodeID")

    if !ok {
        t.Error("Unexpected non-OK Get()")
    }
    if !reflect.DeepEqual(expected, actual) {
        t.Errorf("Incorrect block returned (third)\n E: %+v\n A: %+v", expected, actual)
    }
}

/*
func TestFileQueueDone(t *testing.T) {
    ch := make(chan content)
    var recv sync.WaitGroup
    recv.Add(1)
    go func() {
        content := <-ch
        if bytes.Compare(content.data, []byte("first block bytes")) != 0 {
            t.Error("Incorrect data in first content block")
        }

        content = <-ch
        if bytes.Compare(content.data, []byte("second block bytes")) != 0 {
            t.Error("Incorrect data in second content block")
        }

        _, ok := <-ch
        if ok {
            t.Error("Content channel not closed")
        }

        recv.Done()
    }()

    q := FileQueue{resolver: fakeResolver{}}
    q.Add("foo", []Block{
        {Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
        {Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
    }, ch)

    b0, _ := q.Get("nodeID")
    b1, _ := q.Get("nodeID")

    q.Done(b0.name, b0.block.Offset, []byte("first block bytes"))
    q.Done(b1.name, b1.block.Offset, []byte("second block bytes"))

    recv.Wait()

    // Queue should now have one file less

    if l := q.Len(); l != 0 {
        t.Error("Queue not empty")
    }

    _, ok := q.Get("nodeID")
    if ok {
        t.Error("Unexpected OK Get()")
    }
}
*/

func TestFileQueueGetNodeIDs(t *testing.T) {
    q := NewFileQueue()
    q.SetAvailable("a-foo", []string{"nodeID", "a"})
    q.SetAvailable("b-bar", []string{"nodeID", "b"})

    q.Add("a-foo", []Block{
        {Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
        {Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
        {Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
    }, nil)
    q.Add("b-bar", []Block{
        {Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
        {Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
    }, nil)

    expected := queuedBlock{
        name: "b-bar",
        block: Block{
            Offset: 0,
            Size:   128,
            Hash:   []byte("some bar hash bytes"),
        },
    }
    actual, ok := q.Get("b")
    if !ok {
        t.Error("Unexpected non-OK Get()")
    }
    if !reflect.DeepEqual(expected, actual) {
        t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
    }

    expected = queuedBlock{
        name: "a-foo",
        block: Block{
            Offset: 0,
            Size:   128,
            Hash:   []byte("some foo hash bytes"),
        },
    }
    actual, ok = q.Get("a")
    if !ok {
        t.Error("Unexpected non-OK Get()")
    }
    if !reflect.DeepEqual(expected, actual) {
        t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
    }

    expected = queuedBlock{
        name: "a-foo",
        block: Block{
            Offset: 128,
            Size:   128,
            Hash:   []byte("some other foo hash bytes"),
        },
        index: 1,
    }
    actual, ok = q.Get("nodeID")
    if !ok {
        t.Error("Unexpected non-OK Get()")
    }
    if !reflect.DeepEqual(expected, actual) {
        t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
    }
}

func TestFileQueueThreadHandling(t *testing.T) {
    // This should pass with go test -race

    const n = 100
    var total int
    var blocks []Block
    for i := 1; i <= n; i++ {
        blocks = append(blocks, Block{Offset: int64(i), Size: 1})
        total += i
    }

    q := NewFileQueue()
    q.Add("foo", blocks, nil)
    q.SetAvailable("foo", []string{"nodeID"})

    var start = make(chan bool)
    var gotTot uint32
    var wg sync.WaitGroup
    wg.Add(n)
    for i := 1; i <= n; i++ {
        go func() {
            <-start
            b, _ := q.Get("nodeID")
            atomic.AddUint32(&gotTot, uint32(b.block.Offset))
            wg.Done()
        }()
    }

    close(start)
    wg.Wait()
    if int(gotTot) != total {
        t.Errorf("Total mismatch; %d != %d", gotTot, total)
    }
}

func TestDeleteAt(t *testing.T) {
    q := FileQueue{}

    for i := 0; i < 4; i++ {
        q.files = queuedFileList{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}
        q.deleteAt(i)
        if l := len(q.files); l != 3 {
            t.Fatalf("deleteAt(%d) failed; %d != 3", i, l)
        }
    }

    q.files = queuedFileList{{name: "a"}}
    q.deleteAt(0)
    if l := len(q.files); l != 0 {
        t.Fatalf("deleteAt(only) failed; %d != 0", l)
    }
}
cmd/syncthing/gui.go (new file, 172 lines)

@@ -0,0 +1,172 @@
package main

import (
    "encoding/json"
    "io/ioutil"
    "log"
    "net/http"
    "runtime"
    "sync"
    "time"

    "github.com/codegangsta/martini"
)

type guiError struct {
    Time  time.Time
    Error string
}

var (
    configInSync = true
    guiErrors    = []guiError{}
    guiErrorsMut sync.Mutex
)

func startGUI(addr string, m *Model) {
    router := martini.NewRouter()
    router.Get("/", getRoot)
    router.Get("/rest/version", restGetVersion)
    router.Get("/rest/model", restGetModel)
    router.Get("/rest/connections", restGetConnections)
    router.Get("/rest/config", restGetConfig)
    router.Get("/rest/config/sync", restGetConfigInSync)
    router.Get("/rest/need", restGetNeed)
    router.Get("/rest/system", restGetSystem)
    router.Get("/rest/errors", restGetErrors)

    router.Post("/rest/config", restPostConfig)
    router.Post("/rest/restart", restPostRestart)
    router.Post("/rest/error", restPostError)

    go func() {
        mr := martini.New()
        mr.Use(embeddedStatic())
        mr.Use(martini.Recovery())
        mr.Action(router.Handle)
        mr.Map(m)
        err := http.ListenAndServe(addr, mr)
        if err != nil {
            warnln("GUI not possible:", err)
        }
    }()
}

func getRoot(w http.ResponseWriter, r *http.Request) {
    http.Redirect(w, r, "/index.html", 302)
}

func restGetVersion() string {
    return Version
}

func restGetModel(m *Model, w http.ResponseWriter) {
    var res = make(map[string]interface{})

    globalFiles, globalDeleted, globalBytes := m.GlobalSize()
    res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes

    localFiles, localDeleted, localBytes := m.LocalSize()
    res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes

    inSyncFiles, inSyncBytes := m.InSyncSize()
    res["inSyncFiles"], res["inSyncBytes"] = inSyncFiles, inSyncBytes

    files, total := m.NeedFiles()
    res["needFiles"], res["needBytes"] = len(files), total

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(res)
}

func restGetConnections(m *Model, w http.ResponseWriter) {
    var res = m.ConnectionStats()
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(res)
}

func restGetConfig(w http.ResponseWriter) {
    json.NewEncoder(w).Encode(cfg)
}

func restPostConfig(req *http.Request) {
    err := json.NewDecoder(req.Body).Decode(&cfg)
    if err != nil {
        log.Println(err)
    } else {
        saveConfig()
        configInSync = false
    }
}

func restGetConfigInSync(w http.ResponseWriter) {
    json.NewEncoder(w).Encode(map[string]bool{"configInSync": configInSync})
}

func restPostRestart(req *http.Request) {
    restart()
}

type guiFile File

func (f guiFile) MarshalJSON() ([]byte, error) {
    type t struct {
        Name string
        Size int64
    }
    return json.Marshal(t{
        Name: f.Name,
        Size: File(f).Size,
    })
}

func restGetNeed(m *Model, w http.ResponseWriter) {
    files, _ := m.NeedFiles()
    gfs := make([]guiFile, len(files))
    for i, f := range files {
        gfs[i] = guiFile(f)
    }
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(gfs)
}

var cpuUsagePercent float64
var cpuUsageLock sync.RWMutex

func restGetSystem(w http.ResponseWriter) {
    var m runtime.MemStats
    runtime.ReadMemStats(&m)

    res := make(map[string]interface{})
    res["myID"] = myID
    res["goroutines"] = runtime.NumGoroutine()
    res["alloc"] = m.Alloc
    res["sys"] = m.Sys
    cpuUsageLock.RLock()
    res["cpuPercent"] = cpuUsagePercent
    cpuUsageLock.RUnlock()

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(res)
}

func restGetErrors(w http.ResponseWriter) {
    guiErrorsMut.Lock()
    json.NewEncoder(w).Encode(guiErrors)
    guiErrorsMut.Unlock()
}

func restPostError(req *http.Request) {
    bs, _ := ioutil.ReadAll(req.Body)
    req.Body.Close()
    showGuiError(string(bs))
}

func showGuiError(err string) {
    guiErrorsMut.Lock()
    guiErrors = append(guiErrors, guiError{time.Now(), err})
    if len(guiErrors) > 5 {
        guiErrors = guiErrors[len(guiErrors)-5:]
    }
    guiErrorsMut.Unlock()
}
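Any HTTP client can drive the REST interface registered above. A minimal Go sketch against a locally running instance, assuming the default GUI address from the configuration defaults (127.0.0.1:8080):

```go
package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func main() {
    // GET /rest/version returns the running version as plain text.
    resp, err := http.Get("http://127.0.0.1:8080/rest/version")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    bs, _ := ioutil.ReadAll(resp.Body)
    fmt.Printf("version: %s\n", bs)
}
```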
cmd/syncthing/gui_development.go (new file, 9 lines)

@@ -0,0 +1,9 @@
//+build guidev

package main

import "github.com/codegangsta/martini"

func embeddedStatic() interface{} {
    return martini.Static("gui")
}
cmd/syncthing/gui_embedded.go (new file, 40 lines)

@@ -0,0 +1,40 @@
//+build !guidev

package main

import (
    "fmt"
    "log"
    "mime"
    "net/http"
    "path/filepath"
    "time"

    "github.com/calmh/syncthing/auto"
)

func embeddedStatic() interface{} {
    var modt = time.Now().UTC().Format(http.TimeFormat)

    return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
        file := req.URL.Path

        if file[0] == '/' {
            file = file[1:]
        }

        bs, ok := auto.Assets[file]
        if !ok {
            return
        }

        mtype := mime.TypeByExtension(filepath.Ext(req.URL.Path))
        if len(mtype) != 0 {
            res.Header().Set("Content-Type", mtype)
        }
        res.Header().Set("Content-Size", fmt.Sprintf("%d", len(bs)))
        res.Header().Set("Last-Modified", modt)

        res.Write(bs)
    }
}
cmd/syncthing/gui_unix.go (new file, 31 lines)

@@ -0,0 +1,31 @@
//+build !windows,!solaris

package main

import (
    "syscall"
    "time"
)

func init() {
    go trackCPUUsage()
}

func trackCPUUsage() {
    var prevUsage int64
    var prevTime = time.Now().UnixNano()
    var rusage syscall.Rusage
    for {
        time.Sleep(10 * time.Second)
        syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
        curTime := time.Now().UnixNano()
        timeDiff := curTime - prevTime
        curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
        usageDiff := curUsage - prevUsage
        cpuUsageLock.Lock()
        cpuUsagePercent = 100 * float64(usageDiff) / float64(timeDiff)
        cpuUsageLock.Unlock()
        prevTime = curTime
        prevUsage = curUsage
    }
}
cmd/syncthing/locktrace.go (new file, 43 lines)

@@ -0,0 +1,43 @@
//+build locktrace

package main

import (
    "log"
    "path"
    "runtime"
    "time"
)

var (
    lockTime time.Time
)

func (m *Model) Lock() {
    _, file, line, _ := runtime.Caller(1)
    log.Printf("%s:%d: Lock()...", path.Base(file), line)
    blockTime := time.Now()
    m.RWMutex.Lock()
    lockTime = time.Now()
    log.Printf("%s:%d: ...Lock() [%.04f ms]", path.Base(file), line, time.Since(blockTime).Seconds()*1000)
}

func (m *Model) Unlock() {
    _, file, line, _ := runtime.Caller(1)
    m.RWMutex.Unlock()
    log.Printf("%s:%d: Unlock() [%.04f ms]", path.Base(file), line, time.Since(lockTime).Seconds()*1000)
}

func (m *Model) RLock() {
    _, file, line, _ := runtime.Caller(1)
    log.Printf("%s:%d: RLock()...", path.Base(file), line)
    blockTime := time.Now()
    m.RWMutex.RLock()
    log.Printf("%s:%d: ...RLock() [%.04f ms]", path.Base(file), line, time.Since(blockTime).Seconds()*1000)
}

func (m *Model) RUnlock() {
    _, file, line, _ := runtime.Caller(1)
    m.RWMutex.RUnlock()
    log.Printf("%s:%d: RUnlock()", path.Base(file), line)
}
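These wrappers only compile in when the locktrace build tag is set (`go build -tags locktrace`), and they rely on Model embedding a sync.RWMutex: the wrappers shadow the embedded methods and delegate through the explicit m.RWMutex calls. The shape of that arrangement, sketched under the assumption that Model embeds the mutex:

```go
package example

import "sync"

// Without the locktrace tag, Model exposes the embedded RWMutex's
// methods directly; with it, wrappers like the ones above shadow them
// and add timing logs around the explicit m.RWMutex calls.
type Model struct {
    sync.RWMutex
    // ... model state ...
}
```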
@@ -6,21 +6,21 @@ import (
 	"os"
 )
 
-var debugEnabled = true
-var logger = log.New(os.Stderr, "", log.Lshortfile|log.Ltime)
+var logger *log.Logger
+
+func init() {
+	log.SetOutput(os.Stderr)
+	logger = log.New(os.Stderr, "", log.Flags())
+}
 
 func debugln(vals ...interface{}) {
-	if debugEnabled {
-		s := fmt.Sprintln(vals...)
-		logger.Output(2, "DEBUG: "+s)
-	}
+	s := fmt.Sprintln(vals...)
+	logger.Output(2, "DEBUG: "+s)
 }
 
 func debugf(format string, vals ...interface{}) {
-	if debugEnabled {
-		s := fmt.Sprintf(format, vals...)
-		logger.Output(2, "DEBUG: "+s)
-	}
+	s := fmt.Sprintf(format, vals...)
+	logger.Output(2, "DEBUG: "+s)
 }
 
 func infoln(vals ...interface{}) {
@@ -45,11 +45,13 @@ func okf(format string, vals ...interface{}) {
 
 func warnln(vals ...interface{}) {
 	s := fmt.Sprintln(vals...)
+	showGuiError(s)
 	logger.Output(2, "WARNING: "+s)
 }
 
 func warnf(format string, vals ...interface{}) {
 	s := fmt.Sprintf(format, vals...)
+	showGuiError(s)
 	logger.Output(2, "WARNING: "+s)
 }
 
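The constant 2 passed to logger.Output is the call depth: with log.Lshortfile set, it makes the log line point at the caller of debugln/warnf rather than at the helper itself. A minimal standalone demonstration of that stdlib behavior:

package main

import (
	"log"
	"os"
)

var logger = log.New(os.Stderr, "", log.Lshortfile)

func helper(msg string) {
	// Calldepth 2 skips this frame, so Lshortfile reports the caller's line.
	logger.Output(2, "DEBUG: "+msg)
}

func main() {
	helper("hello") // attributed to this line, not the one inside helper
}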
598 cmd/syncthing/main.go Normal file
@@ -0,0 +1,598 @@
package main

import (
	"compress/gzip"
	"crypto/tls"
	"flag"
	"fmt"
	"log"
	"net"
	"net/http"
	_ "net/http/pprof"
	"os"
	"os/exec"
	"path"
	"runtime"
	"runtime/debug"
	"strconv"
	"strings"
	"time"

	"github.com/calmh/ini"
	"github.com/calmh/syncthing/discover"
	"github.com/calmh/syncthing/protocol"
)

var cfg Configuration
var Version = "unknown-dev"

var (
	myID string
)

var (
	showVersion  bool
	confDir      string
	trace        string
	profiler     string
	verbose      bool
	startupDelay int
)

func main() {
	flag.StringVar(&confDir, "home", getDefaultConfDir(), "Set configuration directory")
	flag.StringVar(&trace, "debug.trace", "", "(connect,net,idx,file,pull)")
	flag.StringVar(&profiler, "debug.profiler", "", "(addr)")
	flag.BoolVar(&showVersion, "version", false, "Show version")
	flag.BoolVar(&verbose, "v", false, "Be more verbose")
	flag.IntVar(&startupDelay, "delay", 0, "Startup delay (s)")
	flag.Usage = usageFor(flag.CommandLine, "syncthing [options]")
	flag.Parse()

	if startupDelay > 0 {
		time.Sleep(time.Duration(startupDelay) * time.Second)
	}

	if showVersion {
		fmt.Println(Version)
		os.Exit(0)
	}

	if len(os.Getenv("GOGC")) == 0 {
		debug.SetGCPercent(25)
	}

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	if len(trace) > 0 {
		log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
		logger.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
	}
	confDir = expandTilde(confDir)

	// Ensure that our home directory exists and that we have a certificate and key.

	ensureDir(confDir, 0700)
	cert, err := loadCert(confDir)
	if err != nil {
		newCertificate(confDir)
		cert, err = loadCert(confDir)
		fatalErr(err)
	}

	myID = string(certID(cert.Certificate[0]))
	log.SetPrefix("[" + myID[0:5] + "] ")
	logger.SetPrefix("[" + myID[0:5] + "] ")

	infoln("Version", Version)
	infoln("My ID:", myID)

	// Prepare to be able to save configuration

	cfgFile := path.Join(confDir, "config.xml")
	go saveConfigLoop(cfgFile)

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cf, err := os.Open(cfgFile)
	if err == nil {
		// Read config.xml
		cfg, err = readConfigXML(cf)
		if err != nil {
			fatalln(err)
		}
		cf.Close()
	} else {
		// No config.xml, let's try the old syncthing.ini
		iniFile := path.Join(confDir, "syncthing.ini")
		cf, err := os.Open(iniFile)
		if err == nil {
			infoln("Migrating syncthing.ini to config.xml")
			iniCfg := ini.Parse(cf)
			cf.Close()
			os.Rename(iniFile, path.Join(confDir, "migrated_syncthing.ini"))

			cfg, _ = readConfigXML(nil)
			cfg.Repositories = []RepositoryConfiguration{
				{Directory: iniCfg.Get("repository", "dir")},
			}
			readConfigINI(iniCfg.OptionMap("settings"), &cfg.Options)
			for name, addrs := range iniCfg.OptionMap("nodes") {
				n := NodeConfiguration{
					NodeID:    name,
					Addresses: strings.Fields(addrs),
				}
				cfg.Repositories[0].Nodes = append(cfg.Repositories[0].Nodes, n)
			}

			saveConfig()
		}
	}

	if len(cfg.Repositories) == 0 {
		infoln("No config file; starting with empty defaults")

		cfg, err = readConfigXML(nil)
		cfg.Repositories = []RepositoryConfiguration{
			{
				Directory: path.Join(getHomeDir(), "Sync"),
				Nodes: []NodeConfiguration{
					{NodeID: myID, Addresses: []string{"dynamic"}},
				},
			},
		}

		saveConfig()
		infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	// Make sure the local node is in the node list.
	cfg.Repositories[0].Nodes = cleanNodeList(cfg.Repositories[0].Nodes, myID)

	var dir = expandTilde(cfg.Repositories[0].Directory)

	if len(profiler) > 0 {
		go func() {
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				warnln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{"bep/1.0"},
		ServerName:             myID,
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
	}

	ensureDir(dir, -1)
	m := NewModel(dir, cfg.Options.MaxChangeKbps*1000)
	for _, t := range strings.Split(trace, ",") {
		m.Trace(t)
	}
	if cfg.Options.MaxSendKbps > 0 {
		m.LimitRate(cfg.Options.MaxSendKbps)
	}

	// GUI
	if cfg.Options.GUIEnabled && cfg.Options.GUIAddress != "" {
		addr, err := net.ResolveTCPAddr("tcp", cfg.Options.GUIAddress)
		if err != nil {
			warnf("Cannot start GUI on %q: %v", cfg.Options.GUIAddress, err)
		} else {
			var hostOpen, hostShow string
			switch {
			case addr.IP == nil:
				hostOpen = "localhost"
				hostShow = "0.0.0.0"
			case addr.IP.IsUnspecified():
				hostOpen = "localhost"
				hostShow = addr.IP.String()
			default:
				hostOpen = addr.IP.String()
				hostShow = hostOpen
			}

			infof("Starting web GUI on http://%s:%d/", hostShow, addr.Port)
			startGUI(cfg.Options.GUIAddress, m)
			openURL(fmt.Sprintf("http://%s:%d", hostOpen, addr.Port))
		}
	}

	// Walk the repository and update the local model before establishing any
	// connections to other nodes.

	if verbose {
		infoln("Populating repository index")
	}
	loadIndex(m)
	updateLocalModel(m)

	connOpts := map[string]string{
		"clientId":      "syncthing",
		"clientVersion": Version,
		"clusterHash":   clusterHash(cfg.Repositories[0].Nodes),
	}

	// Routine to listen for incoming connections
	if verbose {
		infoln("Listening for incoming connections")
	}
	for _, addr := range cfg.Options.ListenAddress {
		go listen(myID, addr, m, tlsCfg, connOpts)
	}

	// Routine to connect out to configured nodes
	if verbose {
		infoln("Attempting to connect to other nodes")
	}
	disc := discovery(cfg.Options.ListenAddress[0])
	go connect(myID, disc, m, tlsCfg, connOpts)

	// Routine to pull blocks from other nodes to synchronize the local
	// repository. Does not run when we are in read only (publish only) mode.
	if !cfg.Options.ReadOnly {
		if verbose {
			if cfg.Options.AllowDelete {
				infoln("Deletes from peer nodes are allowed")
			} else {
				infoln("Deletes from peer nodes will be ignored")
			}
			okln("Ready to synchronize (read-write)")
		}
		m.StartRW(cfg.Options.AllowDelete, cfg.Options.ParallelRequests)
	} else if verbose {
		okln("Ready to synchronize (read only; no external updates accepted)")
	}

	// Periodically scan the repository and update the local model.
	// XXX: Should use some fsnotify mechanism.
	go func() {
		td := time.Duration(cfg.Options.RescanIntervalS) * time.Second
		for {
			time.Sleep(td)
			if m.LocalAge() > (td / 2).Seconds() {
				updateLocalModel(m)
			}
		}
	}()

	if verbose {
		// Periodically print statistics
		go printStatsLoop(m)
	}

	select {}
}
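myID above comes from certID applied to the node's TLS certificate; certID itself is defined elsewhere in the package and is not shown in this diff. As a rough sketch of the idea only, with an assumed hash and encoding (not the actual certID implementation):

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strings"
)

// certFingerprint is a hypothetical stand-in for certID: it reduces a
// certificate's DER bytes to a stable, printable node identifier.
func certFingerprint(der []byte) string {
	h := sha256.Sum256(der)
	return strings.TrimRight(base32.StdEncoding.EncodeToString(h[:]), "=")
}

func main() {
	fmt.Println(certFingerprint([]byte("certificate DER bytes go here")))
}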
func restart() {
	infoln("Restarting")
	args := os.Args
	doAppend := true
	for _, arg := range args {
		if arg == "-delay" {
			doAppend = false
			break
		}
	}
	if doAppend {
		args = append(args, "-delay", "2")
	}
	pgm, err := exec.LookPath(os.Args[0])
	if err != nil {
		warnln(err)
		return
	}
	proc, err := os.StartProcess(pgm, args, &os.ProcAttr{
		Env:   os.Environ(),
		Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
	})
	if err != nil {
		fatalln(err)
	}
	proc.Release()
	os.Exit(0)
}

var saveConfigCh = make(chan struct{})

func saveConfigLoop(cfgFile string) {
	for _ = range saveConfigCh {
		fd, err := os.Create(cfgFile + ".tmp")
		if err != nil {
			warnln(err)
			continue
		}

		err = writeConfigXML(fd, cfg)
		if err != nil {
			warnln(err)
			fd.Close()
			continue
		}

		err = fd.Close()
		if err != nil {
			warnln(err)
			continue
		}

		if runtime.GOOS == "windows" {
			err := os.Remove(cfgFile)
			if err != nil && !os.IsNotExist(err) {
				warnln(err)
			}
		}

		err = os.Rename(cfgFile+".tmp", cfgFile)
		if err != nil {
			warnln(err)
		}
	}
}

func saveConfig() {
	saveConfigCh <- struct{}{}
}

func printStatsLoop(m *Model) {
	var lastUpdated int64
	var lastStats = make(map[string]ConnectionInfo)

	for {
		time.Sleep(60 * time.Second)

		for node, stats := range m.ConnectionStats() {
			secs := time.Since(lastStats[node].At).Seconds()
			inbps := 8 * int(float64(stats.InBytesTotal-lastStats[node].InBytesTotal)/secs)
			outbps := 8 * int(float64(stats.OutBytesTotal-lastStats[node].OutBytesTotal)/secs)

			if inbps+outbps > 0 {
				infof("%s: %sb/s in, %sb/s out", node[0:5], MetricPrefix(int64(inbps)), MetricPrefix(int64(outbps)))
			}

			lastStats[node] = stats
		}

		if lu := m.Generation(); lu > lastUpdated {
			lastUpdated = lu
			files, _, bytes := m.GlobalSize()
			infof("%6d files, %9sB in cluster", files, BinaryPrefix(bytes))
			files, _, bytes = m.LocalSize()
			infof("%6d files, %9sB in local repo", files, BinaryPrefix(bytes))
			needFiles, bytes := m.NeedFiles()
			infof("%6d files, %9sB to synchronize", len(needFiles), BinaryPrefix(bytes))
		}
	}
}

func listen(myID string, addr string, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
	if strings.Contains(trace, "connect") {
		debugln("NET: Listening on", addr)
	}
	l, err := tls.Listen("tcp", addr, tlsCfg)
	fatalErr(err)

listen:
	for {
		conn, err := l.Accept()
		if err != nil {
			warnln(err)
			continue
		}

		if strings.Contains(trace, "connect") {
			debugln("NET: Connect from", conn.RemoteAddr())
		}

		tc := conn.(*tls.Conn)
		err = tc.Handshake()
		if err != nil {
			warnln(err)
			tc.Close()
			continue
		}

		remoteID := certID(tc.ConnectionState().PeerCertificates[0].Raw)

		if remoteID == myID {
			warnf("Connect from myself (%s) - should not happen", remoteID)
			conn.Close()
			continue
		}

		if m.ConnectedTo(remoteID) {
			warnf("Connect from connected node (%s)", remoteID)
		}

		for _, nodeCfg := range cfg.Repositories[0].Nodes {
			if nodeCfg.NodeID == remoteID {
				protoConn := protocol.NewConnection(remoteID, conn, conn, m, connOpts)
				m.AddConnection(conn, protoConn)
				continue listen
			}
		}
		conn.Close()
	}
}

func discovery(addr string) *discover.Discoverer {
	_, portstr, err := net.SplitHostPort(addr)
	fatalErr(err)
	port, _ := strconv.Atoi(portstr)

	if !cfg.Options.LocalAnnEnabled {
		port = -1
	} else if verbose {
		infoln("Sending local discovery announcements")
	}

	if !cfg.Options.GlobalAnnEnabled {
		cfg.Options.GlobalAnnServer = ""
	} else if verbose {
		infoln("Sending external discovery announcements")
	}

	disc, err := discover.NewDiscoverer(myID, port, cfg.Options.GlobalAnnServer)

	if err != nil {
		warnf("No discovery possible (%v)", err)
	}

	return disc
}

func connect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
	for {
	nextNode:
		for _, nodeCfg := range cfg.Repositories[0].Nodes {
			if nodeCfg.NodeID == myID {
				continue
			}
			if m.ConnectedTo(nodeCfg.NodeID) {
				continue
			}
			for _, addr := range nodeCfg.Addresses {
				if addr == "dynamic" {
					if disc != nil {
						t := disc.Lookup(nodeCfg.NodeID)
						if len(t) == 0 {
							continue
						}
						addr = t[0] //XXX: Handle all of them
					}
				}

				if strings.Contains(trace, "connect") {
					debugln("NET: Dial", nodeCfg.NodeID, addr)
				}
				conn, err := tls.Dial("tcp", addr, tlsCfg)
				if err != nil {
					if strings.Contains(trace, "connect") {
						debugln("NET:", err)
					}
					continue
				}

				remoteID := certID(conn.ConnectionState().PeerCertificates[0].Raw)
				if remoteID != nodeCfg.NodeID {
					warnln("Unexpected nodeID", remoteID, "!=", nodeCfg.NodeID)
					conn.Close()
					continue
				}

				protoConn := protocol.NewConnection(remoteID, conn, conn, m, connOpts)
				m.AddConnection(conn, protoConn)
				continue nextNode
			}
		}

		time.Sleep(time.Duration(cfg.Options.ReconnectIntervalS) * time.Second)
	}
}

func updateLocalModel(m *Model) {
	files, _ := m.Walk(cfg.Options.FollowSymlinks)
	m.ReplaceLocal(files)
	saveIndex(m)
}

func saveIndex(m *Model) {
	name := m.RepoID() + ".idx.gz"
	fullName := path.Join(confDir, name)
	idxf, err := os.Create(fullName + ".tmp")
	if err != nil {
		return
	}

	gzw := gzip.NewWriter(idxf)

	protocol.IndexMessage{
		Repository: "local",
		Files:      m.ProtocolIndex(),
	}.EncodeXDR(gzw)
	gzw.Close()
	idxf.Close()
	os.Rename(fullName+".tmp", fullName)
}

func loadIndex(m *Model) {
	name := m.RepoID() + ".idx.gz"
	idxf, err := os.Open(path.Join(confDir, name))
	if err != nil {
		return
	}
	defer idxf.Close()

	gzr, err := gzip.NewReader(idxf)
	if err != nil {
		return
	}
	defer gzr.Close()

	var im protocol.IndexMessage
	err = im.DecodeXDR(gzr)
	if err != nil || im.Repository != "local" {
		return
	}
	m.SeedLocal(im.Files)
}

func ensureDir(dir string, mode int) {
	fi, err := os.Stat(dir)
	if os.IsNotExist(err) {
		err := os.MkdirAll(dir, 0700)
		fatalErr(err)
	} else if mode >= 0 && err == nil && int(fi.Mode()&0777) != mode {
		err := os.Chmod(dir, os.FileMode(mode))
		fatalErr(err)
	}
}

func expandTilde(p string) string {
	if runtime.GOOS == "windows" {
		return p
	}

	if strings.HasPrefix(p, "~/") {
		return strings.Replace(p, "~", getUnixHomeDir(), 1)
	}
	return p
}

func getUnixHomeDir() string {
	home := os.Getenv("HOME")
	if home == "" {
		fatalln("No home directory?")
	}
	return home
}

func getHomeDir() string {
	if runtime.GOOS == "windows" {
		home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
		if home == "" {
			home = os.Getenv("USERPROFILE")
		}
		return home
	}
	return getUnixHomeDir()
}

func getDefaultConfDir() string {
	if runtime.GOOS == "windows" {
		return path.Join(os.Getenv("AppData"), "syncthing")
	}
	return expandTilde("~/.syncthing")
}
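saveConfigLoop above relies on the write-temp-then-rename idiom so that a crash mid-write never leaves a truncated config.xml behind. The same pattern in isolation (hypothetical helper name; the Windows pre-remove step shown above is omitted here for brevity):

package main

import "os"

// writeFileAtomic mirrors the save pattern in saveConfigLoop: write the
// full contents to a temporary file, then rename it over the destination
// so readers only ever see a complete file.
func writeFileAtomic(name string, data []byte) error {
	tmp := name + ".tmp"
	if err := os.WriteFile(tmp, data, 0600); err != nil {
		return err
	}
	return os.Rename(tmp, name)
}

func main() {
	if err := writeFileAtomic("config.xml", []byte("<configuration/>")); err != nil {
		panic(err)
	}
}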
917 cmd/syncthing/model.go Normal file
@@ -0,0 +1,917 @@
package main

import (
	"crypto/sha1"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path"
	"sync"
	"time"

	"github.com/calmh/syncthing/buffers"
	"github.com/calmh/syncthing/protocol"
)

type Model struct {
	dir string

	global    map[string]File // the latest version of each file as it exists in the cluster
	gmut      sync.RWMutex    // protects global
	local     map[string]File // the files we currently have locally on disk
	lmut      sync.RWMutex    // protects local
	remote    map[string]map[string]File
	rmut      sync.RWMutex // protects remote
	protoConn map[string]Connection
	rawConn   map[string]io.Closer
	pmut      sync.RWMutex // protects protoConn and rawConn

	// Queue for files to fetch. fq can call back into the model, so we must ensure
	// to hold no locks when calling methods on fq.
	fq *FileQueue
	dq chan File // queue for files to delete

	updatedLocal        int64 // timestamp of last update to local
	updateGlobal        int64 // timestamp of last update to remote
	lastIdxBcast        time.Time
	lastIdxBcastRequest time.Time
	umut                sync.RWMutex // protects updated* and lastIdx*

	rwRunning bool
	delete    bool
	initmut   sync.Mutex // protects rwRunning and delete

	trace map[string]bool

	sup suppressor

	parallelRequests int
	limitRequestRate chan struct{}

	imut sync.Mutex // protects Index
}

type Connection interface {
	ID() string
	Index(string, []protocol.FileInfo)
	Request(repo, name string, offset int64, size int) ([]byte, error)
	Statistics() protocol.Statistics
	Option(key string) string
}

const (
	idxBcastHoldtime = 15 * time.Second  // Wait at least this long after the last index modification
	idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
)

var (
	ErrNoSuchFile = errors.New("no such file")
	ErrInvalid    = errors.New("file is invalid")
)

// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
func NewModel(dir string, maxChangeBw int) *Model {
	m := &Model{
		dir:          dir,
		global:       make(map[string]File),
		local:        make(map[string]File),
		remote:       make(map[string]map[string]File),
		protoConn:    make(map[string]Connection),
		rawConn:      make(map[string]io.Closer),
		lastIdxBcast: time.Now(),
		trace:        make(map[string]bool),
		sup:          suppressor{threshold: int64(maxChangeBw)},
		fq:           NewFileQueue(),
		dq:           make(chan File),
	}

	go m.broadcastIndexLoop()
	return m
}
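Condensed from the call sequence in main.go earlier in this diff, the model lifecycle is construct, optionally configure, then switch to read/write. The helper and its values below are illustrative only, not part of the diff:

package main

// exampleModelSetup mirrors how main.go wires the model together; the
// directory and numbers are made-up example values, not defaults.
func exampleModelSetup() *Model {
	m := NewModel("/home/user/Sync", 1000*1000) // repo dir, change-rate threshold for the suppressor
	m.Trace("net")                              // enable one trace facility
	m.LimitRate(100)                            // cap outgoing request data at ~100 KBps
	m.StartRW(true, 4)                          // read/write mode: allow deletes, four pull workers
	return m
}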
func (m *Model) LimitRate(kbps int) {
	m.limitRequestRate = make(chan struct{}, kbps)
	n := kbps/10 + 1
	go func() {
		for {
			time.Sleep(100 * time.Millisecond)
			for i := 0; i < n; i++ {
				select {
				case m.limitRequestRate <- struct{}{}:
				}
			}
		}
	}()
}

// Trace enables trace logging of the given facility. This is a debugging function; grep for m.trace.
func (m *Model) Trace(t string) {
	m.trace[t] = true
}

// StartRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRW(del bool, threads int) {
	m.initmut.Lock()
	defer m.initmut.Unlock()

	if m.rwRunning {
		panic("starting started model")
	}

	m.rwRunning = true
	m.delete = del
	m.parallelRequests = threads

	go m.cleanTempFiles()
	if del {
		go m.deleteLoop()
	}
}

// Generation returns an opaque integer that is guaranteed to increment on
// every change to the local repository or global model.
func (m *Model) Generation() int64 {
	m.umut.RLock()
	defer m.umut.RUnlock()

	return m.updatedLocal + m.updateGlobal
}

func (m *Model) LocalAge() float64 {
	m.umut.RLock()
	defer m.umut.RUnlock()

	return time.Since(time.Unix(m.updatedLocal, 0)).Seconds()
}

type ConnectionInfo struct {
	protocol.Statistics
	Address       string
	ClientID      string
	ClientVersion string
	Completion    int
}

// ConnectionStats returns a map with connection statistics for each connected node.
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
	type remoteAddrer interface {
		RemoteAddr() net.Addr
	}

	m.gmut.RLock()
	m.pmut.RLock()
	m.rmut.RLock()

	var tot int64
	for _, f := range m.global {
		tot += f.Size
	}

	var res = make(map[string]ConnectionInfo)
	for node, conn := range m.protoConn {
		ci := ConnectionInfo{
			Statistics:    conn.Statistics(),
			ClientID:      conn.Option("clientId"),
			ClientVersion: conn.Option("clientVersion"),
		}
		if nc, ok := m.rawConn[node].(remoteAddrer); ok {
			ci.Address = nc.RemoteAddr().String()
		}

		var have int64
		for _, f := range m.remote[node] {
			if f.Equals(m.global[f.Name]) {
				have += f.Size
			}
		}

		ci.Completion = 100
		if tot != 0 {
			ci.Completion = int(100 * have / tot)
		}

		res[node] = ci
	}

	m.rmut.RUnlock()
	m.pmut.RUnlock()
	m.gmut.RUnlock()
	return res
}

// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize() (files, deleted int, bytes int64) {
	m.gmut.RLock()

	for _, f := range m.global {
		if f.Flags&protocol.FlagDeleted == 0 {
			files++
			bytes += f.Size
		} else {
			deleted++
		}
	}

	m.gmut.RUnlock()
	return
}

// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize() (files, deleted int, bytes int64) {
	m.lmut.RLock()

	for _, f := range m.local {
		if f.Flags&protocol.FlagDeleted == 0 {
			files++
			bytes += f.Size
		} else {
			deleted++
		}
	}

	m.lmut.RUnlock()
	return
}

// InSyncSize returns the number and total byte size of the local files that
// are in sync with the global model.
func (m *Model) InSyncSize() (files, bytes int64) {
	m.gmut.RLock()
	m.lmut.RLock()

	for n, f := range m.local {
		if gf, ok := m.global[n]; ok && f.Equals(gf) {
			files++
			bytes += f.Size
		}
	}

	m.lmut.RUnlock()
	m.gmut.RUnlock()
	return
}

// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int64) {
	qf := m.fq.QueuedFiles()

	m.gmut.RLock()

	for _, n := range qf {
		f := m.global[n]
		files = append(files, f)
		bytes += f.Size
	}

	m.gmut.RUnlock()
	return
}

// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
	var files = make([]File, len(fs))
	for i := range fs {
		files[i] = fileFromFileInfo(fs[i])
	}

	m.imut.Lock()
	defer m.imut.Unlock()

	if m.trace["net"] {
		debugf("NET IDX(in): %s: %d files", nodeID, len(fs))
	}

	repo := make(map[string]File)
	for _, f := range files {
		m.indexUpdate(repo, f)
	}

	m.rmut.Lock()
	m.remote[nodeID] = repo
	m.rmut.Unlock()

	m.recomputeGlobal()
	m.recomputeNeedForFiles(files)
}

// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
	var files = make([]File, len(fs))
	for i := range fs {
		files[i] = fileFromFileInfo(fs[i])
	}

	m.imut.Lock()
	defer m.imut.Unlock()

	if m.trace["net"] {
		debugf("NET IDXUP(in): %s: %d files", nodeID, len(files))
	}

	m.rmut.Lock()
	repo, ok := m.remote[nodeID]
	if !ok {
		warnf("Index update from node %s that does not have an index", nodeID)
		m.rmut.Unlock()
		return
	}

	for _, f := range files {
		m.indexUpdate(repo, f)
	}
	m.rmut.Unlock()

	m.recomputeGlobal()
	m.recomputeNeedForFiles(files)
}

func (m *Model) indexUpdate(repo map[string]File, f File) {
	if m.trace["idx"] {
		var flagComment string
		if f.Flags&protocol.FlagDeleted != 0 {
			flagComment = " (deleted)"
		}
		debugf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
	}

	if extraFlags := f.Flags &^ (protocol.FlagInvalid | protocol.FlagDeleted | 0xfff); extraFlags != 0 {
		warnf("IDX(in): Unknown flags 0x%x in index record %+v", extraFlags, f)
		return
	}

	repo[f.Name] = f
}

// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node string, err error) {
	if m.trace["net"] {
		debugf("NET: %s: %v", node, err)
	}
	if err == protocol.ErrClusterHash {
		warnf("Connection to %s closed due to mismatched cluster hash. Ensure that the configured cluster members are identical on both nodes.", node)
	} else if err != io.EOF {
		warnf("Connection to %s closed: %v", node, err)
	}

	m.fq.RemoveAvailable(node)

	m.pmut.Lock()
	m.rmut.Lock()

	conn, ok := m.rawConn[node]
	if ok {
		conn.Close()
	}

	delete(m.remote, node)
	delete(m.protoConn, node)
	delete(m.rawConn, node)

	m.rmut.Unlock()
	m.pmut.Unlock()

	m.recomputeGlobal()
	m.recomputeNeedForGlobal()
}

// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]byte, error) {
	// Verify that the requested file exists in the local and global model.
	m.lmut.RLock()
	lf, localOk := m.local[name]
	m.lmut.RUnlock()

	m.gmut.RLock()
	_, globalOk := m.global[name]
	m.gmut.RUnlock()

	if !localOk || !globalOk {
		warnf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
		return nil, ErrNoSuchFile
	}
	if lf.Flags&protocol.FlagInvalid != 0 {
		return nil, ErrInvalid
	}

	if m.trace["net"] && nodeID != "<local>" {
		debugf("NET REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
	}
	fn := path.Join(m.dir, name)
	fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	buf := buffers.Get(int(size))
	_, err = fd.ReadAt(buf, offset)
	if err != nil {
		return nil, err
	}

	if m.limitRequestRate != nil {
		for s := 0; s < len(buf); s += 1024 {
			<-m.limitRequestRate
		}
	}

	return buf, nil
}
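Taken together, LimitRate and the drain loop at the end of Request form a coarse token bucket: the refill goroutine deposits kbps/10+1 tokens every 100 ms, and each token pays for one started KiB of response data. A quick check of the resulting ceiling:

package main

import "fmt"

func main() {
	kbps := 100
	perTick := kbps/10 + 1    // tokens added per 100 ms tick, as in LimitRate
	perSecond := perTick * 10 // ten ticks per second
	// Request consumes one token per started 1024-byte slice, so sustained
	// throughput levels off around perSecond KiB/s (110 here, slightly
	// above the nominal 100 because of the +1 rounding).
	fmt.Println(perSecond, "KiB/s ceiling")
}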
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(fs []File) {
	var updated bool
	var newLocal = make(map[string]File)

	m.lmut.RLock()
	for _, f := range fs {
		newLocal[f.Name] = f
		if ef := m.local[f.Name]; !ef.Equals(f) {
			updated = true
		}
	}
	m.lmut.RUnlock()

	if m.markDeletedLocals(newLocal) {
		updated = true
	}

	m.lmut.RLock()
	if len(newLocal) != len(m.local) {
		updated = true
	}
	m.lmut.RUnlock()

	if updated {
		m.lmut.Lock()
		m.local = newLocal
		m.lmut.Unlock()

		m.recomputeGlobal()
		m.recomputeNeedForGlobal()

		m.umut.Lock()
		m.updatedLocal = time.Now().Unix()
		m.lastIdxBcastRequest = time.Now()
		m.umut.Unlock()
	}
}

// SeedLocal replaces the local repository index with the given list of files,
// in protocol data types. Does not track deletes, should only be used to seed
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
	m.lmut.Lock()
	m.local = make(map[string]File)
	for _, f := range fs {
		m.local[f.Name] = fileFromFileInfo(f)
	}
	m.lmut.Unlock()

	m.recomputeGlobal()
	m.recomputeNeedForGlobal()
}

// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID string) bool {
	m.pmut.RLock()
	_, ok := m.protoConn[nodeID]
	m.pmut.RUnlock()
	return ok
}

// RepoID returns a unique ID representing the current repository location.
func (m *Model) RepoID() string {
	return fmt.Sprintf("%x", sha1.Sum([]byte(m.dir)))
}

// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
	nodeID := protoConn.ID()
	m.pmut.Lock()
	m.protoConn[nodeID] = protoConn
	m.rawConn[nodeID] = rawConn
	m.pmut.Unlock()

	go func() {
		idx := m.ProtocolIndex()
		protoConn.Index("default", idx)
	}()

	m.initmut.Lock()
	rw := m.rwRunning
	m.initmut.Unlock()
	if !rw {
		return
	}

	for i := 0; i < m.parallelRequests; i++ {
		i := i
		go func() {
			if m.trace["pull"] {
				debugln("PULL: Starting", nodeID, i)
			}
			for {
				m.pmut.RLock()
				if _, ok := m.protoConn[nodeID]; !ok {
					if m.trace["pull"] {
						debugln("PULL: Exiting", nodeID, i)
					}
					m.pmut.RUnlock()
					return
				}
				m.pmut.RUnlock()

				qb, ok := m.fq.Get(nodeID)
				if ok {
					if m.trace["pull"] {
						debugln("PULL: Request", nodeID, i, qb.name, qb.block.Offset)
					}
					data, _ := protoConn.Request("default", qb.name, qb.block.Offset, int(qb.block.Size))
					m.fq.Done(qb.name, qb.block.Offset, data)
				} else {
					time.Sleep(1 * time.Second)
				}
			}
		}()
	}
}

// ProtocolIndex returns the current local index in protocol data types.
// Must be called with the read lock held.
func (m *Model) ProtocolIndex() []protocol.FileInfo {
	var index []protocol.FileInfo

	m.lmut.RLock()

	for _, f := range m.local {
		mf := fileInfoFromFile(f)
		if m.trace["idx"] {
			var flagComment string
			if mf.Flags&protocol.FlagDeleted != 0 {
				flagComment = " (deleted)"
			}
			debugf("IDX(out): %q m=%d f=%o%s v=%d (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
		}
		index = append(index, mf)
	}

	m.lmut.RUnlock()
	return index
}

func (m *Model) requestGlobal(nodeID, name string, offset int64, size int, hash []byte) ([]byte, error) {
	m.pmut.RLock()
	nc, ok := m.protoConn[nodeID]
	m.pmut.RUnlock()

	if !ok {
		return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
	}

	if m.trace["net"] {
		debugf("NET REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
	}

	return nc.Request("default", name, offset, size)
}

func (m *Model) broadcastIndexLoop() {
	for {
		m.umut.RLock()
		bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast)
		holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime
		m.umut.RUnlock()

		maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay
		if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
			idx := m.ProtocolIndex()

			var indexWg sync.WaitGroup
			indexWg.Add(len(m.protoConn))

			m.umut.Lock()
			m.lastIdxBcast = time.Now()
			m.umut.Unlock()

			m.pmut.RLock()
			for _, node := range m.protoConn {
				node := node
				if m.trace["net"] {
					debugf("NET IDX(out/loop): %s: %d files", node.ID(), len(idx))
				}
				go func() {
					node.Index("default", idx)
					indexWg.Done()
				}()
			}
			m.pmut.RUnlock()

			indexWg.Wait()
		}
		time.Sleep(idxBcastHoldtime)
	}
}
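broadcastIndexLoop is a debounce with a latency bound: it rebroadcasts only once changes have been quiet for idxBcastHoldtime, but never later than idxBcastMaxDelay after the previous broadcast. The decision restated on its own, as a sketch using the constants above:

package main

import (
	"fmt"
	"time"
)

// shouldBroadcast restates the condition checked in broadcastIndexLoop.
func shouldBroadcast(lastBcast, lastRequest, now time.Time) bool {
	requested := lastRequest.After(lastBcast)
	settled := now.Sub(lastRequest) > 15*time.Second // idxBcastHoldtime
	overdue := now.Sub(lastBcast) > 120*time.Second  // idxBcastMaxDelay
	return requested && (settled || overdue)
}

func main() {
	now := time.Now()
	fmt.Println(shouldBroadcast(now.Add(-time.Minute), now.Add(-20*time.Second), now)) // true: changes have settled
	fmt.Println(shouldBroadcast(now.Add(-time.Minute), now.Add(-5*time.Second), now))  // false: still changing, not yet overdue
}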
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
	// For every file in the existing local table, check if they are also
	// present in the new local table. If they are not, check that we already
	// had the newest version available according to the global table and if so
	// note the file as having been deleted.
	var updated bool

	m.gmut.RLock()
	m.lmut.RLock()

	for n, f := range m.local {
		if _, ok := newLocal[n]; !ok {
			if gf := m.global[n]; !gf.NewerThan(f) {
				if f.Flags&protocol.FlagDeleted == 0 {
					f.Flags = protocol.FlagDeleted
					f.Version++
					f.Blocks = nil
					updated = true
				}
				newLocal[n] = f
			}
		}
	}

	m.lmut.RUnlock()
	m.gmut.RUnlock()

	return updated
}

func (m *Model) updateLocal(f File) {
	var updated bool

	m.lmut.Lock()
	if ef, ok := m.local[f.Name]; !ok || !ef.Equals(f) {
		m.local[f.Name] = f
		updated = true
	}
	m.lmut.Unlock()

	if updated {
		m.recomputeGlobal()
		// We don't recomputeNeed here for two reasons:
		// - a need shouldn't have arisen due to having a newer local file
		// - recomputeNeed might call into fq.Add but we might have been called by
		//   fq which would be a deadlock on fq

		m.umut.Lock()
		m.updatedLocal = time.Now().Unix()
		m.lastIdxBcastRequest = time.Now()
		m.umut.Unlock()
	}
}

/*
XXX: Not done, needs elegant handling of availability

func (m *Model) recomputeGlobalFor(files []File) bool {
	m.gmut.Lock()
	defer m.gmut.Unlock()

	var updated bool
	for _, f := range files {
		if gf, ok := m.global[f.Name]; !ok || f.NewerThan(gf) {
			m.global[f.Name] = f
			updated = true
			// Fix availability
		}
	}
	return updated
}
*/

func (m *Model) recomputeGlobal() {
	var newGlobal = make(map[string]File)

	m.lmut.RLock()
	for n, f := range m.local {
		newGlobal[n] = f
	}
	m.lmut.RUnlock()

	var available = make(map[string][]string)

	m.rmut.RLock()
	var highestMod int64
	for nodeID, fs := range m.remote {
		for n, nf := range fs {
			if lf, ok := newGlobal[n]; !ok || nf.NewerThan(lf) {
				newGlobal[n] = nf
				available[n] = []string{nodeID}
				if nf.Modified > highestMod {
					highestMod = nf.Modified
				}
			} else if lf.Equals(nf) {
				available[n] = append(available[n], nodeID)
			}
		}
	}
	m.rmut.RUnlock()

	for f, ns := range available {
		m.fq.SetAvailable(f, ns)
	}

	// Figure out if anything actually changed

	m.gmut.RLock()
	var updated bool
	if highestMod > m.updateGlobal || len(newGlobal) != len(m.global) {
		updated = true
	} else {
		for n, f0 := range newGlobal {
			if f1, ok := m.global[n]; !ok || !f0.Equals(f1) {
				updated = true
				break
			}
		}
	}
	m.gmut.RUnlock()

	if updated {
		m.gmut.Lock()
		m.umut.Lock()
		m.global = newGlobal
		m.updateGlobal = time.Now().Unix()
		m.umut.Unlock()
		m.gmut.Unlock()
	}
}
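The heart of recomputeGlobal is a newest-wins merge: start from the local table and let any strictly newer remote entry replace it, remembering which nodes hold the winning version. Reduced to a bare version counter (an assumption for brevity; the real File.NewerThan comparison also weighs modification time and flags):

package main

import "fmt"

func mergeGlobal(local map[string]int, remotes []map[string]int) map[string]int {
	global := make(map[string]int, len(local))
	for name, v := range local {
		global[name] = v
	}
	for _, remote := range remotes {
		for name, v := range remote {
			if cur, ok := global[name]; !ok || v > cur {
				global[name] = v // strictly newer remote version wins
			}
		}
	}
	return global
}

func main() {
	local := map[string]int{"a": 1, "b": 2}
	remotes := []map[string]int{{"b": 3}, {"c": 1}}
	fmt.Println(mergeGlobal(local, remotes)) // map[a:1 b:3 c:1]
}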
type addOrder struct {
	n      string
	remote []Block
	fm     *fileMonitor
}

func (m *Model) recomputeNeedForGlobal() {
	var toDelete []File
	var toAdd []addOrder

	m.gmut.RLock()

	for _, gf := range m.global {
		toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
	}

	m.gmut.RUnlock()

	for _, ao := range toAdd {
		m.fq.Add(ao.n, ao.remote, ao.fm)
	}
	for _, gf := range toDelete {
		m.dq <- gf
	}
}

func (m *Model) recomputeNeedForFiles(files []File) {
	var toDelete []File
	var toAdd []addOrder

	m.gmut.RLock()

	for _, gf := range files {
		toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
	}

	m.gmut.RUnlock()

	for _, ao := range toAdd {
		m.fq.Add(ao.n, ao.remote, ao.fm)
	}
	for _, gf := range toDelete {
		m.dq <- gf
	}
}

func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File) ([]addOrder, []File) {
	m.lmut.RLock()
	lf, ok := m.local[gf.Name]
	m.lmut.RUnlock()

	if !ok || gf.NewerThan(lf) {
		if gf.Flags&protocol.FlagInvalid != 0 {
			// Never attempt to sync invalid files
			return toAdd, toDelete
		}
		if gf.Flags&protocol.FlagDeleted != 0 && !m.delete {
			// Don't want to delete files, so forget this need
			return toAdd, toDelete
		}
		if gf.Flags&protocol.FlagDeleted != 0 && !ok {
			// Don't have the file, so don't need to delete it
			return toAdd, toDelete
		}
		if m.trace["need"] {
			debugf("NEED: lf:%v gf:%v", lf, gf)
		}

		if gf.Flags&protocol.FlagDeleted != 0 {
			toDelete = append(toDelete, gf)
		} else {
			local, remote := BlockDiff(lf.Blocks, gf.Blocks)
			fm := fileMonitor{
				name:        gf.Name,
				path:        path.Clean(path.Join(m.dir, gf.Name)),
				global:      gf,
				model:       m,
				localBlocks: local,
			}
			toAdd = append(toAdd, addOrder{gf.Name, remote, &fm})
		}
	}

	return toAdd, toDelete
}

func (m *Model) WhoHas(name string) []string {
	var remote []string

	m.gmut.RLock()
	m.rmut.RLock()

	gf := m.global[name]
	for node, files := range m.remote {
		if file, ok := files[name]; ok && file.Equals(gf) {
			remote = append(remote, node)
		}
	}

	m.rmut.RUnlock()
	m.gmut.RUnlock()
	return remote
}

func (m *Model) deleteLoop() {
	for file := range m.dq {
		if m.trace["file"] {
			debugln("FILE: Delete", file.Name)
		}
		path := path.Clean(path.Join(m.dir, file.Name))
		err := os.Remove(path)
		if err != nil {
			warnf("%s: %v", file.Name, err)
		}

		m.updateLocal(file)
	}
}

func fileFromFileInfo(f protocol.FileInfo) File {
	var blocks = make([]Block, len(f.Blocks))
	var offset int64
	for i, b := range f.Blocks {
		blocks[i] = Block{
			Offset: offset,
			Size:   b.Size,
			Hash:   b.Hash,
		}
		offset += int64(b.Size)
	}
	return File{
		Name:     f.Name,
		Size:     offset,
		Flags:    f.Flags,
		Modified: f.Modified,
		Version:  f.Version,
		Blocks:   blocks,
	}
}

func fileInfoFromFile(f File) protocol.FileInfo {
	var blocks = make([]protocol.BlockInfo, len(f.Blocks))
	for i, b := range f.Blocks {
		blocks[i] = protocol.BlockInfo{
			Size: b.Size,
			Hash: b.Hash,
		}
	}
	return protocol.FileInfo{
		Name:     f.Name,
		Flags:    f.Flags,
		Modified: f.Modified,
		Version:  f.Version,
		Blocks:   blocks,
	}
}
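A detail worth noting in fileFromFileInfo above: block offsets are not sent over the wire; they are reconstructed by accumulating block sizes, and the running total doubles as the file size. The same accumulation in isolation:

package main

import "fmt"

func main() {
	sizes := []uint32{131072, 131072, 4096} // block sizes as received in a FileInfo
	var offset int64
	for _, s := range sizes {
		fmt.Printf("block at offset %d, size %d\n", offset, s)
		offset += int64(s)
	}
	fmt.Println("file size:", offset) // 266240
}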
540 cmd/syncthing/model_test.go Normal file
@@ -0,0 +1,540 @@
package main

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/calmh/syncthing/protocol"
)

func TestNewModel(t *testing.T) {
	m := NewModel("foo", 1e6)

	if m == nil {
		t.Fatalf("NewModel returned nil")
	}

	if fs, _ := m.NeedFiles(); len(fs) > 0 {
		t.Errorf("New model should have no Need")
	}

	if len(m.local) > 0 {
		t.Errorf("New model should have no Have")
	}
}

var testDataExpected = map[string]File{
	"foo": File{
		Name:     "foo",
		Flags:    0,
		Modified: 0,
		Size:     7,
		Blocks:   []Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
	},
	"empty": File{
		Name:     "empty",
		Flags:    0,
		Modified: 0,
		Size:     0,
		Blocks:   []Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
	},
	"bar": File{
		Name:     "bar",
		Flags:    0,
		Modified: 0,
		Size:     10,
		Blocks:   []Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
	},
}

func init() {
	// Fix expected test data to match reality
	for n, f := range testDataExpected {
		fi, _ := os.Stat("testdata/" + n)
		f.Flags = uint32(fi.Mode())
		f.Modified = fi.ModTime().Unix()
		testDataExpected[n] = f
	}
}

func TestUpdateLocal(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	if fs, _ := m.NeedFiles(); len(fs) > 0 {
		t.Fatalf("Model with only local data should have no need")
	}

	if l1, l2 := len(m.local), len(testDataExpected); l1 != l2 {
		t.Fatalf("Model len(local) incorrect, %d != %d", l1, l2)
	}
	if l1, l2 := len(m.global), len(testDataExpected); l1 != l2 {
		t.Fatalf("Model len(global) incorrect, %d != %d", l1, l2)
	}
	for name, file := range testDataExpected {
		if f, ok := m.local[name]; ok {
			if !reflect.DeepEqual(f, file) {
				t.Errorf("Incorrect local\n%v !=\n%v\nfor file %q", f, file, name)
			}
		} else {
			t.Errorf("Missing file %q in local table", name)
		}
		if f, ok := m.global[name]; ok {
			if !reflect.DeepEqual(f, file) {
				t.Errorf("Incorrect global\n%v !=\n%v\nfor file %q", f, file, name)
			}
		} else {
			t.Errorf("Missing file %q in global table", name)
		}
	}

	for _, f := range fs {
		if hf, ok := m.local[f.Name]; !ok || hf.Modified != f.Modified {
			t.Fatalf("Incorrect local for %q", f.Name)
		}
		if cf, ok := m.global[f.Name]; !ok || cf.Modified != f.Modified {
			t.Fatalf("Incorrect global for %q", f.Name)
		}
	}
}

func TestRemoteUpdateExisting(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	newFile := protocol.FileInfo{
		Name:     "foo",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index("42", []protocol.FileInfo{newFile})

	if fs, _ := m.NeedFiles(); len(fs) != 1 {
		t.Errorf("Model missing Need for one file (%d != 1)", len(fs))
	}
}

func TestRemoteAddNew(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	newFile := protocol.FileInfo{
		Name:     "a new file",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index("42", []protocol.FileInfo{newFile})

	if fs, _ := m.NeedFiles(); len(fs) != 1 {
		t.Errorf("Model len(m.need) incorrect (%d != 1)", len(fs))
	}
}

func TestRemoteUpdateOld(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	oldTimeStamp := int64(1234)
	newFile := protocol.FileInfo{
		Name:     "foo",
		Modified: oldTimeStamp,
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index("42", []protocol.FileInfo{newFile})

	if fs, _ := m.NeedFiles(); len(fs) != 0 {
		t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
	}
}

func TestRemoteIndexUpdate(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	foo := protocol.FileInfo{
		Name:     "foo",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}

	bar := protocol.FileInfo{
		Name:     "bar",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}

	m.Index("42", []protocol.FileInfo{foo})

	if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
		t.Error("Model doesn't need 'foo'")
	}

	m.IndexUpdate("42", []protocol.FileInfo{bar})

	if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
		t.Error("Model doesn't need 'foo'")
	}
	if fs, _ := m.NeedFiles(); fs[1].Name != "bar" {
		t.Error("Model doesn't need 'bar'")
	}
}
func TestDelete(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs); l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	ot := time.Now().Unix()
	newFile := File{
		Name:     "a new file",
		Modified: ot,
		Blocks:   []Block{{0, 100, []byte("some hash bytes")}},
	}
	m.updateLocal(newFile)

	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	// The deleted file is kept in the local and global tables and marked as deleted.

	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	if m.local["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in local table")
	}
	if len(m.local["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in local")
	}
	if ft := m.local["a new file"].Modified; ft != ot {
		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
	}
	if fv := m.local["a new file"].Version; fv != 1 {
		t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
	}

	if m.global["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in global table")
	}
	if len(m.global["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in global")
	}
	if ft := m.global["a new file"].Modified; ft != ot {
		t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot+1)
	}
	if fv := m.local["a new file"].Version; fv != 1 {
		t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
	}

	// Another update should change nothing

	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	if m.local["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in local table")
	}
	if len(m.local["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in local")
	}
	if ft := m.local["a new file"].Modified; ft != ot {
		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot)
	}
	if fv := m.local["a new file"].Version; fv != 1 {
		t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
	}

	if m.global["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in global table")
	}
	if len(m.global["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in global")
	}
	if ft := m.global["a new file"].Modified; ft != ot {
		t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot)
	}
	if fv := m.local["a new file"].Version; fv != 1 {
		t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
	}
}

func TestForgetNode(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs); l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}
	if fs, _ := m.NeedFiles(); len(fs) != 0 {
		t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
	}

	newFile := protocol.FileInfo{
		Name:     "new file",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index("42", []protocol.FileInfo{newFile})

	newFile = protocol.FileInfo{
		Name:     "new file 2",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index("43", []protocol.FileInfo{newFile})

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+2; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}
	if fs, _ := m.NeedFiles(); len(fs) != 2 {
		t.Errorf("Model len(need) incorrect (%d != 2)", len(fs))
	}

	m.Close("42", nil)

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	if fs, _ := m.NeedFiles(); len(fs) != 1 {
		t.Errorf("Model len(need) incorrect (%d != 1)", len(fs))
	}
}
func TestRequest(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	bs, err := m.Request("some node", "default", "foo", 0, 6)
	if err != nil {
		t.Fatal(err)
	}
	if bytes.Compare(bs, []byte("foobar")) != 0 {
		t.Errorf("Incorrect data from request: %q", string(bs))
	}

	bs, err = m.Request("some node", "default", "../walk.go", 0, 6)
	if err == nil {
		t.Error("Unexpected nil error on insecure file read")
	}
	if bs != nil {
		t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs))
	}
}

func TestIgnoreWithUnknownFlags(t *testing.T) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	valid := protocol.FileInfo{
		Name:     "valid",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
		Flags:    protocol.FlagDeleted | 0755,
	}

	invalid := protocol.FileInfo{
		Name:     "invalid",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
		Flags:    1<<27 | protocol.FlagDeleted | 0755,
	}

	m.Index("42", []protocol.FileInfo{valid, invalid})

	if _, ok := m.global[valid.Name]; !ok {
		t.Error("Model should include", valid)
	}
	if _, ok := m.global[invalid.Name]; ok {
		t.Error("Model should not include", invalid)
	}
}

func genFiles(n int) []protocol.FileInfo {
	files := make([]protocol.FileInfo, n)
	t := time.Now().Unix()
	for i := 0; i < n; i++ {
		files[i] = protocol.FileInfo{
			Name:     fmt.Sprintf("file%d", i),
			Modified: t,
			Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
		}
	}

	return files
}

func BenchmarkIndex10000(b *testing.B) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)
	files := genFiles(10000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Index("42", files)
	}
}

func BenchmarkIndex00100(b *testing.B) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)
	files := genFiles(100)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Index("42", files)
	}
}

func BenchmarkIndexUpdate10000f10000(b *testing.B) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)
	files := genFiles(10000)
	m.Index("42", files)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.IndexUpdate("42", files)
	}
}

func BenchmarkIndexUpdate10000f00100(b *testing.B) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)
	files := genFiles(10000)
	m.Index("42", files)

	ufiles := genFiles(100)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.IndexUpdate("42", ufiles)
	}
}

func BenchmarkIndexUpdate10000f00001(b *testing.B) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)
	files := genFiles(10000)
	m.Index("42", files)

	ufiles := genFiles(1)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.IndexUpdate("42", ufiles)
	}
}

type FakeConnection struct {
	id          string
	requestData []byte
}

func (FakeConnection) Close() error {
	return nil
}

func (f FakeConnection) ID() string {
	return string(f.id)
}

func (f FakeConnection) Option(string) string {
	return ""
}

func (FakeConnection) Index(string, []protocol.FileInfo) {}

func (f FakeConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
	return f.requestData, nil
}

func (FakeConnection) Ping() bool {
	return true
}

func (FakeConnection) Statistics() protocol.Statistics {
	return protocol.Statistics{}
}

func BenchmarkRequest(b *testing.B) {
	m := NewModel("testdata", 1e6)
	fs, _ := m.Walk(false)
	m.ReplaceLocal(fs)

	const n = 1000
	files := make([]protocol.FileInfo, n)
	t := time.Now().Unix()
	for i := 0; i < n; i++ {
		files[i] = protocol.FileInfo{
			Name:     fmt.Sprintf("file%d", i),
			Modified: t,
			Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
		}
	}

	fc := FakeConnection{
		id:          "42",
		requestData: []byte("some data to return"),
	}
	m.AddConnection(fc, fc)
	m.Index("42", files)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := m.requestGlobal("42", files[i%n].Name, 0, 32, nil)
		if err != nil {
			b.Error(err)
		}
		if data == nil {
			b.Error("nil data")
		}
	}
}
cmd/syncthing/openurl.go (new file)
@@ -0,0 +1,34 @@
/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"os/exec"
	"runtime"
)

func openURL(url string) error {
	if runtime.GOOS == "windows" {
		return exec.Command("cmd.exe", "/C", "start "+url).Run()
	}

	if runtime.GOOS == "darwin" {
		return exec.Command("open", url).Run()
	}

	return exec.Command("xdg-open", url).Run()
}
cmd/syncthing/suppressor.go (new file)
@@ -0,0 +1,72 @@
package main

import (
	"sync"
	"time"
)

const (
	MaxChangeHistory = 4
)

type change struct {
	size int64
	when time.Time
}

type changeHistory struct {
	changes []change
	next    int64
	prevSup bool
}

type suppressor struct {
	sync.Mutex
	changes   map[string]changeHistory
	threshold int64 // bytes/s
}

func (h changeHistory) bandwidth(t time.Time) int64 {
	if len(h.changes) == 0 {
		return 0
	}

	var t0 = h.changes[0].when
	if t == t0 {
		return 0
	}

	var bw float64
	for _, c := range h.changes {
		bw += float64(c.size)
	}
	return int64(bw / t.Sub(t0).Seconds())
}

func (h *changeHistory) append(size int64, t time.Time) {
	c := change{size, t}
	if len(h.changes) == MaxChangeHistory {
		h.changes = h.changes[1:MaxChangeHistory]
	}
	h.changes = append(h.changes, c)
}

func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
	s.Lock()

	if s.changes == nil {
		s.changes = make(map[string]changeHistory)
	}
	h := s.changes[name]
	sup := h.bandwidth(t) > s.threshold
	prevSup := h.prevSup
	h.prevSup = sup
	if !sup {
		h.append(size, t)
	}
	s.changes[name] = h

	s.Unlock()

	return sup, prevSup
}
cmd/syncthing/suppressor_test.go (new file)
@@ -0,0 +1,113 @@
package main

import (
	"testing"
	"time"
)

func TestSuppressor(t *testing.T) {
	s := suppressor{threshold: 10000}
	t0 := time.Now()

	t1 := t0
	sup, prev := s.suppress("foo", 10000, t1)
	if sup {
		t.Fatal("Never suppress first change")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is 10000 / 10 = 1000
	t1 = t0.Add(10 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 1000 {
		t.Errorf("Incorrect bw %d", bw)
	}
	sup, prev = s.suppress("foo", 10000, t1)
	if sup {
		t.Fatal("Should still be fine")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is (10000 + 10000) / 11 = 1818
	t1 = t0.Add(11 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 1818 {
		t.Errorf("Incorrect bw %d", bw)
	}
	sup, prev = s.suppress("foo", 100500, t1)
	if sup {
		t.Fatal("Should still be fine")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is (10000 + 10000 + 100500) / 12 = 10041
	t1 = t0.Add(12 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 10041 {
		t.Errorf("Incorrect bw %d", bw)
	}
	sup, prev = s.suppress("foo", 10000000, t1) // value will be ignored
	if !sup {
		t.Fatal("Should be over threshold")
	}
	if prev {
		t.Fatal("Incorrect prev status")
	}

	// bw is (10000 + 10000 + 100500) / 15 = 8033
	t1 = t0.Add(15 * time.Second)
	if bw := s.changes["foo"].bandwidth(t1); bw != 8033 {
		t.Errorf("Incorrect bw %d", bw)
	}
	sup, prev = s.suppress("foo", 10000000, t1)
	if sup {
		t.Fatal("Should be Ok")
	}
	if !prev {
		t.Fatal("Incorrect prev status")
	}
}

func TestHistory(t *testing.T) {
	h := changeHistory{}

	t0 := time.Now()
	h.append(40, t0)

	if l := len(h.changes); l != 1 {
		t.Errorf("Incorrect history length %d", l)
	}
	if s := h.changes[0].size; s != 40 {
		t.Errorf("Incorrect first record size %d", s)
	}

	for i := 1; i < MaxChangeHistory; i++ {
		h.append(int64(40+i), t0.Add(time.Duration(i)*time.Second))
	}

	if l := len(h.changes); l != MaxChangeHistory {
		t.Errorf("Incorrect history length %d", l)
	}
	if s := h.changes[0].size; s != 40 {
		t.Errorf("Incorrect first record size %d", s)
	}
	if s := h.changes[MaxChangeHistory-1].size; s != 40+MaxChangeHistory-1 {
		t.Errorf("Incorrect last record size %d", s)
	}

	h.append(999, t0.Add(time.Duration(999)*time.Second))

	if l := len(h.changes); l != MaxChangeHistory {
		t.Errorf("Incorrect history length %d", l)
	}
	if s := h.changes[0].size; s != 41 {
		t.Errorf("Incorrect first record size %d", s)
	}
	if s := h.changes[MaxChangeHistory-1].size; s != 999 {
		t.Errorf("Incorrect last record size %d", s)
	}
}
cmd/syncthing/testdata/.stignore (new file)
@@ -0,0 +1,2 @@
.*
quux

cmd/syncthing/testdata/empty (new file, empty)
@@ -3,7 +3,7 @@ package main
 import (
 	"crypto/rand"
 	"crypto/rsa"
-	"crypto/sha1"
+	"crypto/sha256"
 	"crypto/tls"
 	"crypto/x509"
 	"crypto/x509/pkix"
@@ -12,11 +12,12 @@ import (
 	"math/big"
 	"os"
 	"path"
+	"strings"
 	"time"
 )
 
 const (
-	tlsRSABits = 2048
+	tlsRSABits = 3072
 	tlsName    = "syncthing"
 )
 
@@ -24,14 +25,16 @@ func loadCert(dir string) (tls.Certificate, error) {
 	return tls.LoadX509KeyPair(path.Join(dir, "cert.pem"), path.Join(dir, "key.pem"))
 }
 
-func certId(bs []byte) string {
-	hf := sha1.New()
+func certID(bs []byte) string {
+	hf := sha256.New()
 	hf.Write(bs)
 	id := hf.Sum(nil)
-	return base32.StdEncoding.EncodeToString(id)
+	return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
 }
 
 func newCertificate(dir string) {
 	infoln("Generating RSA certificate and key...")
 
 	priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
 	fatalErr(err)
 
@@ -47,7 +50,7 @@ func newCertificate(dir string) {
 		NotAfter: notAfter,
 
 		KeyUsage:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
-		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
 		BasicConstraintsValid: true,
 	}
 
@@ -58,11 +61,11 @@ func newCertificate(dir string) {
 	fatalErr(err)
 	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
 	certOut.Close()
-	okln("wrote cert.pem")
+	okln("Created RSA certificate file")
 
 	keyOut, err := os.OpenFile(path.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 	fatalErr(err)
 	pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
 	keyOut.Close()
-	okln("wrote key.pem")
+	okln("Created RSA key file")
 }
cmd/syncthing/usage.go (new file)
@@ -0,0 +1,52 @@
package main

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"text/tabwriter"
)

func optionTable(w io.Writer, rows [][]string) {
	tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0)
	for _, row := range rows {
		for i, cell := range row {
			if i > 0 {
				tw.Write([]byte("\t"))
			}
			tw.Write([]byte(cell))
		}
		tw.Write([]byte("\n"))
	}
	tw.Flush()
}

func usageFor(fs *flag.FlagSet, usage string) func() {
	return func() {
		var b bytes.Buffer
		b.WriteString("Usage:\n  " + usage + "\n")

		var options [][]string
		fs.VisitAll(func(f *flag.Flag) {
			var dash = "-"
			if len(f.Name) > 1 {
				dash = "--"
			}
			var opt = "  " + dash + f.Name

			if f.DefValue != "false" {
				opt += "=" + f.DefValue
			}

			options = append(options, []string{opt, f.Usage})
		})

		if len(options) > 0 {
			b.WriteString("\nOptions:\n")
			optionTable(&b, options)
		}

		fmt.Println(b.String())
	}
}
cmd/syncthing/util.go (new file)
@@ -0,0 +1,29 @@
package main

import "fmt"

func MetricPrefix(n int64) string {
	if n > 1e9 {
		return fmt.Sprintf("%.02f G", float64(n)/1e9)
	}
	if n > 1e6 {
		return fmt.Sprintf("%.02f M", float64(n)/1e6)
	}
	if n > 1e3 {
		return fmt.Sprintf("%.01f k", float64(n)/1e3)
	}
	return fmt.Sprintf("%d ", n)
}

func BinaryPrefix(n int64) string {
	if n > 1<<30 {
		return fmt.Sprintf("%.02f Gi", float64(n)/(1<<30))
	}
	if n > 1<<20 {
		return fmt.Sprintf("%.02f Mi", float64(n)/(1<<20))
	}
	if n > 1<<10 {
		return fmt.Sprintf("%.01f Ki", float64(n)/(1<<10))
	}
	return fmt.Sprintf("%d ", n)
}
cmd/syncthing/walk.go (new file)
@@ -0,0 +1,238 @@
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/calmh/syncthing/protocol"
)

const BlockSize = 128 * 1024

type File struct {
	Name     string
	Flags    uint32
	Modified int64
	Version  uint32
	Size     int64
	Blocks   []Block
}

func (f File) String() string {
	return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}",
		f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
}

func (f File) Equals(o File) bool {
	return f.Modified == o.Modified && f.Version == o.Version
}

func (f File) NewerThan(o File) bool {
	return f.Modified > o.Modified || (f.Modified == o.Modified && f.Version > o.Version)
}

func isTempName(name string) bool {
	return strings.HasPrefix(path.Base(name), ".syncthing.")
}

func tempName(name string, modified int64) string {
	tdir := path.Dir(name)
	tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
	return path.Join(tdir, tname)
}

func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
	return func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return nil
		}

		rn, err := filepath.Rel(m.dir, p)
		if err != nil {
			return nil
		}

		if pn, sn := path.Split(rn); sn == ".stignore" {
			pn := strings.Trim(pn, "/")
			bs, _ := ioutil.ReadFile(p)
			lines := bytes.Split(bs, []byte("\n"))
			var patterns []string
			for _, line := range lines {
				if len(line) > 0 {
					patterns = append(patterns, string(line))
				}
			}
			ign[pn] = patterns
		}

		return nil
	}
}

func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
	return func(p string, info os.FileInfo, err error) error {
		if err != nil {
			if m.trace["file"] {
				log.Printf("FILE: %q: %v", p, err)
			}
			return nil
		}

		if isTempName(p) {
			return nil
		}

		rn, err := filepath.Rel(m.dir, p)
		if err != nil {
			return nil
		}

		if _, sn := path.Split(rn); sn == ".stignore" {
			// We never sync the .stignore files
			return nil
		}

		if ignoreFile(ign, rn) {
			if m.trace["file"] {
				log.Println("FILE: IGNORE:", rn)
			}
			return nil
		}

		if info.Mode()&os.ModeType == 0 {
			modified := info.ModTime().Unix()

			m.lmut.RLock()
			lf, ok := m.local[rn]
			m.lmut.RUnlock()

			if ok && lf.Modified == modified {
				if nf := uint32(info.Mode()); nf != lf.Flags {
					lf.Flags = nf
					lf.Version++
				}
				*res = append(*res, lf)
			} else {
				if cur, prev := m.sup.suppress(rn, info.Size(), time.Now()); cur {
					if m.trace["file"] {
						log.Printf("FILE: SUPPRESS: %q change bw over threshold", rn)
					}
					if !prev {
						log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", rn)
					}

					if ok {
						lf.Flags = protocol.FlagInvalid
						lf.Version++
						*res = append(*res, lf)
					}
					return nil
				} else if prev && !cur {
					log.Printf("INFO: Changes to %q are no longer suppressed.", rn)
				}

				if m.trace["file"] {
					log.Printf("FILE: Hash %q", p)
				}
				fd, err := os.Open(p)
				if err != nil {
					if m.trace["file"] {
						log.Printf("FILE: %q: %v", p, err)
					}
					return nil
				}
				defer fd.Close()

				blocks, err := Blocks(fd, BlockSize)
				if err != nil {
					if m.trace["file"] {
						log.Printf("FILE: %q: %v", p, err)
					}
					return nil
				}
				f := File{
					Name:     rn,
					Size:     info.Size(),
					Flags:    uint32(info.Mode()),
					Modified: modified,
					Blocks:   blocks,
				}
				*res = append(*res, f)
			}
		}

		return nil
	}
}

// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
	ignore = make(map[string][]string)

	hashFiles := m.walkAndHashFiles(&files, ignore)

	filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
	filepath.Walk(m.dir, hashFiles)

	if followSymlinks {
		d, err := os.Open(m.dir)
		if err != nil {
			return
		}
		defer d.Close()

		fis, err := d.Readdir(-1)
		if err != nil {
			return
		}

		for _, info := range fis {
			if info.Mode()&os.ModeSymlink != 0 {
				dir := path.Join(m.dir, info.Name()) + "/"
				filepath.Walk(dir, m.loadIgnoreFiles(ignore))
				filepath.Walk(dir, hashFiles)
			}
		}
	}

	return
}

func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.Mode()&os.ModeType == 0 && isTempName(path) {
		if m.trace["file"] {
			log.Printf("FILE: Remove %q", path)
		}
		os.Remove(path)
	}
	return nil
}

func (m *Model) cleanTempFiles() {
	filepath.Walk(m.dir, m.cleanTempFile)
}

func ignoreFile(patterns map[string][]string, file string) bool {
	first, last := path.Split(file)
	for prefix, pats := range patterns {
		if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
			for _, pattern := range pats {
				if match, _ := path.Match(pattern, last); match {
					return true
				}
			}
		}
	}
	return false
}
cmd/syncthing/walk_test.go (new file)
@@ -0,0 +1,83 @@
package main

import (
	"fmt"
	"reflect"
	"testing"
	"time"
)

var testdata = []struct {
	name string
	size int
	hash string
}{
	{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
	{"empty", 0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
	{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
}

var correctIgnores = map[string][]string{
	"": {".*", "quux"},
}

func TestWalk(t *testing.T) {
	m := NewModel("testdata", 1e6)
	files, ignores := m.Walk(false)

	if l1, l2 := len(files), len(testdata); l1 != l2 {
		t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)
	}

	for i := range testdata {
		if n1, n2 := testdata[i].name, files[i].Name; n1 != n2 {
			t.Errorf("Incorrect file name %q != %q for case #%d", n1, n2, i)
		}

		if h1, h2 := fmt.Sprintf("%x", files[i].Blocks[0].Hash), testdata[i].hash; h1 != h2 {
			t.Errorf("Incorrect hash %q != %q for case #%d", h1, h2, i)
		}

		t0 := time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
		t1 := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
		if mt := files[i].Modified; mt < t0 || mt > t1 {
			t.Errorf("Unrealistic modtime %d for test %d", mt, i)
		}
	}

	if !reflect.DeepEqual(ignores, correctIgnores) {
		t.Errorf("Incorrect ignores\n  %v\n  %v", correctIgnores, ignores)
	}
}

func TestIgnore(t *testing.T) {
	var patterns = map[string][]string{
		"":        {"t2"},
		"foo":     {"bar", "z*"},
		"foo/baz": {"quux", ".*"},
	}
	var tests = []struct {
		f string
		r bool
	}{
		{"foo/bar", true},
		{"foo/quux", false},
		{"foo/zuux", true},
		{"foo/qzuux", false},
		{"foo/baz/t1", false},
		{"foo/baz/t2", true},
		{"foo/baz/bar", true},
		{"foo/baz/quuxa", false},
		{"foo/baz/aquux", false},
		{"foo/baz/.quux", true},
		{"foo/baz/zquux", true},
		{"foo/baz/quux", true},
		{"foo/bazz/quux", false},
	}

	for i, tc := range tests {
		if r := ignoreFile(patterns, tc.f); r != tc.r {
			t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, tc.r, r)
		}
	}
}
discover/PROTOCOL.md (new file)
@@ -0,0 +1,115 @@
Node Discovery Protocol v2
==========================

Mode of Operation
-----------------

There are two distinct modes: "local discovery", performed on a LAN
segment (broadcast domain), and "global discovery", performed over the
Internet in general with the support of a well known server.

Local discovery does not use Query packets. Instead Announcement packets
are sent periodically, and each participating node keeps a table of the
announcements it has seen. On multihomed hosts the announcement packets
should be sent on each interface on which syncthing will accept
connections.

It is recommended that local discovery Announcement packets are sent on
a 30 to 60 second interval, possibly with forced transmissions when a
previously unknown node is discovered.

Global discovery is made possible by periodically updating a global server
using Announcement packets identical to those transmitted for local
discovery. The node performing discovery will transmit a Query packet to
the global server and expect an Announcement packet in response. In case
the global server has no knowledge of the queried node ID, there will be
no response. A timeout is to be used to determine lookup failure.

There is no message to unregister from the global server; instead
registrations are forgotten after 60 minutes. It is recommended to
send Announcement packets to the global server on a 30 minute interval.

Packet Formats
--------------

The Announcement packet has the following structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                   Magic Number (0x029E4C77)                   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Length of Node ID                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                   Node ID (variable length)                   \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                      Number of Addresses                      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Zero or more Address Structures                \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Address Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of IP                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     IP (variable length)                      \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          Port Number          |            0x0000             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

This is the XDR encoding of:

    struct Announcement {
        unsigned int MagicNumber;
        string NodeID<>;
        Address Addresses<>;
    }

    struct Address {
        opaque IP<>;
        unsigned short PortNumber;
    }

NodeID is padded to a multiple of 32 bits and all fields are sent in
network (big endian) byte order. In the Address structure, the IP field
can be of three different kinds:

 - A zero length indicates that the IP address should be taken from the
   source address of the announcement packet, be it IPv4 or IPv6. The
   source address must be a valid unicast address.

 - A four byte length indicates that the address is an IPv4 unicast
   address.

 - A sixteen byte length indicates that the address is an IPv6 unicast
   address.

The Query packet has the following structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                   Magic Number (0x23D63A9A)                   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Length of Node ID                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                   Node ID (variable length)                   \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

This is the XDR encoding of:

    struct Query {
        unsigned int MagicNumber;
        string NodeID<>;
    }
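For concreteness, the XDR rules above can be exercised by hand. The sketch
below is not part of the commits; the encodeQuery helper and the example node
ID are invented for illustration. It builds a v2 Query packet: a big-endian
magic number followed by the node ID as a length-prefixed string, zero-padded
to a multiple of four bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeQuery hand-encodes a discovery Query packet per the layout above:
// 4-byte magic, 4-byte node ID length, node ID bytes, zero padding to a
// 32-bit boundary.
func encodeQuery(nodeID string) []byte {
	pad := (4 - len(nodeID)%4) % 4
	buf := make([]byte, 4+4+len(nodeID)+pad)
	binary.BigEndian.PutUint32(buf[0:], 0x23D63A9A)          // Query magic (v2)
	binary.BigEndian.PutUint32(buf[4:], uint32(len(nodeID))) // unpadded length
	copy(buf[8:], nodeID)                                    // padding bytes stay zero
	return buf
}

func main() {
	// "ABCDE" is 5 bytes, padded to 8; the whole packet is 16 bytes.
	fmt.Printf("% x\n", encodeQuery("ABCDE"))
}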
discover/cmd/discosrv/.gitignore (new file)
@@ -0,0 +1 @@
discosrv

discover/cmd/discosrv/main.go (new file)
@@ -0,0 +1,237 @@
package main

import (
	"encoding/binary"
	"encoding/hex"
	"flag"
	"log"
	"net"
	"os"
	"sync"
	"time"

	"github.com/calmh/syncthing/discover"
)

type Node struct {
	Addresses []Address
	Updated   time.Time
}

type Address struct {
	IP   []byte
	Port uint16
}

var (
	nodes    = make(map[string]Node)
	lock     sync.Mutex
	queries  = 0
	answered = 0
)

func main() {
	var debug bool
	var listen string
	var timestamp bool

	flag.StringVar(&listen, "listen", ":22025", "Listen address")
	flag.BoolVar(&debug, "debug", false, "Enable debug output")
	flag.BoolVar(&timestamp, "timestamp", true, "Timestamp the log output")
	flag.Parse()

	log.SetOutput(os.Stdout)
	if !timestamp {
		log.SetFlags(0)
	}

	addr, _ := net.ResolveUDPAddr("udp", listen)
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}

	go func() {
		for {
			time.Sleep(600 * time.Second)

			lock.Lock()

			var deleted = 0
			for id, node := range nodes {
				if time.Since(node.Updated) > 60*time.Minute {
					delete(nodes, id)
					deleted++
				}
			}
			log.Printf("Expired %d nodes; %d nodes in registry; %d queries (%d answered)", deleted, len(nodes), queries, answered)
			queries = 0
			answered = 0

			lock.Unlock()
		}
	}()

	var buf = make([]byte, 1024)
	for {
		buf = buf[:cap(buf)]
		n, addr, err := conn.ReadFromUDP(buf)
		if err != nil {
			panic(err)
		}
		if n < 4 {
			log.Printf("Received short packet (%d bytes)", n)
			continue
		}

		buf = buf[:n]
		magic := binary.BigEndian.Uint32(buf)

		switch magic {
		case discover.AnnouncementMagicV1:
			var pkt discover.AnnounceV1
			err := pkt.UnmarshalXDR(buf)
			if err != nil {
				log.Println("AnnounceV1 Unmarshal:", err)
				log.Println(hex.Dump(buf))
				continue
			}
			if debug {
				log.Printf("<- %v %#v", addr, pkt)
			}

			ip := addr.IP.To4()
			if ip == nil {
				ip = addr.IP.To16()
			}
			node := Node{
				Addresses: []Address{{
					IP:   ip,
					Port: pkt.Port,
				}},
				Updated: time.Now(),
			}

			lock.Lock()
			nodes[pkt.NodeID] = node
			lock.Unlock()

		case discover.QueryMagicV1:
			var pkt discover.QueryV1
			err := pkt.UnmarshalXDR(buf)
			if err != nil {
				log.Println("QueryV1 Unmarshal:", err)
				log.Println(hex.Dump(buf))
				continue
			}
			if debug {
				log.Printf("<- %v %#v", addr, pkt)
			}

			lock.Lock()
			node, ok := nodes[pkt.NodeID]
			queries++
			lock.Unlock()

			if ok && len(node.Addresses) > 0 {
				pkt := discover.AnnounceV1{
					Magic:  discover.AnnouncementMagicV1,
					NodeID: pkt.NodeID,
					Port:   node.Addresses[0].Port,
					IP:     node.Addresses[0].IP,
				}
				if debug {
					log.Printf("-> %v %#v", addr, pkt)
				}

				tb := pkt.MarshalXDR()
				_, _, err = conn.WriteMsgUDP(tb, nil, addr)
				if err != nil {
					log.Println("QueryV1 response write:", err)
				}

				lock.Lock()
				answered++
				lock.Unlock()
			}

		case discover.AnnouncementMagicV2:
			var pkt discover.AnnounceV2
			err := pkt.UnmarshalXDR(buf)
			if err != nil {
				log.Println("AnnounceV2 Unmarshal:", err)
				log.Println(hex.Dump(buf))
				continue
			}
			if debug {
				log.Printf("<- %v %#v", addr, pkt)
			}

			ip := addr.IP.To4()
			if ip == nil {
				ip = addr.IP.To16()
			}

			var addrs []Address
			for _, addr := range pkt.Addresses {
				tip := addr.IP
				if len(tip) == 0 {
					tip = ip
				}
				addrs = append(addrs, Address{
					IP:   tip,
					Port: addr.Port,
				})
			}

			node := Node{
				Addresses: addrs,
				Updated:   time.Now(),
			}

			lock.Lock()
			nodes[pkt.NodeID] = node
			lock.Unlock()

		case discover.QueryMagicV2:
			var pkt discover.QueryV2
			err := pkt.UnmarshalXDR(buf)
			if err != nil {
				log.Println("QueryV2 Unmarshal:", err)
				log.Println(hex.Dump(buf))
				continue
			}
			if debug {
				log.Printf("<- %v %#v", addr, pkt)
			}

			lock.Lock()
			node, ok := nodes[pkt.NodeID]
			queries++
			lock.Unlock()

			if ok && len(node.Addresses) > 0 {
				pkt := discover.AnnounceV2{
					Magic:  discover.AnnouncementMagicV2,
					NodeID: pkt.NodeID,
				}
				for _, addr := range node.Addresses {
					pkt.Addresses = append(pkt.Addresses, discover.Address{IP: addr.IP, Port: addr.Port})
				}
				if debug {
					log.Printf("-> %v %#v", addr, pkt)
				}

				tb := pkt.MarshalXDR()
				_, _, err = conn.WriteMsgUDP(tb, nil, addr)
				if err != nil {
					log.Println("QueryV2 response write:", err)
				}

				lock.Lock()
				answered++
				lock.Unlock()
			}
		}
	}
}
@@ -1,121 +1,342 @@
/*
This is the local node discovery protocol. In principle we might be better
served by something more standardized, such as mDNS / DNS-SD. In practice, this
was much easier and quicker to get up and running.

The mode of operation is to periodically (currently once every 30 seconds)
transmit a broadcast UDP packet to the well known port number 21025. The packet
has the following format:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                          Magic Number                         |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |          Port Number          |        Length of NodeID       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                    NodeID (variable length)                   \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

The sending node's address is not encoded -- it is taken to be the source
address of the announcement. Every time such a packet is received, a local
table that maps NodeID to Address is updated. When the local node wants to
connect to another node with the address specification 'dynamic', this table is
consulted.
*/
package discover

import (
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"log"
	"net"
	"strings"
	"sync"
	"time"

	"github.com/calmh/syncthing/buffers"
)

const (
	AnnouncementPort = 21025
	Debug            = false
)

type Discoverer struct {
	MyID string
	ListenPort int
	BroadcastIntv time.Duration
	MyID             string
	ListenPort       int
	BroadcastIntv    time.Duration
	ExtBroadcastIntv time.Duration

	conn *net.UDPConn
	registry map[string]string
	registry     map[string][]string
	registryLock sync.RWMutex
	extServer    string

	localBroadcastTick  <-chan time.Time
	forcedBroadcastTick chan time.Time
}

func NewDiscoverer(id string, port int) (*Discoverer, error) {
	local4 := &net.UDPAddr{IP: net.IP{0, 0, 0, 0}, Port: 21025}
	conn, err := net.ListenUDP("udp4", local4)
var (
	ErrIncorrectMagic = errors.New("incorrect magic number")
)

// We tolerate a certain amount of errors because we might be running on
// laptops that sleep and wake, have intermittent network connectivity, etc.
// When we hit this many errors in succession, we stop.
const maxErrors = 30

func NewDiscoverer(id string, port int, extServer string) (*Discoverer, error) {
	local := &net.UDPAddr{IP: nil, Port: AnnouncementPort}
	conn, err := net.ListenUDP("udp", local)
	if err != nil {
		return nil, err
	}

	disc := &Discoverer{
		MyID: id,
		ListenPort: port,
		BroadcastIntv: 30 * time.Second,
		conn: conn,
		registry: make(map[string]string),
		MyID:             id,
		ListenPort:       port,
		BroadcastIntv:    30 * time.Second,
		ExtBroadcastIntv: 1800 * time.Second,

		conn:      conn,
		registry:  make(map[string][]string),
		extServer: extServer,
	}

	go disc.sendAnnouncements()
	go disc.recvAnnouncements()

	if disc.ListenPort > 0 {
		disc.localBroadcastTick = time.Tick(disc.BroadcastIntv)
		disc.forcedBroadcastTick = make(chan time.Time)
		go disc.sendAnnouncements()
	}
	if len(disc.extServer) > 0 {
		go disc.sendExtAnnouncements()
	}

	return disc, nil
}

func (d *Discoverer) sendAnnouncements() {
	remote4 := &net.UDPAddr{IP: net.IP{255, 255, 255, 255}, Port: 21025}
	var pkt = AnnounceV2{AnnouncementMagicV2, d.MyID, []Address{{nil, 22000}}}
	var buf = pkt.MarshalXDR()
	var errCounter = 0
	var err error

	idbs := []byte(d.MyID)
	buf := make([]byte, 4+4+4+len(idbs))

	binary.BigEndian.PutUint32(buf, uint32(0x121025))
	binary.BigEndian.PutUint16(buf[4:], uint16(d.ListenPort))
	binary.BigEndian.PutUint16(buf[6:], uint16(len(idbs)))
	copy(buf[8:], idbs)

	for {
		_, _, err := d.conn.WriteMsgUDP(buf, nil, remote4)
		if err != nil {
			panic(err)
		}
		time.Sleep(d.BroadcastIntv)
	remote := &net.UDPAddr{
		IP:   net.IP{255, 255, 255, 255},
		Port: AnnouncementPort,
	}

	for errCounter < maxErrors {
		intfs, err := net.Interfaces()
		if err != nil {
			log.Printf("discover/listInterfaces: %v; no local announcements", err)
			return
		}

		for _, intf := range intfs {
			if intf.Flags&(net.FlagBroadcast|net.FlagLoopback) == net.FlagBroadcast {
				addrs, err := intf.Addrs()
				if err != nil {
					log.Println("discover/listAddrs: warning:", err)
					errCounter++
					continue
				}

				var srcAddr string
				for _, addr := range addrs {
					if strings.Contains(addr.String(), ".") {
						// Found an IPv4 address
						parts := strings.Split(addr.String(), "/")
						srcAddr = parts[0]
						break
					}
				}
				if len(srcAddr) == 0 {
					if Debug {
						log.Println("discover: debug: no source address found on interface", intf.Name)
					}
					continue
				}

				iaddr, err := net.ResolveUDPAddr("udp4", srcAddr+":0")
				if err != nil {
					log.Println("discover/resolve: warning:", err)
					errCounter++
					continue
				}

				conn, err := net.ListenUDP("udp4", iaddr)
				if err != nil {
					log.Println("discover/listen: warning:", err)
					errCounter++
					continue
				}

				if Debug {
					log.Println("discover: debug: send announcement from", conn.LocalAddr(), "to", remote, "on", intf.Name)
				}

				_, err = conn.WriteTo(buf, remote)
				if err != nil {
					// Some interfaces don't seem to support broadcast even though the flags claim they do, i.e. vmnet
					conn.Close()

					if Debug {
						log.Println("discover/write: debug:", err)
					}

					errCounter++
					continue
				}

				conn.Close()
				errCounter = 0
			}
		}

		select {
		case <-d.localBroadcastTick:
		case <-d.forcedBroadcastTick:
		}
	}
	log.Println("discover/write: local: stopping due to too many errors:", err)
}

func (d *Discoverer) sendExtAnnouncements() {
	remote, err := net.ResolveUDPAddr("udp", d.extServer)
	if err != nil {
		log.Printf("discover/external: %v; no external announcements", err)
		return
	}

	var pkt = AnnounceV2{AnnouncementMagicV2, d.MyID, []Address{{nil, 22000}}}
	var buf = pkt.MarshalXDR()
	var errCounter = 0

	for errCounter < maxErrors {
		if Debug {
			log.Println("send announcement -> ", remote)
		}
		_, err = d.conn.WriteTo(buf, remote)
		if err != nil {
			log.Println("discover/write: warning:", err)
			errCounter++
		} else {
			errCounter = 0
		}
		time.Sleep(d.ExtBroadcastIntv)
	}
	log.Printf("discover/write: %v: stopping due to too many errors: %v", remote, err)
}

func (d *Discoverer) recvAnnouncements() {
	var buf = make([]byte, 1024)
	for {
		_, addr, err := d.conn.ReadFromUDP(buf)
	var errCounter = 0
	var err error
	for errCounter < maxErrors {
		n, addr, err := d.conn.ReadFromUDP(buf)
		if err != nil {
			panic(err)
		}
		magic := binary.BigEndian.Uint32(buf)
		if magic != 0x121025 {
			errCounter++
			time.Sleep(time.Second)
			continue
		}
		port := binary.BigEndian.Uint16(buf[4:])
		l := binary.BigEndian.Uint16(buf[6:])
		idbs := buf[8 : l+8]
		id := string(idbs)

		if id != d.MyID {
			nodeAddr := fmt.Sprintf("%s:%d", addr.IP.String(), port)
			d.registryLock.Lock()
			if d.registry[id] != nodeAddr {
				d.registry[id] = nodeAddr
		if Debug {
			log.Printf("read announcement:\n%s", hex.Dump(buf[:n]))
		}

		var pkt AnnounceV2
		err = pkt.UnmarshalXDR(buf[:n])
		if err != nil {
			errCounter++
			time.Sleep(time.Second)
			continue
		}

		if Debug {
			log.Printf("read announcement: %#v", pkt)
		}

		errCounter = 0

		if pkt.NodeID != d.MyID {
			var addrs []string
			for _, a := range pkt.Addresses {
				var nodeAddr string
				if len(a.IP) > 0 {
					nodeAddr = fmt.Sprintf("%s:%d", ipStr(a.IP), a.Port)
				} else {
					nodeAddr = fmt.Sprintf("%s:%d", addr.IP.String(), a.Port)
				}
				addrs = append(addrs, nodeAddr)
			}
			if Debug {
				log.Printf("register: %#v", addrs)
			}
			d.registryLock.Lock()
			_, seen := d.registry[pkt.NodeID]
			if !seen {
				select {
				case d.forcedBroadcastTick <- time.Now():
				}
			}
			d.registry[pkt.NodeID] = addrs
			d.registryLock.Unlock()
		}
	}
	log.Println("discover/read: stopping due to too many errors:", err)
}

func (d *Discoverer) Lookup(node string) (string, bool) {
	d.registryLock.Lock()
	defer d.registryLock.Unlock()
	addr, ok := d.registry[node]
	return addr, ok
func (d *Discoverer) externalLookup(node string) []string {
	extIP, err := net.ResolveUDPAddr("udp", d.extServer)
	if err != nil {
		log.Printf("discover/external: %v; no external lookup", err)
		return nil
	}

	conn, err := net.DialUDP("udp", nil, extIP)
	if err != nil {
		log.Printf("discover/external: %v; no external lookup", err)
		return nil
	}
	defer conn.Close()

	err = conn.SetDeadline(time.Now().Add(5 * time.Second))
	if err != nil {
		log.Printf("discover/external: %v; no external lookup", err)
		return nil
	}

	buf := QueryV2{QueryMagicV2, node}.MarshalXDR()
	_, err = conn.Write(buf)
	if err != nil {
		log.Printf("discover/external: %v; no external lookup", err)
		return nil
	}
	buffers.Put(buf)

	buf = buffers.Get(256)
	defer buffers.Put(buf)

	n, err := conn.Read(buf)
	if err != nil {
		if err, ok := err.(net.Error); ok && err.Timeout() {
			// Expected if the server doesn't know about the requested node ID
			return nil
		}
		log.Printf("discover/external/read: %v; no external lookup", err)
		return nil
	}

	if Debug {
		log.Printf("read external:\n%s", hex.Dump(buf[:n]))
	}

	var pkt AnnounceV2
	err = pkt.UnmarshalXDR(buf[:n])
	if err != nil {
		log.Println("discover/external/decode:", err)
		return nil
	}

	if Debug {
		log.Printf("read external: %#v", pkt)
	}

	var addrs []string
	for _, a := range pkt.Addresses {
		var nodeAddr string
		if len(a.IP) > 0 {
			nodeAddr = fmt.Sprintf("%s:%d", ipStr(a.IP), a.Port)
		}
		addrs = append(addrs, nodeAddr)
	}
	return addrs
}

func (d *Discoverer) Lookup(node string) []string {
	d.registryLock.Lock()
	addr, ok := d.registry[node]
	d.registryLock.Unlock()

	if ok {
		return addr
	} else if len(d.extServer) != 0 {
		// We might want to cache this, but not permanently, so it needs some intelligence
		return d.externalLookup(node)
	}
	return nil
}

func ipStr(ip []byte) string {
	var f = "%d"
	var s = "."
	if len(ip) > 4 {
		f = "%x"
		s = ":"
	}
	var ss = make([]string, len(ip))
	for i := range ip {
		ss[i] = fmt.Sprintf(f, ip[i])
	}
	return strings.Join(ss, s)
}
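The superseded v0 wire format described in the comment at the top of the diff
above is simple enough to decode by hand. As a minimal sketch, not part of the
commits (the decodeV0Announcement helper and the sample packet are invented
for illustration; the 0x121025 magic matches the old sender code in the diff):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeV0Announcement parses the legacy local announcement packet:
// 4-byte magic, 2-byte listen port, 2-byte node ID length, node ID bytes.
func decodeV0Announcement(buf []byte) (id string, port uint16, err error) {
	if len(buf) < 8 {
		return "", 0, errors.New("packet too short")
	}
	if binary.BigEndian.Uint32(buf) != 0x121025 {
		return "", 0, errors.New("incorrect magic number")
	}
	port = binary.BigEndian.Uint16(buf[4:])
	l := int(binary.BigEndian.Uint16(buf[6:]))
	if len(buf) < 8+l {
		return "", 0, errors.New("truncated node ID")
	}
	return string(buf[8 : 8+l]), port, nil
}

func main() {
	pkt := []byte{0x00, 0x12, 0x10, 0x25, 0x56, 0x08, 0x00, 0x04, 'N', 'O', 'D', 'E'}
	id, port, err := decodeV0Announcement(pkt)
	fmt.Println(id, port, err) // NODE 22024 <nil>
}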
discover/packets.go (new file)
@@ -0,0 +1,39 @@
package discover

const (
	AnnouncementMagicV1 = 0x20121025
	QueryMagicV1        = 0x19760309
)

type QueryV1 struct {
	Magic  uint32
	NodeID string // max:64
}

type AnnounceV1 struct {
	Magic  uint32
	Port   uint16
	NodeID string // max:64
	IP     []byte // max:16
}

const (
	AnnouncementMagicV2 = 0x029E4C77
	QueryMagicV2        = 0x23D63A9A
)

type QueryV2 struct {
	Magic  uint32
	NodeID string // max:64
}

type AnnounceV2 struct {
	Magic     uint32
	NodeID    string    // max:64
	Addresses []Address // max:16
}

type Address struct {
	IP   []byte // max:16
	Port uint16
}
discover/packets_xdr.go (new file)
@@ -0,0 +1,220 @@
package discover

import (
	"bytes"
	"io"

	"github.com/calmh/syncthing/xdr"
)

func (o QueryV1) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o QueryV1) MarshalXDR() []byte {
	var buf bytes.Buffer
	var xw = xdr.NewWriter(&buf)
	o.encodeXDR(xw)
	return buf.Bytes()
}

func (o QueryV1) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteUint32(o.Magic)
	if len(o.NodeID) > 64 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteString(o.NodeID)
	return xw.Tot(), xw.Error()
}

func (o *QueryV1) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *QueryV1) UnmarshalXDR(bs []byte) error {
	var buf = bytes.NewBuffer(bs)
	var xr = xdr.NewReader(buf)
	return o.decodeXDR(xr)
}

func (o *QueryV1) decodeXDR(xr *xdr.Reader) error {
	o.Magic = xr.ReadUint32()
	o.NodeID = xr.ReadStringMax(64)
	return xr.Error()
}

func (o AnnounceV1) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o AnnounceV1) MarshalXDR() []byte {
	var buf bytes.Buffer
	var xw = xdr.NewWriter(&buf)
	o.encodeXDR(xw)
	return buf.Bytes()
}

func (o AnnounceV1) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteUint32(o.Magic)
	xw.WriteUint16(o.Port)
	if len(o.NodeID) > 64 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteString(o.NodeID)
	if len(o.IP) > 16 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteBytes(o.IP)
	return xw.Tot(), xw.Error()
}

func (o *AnnounceV1) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *AnnounceV1) UnmarshalXDR(bs []byte) error {
	var buf = bytes.NewBuffer(bs)
	var xr = xdr.NewReader(buf)
	return o.decodeXDR(xr)
}

func (o *AnnounceV1) decodeXDR(xr *xdr.Reader) error {
	o.Magic = xr.ReadUint32()
	o.Port = xr.ReadUint16()
	o.NodeID = xr.ReadStringMax(64)
	o.IP = xr.ReadBytesMax(16)
	return xr.Error()
}

func (o QueryV2) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o QueryV2) MarshalXDR() []byte {
	var buf bytes.Buffer
	var xw = xdr.NewWriter(&buf)
	o.encodeXDR(xw)
	return buf.Bytes()
}

func (o QueryV2) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteUint32(o.Magic)
	if len(o.NodeID) > 64 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteString(o.NodeID)
	return xw.Tot(), xw.Error()
}

func (o *QueryV2) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *QueryV2) UnmarshalXDR(bs []byte) error {
	var buf = bytes.NewBuffer(bs)
	var xr = xdr.NewReader(buf)
	return o.decodeXDR(xr)
}

func (o *QueryV2) decodeXDR(xr *xdr.Reader) error {
	o.Magic = xr.ReadUint32()
	o.NodeID = xr.ReadStringMax(64)
	return xr.Error()
}

func (o AnnounceV2) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o AnnounceV2) MarshalXDR() []byte {
	var buf bytes.Buffer
	var xw = xdr.NewWriter(&buf)
	o.encodeXDR(xw)
	return buf.Bytes()
}

func (o AnnounceV2) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteUint32(o.Magic)
	if len(o.NodeID) > 64 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteString(o.NodeID)
	if len(o.Addresses) > 16 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteUint32(uint32(len(o.Addresses)))
	for i := range o.Addresses {
		o.Addresses[i].encodeXDR(xw)
	}
	return xw.Tot(), xw.Error()
}

func (o *AnnounceV2) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *AnnounceV2) UnmarshalXDR(bs []byte) error {
	var buf = bytes.NewBuffer(bs)
	var xr = xdr.NewReader(buf)
	return o.decodeXDR(xr)
}

func (o *AnnounceV2) decodeXDR(xr *xdr.Reader) error {
	o.Magic = xr.ReadUint32()
	o.NodeID = xr.ReadStringMax(64)
	_AddressesSize := int(xr.ReadUint32())
	if _AddressesSize > 16 {
		return xdr.ErrElementSizeExceeded
	}
	o.Addresses = make([]Address, _AddressesSize)
	for i := range o.Addresses {
		(&o.Addresses[i]).decodeXDR(xr)
	}
	return xr.Error()
}

func (o Address) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o Address) MarshalXDR() []byte {
	var buf bytes.Buffer
	var xw = xdr.NewWriter(&buf)
	o.encodeXDR(xw)
	return buf.Bytes()
}

func (o Address) encodeXDR(xw *xdr.Writer) (int, error) {
	if len(o.IP) > 16 {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	xw.WriteBytes(o.IP)
	xw.WriteUint16(o.Port)
	return xw.Tot(), xw.Error()
}

func (o *Address) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *Address) UnmarshalXDR(bs []byte) error {
	var buf = bytes.NewBuffer(bs)
	var xr = xdr.NewReader(buf)
	return o.decodeXDR(xr)
}

func (o *Address) decodeXDR(xr *xdr.Reader) error {
	o.IP = xr.ReadBytesMax(16)
	o.Port = xr.ReadUint16()
	return xr.Error()
}
files/set.go (new file)
@@ -0,0 +1,173 @@
package fileset

import "sync"

type File struct {
	Key      Key
	Modified int64
	Flags    uint32
	Data     interface{}
}

type Key struct {
	Name    string
	Version uint32
}

type fileRecord struct {
	Usage int
	File  File
}

type bitset uint64

func (a Key) newerThan(b Key) bool {
	return a.Version > b.Version
}

type Set struct {
	mutex              sync.RWMutex
	files              map[Key]fileRecord
	remoteKey          [64]map[string]Key
	globalAvailability map[string]bitset
	globalKey          map[string]Key
}

func NewSet() *Set {
	var m = Set{
		files:              make(map[Key]fileRecord),
		globalAvailability: make(map[string]bitset),
		globalKey:          make(map[string]Key),
	}
	return &m
}

func (m *Set) AddLocal(fs []File) {
	m.mutex.Lock()
	m.unlockedAddRemote(0, fs)
	m.mutex.Unlock()
}

func (m *Set) SetLocal(fs []File) {
	m.mutex.Lock()
	m.unlockedSetRemote(0, fs)
	m.mutex.Unlock()
}

func (m *Set) AddRemote(cid uint, fs []File) {
	if cid < 1 || cid > 63 {
		panic("Connection ID must be in the range 1 - 63 inclusive")
	}
	m.mutex.Lock()
	m.unlockedAddRemote(cid, fs)
	m.mutex.Unlock()
}

func (m *Set) SetRemote(cid uint, fs []File) {
	if cid < 1 || cid > 63 {
		panic("Connection ID must be in the range 1 - 63 inclusive")
	}
	m.mutex.Lock()
	m.unlockedSetRemote(cid, fs)
	m.mutex.Unlock()
}

func (m *Set) unlockedAddRemote(cid uint, fs []File) {
	remFiles := m.remoteKey[cid]
	for _, f := range fs {
		n := f.Key.Name

		if ck, ok := remFiles[n]; ok && ck == f.Key {
			// The remote already has exactly this file, skip it
			continue
		}

		remFiles[n] = f.Key

		// Keep the block list or increment the usage
		if br, ok := m.files[f.Key]; !ok {
			m.files[f.Key] = fileRecord{
				Usage: 1,
				File:  f,
			}
		} else {
			br.Usage++
			m.files[f.Key] = br
		}

		// Update global view
		gk, ok := m.globalKey[n]
		switch {
		case ok && f.Key == gk:
			av := m.globalAvailability[n]
			av |= 1 << cid
			m.globalAvailability[n] = av
		case f.Key.newerThan(gk):
			m.globalKey[n] = f.Key
			m.globalAvailability[n] = 1 << cid
		}
	}
}

func (m *Set) unlockedSetRemote(cid uint, fs []File) {
	// Decrement usage for all files belonging to this remote, and remove
	// those that are no longer needed.
	for _, fk := range m.remoteKey[cid] {
		br, ok := m.files[fk]
		switch {
		case ok && br.Usage == 1:
			delete(m.files, fk)
		case ok && br.Usage > 1:
			br.Usage--
			m.files[fk] = br
		}
	}

	// Clear existing remote remoteKey
	m.remoteKey[cid] = make(map[string]Key)

	// Recalculate global based on all remaining remoteKey
	for n := range m.globalKey {
		var nk Key    // newest key
		var na bitset // newest availability

		for i, rem := range m.remoteKey {
			if rk, ok := rem[n]; ok {
				switch {
				case rk == nk:
					na |= 1 << uint(i)
				case rk.newerThan(nk):
					nk = rk
					na = 1 << uint(i)
				}
			}
		}

		if na != 0 {
			// Someone had the file
			m.globalKey[n] = nk
			m.globalAvailability[n] = na
		} else {
			// No one had the file
			delete(m.globalKey, n)
			delete(m.globalAvailability, n)
		}
	}

	// Add new remote remoteKey to the mix
	m.unlockedAddRemote(cid, fs)
}

func (m *Set) Need(cid uint) []File {
	var fs []File
	m.mutex.Lock()

	for name, gk := range m.globalKey {
		if gk.newerThan(m.remoteKey[cid][name]) {
			fs = append(fs, m.files[gk].File)
		}
	}

	m.mutex.Unlock()
	return fs
}
207
files/set_test.go
Normal file
207
files/set_test.go
Normal file
@@ -0,0 +1,207 @@
package fileset

import (
	"fmt"
	"reflect"
	"testing"
)

func TestGlobalSet(t *testing.T) {
	m := NewSet()

	local := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1000}, 0, 0, nil},
		File{Key{"c", 1000}, 0, 0, nil},
		File{Key{"d", 1000}, 0, 0, nil},
	}

	remote := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1001}, 0, 0, nil},
		File{Key{"c", 1002}, 0, 0, nil},
		File{Key{"e", 1000}, 0, 0, nil},
	}

	expectedGlobal := map[string]Key{
		"a": local[0].Key,
		"b": remote[1].Key,
		"c": remote[2].Key,
		"d": local[3].Key,
		"e": remote[3].Key,
	}

	m.SetLocal(local)
	m.SetRemote(1, remote)

	if !reflect.DeepEqual(m.globalKey, expectedGlobal) {
		t.Errorf("Global incorrect;\n%v !=\n%v", m.globalKey, expectedGlobal)
	}

	if lb := len(m.files); lb != 7 {
		t.Errorf("Num files incorrect %d != 7\n%v", lb, m.files)
	}
}
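
// Editorial note (not part of the original commit): the expected count of 7
// follows from the usage tracking in unlockedAddRemote: 4 local + 4 remote
// files, minus 1 because "a" has the identical Key{"a", 1000} on both sides
// and is therefore stored as a single fileRecord with Usage == 2.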
func BenchmarkSetLocal10k(b *testing.B) {
	m := NewSet()

	var local []File
	for i := 0; i < 10000; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	var remote []File
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	m.SetRemote(1, remote)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SetLocal(local)
	}
}

func BenchmarkSetLocal10(b *testing.B) {
	m := NewSet()

	var local []File
	for i := 0; i < 10; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	var remote []File
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	m.SetRemote(1, remote)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SetLocal(local)
	}
}

func BenchmarkAddLocal10k(b *testing.B) {
	m := NewSet()

	var local []File
	for i := 0; i < 10000; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	var remote []File
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	m.SetRemote(1, remote)
	m.SetLocal(local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		for j := range local {
			local[j].Key.Version++
		}
		b.StartTimer()
		m.AddLocal(local)
	}
}

func BenchmarkAddLocal10(b *testing.B) {
	m := NewSet()

	var local []File
	for i := 0; i < 10; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	var remote []File
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	m.SetRemote(1, remote)
	m.SetLocal(local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := range local {
			local[j].Key.Version++
		}
		m.AddLocal(local)
	}
}
func TestGlobalReset(t *testing.T) {
	m := NewSet()

	local := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1000}, 0, 0, nil},
		File{Key{"c", 1000}, 0, 0, nil},
		File{Key{"d", 1000}, 0, 0, nil},
	}

	remote := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1001}, 0, 0, nil},
		File{Key{"c", 1002}, 0, 0, nil},
		File{Key{"e", 1000}, 0, 0, nil},
	}

	expectedGlobalKey := map[string]Key{
		"a": local[0].Key,
		"b": local[1].Key,
		"c": local[2].Key,
		"d": local[3].Key,
	}

	m.SetLocal(local)
	m.SetRemote(1, remote)
	m.SetRemote(1, nil)

	if !reflect.DeepEqual(m.globalKey, expectedGlobalKey) {
		t.Errorf("Global incorrect;\n%v !=\n%v", m.globalKey, expectedGlobalKey)
	}

	if lb := len(m.files); lb != 4 {
		t.Errorf("Num files incorrect %d != 4\n%v", lb, m.files)
	}
}
func TestNeed(t *testing.T) {
	m := NewSet()

	local := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1000}, 0, 0, nil},
		File{Key{"c", 1000}, 0, 0, nil},
		File{Key{"d", 1000}, 0, 0, nil},
	}

	remote := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1001}, 0, 0, nil},
		File{Key{"c", 1002}, 0, 0, nil},
		File{Key{"e", 1000}, 0, 0, nil},
	}

	shouldNeed := []File{
		File{Key{"b", 1001}, 0, 0, nil},
		File{Key{"c", 1002}, 0, 0, nil},
		File{Key{"e", 1000}, 0, 0, nil},
	}

	m.SetLocal(local)
	m.SetRemote(1, remote)

	need := m.Need(0)
	if !reflect.DeepEqual(need, shouldNeed) {
		t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed)
	}
}
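
// Editorial note (not part of the original commit): Need builds its result by
// ranging over the globalKey map, and Go map iteration order is randomized,
// so the reflect.DeepEqual comparison above is order-sensitive and may fail
// intermittently; sorting both slices by name before comparing would make
// the test deterministic.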
gui/angular.min.js (vendored, new file, 203 lines)
@@ -0,0 +1,203 @@
/*
 AngularJS v1.2.7
 (c) 2010-2014 Google, Inc. http://angularjs.org
 License: MIT
*/
[minified, vendored AngularJS 1.2.7 source omitted; the commit adds the upstream file verbatim]
|
||||
m){var k=c.defer(),l=k.promise,n=B(m)&&!m;h=a.defer(function(){try{k.resolve(e())}catch(a){k.reject(a),d(a)}finally{delete g[l.$$timeoutId]}n||b.$apply()},h);l.$$timeoutId=h;g[h]=k;return l}var g={};e.cancel=function(b){return b&&b.$$timeoutId in g?(g[b.$$timeoutId].reject("canceled"),delete g[b.$$timeoutId],a.defer.cancel(b.$$timeoutId)):!1};return e}]}function qa(b,a){var c=b;M&&(Y.setAttribute("href",c),c=Y.href);Y.setAttribute("href",c);return{href:Y.href,protocol:Y.protocol?Y.protocol.replace(/:$/,
|
||||
""):"",host:Y.host,search:Y.search?Y.search.replace(/^\?/,""):"",hash:Y.hash?Y.hash.replace(/^#/,""):"",hostname:Y.hostname,port:Y.port,pathname:"/"===Y.pathname.charAt(0)?Y.pathname:"/"+Y.pathname}}function Fb(b){b=D(b)?qa(b):b;return b.protocol===zc.protocol&&b.host===zc.host}function Hd(){this.$get=$(Z)}function Ac(b){function a(d,e){if(X(d)){var g={};q(d,function(b,c){g[c]=a(c,b)});return g}return b.factory(d+c,e)}var c="Filter";this.register=a;this.$get=["$injector",function(a){return function(b){return a.get(b+
|
||||
c)}}];a("currency",Bc);a("date",Cc);a("filter",Id);a("json",Jd);a("limitTo",Kd);a("lowercase",Ld);a("number",Dc);a("orderBy",Ec);a("uppercase",Md)}function Id(){return function(b,a,c){if(!K(b))return b;var d=typeof c,e=[];e.check=function(a){for(var b=0;b<e.length;b++)if(!e[b](a))return!1;return!0};"function"!==d&&(c="boolean"===d&&c?function(a,b){return Na.equals(a,b)}:function(a,b){b=(""+b).toLowerCase();return-1<(""+a).toLowerCase().indexOf(b)});var g=function(a,b){if("string"==typeof b&&"!"===
|
||||
b.charAt(0))return!g(a,b.substr(1));switch(typeof a){case "boolean":case "number":case "string":return c(a,b);case "object":switch(typeof b){case "object":return c(a,b);default:for(var d in a)if("$"!==d.charAt(0)&&g(a[d],b))return!0}return!1;case "array":for(d=0;d<a.length;d++)if(g(a[d],b))return!0;return!1;default:return!1}};switch(typeof a){case "boolean":case "number":case "string":a={$:a};case "object":for(var f in a)"$"==f?function(){if(a[f]){var b=f;e.push(function(c){return g(c,a[b])})}}():
|
||||
function(){if("undefined"!=typeof a[f]){var b=f;e.push(function(c){return g(ub(c,b),a[b])})}}();break;case "function":e.push(a);break;default:return b}for(var d=[],h=0;h<b.length;h++){var m=b[h];e.check(m)&&d.push(m)}return d}}function Bc(b){var a=b.NUMBER_FORMATS;return function(b,d){z(d)&&(d=a.CURRENCY_SYM);return Fc(b,a.PATTERNS[1],a.GROUP_SEP,a.DECIMAL_SEP,2).replace(/\u00A4/g,d)}}function Dc(b){var a=b.NUMBER_FORMATS;return function(b,d){return Fc(b,a.PATTERNS[0],a.GROUP_SEP,a.DECIMAL_SEP,d)}}
|
||||
function Fc(b,a,c,d,e){if(isNaN(b)||!isFinite(b))return"";var g=0>b;b=Math.abs(b);var f=b+"",h="",m=[],k=!1;if(-1!==f.indexOf("e")){var l=f.match(/([\d\.]+)e(-?)(\d+)/);l&&"-"==l[2]&&l[3]>e+1?f="0":(h=f,k=!0)}if(k)0<e&&(-1<b&&1>b)&&(h=b.toFixed(e));else{f=(f.split(Gc)[1]||"").length;z(e)&&(e=Math.min(Math.max(a.minFrac,f),a.maxFrac));f=Math.pow(10,e);b=Math.round(b*f)/f;b=(""+b).split(Gc);f=b[0];b=b[1]||"";var l=0,n=a.lgSize,p=a.gSize;if(f.length>=n+p)for(l=f.length-n,k=0;k<l;k++)0===(l-k)%p&&0!==
|
||||
k&&(h+=c),h+=f.charAt(k);for(k=l;k<f.length;k++)0===(f.length-k)%n&&0!==k&&(h+=c),h+=f.charAt(k);for(;b.length<e;)b+="0";e&&"0"!==e&&(h+=d+b.substr(0,e))}m.push(g?a.negPre:a.posPre);m.push(h);m.push(g?a.negSuf:a.posSuf);return m.join("")}function Lb(b,a,c){var d="";0>b&&(d="-",b=-b);for(b=""+b;b.length<a;)b="0"+b;c&&(b=b.substr(b.length-a));return d+b}function W(b,a,c,d){c=c||0;return function(e){e=e["get"+b]();if(0<c||e>-c)e+=c;0===e&&-12==c&&(e=12);return Lb(e,a,d)}}function kb(b,a){return function(c,
|
||||
d){var e=c["get"+b](),g=Ga(a?"SHORT"+b:b);return d[g][e]}}function Cc(b){function a(a){var b;if(b=a.match(c)){a=new Date(0);var g=0,f=0,h=b[8]?a.setUTCFullYear:a.setFullYear,m=b[8]?a.setUTCHours:a.setHours;b[9]&&(g=S(b[9]+b[10]),f=S(b[9]+b[11]));h.call(a,S(b[1]),S(b[2])-1,S(b[3]));g=S(b[4]||0)-g;f=S(b[5]||0)-f;h=S(b[6]||0);b=Math.round(1E3*parseFloat("0."+(b[7]||0)));m.call(a,g,f,h,b)}return a}var c=/^(\d{4})-?(\d\d)-?(\d\d)(?:T(\d\d)(?::?(\d\d)(?::?(\d\d)(?:\.(\d+))?)?)?(Z|([+-])(\d\d):?(\d\d))?)?$/;
|
||||
return function(c,e){var g="",f=[],h,m;e=e||"mediumDate";e=b.DATETIME_FORMATS[e]||e;D(c)&&(c=Nd.test(c)?S(c):a(c));rb(c)&&(c=new Date(c));if(!Ja(c))return c;for(;e;)(m=Od.exec(e))?(f=f.concat(va.call(m,1)),e=f.pop()):(f.push(e),e=null);q(f,function(a){h=Pd[a];g+=h?h(c,b.DATETIME_FORMATS):a.replace(/(^'|'$)/g,"").replace(/''/g,"'")});return g}}function Jd(){return function(b){return pa(b,!0)}}function Kd(){return function(b,a){if(!K(b)&&!D(b))return b;a=S(a);if(D(b))return a?0<=a?b.slice(0,a):b.slice(a,
|
||||
b.length):"";var c=[],d,e;a>b.length?a=b.length:a<-b.length&&(a=-b.length);0<a?(d=0,e=a):(d=b.length+a,e=b.length);for(;d<e;d++)c.push(b[d]);return c}}function Ec(b){return function(a,c,d){function e(a,b){return Ma(b)?function(b,c){return a(c,b)}:a}if(!K(a)||!c)return a;c=K(c)?c:[c];c=Qc(c,function(a){var c=!1,d=a||Aa;if(D(a)){if("+"==a.charAt(0)||"-"==a.charAt(0))c="-"==a.charAt(0),a=a.substring(1);d=b(a)}return e(function(a,b){var c;c=d(a);var e=d(b),f=typeof c,g=typeof e;f==g?("string"==f&&(c=
|
||||
c.toLowerCase(),e=e.toLowerCase()),c=c===e?0:c<e?-1:1):c=f<g?-1:1;return c},c)});for(var g=[],f=0;f<a.length;f++)g.push(a[f]);return g.sort(e(function(a,b){for(var d=0;d<c.length;d++){var e=c[d](a,b);if(0!==e)return e}return 0},d))}}function ta(b){L(b)&&(b={link:b});b.restrict=b.restrict||"AC";return $(b)}function Hc(b,a){function c(a,c){c=c?"-"+cb(c,"-"):"";b.removeClass((a?lb:mb)+c).addClass((a?mb:lb)+c)}var d=this,e=b.parent().controller("form")||nb,g=0,f=d.$error={},h=[];d.$name=a.name||a.ngForm;
|
||||
d.$dirty=!1;d.$pristine=!0;d.$valid=!0;d.$invalid=!1;e.$addControl(d);b.addClass(Ha);c(!0);d.$addControl=function(a){xa(a.$name,"input");h.push(a);a.$name&&(d[a.$name]=a)};d.$removeControl=function(a){a.$name&&d[a.$name]===a&&delete d[a.$name];q(f,function(b,c){d.$setValidity(c,!0,a)});Ka(h,a)};d.$setValidity=function(a,b,h){var n=f[a];if(b)n&&(Ka(n,h),n.length||(g--,g||(c(b),d.$valid=!0,d.$invalid=!1),f[a]=!1,c(!0,a),e.$setValidity(a,!0,d)));else{g||c(b);if(n){if(-1!=ab(n,h))return}else f[a]=n=[],
|
||||
g++,c(!1,a),e.$setValidity(a,!1,d);n.push(h);d.$valid=!1;d.$invalid=!0}};d.$setDirty=function(){b.removeClass(Ha).addClass(ob);d.$dirty=!0;d.$pristine=!1;e.$setDirty()};d.$setPristine=function(){b.removeClass(ob).addClass(Ha);d.$dirty=!1;d.$pristine=!0;q(h,function(a){a.$setPristine()})}}function pb(b,a,c,d,e,g){if(!e.android){var f=!1;a.on("compositionstart",function(a){f=!0});a.on("compositionend",function(){f=!1})}var h=function(){if(!f){var e=a.val();Ma(c.ngTrim||"T")&&(e=aa(e));d.$viewValue!==
|
||||
e&&(b.$$phase?d.$setViewValue(e):b.$apply(function(){d.$setViewValue(e)}))}};if(e.hasEvent("input"))a.on("input",h);else{var m,k=function(){m||(m=g.defer(function(){h();m=null}))};a.on("keydown",function(a){a=a.keyCode;91===a||(15<a&&19>a||37<=a&&40>=a)||k()});if(e.hasEvent("paste"))a.on("paste cut",k)}a.on("change",h);d.$render=function(){a.val(d.$isEmpty(d.$viewValue)?"":d.$viewValue)};var l=c.ngPattern,n=function(a,b){if(d.$isEmpty(b)||a.test(b))return d.$setValidity("pattern",!0),b;d.$setValidity("pattern",
|
||||
!1);return r};l&&((e=l.match(/^\/(.*)\/([gim]*)$/))?(l=RegExp(e[1],e[2]),e=function(a){return n(l,a)}):e=function(c){var d=b.$eval(l);if(!d||!d.test)throw F("ngPattern")("noregexp",l,d,ga(a));return n(d,c)},d.$formatters.push(e),d.$parsers.push(e));if(c.ngMinlength){var p=S(c.ngMinlength);e=function(a){if(!d.$isEmpty(a)&&a.length<p)return d.$setValidity("minlength",!1),r;d.$setValidity("minlength",!0);return a};d.$parsers.push(e);d.$formatters.push(e)}if(c.ngMaxlength){var s=S(c.ngMaxlength);e=function(a){if(!d.$isEmpty(a)&&
|
||||
a.length>s)return d.$setValidity("maxlength",!1),r;d.$setValidity("maxlength",!0);return a};d.$parsers.push(e);d.$formatters.push(e)}}function Mb(b,a){b="ngClass"+b;return function(){return{restrict:"AC",link:function(c,d,e){function g(b){if(!0===a||c.$index%2===a){var d=f(b||"");h?ua(b,h)||e.$updateClass(d,f(h)):e.$addClass(d)}h=fa(b)}function f(a){if(K(a))return a.join(" ");if(X(a)){var b=[];q(a,function(a,c){a&&b.push(c)});return b.join(" ")}return a}var h;c.$watch(e[b],g,!0);e.$observe("class",
|
||||
function(a){g(c.$eval(e[b]))});"ngClass"!==b&&c.$watch("$index",function(d,g){var h=d&1;if(h!==g&1){var n=f(c.$eval(e[b]));h===a?e.$addClass(n):e.$removeClass(n)}})}}}}var x=function(b){return D(b)?b.toLowerCase():b},Ga=function(b){return D(b)?b.toUpperCase():b},M,A,Ba,va=[].slice,Qd=[].push,Za=Object.prototype.toString,La=F("ng"),Na=Z.angular||(Z.angular={}),Ta,Fa,ka=["0","0","0"];M=S((/msie (\d+)/.exec(x(navigator.userAgent))||[])[1]);isNaN(M)&&(M=S((/trident\/.*; rv:(\d+)/.exec(x(navigator.userAgent))||
|
||||
[])[1]));w.$inject=[];Aa.$inject=[];var aa=function(){return String.prototype.trim?function(b){return D(b)?b.trim():b}:function(b){return D(b)?b.replace(/^\s\s*/,"").replace(/\s\s*$/,""):b}}();Fa=9>M?function(b){b=b.nodeName?b:b[0];return b.scopeName&&"HTML"!=b.scopeName?Ga(b.scopeName+":"+b.nodeName):b.nodeName}:function(b){return b.nodeName?b.nodeName:b[0].nodeName};var Tc=/[A-Z]/g,Rd={full:"1.2.7",major:1,minor:2,dot:7,codeName:"emoji-clairvoyance"},Qa=O.cache={},db=O.expando="ng-"+(new Date).getTime(),
|
||||
Xc=1,Ic=Z.document.addEventListener?function(b,a,c){b.addEventListener(a,c,!1)}:function(b,a,c){b.attachEvent("on"+a,c)},Ab=Z.document.removeEventListener?function(b,a,c){b.removeEventListener(a,c,!1)}:function(b,a,c){b.detachEvent("on"+a,c)},Vc=/([\:\-\_]+(.))/g,Wc=/^moz([A-Z])/,xb=F("jqLite"),Ea=O.prototype={ready:function(b){function a(){c||(c=!0,b())}var c=!1;"complete"===Q.readyState?setTimeout(a):(this.on("DOMContentLoaded",a),O(Z).on("load",a))},toString:function(){var b=[];q(this,function(a){b.push(""+
|
||||
a)});return"["+b.join(", ")+"]"},eq:function(b){return 0<=b?A(this[b]):A(this[this.length+b])},length:0,push:Qd,sort:[].sort,splice:[].splice},fb={};q("multiple selected checked disabled readOnly required open".split(" "),function(b){fb[x(b)]=b});var fc={};q("input select option textarea button form details".split(" "),function(b){fc[Ga(b)]=!0});q({data:bc,inheritedData:eb,scope:function(b){return A(b).data("$scope")||eb(b.parentNode||b,["$isolateScope","$scope"])},isolateScope:function(b){return A(b).data("$isolateScope")||
|
||||
A(b).data("$isolateScopeNoTemplate")},controller:cc,injector:function(b){return eb(b,"$injector")},removeAttr:function(b,a){b.removeAttribute(a)},hasClass:Bb,css:function(b,a,c){a=Pa(a);if(B(c))b.style[a]=c;else{var d;8>=M&&(d=b.currentStyle&&b.currentStyle[a],""===d&&(d="auto"));d=d||b.style[a];8>=M&&(d=""===d?r:d);return d}},attr:function(b,a,c){var d=x(a);if(fb[d])if(B(c))c?(b[a]=!0,b.setAttribute(a,d)):(b[a]=!1,b.removeAttribute(d));else return b[a]||(b.attributes.getNamedItem(a)||w).specified?
|
||||
d:r;else if(B(c))b.setAttribute(a,c);else if(b.getAttribute)return b=b.getAttribute(a,2),null===b?r:b},prop:function(b,a,c){if(B(c))b[a]=c;else return b[a]},text:function(){function b(b,d){var e=a[b.nodeType];if(z(d))return e?b[e]:"";b[e]=d}var a=[];9>M?(a[1]="innerText",a[3]="nodeValue"):a[1]=a[3]="textContent";b.$dv="";return b}(),val:function(b,a){if(z(a)){if("SELECT"===Fa(b)&&b.multiple){var c=[];q(b.options,function(a){a.selected&&c.push(a.value||a.text)});return 0===c.length?null:c}return b.value}b.value=
|
||||
a},html:function(b,a){if(z(a))return b.innerHTML;for(var c=0,d=b.childNodes;c<d.length;c++)Ca(d[c]);b.innerHTML=a},empty:dc},function(b,a){O.prototype[a]=function(a,d){var e,g;if(b!==dc&&(2==b.length&&b!==Bb&&b!==cc?a:d)===r){if(X(a)){for(e=0;e<this.length;e++)if(b===bc)b(this[e],a);else for(g in a)b(this[e],g,a[g]);return this}e=b.$dv;g=e===r?Math.min(this.length,1):this.length;for(var f=0;f<g;f++){var h=b(this[f],a,d);e=e?e+h:h}return e}for(e=0;e<this.length;e++)b(this[e],a,d);return this}});q({removeData:$b,
|
||||
dealoc:Ca,on:function a(c,d,e,g){if(B(g))throw xb("onargs");var f=la(c,"events"),h=la(c,"handle");f||la(c,"events",f={});h||la(c,"handle",h=Yc(c,f));q(d.split(" "),function(d){var g=f[d];if(!g){if("mouseenter"==d||"mouseleave"==d){var l=Q.body.contains||Q.body.compareDocumentPosition?function(a,c){var d=9===a.nodeType?a.documentElement:a,e=c&&c.parentNode;return a===e||!!(e&&1===e.nodeType&&(d.contains?d.contains(e):a.compareDocumentPosition&&a.compareDocumentPosition(e)&16))}:function(a,c){if(c)for(;c=
|
||||
c.parentNode;)if(c===a)return!0;return!1};f[d]=[];a(c,{mouseleave:"mouseout",mouseenter:"mouseover"}[d],function(a){var c=a.relatedTarget;c&&(c===this||l(this,c))||h(a,d)})}else Ic(c,d,h),f[d]=[];g=f[d]}g.push(e)})},off:ac,one:function(a,c,d){a=A(a);a.on(c,function g(){a.off(c,d);a.off(c,g)});a.on(c,d)},replaceWith:function(a,c){var d,e=a.parentNode;Ca(a);q(new O(c),function(c){d?e.insertBefore(c,d.nextSibling):e.replaceChild(c,a);d=c})},children:function(a){var c=[];q(a.childNodes,function(a){1===
|
||||
a.nodeType&&c.push(a)});return c},contents:function(a){return a.childNodes||[]},append:function(a,c){q(new O(c),function(c){1!==a.nodeType&&11!==a.nodeType||a.appendChild(c)})},prepend:function(a,c){if(1===a.nodeType){var d=a.firstChild;q(new O(c),function(c){a.insertBefore(c,d)})}},wrap:function(a,c){c=A(c)[0];var d=a.parentNode;d&&d.replaceChild(c,a);c.appendChild(a)},remove:function(a){Ca(a);var c=a.parentNode;c&&c.removeChild(a)},after:function(a,c){var d=a,e=a.parentNode;q(new O(c),function(a){e.insertBefore(a,
|
||||
d.nextSibling);d=a})},addClass:Db,removeClass:Cb,toggleClass:function(a,c,d){z(d)&&(d=!Bb(a,c));(d?Db:Cb)(a,c)},parent:function(a){return(a=a.parentNode)&&11!==a.nodeType?a:null},next:function(a){if(a.nextElementSibling)return a.nextElementSibling;for(a=a.nextSibling;null!=a&&1!==a.nodeType;)a=a.nextSibling;return a},find:function(a,c){return a.getElementsByTagName?a.getElementsByTagName(c):[]},clone:zb,triggerHandler:function(a,c,d){c=(la(a,"events")||{})[c];d=d||[];var e=[{preventDefault:w,stopPropagation:w}];
|
||||
q(c,function(c){c.apply(a,e.concat(d))})}},function(a,c){O.prototype[c]=function(c,e,g){for(var f,h=0;h<this.length;h++)z(f)?(f=a(this[h],c,e,g),B(f)&&(f=A(f))):yb(f,a(this[h],c,e,g));return B(f)?f:this};O.prototype.bind=O.prototype.on;O.prototype.unbind=O.prototype.off});Ra.prototype={put:function(a,c){this[Da(a)]=c},get:function(a){return this[Da(a)]},remove:function(a){var c=this[a=Da(a)];delete this[a];return c}};var $c=/^function\s*[^\(]*\(\s*([^\)]*)\)/m,ad=/,/,bd=/^\s*(_?)(\S+?)\1\s*$/,Zc=
|
||||
/((\/\/.*$)|(\/\*[\s\S]*?\*\/))/mg,Sa=F("$injector"),Sd=F("$animate"),Td=["$provide",function(a){this.$$selectors={};this.register=function(c,d){var e=c+"-animation";if(c&&"."!=c.charAt(0))throw Sd("notcsel",c);this.$$selectors[c.substr(1)]=e;a.factory(e,d)};this.classNameFilter=function(a){1===arguments.length&&(this.$$classNameFilter=a instanceof RegExp?a:null);return this.$$classNameFilter};this.$get=["$timeout",function(a){return{enter:function(d,e,g,f){g?g.after(d):(e&&e[0]||(e=g.parent()),e.append(d));
|
||||
f&&a(f,0,!1)},leave:function(d,e){d.remove();e&&a(e,0,!1)},move:function(a,c,g,f){this.enter(a,c,g,f)},addClass:function(d,e,g){e=D(e)?e:K(e)?e.join(" "):"";q(d,function(a){Db(a,e)});g&&a(g,0,!1)},removeClass:function(d,e,g){e=D(e)?e:K(e)?e.join(" "):"";q(d,function(a){Cb(a,e)});g&&a(g,0,!1)},enabled:w}}]}],ja=F("$compile");ic.$inject=["$provide","$$sanitizeUriProvider"];var hd=/^(x[\:\-_]|data[\:\-_])/i,oc=F("$interpolate"),Ud=/^([^\?#]*)(\?([^#]*))?(#(.*))?$/,sd={http:80,https:443,ftp:21},Hb=F("$location");
|
||||
tc.prototype=Ib.prototype=sc.prototype={$$html5:!1,$$replace:!1,absUrl:ib("$$absUrl"),url:function(a,c){if(z(a))return this.$$url;var d=Ud.exec(a);d[1]&&this.path(decodeURIComponent(d[1]));(d[2]||d[1])&&this.search(d[3]||"");this.hash(d[5]||"",c);return this},protocol:ib("$$protocol"),host:ib("$$host"),port:ib("$$port"),path:uc("$$path",function(a){return"/"==a.charAt(0)?a:"/"+a}),search:function(a,c){switch(arguments.length){case 0:return this.$$search;case 1:if(D(a))this.$$search=Wb(a);else if(X(a))this.$$search=
|
||||
a;else throw Hb("isrcharg");break;default:z(c)||null===c?delete this.$$search[a]:this.$$search[a]=c}this.$$compose();return this},hash:uc("$$hash",Aa),replace:function(){this.$$replace=!0;return this}};var ya=F("$parse"),xc={},ra,Ia={"null":function(){return null},"true":function(){return!0},"false":function(){return!1},undefined:w,"+":function(a,c,d,e){d=d(a,c);e=e(a,c);return B(d)?B(e)?d+e:d:B(e)?e:r},"-":function(a,c,d,e){d=d(a,c);e=e(a,c);return(B(d)?d:0)-(B(e)?e:0)},"*":function(a,c,d,e){return d(a,
|
||||
c)*e(a,c)},"/":function(a,c,d,e){return d(a,c)/e(a,c)},"%":function(a,c,d,e){return d(a,c)%e(a,c)},"^":function(a,c,d,e){return d(a,c)^e(a,c)},"=":w,"===":function(a,c,d,e){return d(a,c)===e(a,c)},"!==":function(a,c,d,e){return d(a,c)!==e(a,c)},"==":function(a,c,d,e){return d(a,c)==e(a,c)},"!=":function(a,c,d,e){return d(a,c)!=e(a,c)},"<":function(a,c,d,e){return d(a,c)<e(a,c)},">":function(a,c,d,e){return d(a,c)>e(a,c)},"<=":function(a,c,d,e){return d(a,c)<=e(a,c)},">=":function(a,c,d,e){return d(a,
|
||||
c)>=e(a,c)},"&&":function(a,c,d,e){return d(a,c)&&e(a,c)},"||":function(a,c,d,e){return d(a,c)||e(a,c)},"&":function(a,c,d,e){return d(a,c)&e(a,c)},"|":function(a,c,d,e){return e(a,c)(a,c,d(a,c))},"!":function(a,c,d){return!d(a,c)}},Vd={n:"\n",f:"\f",r:"\r",t:"\t",v:"\v","'":"'",'"':'"'},Kb=function(a){this.options=a};Kb.prototype={constructor:Kb,lex:function(a){this.text=a;this.index=0;this.ch=r;this.lastCh=":";this.tokens=[];var c;for(a=[];this.index<this.text.length;){this.ch=this.text.charAt(this.index);
|
||||
if(this.is("\"'"))this.readString(this.ch);else if(this.isNumber(this.ch)||this.is(".")&&this.isNumber(this.peek()))this.readNumber();else if(this.isIdent(this.ch))this.readIdent(),this.was("{,")&&("{"===a[0]&&(c=this.tokens[this.tokens.length-1]))&&(c.json=-1===c.text.indexOf("."));else if(this.is("(){}[].,;:?"))this.tokens.push({index:this.index,text:this.ch,json:this.was(":[,")&&this.is("{[")||this.is("}]:,")}),this.is("{[")&&a.unshift(this.ch),this.is("}]")&&a.shift(),this.index++;else if(this.isWhitespace(this.ch)){this.index++;
|
||||
continue}else{var d=this.ch+this.peek(),e=d+this.peek(2),g=Ia[this.ch],f=Ia[d],h=Ia[e];h?(this.tokens.push({index:this.index,text:e,fn:h}),this.index+=3):f?(this.tokens.push({index:this.index,text:d,fn:f}),this.index+=2):g?(this.tokens.push({index:this.index,text:this.ch,fn:g,json:this.was("[,:")&&this.is("+-")}),this.index+=1):this.throwError("Unexpected next character ",this.index,this.index+1)}this.lastCh=this.ch}return this.tokens},is:function(a){return-1!==a.indexOf(this.ch)},was:function(a){return-1!==
|
||||
a.indexOf(this.lastCh)},peek:function(a){a=a||1;return this.index+a<this.text.length?this.text.charAt(this.index+a):!1},isNumber:function(a){return"0"<=a&&"9">=a},isWhitespace:function(a){return" "===a||"\r"===a||"\t"===a||"\n"===a||"\v"===a||"\u00a0"===a},isIdent:function(a){return"a"<=a&&"z">=a||"A"<=a&&"Z">=a||"_"===a||"$"===a},isExpOperator:function(a){return"-"===a||"+"===a||this.isNumber(a)},throwError:function(a,c,d){d=d||this.index;c=B(c)?"s "+c+"-"+this.index+" ["+this.text.substring(c,d)+
|
||||
"]":" "+d;throw ya("lexerr",a,c,this.text);},readNumber:function(){for(var a="",c=this.index;this.index<this.text.length;){var d=x(this.text.charAt(this.index));if("."==d||this.isNumber(d))a+=d;else{var e=this.peek();if("e"==d&&this.isExpOperator(e))a+=d;else if(this.isExpOperator(d)&&e&&this.isNumber(e)&&"e"==a.charAt(a.length-1))a+=d;else if(!this.isExpOperator(d)||e&&this.isNumber(e)||"e"!=a.charAt(a.length-1))break;else this.throwError("Invalid exponent")}this.index++}a*=1;this.tokens.push({index:c,
|
||||
text:a,json:!0,fn:function(){return a}})},readIdent:function(){for(var a=this,c="",d=this.index,e,g,f,h;this.index<this.text.length;){h=this.text.charAt(this.index);if("."===h||this.isIdent(h)||this.isNumber(h))"."===h&&(e=this.index),c+=h;else break;this.index++}if(e)for(g=this.index;g<this.text.length;){h=this.text.charAt(g);if("("===h){f=c.substr(e-d+1);c=c.substr(0,e-d);this.index=g;break}if(this.isWhitespace(h))g++;else break}d={index:d,text:c};if(Ia.hasOwnProperty(c))d.fn=Ia[c],d.json=Ia[c];
|
||||
else{var m=wc(c,this.options,this.text);d.fn=t(function(a,c){return m(a,c)},{assign:function(d,e){return jb(d,c,e,a.text,a.options)}})}this.tokens.push(d);f&&(this.tokens.push({index:e,text:".",json:!1}),this.tokens.push({index:e+1,text:f,json:!1}))},readString:function(a){var c=this.index;this.index++;for(var d="",e=a,g=!1;this.index<this.text.length;){var f=this.text.charAt(this.index),e=e+f;if(g)"u"===f?(f=this.text.substring(this.index+1,this.index+5),f.match(/[\da-f]{4}/i)||this.throwError("Invalid unicode escape [\\u"+
|
||||
f+"]"),this.index+=4,d+=String.fromCharCode(parseInt(f,16))):d=(g=Vd[f])?d+g:d+f,g=!1;else if("\\"===f)g=!0;else{if(f===a){this.index++;this.tokens.push({index:c,text:e,string:d,json:!0,fn:function(){return d}});return}d+=f}this.index++}this.throwError("Unterminated quote",c)}};var Xa=function(a,c,d){this.lexer=a;this.$filter=c;this.options=d};Xa.ZERO=function(){return 0};Xa.prototype={constructor:Xa,parse:function(a,c){this.text=a;this.json=c;this.tokens=this.lexer.lex(a);c&&(this.assignment=this.logicalOR,
|
||||
this.functionCall=this.fieldAccess=this.objectIndex=this.filterChain=function(){this.throwError("is not valid json",{text:a,index:0})});var d=c?this.primary():this.statements();0!==this.tokens.length&&this.throwError("is an unexpected token",this.tokens[0]);d.literal=!!d.literal;d.constant=!!d.constant;return d},primary:function(){var a;if(this.expect("("))a=this.filterChain(),this.consume(")");else if(this.expect("["))a=this.arrayDeclaration();else if(this.expect("{"))a=this.object();else{var c=
|
||||
this.expect();(a=c.fn)||this.throwError("not a primary expression",c);c.json&&(a.constant=!0,a.literal=!0)}for(var d;c=this.expect("(","[",".");)"("===c.text?(a=this.functionCall(a,d),d=null):"["===c.text?(d=a,a=this.objectIndex(a)):"."===c.text?(d=a,a=this.fieldAccess(a)):this.throwError("IMPOSSIBLE");return a},throwError:function(a,c){throw ya("syntax",c.text,a,c.index+1,this.text,this.text.substring(c.index));},peekToken:function(){if(0===this.tokens.length)throw ya("ueoe",this.text);return this.tokens[0]},
|
||||
peek:function(a,c,d,e){if(0<this.tokens.length){var g=this.tokens[0],f=g.text;if(f===a||f===c||f===d||f===e||!(a||c||d||e))return g}return!1},expect:function(a,c,d,e){return(a=this.peek(a,c,d,e))?(this.json&&!a.json&&this.throwError("is not valid json",a),this.tokens.shift(),a):!1},consume:function(a){this.expect(a)||this.throwError("is unexpected, expecting ["+a+"]",this.peek())},unaryFn:function(a,c){return t(function(d,e){return a(d,e,c)},{constant:c.constant})},ternaryFn:function(a,c,d){return t(function(e,
|
||||
g){return a(e,g)?c(e,g):d(e,g)},{constant:a.constant&&c.constant&&d.constant})},binaryFn:function(a,c,d){return t(function(e,g){return c(e,g,a,d)},{constant:a.constant&&d.constant})},statements:function(){for(var a=[];;)if(0<this.tokens.length&&!this.peek("}",")",";","]")&&a.push(this.filterChain()),!this.expect(";"))return 1===a.length?a[0]:function(c,d){for(var e,g=0;g<a.length;g++){var f=a[g];f&&(e=f(c,d))}return e}},filterChain:function(){for(var a=this.expression(),c;;)if(c=this.expect("|"))a=
|
||||
this.binaryFn(a,c.fn,this.filter());else return a},filter:function(){for(var a=this.expect(),c=this.$filter(a.text),d=[];;)if(a=this.expect(":"))d.push(this.expression());else{var e=function(a,e,h){h=[h];for(var m=0;m<d.length;m++)h.push(d[m](a,e));return c.apply(a,h)};return function(){return e}}},expression:function(){return this.assignment()},assignment:function(){var a=this.ternary(),c,d;return(d=this.expect("="))?(a.assign||this.throwError("implies assignment but ["+this.text.substring(0,d.index)+
|
||||
"] can not be assigned to",d),c=this.ternary(),function(d,g){return a.assign(d,c(d,g),g)}):a},ternary:function(){var a=this.logicalOR(),c,d;if(this.expect("?")){c=this.ternary();if(d=this.expect(":"))return this.ternaryFn(a,c,this.ternary());this.throwError("expected :",d)}else return a},logicalOR:function(){for(var a=this.logicalAND(),c;;)if(c=this.expect("||"))a=this.binaryFn(a,c.fn,this.logicalAND());else return a},logicalAND:function(){var a=this.equality(),c;if(c=this.expect("&&"))a=this.binaryFn(a,
|
||||
c.fn,this.logicalAND());return a},equality:function(){var a=this.relational(),c;if(c=this.expect("==","!=","===","!=="))a=this.binaryFn(a,c.fn,this.equality());return a},relational:function(){var a=this.additive(),c;if(c=this.expect("<",">","<=",">="))a=this.binaryFn(a,c.fn,this.relational());return a},additive:function(){for(var a=this.multiplicative(),c;c=this.expect("+","-");)a=this.binaryFn(a,c.fn,this.multiplicative());return a},multiplicative:function(){for(var a=this.unary(),c;c=this.expect("*",
|
||||
"/","%");)a=this.binaryFn(a,c.fn,this.unary());return a},unary:function(){var a;return this.expect("+")?this.primary():(a=this.expect("-"))?this.binaryFn(Xa.ZERO,a.fn,this.unary()):(a=this.expect("!"))?this.unaryFn(a.fn,this.unary()):this.primary()},fieldAccess:function(a){var c=this,d=this.expect().text,e=wc(d,this.options,this.text);return t(function(c,d,h){return e(h||a(c,d),d)},{assign:function(e,f,h){return jb(a(e,h),d,f,c.text,c.options)}})},objectIndex:function(a){var c=this,d=this.expression();
|
||||
this.consume("]");return t(function(e,g){var f=a(e,g),h=d(e,g),m;if(!f)return r;(f=Wa(f[h],c.text))&&(f.then&&c.options.unwrapPromises)&&(m=f,"$$v"in f||(m.$$v=r,m.then(function(a){m.$$v=a})),f=f.$$v);return f},{assign:function(e,g,f){var h=d(e,f);return Wa(a(e,f),c.text)[h]=g}})},functionCall:function(a,c){var d=[];if(")"!==this.peekToken().text){do d.push(this.expression());while(this.expect(","))}this.consume(")");var e=this;return function(g,f){for(var h=[],m=c?c(g,f):g,k=0;k<d.length;k++)h.push(d[k](g,
|
||||
f));k=a(g,f,m)||w;Wa(m,e.text);Wa(k,e.text);h=k.apply?k.apply(m,h):k(h[0],h[1],h[2],h[3],h[4]);return Wa(h,e.text)}},arrayDeclaration:function(){var a=[],c=!0;if("]"!==this.peekToken().text){do{var d=this.expression();a.push(d);d.constant||(c=!1)}while(this.expect(","))}this.consume("]");return t(function(c,d){for(var f=[],h=0;h<a.length;h++)f.push(a[h](c,d));return f},{literal:!0,constant:c})},object:function(){var a=[],c=!0;if("}"!==this.peekToken().text){do{var d=this.expect(),d=d.string||d.text;
|
||||
this.consume(":");var e=this.expression();a.push({key:d,value:e});e.constant||(c=!1)}while(this.expect(","))}this.consume("}");return t(function(c,d){for(var e={},m=0;m<a.length;m++){var k=a[m];e[k.key]=k.value(c,d)}return e},{literal:!0,constant:c})}};var Jb={},sa=F("$sce"),ea={HTML:"html",CSS:"css",URL:"url",RESOURCE_URL:"resourceUrl",JS:"js"},Y=Q.createElement("a"),zc=qa(Z.location.href,!0);Ac.$inject=["$provide"];Bc.$inject=["$locale"];Dc.$inject=["$locale"];var Gc=".",Pd={yyyy:W("FullYear",4),
|
||||
yy:W("FullYear",2,0,!0),y:W("FullYear",1),MMMM:kb("Month"),MMM:kb("Month",!0),MM:W("Month",2,1),M:W("Month",1,1),dd:W("Date",2),d:W("Date",1),HH:W("Hours",2),H:W("Hours",1),hh:W("Hours",2,-12),h:W("Hours",1,-12),mm:W("Minutes",2),m:W("Minutes",1),ss:W("Seconds",2),s:W("Seconds",1),sss:W("Milliseconds",3),EEEE:kb("Day"),EEE:kb("Day",!0),a:function(a,c){return 12>a.getHours()?c.AMPMS[0]:c.AMPMS[1]},Z:function(a){a=-1*a.getTimezoneOffset();return a=(0<=a?"+":"")+(Lb(Math[0<a?"floor":"ceil"](a/60),2)+
|
||||
Lb(Math.abs(a%60),2))}},Od=/((?:[^yMdHhmsaZE']+)|(?:'(?:[^']|'')*')|(?:E+|y+|M+|d+|H+|h+|m+|s+|a|Z))(.*)/,Nd=/^\-?\d+$/;Cc.$inject=["$locale"];var Ld=$(x),Md=$(Ga);Ec.$inject=["$parse"];var Wd=$({restrict:"E",compile:function(a,c){8>=M&&(c.href||c.name||c.$set("href",""),a.append(Q.createComment("IE fix")));if(!c.href&&!c.name)return function(a,c){c.on("click",function(a){c.attr("href")||a.preventDefault()})}}}),Nb={};q(fb,function(a,c){if("multiple"!=a){var d=ma("ng-"+c);Nb[d]=function(){return{priority:100,
|
||||
compile:function(){return function(a,g,f){a.$watch(f[d],function(a){f.$set(c,!!a)})}}}}}});q(["src","srcset","href"],function(a){var c=ma("ng-"+a);Nb[c]=function(){return{priority:99,link:function(d,e,g){g.$observe(c,function(c){c&&(g.$set(a,c),M&&e.prop(a,g[a]))})}}}});var nb={$addControl:w,$removeControl:w,$setValidity:w,$setDirty:w,$setPristine:w};Hc.$inject=["$element","$attrs","$scope"];var Jc=function(a){return["$timeout",function(c){return{name:"form",restrict:a?"EAC":"E",controller:Hc,compile:function(){return{pre:function(a,
|
||||
e,g,f){if(!g.action){var h=function(a){a.preventDefault?a.preventDefault():a.returnValue=!1};Ic(e[0],"submit",h);e.on("$destroy",function(){c(function(){Ab(e[0],"submit",h)},0,!1)})}var m=e.parent().controller("form"),k=g.name||g.ngForm;k&&jb(a,k,f,k);if(m)e.on("$destroy",function(){m.$removeControl(f);k&&jb(a,k,r,k);t(f,nb)})}}}}}]},Xd=Jc(),Yd=Jc(!0),Zd=/^(ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?$/,$d=/^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}$/,ae=
|
||||
/^\s*(\-|\+)?(\d+|(\d*(\.\d*)))\s*$/,Kc={text:pb,number:function(a,c,d,e,g,f){pb(a,c,d,e,g,f);e.$parsers.push(function(a){var c=e.$isEmpty(a);if(c||ae.test(a))return e.$setValidity("number",!0),""===a?null:c?a:parseFloat(a);e.$setValidity("number",!1);return r});e.$formatters.push(function(a){return e.$isEmpty(a)?"":""+a});d.min&&(a=function(a){var c=parseFloat(d.min);if(!e.$isEmpty(a)&&a<c)return e.$setValidity("min",!1),r;e.$setValidity("min",!0);return a},e.$parsers.push(a),e.$formatters.push(a));
|
||||
d.max&&(a=function(a){var c=parseFloat(d.max);if(!e.$isEmpty(a)&&a>c)return e.$setValidity("max",!1),r;e.$setValidity("max",!0);return a},e.$parsers.push(a),e.$formatters.push(a));e.$formatters.push(function(a){if(e.$isEmpty(a)||rb(a))return e.$setValidity("number",!0),a;e.$setValidity("number",!1);return r})},url:function(a,c,d,e,g,f){pb(a,c,d,e,g,f);a=function(a){if(e.$isEmpty(a)||Zd.test(a))return e.$setValidity("url",!0),a;e.$setValidity("url",!1);return r};e.$formatters.push(a);e.$parsers.push(a)},
|
||||
email:function(a,c,d,e,g,f){pb(a,c,d,e,g,f);a=function(a){if(e.$isEmpty(a)||$d.test(a))return e.$setValidity("email",!0),a;e.$setValidity("email",!1);return r};e.$formatters.push(a);e.$parsers.push(a)},radio:function(a,c,d,e){z(d.name)&&c.attr("name",Ya());c.on("click",function(){c[0].checked&&a.$apply(function(){e.$setViewValue(d.value)})});e.$render=function(){c[0].checked=d.value==e.$viewValue};d.$observe("value",e.$render)},checkbox:function(a,c,d,e){var g=d.ngTrueValue,f=d.ngFalseValue;D(g)||
|
||||
(g=!0);D(f)||(f=!1);c.on("click",function(){a.$apply(function(){e.$setViewValue(c[0].checked)})});e.$render=function(){c[0].checked=e.$viewValue};e.$isEmpty=function(a){return a!==g};e.$formatters.push(function(a){return a===g});e.$parsers.push(function(a){return a?g:f})},hidden:w,button:w,submit:w,reset:w},Lc=["$browser","$sniffer",function(a,c){return{restrict:"E",require:"?ngModel",link:function(d,e,g,f){f&&(Kc[x(g.type)]||Kc.text)(d,e,g,f,c,a)}}}],mb="ng-valid",lb="ng-invalid",Ha="ng-pristine",
|
||||
ob="ng-dirty",be=["$scope","$exceptionHandler","$attrs","$element","$parse",function(a,c,d,e,g){function f(a,c){c=c?"-"+cb(c,"-"):"";e.removeClass((a?lb:mb)+c).addClass((a?mb:lb)+c)}this.$modelValue=this.$viewValue=Number.NaN;this.$parsers=[];this.$formatters=[];this.$viewChangeListeners=[];this.$pristine=!0;this.$dirty=!1;this.$valid=!0;this.$invalid=!1;this.$name=d.name;var h=g(d.ngModel),m=h.assign;if(!m)throw F("ngModel")("nonassign",d.ngModel,ga(e));this.$render=w;this.$isEmpty=function(a){return z(a)||
|
||||
""===a||null===a||a!==a};var k=e.inheritedData("$formController")||nb,l=0,n=this.$error={};e.addClass(Ha);f(!0);this.$setValidity=function(a,c){n[a]!==!c&&(c?(n[a]&&l--,l||(f(!0),this.$valid=!0,this.$invalid=!1)):(f(!1),this.$invalid=!0,this.$valid=!1,l++),n[a]=!c,f(c,a),k.$setValidity(a,c,this))};this.$setPristine=function(){this.$dirty=!1;this.$pristine=!0;e.removeClass(ob).addClass(Ha)};this.$setViewValue=function(d){this.$viewValue=d;this.$pristine&&(this.$dirty=!0,this.$pristine=!1,e.removeClass(Ha).addClass(ob),
|
||||
k.$setDirty());q(this.$parsers,function(a){d=a(d)});this.$modelValue!==d&&(this.$modelValue=d,m(a,d),q(this.$viewChangeListeners,function(a){try{a()}catch(d){c(d)}}))};var p=this;a.$watch(function(){var c=h(a);if(p.$modelValue!==c){var d=p.$formatters,e=d.length;for(p.$modelValue=c;e--;)c=d[e](c);p.$viewValue!==c&&(p.$viewValue=c,p.$render())}return c})}],ce=function(){return{require:["ngModel","^?form"],controller:be,link:function(a,c,d,e){var g=e[0],f=e[1]||nb;f.$addControl(g);a.$on("$destroy",
|
||||
function(){f.$removeControl(g)})}}},de=$({require:"ngModel",link:function(a,c,d,e){e.$viewChangeListeners.push(function(){a.$eval(d.ngChange)})}}),Mc=function(){return{require:"?ngModel",link:function(a,c,d,e){if(e){d.required=!0;var g=function(a){if(d.required&&e.$isEmpty(a))e.$setValidity("required",!1);else return e.$setValidity("required",!0),a};e.$formatters.push(g);e.$parsers.unshift(g);d.$observe("required",function(){g(e.$viewValue)})}}}},ee=function(){return{require:"ngModel",link:function(a,
|
||||
c,d,e){var g=(a=/\/(.*)\//.exec(d.ngList))&&RegExp(a[1])||d.ngList||",";e.$parsers.push(function(a){if(!z(a)){var c=[];a&&q(a.split(g),function(a){a&&c.push(aa(a))});return c}});e.$formatters.push(function(a){return K(a)?a.join(", "):r});e.$isEmpty=function(a){return!a||!a.length}}}},fe=/^(true|false|\d+)$/,ge=function(){return{priority:100,compile:function(a,c){return fe.test(c.ngValue)?function(a,c,g){g.$set("value",a.$eval(g.ngValue))}:function(a,c,g){a.$watch(g.ngValue,function(a){g.$set("value",
|
||||
a)})}}}},he=ta(function(a,c,d){c.addClass("ng-binding").data("$binding",d.ngBind);a.$watch(d.ngBind,function(a){c.text(a==r?"":a)})}),ie=["$interpolate",function(a){return function(c,d,e){c=a(d.attr(e.$attr.ngBindTemplate));d.addClass("ng-binding").data("$binding",c);e.$observe("ngBindTemplate",function(a){d.text(a)})}}],je=["$sce","$parse",function(a,c){return function(d,e,g){e.addClass("ng-binding").data("$binding",g.ngBindHtml);var f=c(g.ngBindHtml);d.$watch(function(){return(f(d)||"").toString()},
|
||||
function(c){e.html(a.getTrustedHtml(f(d))||"")})}}],ke=Mb("",!0),le=Mb("Odd",0),me=Mb("Even",1),ne=ta({compile:function(a,c){c.$set("ngCloak",r);a.removeClass("ng-cloak")}}),oe=[function(){return{scope:!0,controller:"@",priority:500}}],Nc={};q("click dblclick mousedown mouseup mouseover mouseout mousemove mouseenter mouseleave keydown keyup keypress submit focus blur copy cut paste".split(" "),function(a){var c=ma("ng-"+a);Nc[c]=["$parse",function(d){return{compile:function(e,g){var f=d(g[c]);return function(c,
|
||||
d,e){d.on(x(a),function(a){c.$apply(function(){f(c,{$event:a})})})}}}}]});var pe=["$animate",function(a){return{transclude:"element",priority:600,terminal:!0,restrict:"A",$$tlb:!0,link:function(c,d,e,g,f){var h,m;c.$watch(e.ngIf,function(g){Ma(g)?m||(m=c.$new(),f(m,function(c){c[c.length++]=Q.createComment(" end ngIf: "+e.ngIf+" ");h={clone:c};a.enter(c,d.parent(),d)})):(m&&(m.$destroy(),m=null),h&&(a.leave(vb(h.clone)),h=null))})}}}],qe=["$http","$templateCache","$anchorScroll","$animate","$sce",
|
||||
function(a,c,d,e,g){return{restrict:"ECA",priority:400,terminal:!0,transclude:"element",controller:Na.noop,compile:function(f,h){var m=h.ngInclude||h.src,k=h.onload||"",l=h.autoscroll;return function(f,h,q,r,y){var A=0,u,t,H=function(){u&&(u.$destroy(),u=null);t&&(e.leave(t),t=null)};f.$watch(g.parseAsResourceUrl(m),function(g){var m=function(){!B(l)||l&&!f.$eval(l)||d()},q=++A;g?(a.get(g,{cache:c}).success(function(a){if(q===A){var c=f.$new();r.template=a;a=y(c,function(a){H();e.enter(a,null,h,m)});
|
||||
u=c;t=a;u.$emit("$includeContentLoaded");f.$eval(k)}}).error(function(){q===A&&H()}),f.$emit("$includeContentRequested")):(H(),r.template=null)})}}}}],re=["$compile",function(a){return{restrict:"ECA",priority:-400,require:"ngInclude",link:function(c,d,e,g){d.html(g.template);a(d.contents())(c)}}}],se=ta({priority:450,compile:function(){return{pre:function(a,c,d){a.$eval(d.ngInit)}}}}),te=ta({terminal:!0,priority:1E3}),ue=["$locale","$interpolate",function(a,c){var d=/{}/g;return{restrict:"EA",link:function(e,
|
||||
g,f){var h=f.count,m=f.$attr.when&&g.attr(f.$attr.when),k=f.offset||0,l=e.$eval(m)||{},n={},p=c.startSymbol(),s=c.endSymbol(),r=/^when(Minus)?(.+)$/;q(f,function(a,c){r.test(c)&&(l[x(c.replace("when","").replace("Minus","-"))]=g.attr(f.$attr[c]))});q(l,function(a,e){n[e]=c(a.replace(d,p+h+"-"+k+s))});e.$watch(function(){var c=parseFloat(e.$eval(h));if(isNaN(c))return"";c in l||(c=a.pluralCat(c-k));return n[c](e,g,!0)},function(a){g.text(a)})}}}],ve=["$parse","$animate",function(a,c){var d=F("ngRepeat");
|
||||
return{transclude:"element",priority:1E3,terminal:!0,$$tlb:!0,link:function(e,g,f,h,m){var k=f.ngRepeat,l=k.match(/^\s*([\s\S]+?)\s+in\s+([\s\S]+?)(?:\s+track\s+by\s+([\s\S]+?))?\s*$/),n,p,s,r,y,t,u={$id:Da};if(!l)throw d("iexp",k);f=l[1];h=l[2];(l=l[3])?(n=a(l),p=function(a,c,d){t&&(u[t]=a);u[y]=c;u.$index=d;return n(e,u)}):(s=function(a,c){return Da(c)},r=function(a){return a});l=f.match(/^(?:([\$\w]+)|\(([\$\w]+)\s*,\s*([\$\w]+)\))$/);if(!l)throw d("iidexp",f);y=l[3]||l[1];t=l[2];var B={};e.$watchCollection(h,
|
||||
function(a){var f,h,l=g[0],n,u={},z,P,D,x,T,w,F=[];if(qb(a))T=a,n=p||s;else{n=p||r;T=[];for(D in a)a.hasOwnProperty(D)&&"$"!=D.charAt(0)&&T.push(D);T.sort()}z=T.length;h=F.length=T.length;for(f=0;f<h;f++)if(D=a===T?f:T[f],x=a[D],x=n(D,x,f),xa(x,"`track by` id"),B.hasOwnProperty(x))w=B[x],delete B[x],u[x]=w,F[f]=w;else{if(u.hasOwnProperty(x))throw q(F,function(a){a&&a.scope&&(B[a.id]=a)}),d("dupes",k,x);F[f]={id:x};u[x]=!1}for(D in B)B.hasOwnProperty(D)&&(w=B[D],f=vb(w.clone),c.leave(f),q(f,function(a){a.$$NG_REMOVED=
|
||||
!0}),w.scope.$destroy());f=0;for(h=T.length;f<h;f++){D=a===T?f:T[f];x=a[D];w=F[f];F[f-1]&&(l=F[f-1].clone[F[f-1].clone.length-1]);if(w.scope){P=w.scope;n=l;do n=n.nextSibling;while(n&&n.$$NG_REMOVED);w.clone[0]!=n&&c.move(vb(w.clone),null,A(l));l=w.clone[w.clone.length-1]}else P=e.$new();P[y]=x;t&&(P[t]=D);P.$index=f;P.$first=0===f;P.$last=f===z-1;P.$middle=!(P.$first||P.$last);P.$odd=!(P.$even=0===(f&1));w.scope||m(P,function(a){a[a.length++]=Q.createComment(" end ngRepeat: "+k+" ");c.enter(a,null,
|
||||
A(l));l=a;w.scope=P;w.clone=a;u[w.id]=w})}B=u})}}}],we=["$animate",function(a){return function(c,d,e){c.$watch(e.ngShow,function(c){a[Ma(c)?"removeClass":"addClass"](d,"ng-hide")})}}],xe=["$animate",function(a){return function(c,d,e){c.$watch(e.ngHide,function(c){a[Ma(c)?"addClass":"removeClass"](d,"ng-hide")})}}],ye=ta(function(a,c,d){a.$watch(d.ngStyle,function(a,d){d&&a!==d&&q(d,function(a,d){c.css(d,"")});a&&c.css(a)},!0)}),ze=["$animate",function(a){return{restrict:"EA",require:"ngSwitch",controller:["$scope",
|
||||
function(){this.cases={}}],link:function(c,d,e,g){var f,h,m=[];c.$watch(e.ngSwitch||e.on,function(d){for(var l=0,n=m.length;l<n;l++)m[l].$destroy(),a.leave(h[l]);h=[];m=[];if(f=g.cases["!"+d]||g.cases["?"])c.$eval(e.change),q(f,function(d){var e=c.$new();m.push(e);d.transclude(e,function(c){var e=d.element;h.push(c);a.enter(c,e.parent(),e)})})})}}}],Ae=ta({transclude:"element",priority:800,require:"^ngSwitch",compile:function(a,c){return function(a,e,g,f,h){f.cases["!"+c.ngSwitchWhen]=f.cases["!"+
|
||||
c.ngSwitchWhen]||[];f.cases["!"+c.ngSwitchWhen].push({transclude:h,element:e})}}}),Be=ta({transclude:"element",priority:800,require:"^ngSwitch",link:function(a,c,d,e,g){e.cases["?"]=e.cases["?"]||[];e.cases["?"].push({transclude:g,element:c})}}),Ce=ta({controller:["$element","$transclude",function(a,c){if(!c)throw F("ngTransclude")("orphan",ga(a));this.$transclude=c}],link:function(a,c,d,e){e.$transclude(function(a){c.empty();c.append(a)})}}),De=["$templateCache",function(a){return{restrict:"E",terminal:!0,
|
||||
compile:function(c,d){"text/ng-template"==d.type&&a.put(d.id,c[0].text)}}}],Ee=F("ngOptions"),Fe=$({terminal:!0}),Ge=["$compile","$parse",function(a,c){var d=/^\s*(.*?)(?:\s+as\s+(.*?))?(?:\s+group\s+by\s+(.*))?\s+for\s+(?:([\$\w][\$\w]*)|(?:\(\s*([\$\w][\$\w]*)\s*,\s*([\$\w][\$\w]*)\s*\)))\s+in\s+(.*?)(?:\s+track\s+by\s+(.*?))?$/,e={$setViewValue:w};return{restrict:"E",require:["select","?ngModel"],controller:["$element","$scope","$attrs",function(a,c,d){var m=this,k={},l=e,n;m.databound=d.ngModel;
|
||||
m.init=function(a,c,d){l=a;n=d};m.addOption=function(c){xa(c,'"option value"');k[c]=!0;l.$viewValue==c&&(a.val(c),n.parent()&&n.remove())};m.removeOption=function(a){this.hasOption(a)&&(delete k[a],l.$viewValue==a&&this.renderUnknownOption(a))};m.renderUnknownOption=function(c){c="? "+Da(c)+" ?";n.val(c);a.prepend(n);a.val(c);n.prop("selected",!0)};m.hasOption=function(a){return k.hasOwnProperty(a)};c.$on("$destroy",function(){m.renderUnknownOption=w})}],link:function(e,f,h,m){function k(a,c,d,e){d.$render=
|
||||
function(){var a=d.$viewValue;e.hasOption(a)?(x.parent()&&x.remove(),c.val(a),""===a&&w.prop("selected",!0)):z(a)&&w?c.val(""):e.renderUnknownOption(a)};c.on("change",function(){a.$apply(function(){x.parent()&&x.remove();d.$setViewValue(c.val())})})}function l(a,c,d){var e;d.$render=function(){var a=new Ra(d.$viewValue);q(c.find("option"),function(c){c.selected=B(a.get(c.value))})};a.$watch(function(){ua(e,d.$viewValue)||(e=fa(d.$viewValue),d.$render())});c.on("change",function(){a.$apply(function(){var a=
|
||||
[];q(c.find("option"),function(c){c.selected&&a.push(c.value)});d.$setViewValue(a)})})}function n(e,f,g){function h(){var a={"":[]},c=[""],d,k,r,t,v;t=g.$modelValue;v=A(e)||[];var C=n?Ob(v):v,F,I,z;I={};r=!1;var E,H;if(s)if(w&&K(t))for(r=new Ra([]),z=0;z<t.length;z++)I[l]=t[z],r.put(w(e,I),t[z]);else r=new Ra(t);for(z=0;F=C.length,z<F;z++){k=z;if(n){k=C[z];if("$"===k.charAt(0))continue;I[n]=k}I[l]=v[k];d=p(e,I)||"";(k=a[d])||(k=a[d]=[],c.push(d));s?d=B(r.remove(w?w(e,I):q(e,I))):(w?(d={},d[l]=t,d=
|
||||
w(e,d)===w(e,I)):d=t===q(e,I),r=r||d);E=m(e,I);E=B(E)?E:"";k.push({id:w?w(e,I):n?C[z]:z,label:E,selected:d})}s||(y||null===t?a[""].unshift({id:"",label:"",selected:!r}):r||a[""].unshift({id:"?",label:"",selected:!0}));I=0;for(C=c.length;I<C;I++){d=c[I];k=a[d];x.length<=I?(t={element:D.clone().attr("label",d),label:k.label},v=[t],x.push(v),f.append(t.element)):(v=x[I],t=v[0],t.label!=d&&t.element.attr("label",t.label=d));E=null;z=0;for(F=k.length;z<F;z++)r=k[z],(d=v[z+1])?(E=d.element,d.label!==r.label&&
|
||||
E.text(d.label=r.label),d.id!==r.id&&E.val(d.id=r.id),E[0].selected!==r.selected&&E.prop("selected",d.selected=r.selected)):(""===r.id&&y?H=y:(H=u.clone()).val(r.id).attr("selected",r.selected).text(r.label),v.push({element:H,label:r.label,id:r.id,selected:r.selected}),E?E.after(H):t.element.append(H),E=H);for(z++;v.length>z;)v.pop().element.remove()}for(;x.length>I;)x.pop()[0].element.remove()}var k;if(!(k=t.match(d)))throw Ee("iexp",t,ga(f));var m=c(k[2]||k[1]),l=k[4]||k[6],n=k[5],p=c(k[3]||""),
|
||||
q=c(k[2]?k[1]:l),A=c(k[7]),w=k[8]?c(k[8]):null,x=[[{element:f,label:""}]];y&&(a(y)(e),y.removeClass("ng-scope"),y.remove());f.empty();f.on("change",function(){e.$apply(function(){var a,c=A(e)||[],d={},h,k,m,p,t,u,v;if(s)for(k=[],p=0,u=x.length;p<u;p++)for(a=x[p],m=1,t=a.length;m<t;m++){if((h=a[m].element)[0].selected){h=h.val();n&&(d[n]=h);if(w)for(v=0;v<c.length&&(d[l]=c[v],w(e,d)!=h);v++);else d[l]=c[h];k.push(q(e,d))}}else if(h=f.val(),"?"==h)k=r;else if(""===h)k=null;else if(w)for(v=0;v<c.length;v++){if(d[l]=
|
||||
c[v],w(e,d)==h){k=q(e,d);break}}else d[l]=c[h],n&&(d[n]=h),k=q(e,d);g.$setViewValue(k)})});g.$render=h;e.$watch(h)}if(m[1]){var p=m[0];m=m[1];var s=h.multiple,t=h.ngOptions,y=!1,w,u=A(Q.createElement("option")),D=A(Q.createElement("optgroup")),x=u.clone();h=0;for(var v=f.children(),F=v.length;h<F;h++)if(""===v[h].value){w=y=v.eq(h);break}p.init(m,y,x);s&&(m.$isEmpty=function(a){return!a||0===a.length});t?n(e,f,m):s?l(e,f,m):k(e,f,m,p)}}}}],He=["$interpolate",function(a){var c={addOption:w,removeOption:w};
|
||||
return{restrict:"E",priority:100,compile:function(d,e){if(z(e.value)){var g=a(d.text(),!0);g||e.$set("value",d.text())}return function(a,d,e){var k=d.parent(),l=k.data("$selectController")||k.parent().data("$selectController");l&&l.databound?d.prop("selected",!1):l=c;g?a.$watch(g,function(a,c){e.$set("value",a);a!==c&&l.removeOption(c);l.addOption(a)}):l.addOption(e.value);d.on("$destroy",function(){l.removeOption(e.value)})}}}}],Ie=$({restrict:"E",terminal:!0});(Ba=Z.jQuery)?(A=Ba,t(Ba.fn,{scope:Ea.scope,
|
||||
isolateScope:Ea.isolateScope,controller:Ea.controller,injector:Ea.injector,inheritedData:Ea.inheritedData}),wb("remove",!0,!0,!1),wb("empty",!1,!1,!1),wb("html",!1,!1,!0)):A=O;Na.element=A;(function(a){t(a,{bootstrap:Yb,copy:fa,extend:t,equals:ua,element:A,forEach:q,injector:Zb,noop:w,bind:bb,toJson:pa,fromJson:Ub,identity:Aa,isUndefined:z,isDefined:B,isString:D,isFunction:L,isObject:X,isNumber:rb,isElement:Pc,isArray:K,version:Rd,isDate:Ja,lowercase:x,uppercase:Ga,callbacks:{counter:0},$$minErr:F,
|
||||
$$csp:Tb});Ta=Uc(Z);try{Ta("ngLocale")}catch(c){Ta("ngLocale",[]).provider("$locale",rd)}Ta("ng",["ngLocale"],["$provide",function(a){a.provider({$$sanitizeUri:Bd});a.provider("$compile",ic).directive({a:Wd,input:Lc,textarea:Lc,form:Xd,script:De,select:Ge,style:Ie,option:He,ngBind:he,ngBindHtml:je,ngBindTemplate:ie,ngClass:ke,ngClassEven:me,ngClassOdd:le,ngCloak:ne,ngController:oe,ngForm:Yd,ngHide:xe,ngIf:pe,ngInclude:qe,ngInit:se,ngNonBindable:te,ngPluralize:ue,ngRepeat:ve,ngShow:we,ngStyle:ye,ngSwitch:ze,
|
||||
ngSwitchWhen:Ae,ngSwitchDefault:Be,ngOptions:Fe,ngTransclude:Ce,ngModel:ce,ngList:ee,ngChange:de,required:Mc,ngRequired:Mc,ngValue:ge}).directive({ngInclude:re}).directive(Nb).directive(Nc);a.provider({$anchorScroll:cd,$animate:Td,$browser:ed,$cacheFactory:fd,$controller:id,$document:jd,$exceptionHandler:kd,$filter:Ac,$interpolate:pd,$interval:qd,$http:ld,$httpBackend:nd,$location:td,$log:ud,$parse:xd,$rootScope:Ad,$q:yd,$sce:Ed,$sceDelegate:Dd,$sniffer:Fd,$templateCache:gd,$timeout:Gd,$window:Hd})}])})(Na);
|
||||
A(Q).ready(function(){Sc(Q,Yb)})})(window,document);!angular.$$csp()&&angular.element(document).find("head").prepend('<style type="text/css">@charset "UTF-8";[ng\\:cloak],[ng-cloak],[data-ng-cloak],[x-ng-cloak],.ng-cloak,.x-ng-cloak,.ng-hide{display:none !important;}ng\\:form{display:block;}</style>');
|
||||
//# sourceMappingURL=angular.min.js.map
|
||||
gui/app.js (new file, 422 lines)
@@ -0,0 +1,422 @@
/*jslint browser: true, continue: true, plusplus: true */
/*global $: false, angular: false */

'use strict';

var syncthing = angular.module('syncthing', []);

syncthing.controller('SyncthingCtrl', function ($scope, $http) {
    var prevDate = 0,
        modelGetOK = true;

    $scope.connections = {};
    $scope.config = {};
    $scope.myID = '';
    $scope.nodes = [];
    $scope.configInSync = true;
    $scope.errors = [];
    $scope.seenError = '';

    // Strings before bools look better
    $scope.settings = [
        {id: 'ListenStr', descr: 'Sync Protocol Listen Addresses', type: 'text', restart: true},
        {id: 'GUIAddress', descr: 'GUI Listen Address', type: 'text', restart: true},
        {id: 'MaxSendKbps', descr: 'Outgoing Rate Limit (KBps)', type: 'number', restart: true},
        {id: 'RescanIntervalS', descr: 'Rescan Interval (s)', type: 'number', restart: true},
        {id: 'ReconnectIntervalS', descr: 'Reconnect Interval (s)', type: 'number', restart: true},
        {id: 'ParallelRequests', descr: 'Max Outstanding Requests', type: 'number', restart: true},
        {id: 'MaxChangeKbps', descr: 'Max File Change Rate (KBps)', type: 'number', restart: true},

        {id: 'ReadOnly', descr: 'Read Only', type: 'bool', restart: true},
        {id: 'AllowDelete', descr: 'Allow Delete', type: 'bool', restart: true},
        {id: 'FollowSymlinks', descr: 'Follow Symlinks', type: 'bool', restart: true},
        {id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true},
        {id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true}
    ];
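
    // Each entry above is presentation metadata for one settings form field;
    // `type` selects the input widget and `restart` flags options that only
    // take effect after a restart. A minimal sketch of how `type` could drive
    // value coercion on save (coerceSetting is hypothetical, not part of this
    // file):
    function coerceSetting(setting, value) {
        if (setting.type === 'number') {
            return parseInt(value, 10);
        }
        if (setting.type === 'bool') {
            return !!value;
        }
        return String(value); // 'text' settings stay strings
    }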

    function modelGetSucceeded() {
        // First successful /rest/model call after a failure: dismiss the
        // network-error modal.
        if (!modelGetOK) {
            $('#networkError').modal('hide');
            modelGetOK = true;
        }
    }

    function modelGetFailed() {
        // First failed /rest/model call: show a blocking modal until the
        // backend is reachable again.
        if (modelGetOK) {
            $('#networkError').modal({backdrop: 'static', keyboard: false});
            modelGetOK = false;
        }
    }

    // Sort comparator: the local node always sorts first, the rest by NodeID.
    function nodeCompare(a, b) {
        if (a.NodeID === $scope.myID) {
            return -1;
        }
        if (b.NodeID === $scope.myID) {
            return 1;
        }
        if (a.NodeID < b.NodeID) {
            return -1;
        }
        // Return the 0/1 a comparator expects instead of relying on
        // boolean-to-number coercion.
        return a.NodeID > b.NodeID ? 1 : 0;
    }
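
    // Illustrative only, with hypothetical IDs: given $scope.myID = 'AAA',
    // sorting nodes with IDs ['QRS', 'AAA', 'BBB'] by nodeCompare yields
    // ['AAA', 'BBB', 'QRS'], so the local node always heads the list.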

    $http.get('/rest/version').success(function (data) {
        $scope.version = data;
    });
    $http.get('/rest/system').success(function (data) {
        $scope.system = data;
        $scope.myID = data.myID;

        $http.get('/rest/config').success(function (data) {
            $scope.config = data;
            $scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');

            var nodes = $scope.config.Repositories[0].Nodes;
            nodes.sort(nodeCompare);
            $scope.nodes = nodes;
        });
        $http.get('/rest/config/sync').success(function (data) {
            $scope.configInSync = data.configInSync;
        });
    });

    $scope.refresh = function () {
        $http.get('/rest/system').success(function (data) {
            $scope.system = data;
        });
        $http.get('/rest/model').success(function (data) {
            $scope.model = data;
            modelGetSucceeded();
        }).error(function () {
            modelGetFailed();
        });
        $http.get('/rest/connections').success(function (data) {
            var now = Date.now(),
                td = (now - prevDate) / 1000,
                id;

            prevDate = now;
            $scope.inbps = 0;
            $scope.outbps = 0;

            for (id in data) {
                if (!data.hasOwnProperty(id)) {
                    continue;
                }
                try {
                    // Per-connection rate in bits/s: delta of the byte
                    // counters since the last poll, times 8, over elapsed
                    // seconds; clamped so it can never go negative.
                    data[id].inbps = Math.max(0, 8 * (data[id].InBytesTotal - $scope.connections[id].InBytesTotal) / td);
                    data[id].outbps = Math.max(0, 8 * (data[id].OutBytesTotal - $scope.connections[id].OutBytesTotal) / td);
                } catch (e) {
                    // No previous sample for this connection yet.
                    data[id].inbps = 0;
                    data[id].outbps = 0;
                }
                $scope.inbps += data[id].inbps;
                $scope.outbps += data[id].outbps;
            }
            $scope.connections = data;
        });
        $http.get('/rest/need').success(function (data) {
            var i, name;
            for (i = 0; i < data.length; i++) {
                name = data[i].Name.split('/');
                data[i].ShortName = name[name.length - 1];
            }
            data.sort(function (a, b) {
                if (a.ShortName < b.ShortName) {
                    return -1;
                }
                if (a.ShortName > b.ShortName) {
                    return 1;
                }
                return 0;
            });
            $scope.need = data;
        });
        $http.get('/rest/errors').success(function (data) {
            $scope.errors = data;
        });
    };
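
    // Worked example of the rate arithmetic in the connections handler: if a
    // connection's InBytesTotal grew by 1250000 bytes between polls taken
    // 10 s apart, inbps = 8 * 1250000 / 10 = 1000000 bits/s (1 Mbps). The
    // Math.max(0, ...) clamp keeps the rate at zero when a counter restarts
    // lower, for example after a reconnect.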

    $scope.nodeStatus = function (nodeCfg) {
        var conn = $scope.connections[nodeCfg.NodeID];
        if (conn) {
            if (conn.Completion === 100) {
                return 'In Sync';
            } else {
                return 'Syncing (' + conn.Completion + '%)';
            }
        }

        return 'Disconnected';
    };

    $scope.nodeIcon = function (nodeCfg) {
        var conn = $scope.connections[nodeCfg.NodeID];
        if (conn) {
            if (conn.Completion === 100) {
                return 'ok';
            } else {
                return 'refresh';
            }
        }

        return 'minus';
    };

    $scope.nodeClass = function (nodeCfg) {
        var conn = $scope.connections[nodeCfg.NodeID];
        if (conn) {
            if (conn.Completion === 100) {
                return 'success';
            } else {
                return 'primary';
            }
        }

        return 'info';
    };

    $scope.nodeAddr = function (nodeCfg) {
        var conn = $scope.connections[nodeCfg.NodeID];
        if (conn) {
            return conn.Address;
        }
        return '(unknown address)';
    };

    $scope.nodeCompletion = function (nodeCfg) {
        var conn = $scope.connections[nodeCfg.NodeID];
        if (conn) {
            return conn.Completion + '%';
        }
        return '';
    };

    $scope.nodeVer = function (nodeCfg) {
        if (nodeCfg.NodeID === $scope.myID) {
            return $scope.version;
        }
        var conn = $scope.connections[nodeCfg.NodeID];
        if (conn) {
            return conn.ClientVersion;
        }
        return '(unknown version)';
    };

    $scope.nodeName = function (nodeCfg) {
        if (nodeCfg.Name) {
            return nodeCfg.Name;
        }
        return nodeCfg.NodeID.substr(0, 6);
    };
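
    // nodeStatus, nodeIcon, and nodeClass all derive their answer from the
    // same Completion check. A hypothetical consolidation (not part of this
    // commit) that keeps the three presentations from ever disagreeing:
    function nodeState(conn) {
        if (!conn) {
            return {status: 'Disconnected', icon: 'minus', cls: 'info'};
        }
        if (conn.Completion === 100) {
            return {status: 'In Sync', icon: 'ok', cls: 'success'};
        }
        return {status: 'Syncing (' + conn.Completion + '%)', icon: 'refresh', cls: 'primary'};
    }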

    $scope.saveSettings = function () {
        $scope.configInSync = false;
        $scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) { return x.trim(); });
        $http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
        $('#settingsTable').collapse('hide');
    };

    $scope.restart = function () {
        $http.post('/rest/restart');
        $scope.configInSync = true;
    };
|
||||
$scope.editNode = function (nodeCfg) {
|
||||
$scope.currentNode = nodeCfg;
|
||||
$scope.editingExisting = true;
|
||||
$scope.currentNode.AddressesStr = nodeCfg.Addresses.join(', ');
|
||||
$('#editNode').modal({backdrop: 'static', keyboard: false});
|
||||
};
|
||||
|
||||
$scope.addNode = function () {
|
||||
$scope.currentNode = {NodeID: '', AddressesStr: 'dynamic'};
|
||||
$scope.editingExisting = false;
|
||||
$('#editNode').modal({backdrop: 'static', keyboard: false});
|
||||
};
|
||||
|
||||
$scope.deleteNode = function () {
|
||||
var newNodes = [], i;
|
||||
|
||||
$('#editNode').modal('hide');
|
||||
if (!$scope.editingExisting) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < $scope.nodes.length; i++) {
|
||||
if ($scope.nodes[i].NodeID !== $scope.currentNode.NodeID) {
|
||||
newNodes.push($scope.nodes[i]);
|
||||
}
|
||||
}
|
||||
|
||||
$scope.nodes = newNodes;
|
||||
$scope.config.Repositories[0].Nodes = newNodes;
|
||||
|
||||
$scope.configInSync = false;
|
||||
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
|
||||
};
|
||||
|
||||
$scope.saveNode = function () {
|
||||
var nodeCfg, done, i;
|
||||
|
||||
$scope.configInSync = false;
|
||||
$('#editNode').modal('hide');
|
||||
nodeCfg = $scope.currentNode;
|
||||
nodeCfg.Addresses = nodeCfg.AddressesStr.split(',').map(function (x) { return x.trim(); });
|
||||
|
||||
done = false;
|
||||
for (i = 0; i < $scope.nodes.length; i++) {
|
||||
if ($scope.nodes[i].NodeID === nodeCfg.NodeID) {
|
||||
$scope.nodes[i] = nodeCfg;
|
||||
done = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!done) {
|
||||
$scope.nodes.push(nodeCfg);
|
||||
}
|
||||
|
||||
$scope.nodes.sort(nodeCompare);
|
||||
$scope.config.Repositories[0].Nodes = $scope.nodes;
|
||||
|
||||
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
|
||||
};
|
||||
|
||||
$scope.otherNodes = function () {
|
||||
var nodes = [], i, n;
|
||||
|
||||
for (i = 0; i < $scope.nodes.length; i++) {
|
||||
n = $scope.nodes[i];
|
||||
if (n.NodeID !== $scope.myID) {
|
||||
nodes.push(n);
|
||||
}
|
||||
}
|
||||
return nodes;
|
||||
};
|
||||
|
||||
$scope.thisNode = function () {
|
||||
var i, n;
|
||||
|
||||
for (i = 0; i < $scope.nodes.length; i++) {
|
||||
n = $scope.nodes[i];
|
||||
if (n.NodeID === $scope.myID) {
|
||||
return [n];
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
$scope.errorList = function () {
|
||||
var errors = [];
|
||||
for (var i = 0; i < $scope.errors.length; i++) {
|
||||
var e = $scope.errors[i];
|
||||
if (e.Time > $scope.seenError) {
|
||||
errors.push(e);
|
||||
}
|
||||
}
|
||||
return errors;
|
||||
};
|
||||
|
||||
$scope.clearErrors = function () {
|
||||
$scope.seenError = $scope.errors[$scope.errors.length - 1].Time;
|
||||
};
|
||||
|
||||
$scope.friendlyNodes = function (str) {
|
||||
for (var i = 0; i < $scope.nodes.length; i++) {
|
||||
var cfg = $scope.nodes[i];
|
||||
str = str.replace(cfg.NodeID, $scope.nodeName(cfg));
|
||||
}
|
||||
return str;
|
||||
};
|
||||
|
||||
$scope.refresh();
|
||||
setInterval($scope.refresh, 10000);
|
||||
});
|
||||
|
||||
function decimals(val, num) {
|
||||
var digits, decs;
|
||||
|
||||
if (val === 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
digits = Math.floor(Math.log(Math.abs(val)) / Math.log(10));
|
||||
decs = Math.max(0, num - digits);
|
||||
return decs;
|
||||
}
|
||||
|
||||
syncthing.filter('natural', function () {
|
||||
return function (input, valid) {
|
||||
return input.toFixed(decimals(input, valid));
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.filter('binary', function () {
|
||||
return function (input) {
|
||||
if (input === undefined) {
|
||||
return '0 ';
|
||||
}
|
||||
if (input > 1024 * 1024 * 1024) {
|
||||
input /= 1024 * 1024 * 1024;
|
||||
return input.toFixed(decimals(input, 2)) + ' Gi';
|
||||
}
|
||||
if (input > 1024 * 1024) {
|
||||
input /= 1024 * 1024;
|
||||
return input.toFixed(decimals(input, 2)) + ' Mi';
|
||||
}
|
||||
if (input > 1024) {
|
||||
input /= 1024;
|
||||
return input.toFixed(decimals(input, 2)) + ' Ki';
|
||||
}
|
||||
return Math.round(input) + ' ';
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.filter('metric', function () {
|
||||
return function (input) {
|
||||
if (input === undefined) {
|
||||
return '0 ';
|
||||
}
|
||||
if (input > 1000 * 1000 * 1000) {
|
||||
input /= 1000 * 1000 * 1000;
|
||||
return input.toFixed(decimals(input, 2)) + ' G';
|
||||
}
|
||||
if (input > 1000 * 1000) {
|
||||
input /= 1000 * 1000;
|
||||
return input.toFixed(decimals(input, 2)) + ' M';
|
||||
}
|
||||
if (input > 1000) {
|
||||
input /= 1000;
|
||||
return input.toFixed(decimals(input, 2)) + ' k';
|
||||
}
|
||||
return Math.round(input) + ' ';
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.filter('short', function () {
|
||||
return function (input) {
|
||||
return input.substr(0, 6);
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.filter('alwaysNumber', function () {
|
||||
return function (input) {
|
||||
if (input === undefined) {
|
||||
return 0;
|
||||
}
|
||||
return input;
|
||||
};
|
||||
});
|
||||
|
||||
syncthing.directive('optionEditor', function () {
|
||||
return {
|
||||
restrict: 'C',
|
||||
replace: true,
|
||||
transclude: true,
|
||||
scope: {
|
||||
setting: '=setting',
|
||||
},
|
||||
template: '<input type="text" ng-model="config.Options[setting.id]"></input>',
|
||||
};
|
||||
});
|
7 gui/bootstrap/css/bootstrap-theme.min.css vendored Executable file
File diff suppressed because one or more lines are too long

7 gui/bootstrap/css/bootstrap.min.css vendored Executable file
File diff suppressed because one or more lines are too long

BIN gui/bootstrap/fonts/glyphicons-halflings-regular.eot Executable file
Binary file not shown.

BIN gui/bootstrap/fonts/glyphicons-halflings-regular.svg Executable file
Binary file not shown. (After: 61 KiB)

BIN gui/bootstrap/fonts/glyphicons-halflings-regular.ttf Executable file
Binary file not shown.

BIN gui/bootstrap/fonts/glyphicons-halflings-regular.woff Executable file
Binary file not shown.

7 gui/bootstrap/js/bootstrap.min.js vendored Executable file
File diff suppressed because one or more lines are too long

BIN gui/favicon.png Normal file
Binary file not shown. (After: 6.8 KiB)

280 gui/index.html Normal file
@@ -0,0 +1,280 @@
<!DOCTYPE html>
<html lang="en" ng-app="syncthing">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="">
<link rel="shortcut icon" href="favicon.png">

<title>syncthing</title>
<link href="bootstrap/css/bootstrap.min.css" rel="stylesheet">
<style type="text/css">

body {
	padding-top: 70px;
	padding-bottom: 70px;
}

.text-monospace {
	font-family: monospace;
}

.table-condensed>thead>tr>th, .table-condensed>tbody>tr>th, .table-condensed>tfoot>tr>th, .table-condensed>thead>tr>td, .table-condensed>tbody>tr>td, .table-condensed>tfoot>tr>td {
	border-top: none;
}

thead tr th {
	text-align: center;
}

.logo {
	margin: 0;
	padding: 0;
	top: -5px;
	position: relative;
}

</style>
</head>

<body ng-controller="SyncthingCtrl">
<div class="navbar navbar-fixed-top navbar-default">
	<div class="container">
		<a class="navbar-brand"><img class="logo" src="st-logo-128.png" width="32" height="32"> Syncthing</a>
		<div ng-if="!configInSync">
			<form class="navbar-form navbar-right">
				<button type="button" class="btn btn-primary" ng-click="restart()">Restart Now</button>
			</form>
			<p class="navbar-text navbar-right">The configuration has been changed but not activated. Syncthing must restart to activate the new configuration.</p>
		</div>
	</div>
</div>

<div class="container">
	<div class="row">
		<div class="col-md-12">
			<div ng-if="errorList().length > 0" class="alert alert-warning">
				<p ng-repeat="err in errorList()"><small>{{err.Time | date:"hh:mm:ss.sss"}}:</small> {{friendlyNodes(err.Error)}}</p>
				<button type="button" class="pull-right btn btn-warning" ng-click="clearErrors()">OK</button>
				<div class="clearfix"></div>
			</div>

			<div class="panel panel-info">
				<div class="panel-heading"><h3 class="panel-title">Cluster</h3></div>
				<table class="table table-condensed">
					<tbody>
						<!-- myself -->
						<tr class="text-muted" ng-repeat="nodeCfg in thisNode()">
							<td style="width:12%">
								<span class="label label-default">
									<span class="glyphicon glyphicon-ok"></span> This node
								</span>
							</td>
							<td style="width:10%">
								<span class="text-monospace">{{nodeName(nodeCfg)}}</span>
							</td>
							<td style="width:20%">{{version}}</td>
							<td style="width:25%">(this node)</td>
							<td style="width:9%" class="text-right">
								{{inbps | metric}}bps
								<span class="text-muted glyphicon glyphicon-chevron-down"></span>
							</td>
							<td style="width:9%" class="text-right">
								{{outbps | metric}}bps
								<span class="text-muted glyphicon glyphicon-chevron-up"></span>
							</td>
							<td style="width:7%" class="text-right">
								<button type="button" ng-click="editNode(nodeCfg)" class="btn btn-default btn-xs"><span class="glyphicon glyphicon-pencil"></span> Edit</button>
							</td>
						</tr>
						<!-- all other nodes -->
						<tr ng-repeat="nodeCfg in otherNodes()">
							<td>
								<span class="label label-{{nodeClass(nodeCfg)}}">
									<span class="glyphicon glyphicon-{{nodeIcon(nodeCfg)}}"></span> {{nodeStatus(nodeCfg)}}
								</span>
							</td>
							<td>
								<span class="text-monospace">{{nodeName(nodeCfg)}}</span>
							</td>
							<td>{{nodeVer(nodeCfg)}}</td>
							<td>{{nodeAddr(nodeCfg)}}</td>
							<td class="text-right">
								<abbr title="{{connections[nodeCfg.NodeID].InBytesTotal | binary}}B">{{connections[nodeCfg.NodeID].inbps | metric}}bps</abbr>
								<span class="text-muted glyphicon glyphicon-chevron-down"></span>
							</td>
							<td class="text-right">
								<abbr title="{{connections[nodeCfg.NodeID].OutBytesTotal | binary}}B">{{connections[nodeCfg.NodeID].outbps | metric}}bps</abbr>
								<span class="text-muted glyphicon glyphicon-chevron-up"></span>
							</td>
							<td class="text-right">
								<button type="button" ng-click="editNode(nodeCfg)" class="btn btn-default btn-xs"><span class="glyphicon glyphicon-pencil"></span> Edit</button>
							</td>
						</tr>
						<tr>
							<td></td>
							<td></td>
							<td></td>
							<td></td>
							<td></td>
							<td></td>
							<td class="text-right">
								<button type="button" class="btn btn-default btn-xs" ng-click="addNode()"><span class="glyphicon glyphicon-plus"></span> Add</button>
							</td>
						</tr>
					</tbody>
				</table>
			</div>
		</div>
	</div>

	<div class="row">
		<div class="col-md-6">
			<div class="panel panel-info">
				<div class="panel-heading"><h3 class="panel-title">Repository</h3></div>
				<div class="panel-body">
					<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
						<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>

					<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
						<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
				</div>
			</div>
		</div>
		<div class="col-md-6">
			<div class="panel" ng-class="{'panel-success': model.needBytes === 0, 'panel-primary': model.needBytes !== 0}">
				<div class="panel-heading"><h3 class="panel-title">Synchronization</h3></div>
				<div class="panel-body">
					<div class="progress">
						<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
							ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
							style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
							{{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
						</div>
					</div>
					<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
				</div>
			</div>
		</div>
	</div>

	<div class="row">
		<div class="col-md-6">
			<div class="panel panel-info">
				<div class="panel-heading"><h3 class="panel-title"><a href="" data-toggle="collapse" data-target="#system">System</a></h3></div>
				<div id="system" class="panel-collapse collapse">
					<div class="panel-body">
						<p>{{system.sys | binary}}B RAM allocated, {{system.alloc | binary}}B in use</p>
						<p>{{system.cpuPercent | alwaysNumber | natural:1}}% CPU, {{system.goroutines | alwaysNumber}} goroutines</p>
					</div>
				</div>
			</div>
		</div>
		<div class="col-md-6">
			<div class="panel panel-info">
				<div class="panel-heading"><h3 class="panel-title"><a href="" data-toggle="collapse" data-target="#settingsTable">Settings</a></h3></div>
				<div id="settingsTable" class="panel-collapse collapse">
					<div class="panel-body">
						<form role="form">
							<div class="form-group" ng-repeat="setting in settings">
								<div ng-if="setting.type == 'text' || setting.type == 'number'">
									<label for="{{setting.id}}">{{setting.descr}}</label>
									<input id="{{setting.id}}" class="form-control" type="{{setting.type}}" ng-model="config.Options[setting.id]">
								</div>
								<div class="checkbox" ng-if="setting.type == 'bool'">
									<label>
										{{setting.descr}} <input id="{{setting.id}}" type="checkbox" ng-model="config.Options[setting.id]">
									</label>
								</div>
							</div>
						</form>
					</div>
					<div class="panel-footer">
						<button type="button" class="btn btn-sm btn-default" ng-click="saveSettings()">Save</button>
						<small><span class="text-muted">Changes take effect when restarting syncthing.</span></small>
					</div>
				</div>
			</div>
		</div>
	</div>
</div>

<div class="navbar navbar-default navbar-fixed-bottom">
	<div class="container">
		<p class="navbar-text">{{version}}</p>
		<ul class="nav navbar-nav navbar-right">
			<li><a class="navbar-link" href="https://github.com/calmh/syncthing/releases">Latest Release</a></li>
			<li><a class="navbar-link" href="https://github.com/calmh/syncthing/wiki">Documentation</a></li>
			<li><a class="navbar-link" href="https://github.com/calmh/syncthing/issues">Bugs</a></li>
			<li><a class="navbar-link" href="https://github.com/calmh/syncthing">Source Code</a></li>
		</ul>
	</div>
</div>

<div id="networkError" class="modal fade">
	<div class="modal-dialog">
		<div class="modal-content">
			<div class="modal-header alert alert-danger">
				<h4 class="modal-title">
					<span class="glyphicon glyphicon-exclamation-sign"></span>
					Connection Error
				</h4>
			</div>
			<div class="modal-body">
				<p>
					Syncthing seems to be down, or there is a problem with your Internet connection.
					Retrying…
				</p>
			</div>
		</div>
	</div>
</div>

<div id="editNode" class="modal fade">
	<div class="modal-dialog modal-lg">
		<div class="modal-content">
			<div class="modal-header">
				<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
				<h4 class="modal-title">Edit Node</h4>
			</div>
			<div class="modal-body">
				<form role="form">
					<div class="form-group">
						<label for="nodeID">Node ID</label>
						<input placeholder="YUFJOUDPORCMA..." ng-disabled="editingExisting" id="nodeID" class="form-control" type="text" ng-model="currentNode.NodeID">
						<p class="help-block">The node ID can be found in the logs or in the "Add Node" dialog on the other node.</p>
					</div>
					<div class="form-group">
						<label for="name">Name</label>
						<input placeholder="Home Server" id="name" class="form-control" type="text" ng-model="currentNode.Name">
						<p class="help-block">Shown instead of Node ID in the cluster status.</p>
					</div>
					<div class="form-group">
						<label for="addresses">Addresses</label>
						<input placeholder="dynamic" ng-disabled="currentNode.NodeID == myID" id="addresses" class="form-control" type="text" ng-model="currentNode.AddressesStr">
						<p class="help-block">Enter comma-separated <span class="text-monospace">ip:port</span> addresses or <span class="text-monospace">dynamic</span> to perform automatic discovery of the address.</p>
					</div>
				</form>
				<div ng-show="!editingExisting">
					When adding a new node, keep in mind that <em>this node</em> must be added on the other side too. The Node ID of this node is:
					<pre>{{myID}}</pre>
				</div>
			</div>
			<div class="modal-footer">
				<button type="button" class="btn btn-primary" ng-click="saveNode()">Save</button>
				<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
				<button ng-if="editingExisting" type="button" class="btn btn-danger pull-left" ng-click="deleteNode()">Delete</button>
			</div>
		</div>
	</div>
</div>

<script src="angular.min.js"></script>
<script src="jquery-2.0.3.min.js"></script>
<script src="bootstrap/js/bootstrap.min.js"></script>
<script src="app.js"></script>
</body>
</html>
6 gui/jquery-2.0.3.min.js vendored Normal file
File diff suppressed because one or more lines are too long

BIN gui/st-logo-128.png Normal file
Binary file not shown. (After: 17 KiB)

5 integration/.gitignore vendored Normal file
@@ -0,0 +1,5 @@
files-*
conf-*
md5-*
genfiles
md5r

42 integration/genfiles.go Normal file
@@ -0,0 +1,42 @@
package main

import (
	"crypto/rand"
	"flag"
	"fmt"
	"io/ioutil"
	mr "math/rand"
	"os"
	"path"
)

func name() string {
	var b [16]byte
	rand.Reader.Read(b[:])
	return fmt.Sprintf("%x", b[:])
}

func main() {
	var files int
	var maxexp int

	flag.IntVar(&files, "files", 1000, "Number of files")
	flag.IntVar(&maxexp, "maxexp", 20, "Maximum file size (max = 2^n + 128*1024 B)")
	flag.Parse()

	for i := 0; i < files; i++ {
		n := name()
		p0 := path.Join(string(n[0]), n[0:2])
		os.MkdirAll(p0, 0755)
		s := 1 << uint(mr.Intn(maxexp))
		a := 128 * 1024
		if a > s {
			a = s
		}
		s += mr.Intn(a)
		b := make([]byte, s)
		rand.Reader.Read(b)
		p1 := path.Join(p0, n)
		ioutil.WriteFile(p1, b, 0644)
	}
}
59 integration/md5r.go Normal file
@@ -0,0 +1,59 @@
package main

import (
	"crypto/md5"
	"flag"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

func main() {
	flag.Parse()
	args := flag.Args()

	if len(args) == 0 {
		args = []string{"."}
	}

	for _, path := range args {
		err := filepath.Walk(path, walker)

		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}

func walker(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}

	if !info.IsDir() {
		sum, err := md5file(path)
		if err != nil {
			return err
		}
		fmt.Printf("%s %s\n", sum, path)
	}

	return nil
}

func md5file(fname string) (hash string, err error) {
	f, err := os.Open(fname)
	if err != nil {
		return
	}
	defer f.Close()

	h := md5.New()
	io.Copy(h, f)
	hb := h.Sum(nil)
	hash = fmt.Sprintf("%x", hb)

	return
}
74 integration/test.sh Executable file
@@ -0,0 +1,74 @@
#!/bin/bash

rm -rf files-* conf-* md5-*

extraopts=""
p=$(pwd)

go build genfiles.go
go build md5r.go

echo "Setting up (keys)..."
i1=$(syncthing --home conf-1 2>&1 | awk '/My ID/ {print $7}')
echo $i1
i2=$(syncthing --home conf-2 2>&1 | awk '/My ID/ {print $7}')
echo $i2
i3=$(syncthing --home conf-3 2>&1 | awk '/My ID/ {print $7}')
echo $i3

echo "Setting up (files)..."
for i in 1 2 3 ; do
	cat >conf-$i/syncthing.ini <<EOT
[repository]
dir = $p/files-$i

[nodes]
$i1 = 127.0.0.1:22001
$i2 = 127.0.0.1:22002
$i3 = 127.0.0.1:22003

[settings]
gui-enabled = false
listen-address = :2200$i
EOT

	mkdir files-$i
	pushd files-$i >/dev/null
	../genfiles -maxexp 21 -files 400
	touch empty-$i
	../md5r > ../md5-$i
	popd >/dev/null
done

echo "Starting..."
for i in 1 2 3 ; do
	sleep 1
	syncthing --home conf-$i $extraopts &
done

cat md5-* | sort > md5-tot
while true ; do
	read
	echo Verifying...

	conv=0
	for i in 1 2 3 ; do
		pushd files-$i >/dev/null
		../md5r | sort > ../md5-$i
		popd >/dev/null
		if ! cmp md5-$i md5-tot >/dev/null ; then
			echo $i unconverged
		else
			conv=$((conv + 1))
			echo $i converged
		fi
	done

	if [[ $conv == 3 ]] ; then
		kill %1
		kill %2
		kill %3
		exit
	fi
done
352 main.go
@@ -1,352 +0,0 @@
package main

import (
	"crypto/sha1"
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"net/http"
	_ "net/http/pprof"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/calmh/ini"
	"github.com/calmh/syncthing/discover"
	"github.com/calmh/syncthing/protocol"
	docopt "github.com/docopt/docopt.go"
)

const (
	confDirName  = ".syncthing"
	confFileName = "syncthing.ini"
	usage        = `Usage:
  syncthing [options]

Options:
  -l <addr>        Listening address [default: :22000]
  -p <addr>        Enable HTTP profiler on addr
  --home <path>    Home directory
  --delete         Delete files that were deleted on a peer node
  --ro             Local repository is read only
  --scan-intv <s>  Repository scan interval, in seconds [default: 60]
  --conn-intv <s>  Node reconnect interval, in seconds [default: 15]
  --no-stats       Don't print transfer statistics

Help Options:
  -h, --help       Show this help
  --version        Show version

Debug Options:
  --trace-file     Trace file operations
  --trace-net      Trace network operations
  --trace-idx      Trace sent indexes
`
)

var (
	config    ini.Config
	nodeAddrs = make(map[string][]string)
)

// Options
var (
	confDir    = path.Join(getHomeDir(), confDirName)
	addr       string
	prof       string
	readOnly   bool
	scanIntv   int
	connIntv   int
	traceNet   bool
	traceFile  bool
	traceIdx   bool
	printStats bool
	doDelete   bool
)

func main() {
	// Useful for debugging; to be adjusted.
	log.SetFlags(log.Ltime | log.Lshortfile)

	arguments, _ := docopt.Parse(usage, nil, true, "syncthing 0.1", false)

	addr = arguments["-l"].(string)
	prof, _ = arguments["-p"].(string)
	readOnly, _ = arguments["--ro"].(bool)

	if arguments["--home"] != nil {
		confDir, _ = arguments["--home"].(string)
	}

	scanIntv, _ = strconv.Atoi(arguments["--scan-intv"].(string))
	if scanIntv == 0 {
		fatalln("Invalid --scan-intv")
	}

	connIntv, _ = strconv.Atoi(arguments["--conn-intv"].(string))
	if connIntv == 0 {
		fatalln("Invalid --conn-intv")
	}

	doDelete = arguments["--delete"].(bool)
	traceFile = arguments["--trace-file"].(bool)
	traceNet = arguments["--trace-net"].(bool)
	traceIdx = arguments["--trace-idx"].(bool)
	printStats = !arguments["--no-stats"].(bool)

	// Ensure that our home directory exists and that we have a certificate and key.

	ensureDir(confDir)
	cert, err := loadCert(confDir)
	if err != nil {
		newCertificate(confDir)
		cert, err = loadCert(confDir)
		fatalErr(err)
	}

	myID := string(certId(cert.Certificate[0]))
	infoln("My ID:", myID)

	if prof != "" {
		okln("Profiler listening on", prof)
		go func() {
			http.ListenAndServe(prof, nil)
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	cfg := &tls.Config{
		ClientAuth:         tls.RequestClientCert,
		ServerName:         "syncthing",
		NextProtos:         []string{"bep/1.0"},
		InsecureSkipVerify: true,
		Certificates:       []tls.Certificate{cert},
	}

	// Load the configuration file, if it exists.

	cf, err := os.Open(path.Join(confDir, confFileName))
	if err != nil {
		fatalln("No config file")
		config = ini.Config{}
	}
	config = ini.Parse(cf)
	cf.Close()

	var dir = config.Get("repository", "dir")

	// Create a map of desired node connections based on the configuration file
	// directives.

	for nodeID, addrs := range config.OptionMap("nodes") {
		addrs := strings.Fields(addrs)
		nodeAddrs[nodeID] = addrs
	}

	m := NewModel(dir)

	// Walk the repository and update the local model before establishing any
	// connections to other nodes.

	infoln("Initial repository scan in progress")
	loadIndex(m)
	updateLocalModel(m)

	// Routine to listen for incoming connections
	infoln("Listening for incoming connections")
	go listen(myID, addr, m, cfg)

	// Routine to connect out to configured nodes
	infoln("Attempting to connect to other nodes")
	go connect(myID, addr, nodeAddrs, m, cfg)

	// Routine to pull blocks from other nodes to synchronize the local
	// repository. Does not run when we are in read only (publish only) mode.
	if !readOnly {
		infoln("Cleaning out incomplete synchronizations")
		CleanTempFiles(dir)
		okln("Ready to synchronize")
		m.Start()
	}

	// Periodically scan the repository and update the local model.
	// XXX: Should use some fsnotify mechanism.
	go func() {
		for {
			time.Sleep(time.Duration(scanIntv) * time.Second)
			updateLocalModel(m)
		}
	}()

	select {}
}

func listen(myID string, addr string, m *Model, cfg *tls.Config) {
	l, err := tls.Listen("tcp", addr, cfg)
	fatalErr(err)

listen:
	for {
		conn, err := l.Accept()
		if err != nil {
			warnln(err)
			continue
		}

		if traceNet {
			debugln("NET: Connect from", conn.RemoteAddr())
		}

		tc := conn.(*tls.Conn)
		err = tc.Handshake()
		if err != nil {
			warnln(err)
			tc.Close()
			continue
		}

		remoteID := certId(tc.ConnectionState().PeerCertificates[0].Raw)

		if remoteID == myID {
			warnf("Connect from myself (%s) - should not happen", remoteID)
			conn.Close()
			continue
		}

		if m.ConnectedTo(remoteID) {
			warnf("Connect from connected node (%s)", remoteID)
		}

		for nodeID := range nodeAddrs {
			if nodeID == remoteID {
				nc := protocol.NewConnection(remoteID, conn, conn, m)
				m.AddNode(nc)
				okln("Connected to nodeID", remoteID, "(in)")
				continue listen
			}
		}

		warnln("Connect from unknown node", remoteID)
		conn.Close()
	}
}

func connect(myID string, addr string, nodeAddrs map[string][]string, m *Model, cfg *tls.Config) {
	_, portstr, err := net.SplitHostPort(addr)
	fatalErr(err)
	port, _ := strconv.Atoi(portstr)

	infoln("Starting local discovery")
	disc, err := discover.NewDiscoverer(myID, port)
	if err != nil {
		warnln("No local discovery possible")
	}

	for {
	nextNode:
		for nodeID, addrs := range nodeAddrs {
			if nodeID == myID {
				continue
			}
			if m.ConnectedTo(nodeID) {
				continue
			}
			for _, addr := range addrs {
				if addr == "dynamic" {
					var ok bool
					if disc != nil {
						addr, ok = disc.Lookup(nodeID)
					}
					if !ok {
						continue
					}
				}

				if traceNet {
					debugln("NET: Dial", nodeID, addr)
				}
				conn, err := tls.Dial("tcp", addr, cfg)
				if err != nil {
					if traceNet {
						debugln("NET:", err)
					}
					continue
				}

				remoteID := certId(conn.ConnectionState().PeerCertificates[0].Raw)
				if remoteID != nodeID {
					warnln("Unexpected nodeID", remoteID, "!=", nodeID)
					conn.Close()
					continue
				}

				nc := protocol.NewConnection(nodeID, conn, conn, m)
				okln("Connected to node", remoteID, "(out)")
				m.AddNode(nc)
				if traceNet {
					t0 := time.Now()
					nc.Ping()
					timing("NET: Ping reply", t0)
				}
				continue nextNode
			}
		}

		time.Sleep(time.Duration(connIntv) * time.Second)
	}
}

func updateLocalModel(m *Model) {
	files := Walk(m.Dir(), m)
	m.ReplaceLocal(files)
	saveIndex(m)
}

func saveIndex(m *Model) {
	fname := fmt.Sprintf("%x.idx", sha1.Sum([]byte(m.Dir())))
	idxf, err := os.Create(path.Join(confDir, fname))
	if err != nil {
		return
	}
	protocol.WriteIndex(idxf, m.ProtocolIndex())
	idxf.Close()
}

func loadIndex(m *Model) {
	fname := fmt.Sprintf("%x.idx", sha1.Sum([]byte(m.Dir())))
	idxf, err := os.Open(path.Join(confDir, fname))
	if err != nil {
		return
	}
	defer idxf.Close()

	idx, err := protocol.ReadIndex(idxf)
	if err != nil {
		return
	}
	m.SeedIndex(idx)
}

func ensureDir(dir string) {
	fi, err := os.Stat(dir)
	if os.IsNotExist(err) {
		err := os.MkdirAll(dir, 0700)
		fatalErr(err)
	} else if fi.Mode()&0077 != 0 {
		err := os.Chmod(dir, 0700)
		fatalErr(err)
	}
}

func getHomeDir() string {
	home := os.Getenv("HOME")
	if home == "" {
		fatalln("No home directory?")
	}
	return home
}
373
model.go
373
model.go
@@ -1,373 +0,0 @@
|
package main

/*

Locking
=======

The model has read and write locks. These must be acquired as appropriate by
public methods. To prevent deadlock situations, private methods should never
acquire locks, but document what locks they require.

TODO(jb): Keep global and per node transfer and performance statistics.

*/

import (
	"os"
	"path"
	"sync"
	"time"

	"github.com/calmh/syncthing/buffers"
	"github.com/calmh/syncthing/protocol"
)

type Model struct {
	sync.RWMutex
	dir     string
	updated int64
	global  map[string]File // the latest version of each file as it exists in the cluster
	local   map[string]File // the files we currently have locally on disk
	remote  map[string]map[string]File
	need    map[string]bool // the files we need to update
	nodes   map[string]*protocol.Connection
}

const (
	RemoteFetchers = 4
	FlagDeleted    = 1 << 12
)

func NewModel(dir string) *Model {
	m := &Model{
		dir:    dir,
		global: make(map[string]File),
		local:  make(map[string]File),
		remote: make(map[string]map[string]File),
		need:   make(map[string]bool),
		nodes:  make(map[string]*protocol.Connection),
	}

	return m
}

func (m *Model) Start() {
	go m.puller()
}

func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
	m.Lock()
	defer m.Unlock()

	if traceNet {
		debugf("NET IDX(in): %s: %d files", nodeID, len(fs))
	}

	m.remote[nodeID] = make(map[string]File)
	for _, f := range fs {
		if f.Flags&FlagDeleted != 0 && !doDelete {
			// Files marked as deleted do not even enter the model
			continue
		}
		mf := File{
			Name:     f.Name,
			Flags:    f.Flags,
			Modified: int64(f.Modified),
		}
		var offset uint64
		for _, b := range f.Blocks {
			mf.Blocks = append(mf.Blocks, Block{
				Offset: offset,
				Length: b.Length,
				Hash:   b.Hash,
			})
			offset += uint64(b.Length)
		}
		m.remote[nodeID][f.Name] = mf
	}

	m.recomputeGlobal()
	m.recomputeNeed()
}

func (m *Model) SeedIndex(fs []protocol.FileInfo) {
	m.Lock()
	defer m.Unlock()

	m.local = make(map[string]File)
	for _, f := range fs {
		mf := File{
			Name:     f.Name,
			Flags:    f.Flags,
			Modified: int64(f.Modified),
		}
		var offset uint64
		for _, b := range f.Blocks {
			mf.Blocks = append(mf.Blocks, Block{
				Offset: offset,
				Length: b.Length,
				Hash:   b.Hash,
			})
			offset += uint64(b.Length)
		}
		m.local[f.Name] = mf
	}

	m.recomputeGlobal()
	m.recomputeNeed()
}

func (m *Model) Close(node string) {
	m.Lock()
	defer m.Unlock()

	if traceNet {
		debugf("NET CLOSE: %s", node)
	}

	delete(m.remote, node)
	delete(m.nodes, node)

	m.recomputeGlobal()
	m.recomputeNeed()
}

func (m *Model) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
	if traceNet && nodeID != "<local>" {
		debugf("NET REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
	}
	fn := path.Join(m.dir, name)
	fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	buf := buffers.Get(int(size))
	_, err = fd.ReadAt(buf, int64(offset))
	if err != nil {
		return nil, err
	}

	return buf, nil
}

func (m *Model) RequestGlobal(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
	m.RLock()
	nc := m.nodes[nodeID]
	m.RUnlock()

	if traceNet {
		debugf("NET REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
	}

	return nc.Request(name, offset, size, hash)
}

func (m *Model) ReplaceLocal(fs []File) {
	m.Lock()
	defer m.Unlock()

	var updated bool
	var newLocal = make(map[string]File)

	for _, f := range fs {
		newLocal[f.Name] = f
		if ef := m.local[f.Name]; ef.Modified != f.Modified {
			updated = true
		}
	}

	if m.markDeletedLocals(newLocal) {
		updated = true
	}

	if len(newLocal) != len(m.local) {
		updated = true
	}

	if updated {
		m.local = newLocal
		m.recomputeGlobal()
		m.recomputeNeed()
		m.updated = time.Now().Unix()
		go m.broadcastIndex()
	}
}

// Must be called with the read lock held.
func (m *Model) broadcastIndex() {
	idx := m.protocolIndex()
	for _, node := range m.nodes {
		if traceNet {
			debugf("NET IDX(out): %s: %d files", node.ID, len(idx))
		}
		node.Index(idx)
	}
}

// markDeletedLocals sets the deleted flag on files that have gone missing locally.
// Must be called with the write lock held.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
	// For every file in the existing local table, check if they are also
	// present in the new local table. If they are not, check that we already
	// had the newest version available according to the global table and if so
	// note the file as having been deleted.
	var updated bool
	for n, f := range m.local {
		if _, ok := newLocal[n]; !ok {
			if gf := m.global[n]; gf.Modified <= f.Modified {
				if f.Flags&FlagDeleted == 0 {
					f.Flags = FlagDeleted
					f.Modified = f.Modified + 1
					f.Blocks = nil
					updated = true
				}
				newLocal[n] = f
			}
		}
	}
	return updated
}

func (m *Model) UpdateLocal(f File) {
	m.Lock()
	defer m.Unlock()

	if ef, ok := m.local[f.Name]; !ok || ef.Modified != f.Modified {
		m.local[f.Name] = f
		m.recomputeGlobal()
		m.recomputeNeed()
		m.updated = time.Now().Unix()
		go m.broadcastIndex()
	}
}

func (m *Model) Dir() string {
	m.RLock()
	defer m.RUnlock()
	return m.dir
}

func (m *Model) HaveFiles() []File {
	m.RLock()
	defer m.RUnlock()
	var files []File
	for _, file := range m.local {
		files = append(files, file)
	}
	return files
}

func (m *Model) LocalFile(name string) (File, bool) {
	m.RLock()
	defer m.RUnlock()
	f, ok := m.local[name]
	return f, ok
}

func (m *Model) GlobalFile(name string) (File, bool) {
	m.RLock()
	defer m.RUnlock()
	f, ok := m.global[name]
	return f, ok
}

// Must be called with the write lock held.
func (m *Model) recomputeGlobal() {
	var newGlobal = make(map[string]File)

	for n, f := range m.local {
		newGlobal[n] = f
	}

	for _, fs := range m.remote {
		for n, f := range fs {
			if cf, ok := newGlobal[n]; !ok || cf.Modified < f.Modified {
				newGlobal[n] = f
			}
		}
	}

	m.global = newGlobal
}

// Must be called with the write lock held.
func (m *Model) recomputeNeed() {
	m.need = make(map[string]bool)
	for n, f := range m.global {
		hf, ok := m.local[n]
		if !ok || f.Modified > hf.Modified {
			m.need[n] = true
		}
	}
}

// Must be called with the read lock held.
func (m *Model) whoHas(name string) []string {
	var remote []string

	gf := m.global[name]
	for node, files := range m.remote {
		if file, ok := files[name]; ok && file.Modified == gf.Modified {
			remote = append(remote, node)
		}
	}

	return remote
}

func (m *Model) ConnectedTo(nodeID string) bool {
	m.RLock()
	defer m.RUnlock()
	_, ok := m.nodes[nodeID]
	return ok
}

func (m *Model) ProtocolIndex() []protocol.FileInfo {
	m.RLock()
	defer m.RUnlock()
	return m.protocolIndex()
}

// Must be called with the read lock held.
func (m *Model) protocolIndex() []protocol.FileInfo {
	var index []protocol.FileInfo
	for _, f := range m.local {
		mf := protocol.FileInfo{
			Name:     f.Name,
			Flags:    f.Flags,
			Modified: int64(f.Modified),
		}
		for _, b := range f.Blocks {
			mf.Blocks = append(mf.Blocks, protocol.BlockInfo{
				Length: b.Length,
				Hash:   b.Hash,
			})
		}
		if traceIdx {
			var flagComment string
			if mf.Flags&FlagDeleted != 0 {
				flagComment = " (deleted)"
			}
			debugf("IDX: %q m=%d f=%o%s (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, len(mf.Blocks))
		}
		index = append(index, mf)
	}
	return index
}

func (m *Model) AddNode(node *protocol.Connection) {
	m.Lock()
	m.nodes[node.ID] = node
	m.Unlock()
	m.RLock()
	idx := m.protocolIndex()
	m.RUnlock()

	if traceNet {
		debugf("NET IDX(out): %s: %d files", node.ID, len(idx))
	}
	node.Index(idx)
}
212 model_puller.go
@@ -1,212 +0,0 @@
package main

/*

Locking
=======

These methods are never called from the outside so don't follow the locking
policy in model.go. Instead, appropriate locks are acquired when needed and
held for as short a time as possible.

TODO(jb): Refactor this into smaller and cleaner pieces.

TODO(jb): Some kind of coalescing / rate limiting of index sending, so we don't
send hundreds of index updates in a short period of time when deleting files
etc.

*/

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path"
	"sync"
	"time"

	"github.com/calmh/syncthing/buffers"
)

func (m *Model) pullFile(name string) error {
	m.RLock()
	var localFile = m.local[name]
	var globalFile = m.global[name]
	m.RUnlock()

	filename := path.Join(m.dir, name)
	sdir := path.Dir(filename)

	_, err := os.Stat(sdir)
	if err != nil && os.IsNotExist(err) {
		os.MkdirAll(sdir, 0777)
	}

	tmpFilename := tempName(filename, globalFile.Modified)
	tmpFile, err := os.Create(tmpFilename)
	if err != nil {
		return err
	}
	defer tmpFile.Close()

	contentChan := make(chan content, 32)
	var applyDone sync.WaitGroup
	applyDone.Add(1)
	go func() {
		applyContent(contentChan, tmpFile)
		applyDone.Done()
	}()

	local, remote := localFile.Blocks.To(globalFile.Blocks)
	var fetchDone sync.WaitGroup

	// One local copy routine

	fetchDone.Add(1)
	go func() {
		for _, block := range local {
			data, err := m.Request("<local>", name, block.Offset, block.Length, block.Hash)
			if err != nil {
				break
			}
			contentChan <- content{
				offset: int64(block.Offset),
				data:   data,
			}
		}
		fetchDone.Done()
	}()

	// N remote copy routines

	m.RLock()
	var nodeIDs = m.whoHas(name)
	m.RUnlock()
	var remoteBlocksChan = make(chan Block)
	go func() {
		for _, block := range remote {
			remoteBlocksChan <- block
		}
		close(remoteBlocksChan)
	}()

	// XXX: This should be rewritten into something nicer that takes differing
	// peer performance into account.

	for i := 0; i < RemoteFetchers; i++ {
		for _, nodeID := range nodeIDs {
			fetchDone.Add(1)
			go func(nodeID string) {
				for block := range remoteBlocksChan {
					data, err := m.RequestGlobal(nodeID, name, block.Offset, block.Length, block.Hash)
					if err != nil {
						break
					}
					contentChan <- content{
						offset: int64(block.Offset),
						data:   data,
					}
				}
				fetchDone.Done()
			}(nodeID)
		}
	}

	fetchDone.Wait()
	close(contentChan)
	applyDone.Wait()

	rf, err := os.Open(tmpFilename)
	if err != nil {
		return err
	}
	defer rf.Close()

	writtenBlocks, err := Blocks(rf, BlockSize)
	if err != nil {
		return err
	}
	if len(writtenBlocks) != len(globalFile.Blocks) {
		return fmt.Errorf("%s: incorrect number of blocks after sync", tmpFilename)
	}
	for i := range writtenBlocks {
		if bytes.Compare(writtenBlocks[i].Hash, globalFile.Blocks[i].Hash) != 0 {
			return fmt.Errorf("%s: hash mismatch after sync\n  %v\n  %v", tmpFilename, writtenBlocks[i], globalFile.Blocks[i])
		}
	}

	err = os.Chtimes(tmpFilename, time.Unix(globalFile.Modified, 0), time.Unix(globalFile.Modified, 0))
	if err != nil {
		return err
	}

	err = os.Rename(tmpFilename, filename)
	if err != nil {
		return err
	}

	return nil
}

func (m *Model) puller() {
	for {
		for {
			var n string
			var f File

			m.RLock()
			for n = range m.need {
				break // just pick first name
			}
			if len(n) != 0 {
				f = m.global[n]
			}
			m.RUnlock()

			if len(n) == 0 {
				// we got nothing
				break
			}

			var err error
			if f.Flags&FlagDeleted == 0 {
				if traceFile {
					debugf("FILE: Pull %q", n)
				}
				err = m.pullFile(n)
			} else {
				if traceFile {
					debugf("FILE: Remove %q", n)
				}
				// Cheerfully ignore errors here
				_ = os.Remove(path.Join(m.dir, n))
			}
			if err == nil {
				m.UpdateLocal(f)
			} else {
				warnln(err)
			}
		}
		time.Sleep(time.Second)
	}
}

type content struct {
	offset int64
	data   []byte
}

func applyContent(cc <-chan content, dst io.WriterAt) error {
	var err error

	for c := range cc {
		_, err = dst.WriteAt(c.data, c.offset)
		if err != nil {
			return err
		}
		buffers.Put(c.data)
	}

	return nil
}
275 model_test.go
@@ -1,275 +0,0 @@
package main

import (
	"reflect"
	"testing"
	"time"

	"github.com/calmh/syncthing/protocol"
)

func TestNewModel(t *testing.T) {
	m := NewModel("foo")

	if m == nil {
		t.Fatalf("NewModel returned nil")
	}

	if len(m.need) > 0 {
		t.Errorf("New model should have no Need")
	}

	if len(m.local) > 0 {
		t.Errorf("New model should have no Have")
	}
}

var testDataExpected = map[string]File{
	"foo": File{
		Name:     "foo",
		Flags:    0644,
		Modified: 1384244572,
		Blocks:   []Block{{Offset: 0x0, Length: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
	},
	"bar": File{
		Name:     "bar",
		Flags:    0644,
		Modified: 1384244579,
		Blocks:   []Block{{Offset: 0x0, Length: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
	},
	"baz/quux": File{
		Name:     "baz/quux",
		Flags:    0644,
		Modified: 1384244676,
		Blocks:   []Block{{Offset: 0x0, Length: 0x9, Hash: []uint8{0xc1, 0x54, 0xd9, 0x4e, 0x94, 0xba, 0x72, 0x98, 0xa6, 0xad, 0xb0, 0x52, 0x3a, 0xfe, 0x34, 0xd1, 0xb6, 0xa5, 0x81, 0xd6, 0xb8, 0x93, 0xa7, 0x63, 0xd4, 0x5d, 0xdc, 0x5e, 0x20, 0x9d, 0xcb, 0x83}}},
	},
}

func TestUpdateLocal(t *testing.T) {
	m := NewModel("foo")
	fs := Walk("testdata", m)
	m.ReplaceLocal(fs)

	if len(m.need) > 0 {
		t.Fatalf("Model with only local data should have no need")
	}

	if l1, l2 := len(m.local), len(testDataExpected); l1 != l2 {
		t.Fatalf("Model len(local) incorrect, %d != %d", l1, l2)
	}
	if l1, l2 := len(m.global), len(testDataExpected); l1 != l2 {
		t.Fatalf("Model len(global) incorrect, %d != %d", l1, l2)
	}
	for name, file := range testDataExpected {
		if f, ok := m.local[name]; ok {
			if !reflect.DeepEqual(f, file) {
				t.Errorf("Incorrect local\n%v !=\n%v\nfor file %q", f, file, name)
			}
		} else {
			t.Errorf("Missing file %q in local table", name)
		}
		if f, ok := m.global[name]; ok {
			if !reflect.DeepEqual(f, file) {
				t.Errorf("Incorrect global\n%v !=\n%v\nfor file %q", f, file, name)
			}
		} else {
			t.Errorf("Missing file %q in global table", name)
		}
	}

	for _, f := range fs {
		if hf, ok := m.local[f.Name]; !ok || hf.Modified != f.Modified {
			t.Fatalf("Incorrect local for %q", f.Name)
		}
		if cf, ok := m.global[f.Name]; !ok || cf.Modified != f.Modified {
			t.Fatalf("Incorrect global for %q", f.Name)
		}
	}
}

func TestRemoteUpdateExisting(t *testing.T) {
	m := NewModel("foo")
	fs := Walk("testdata", m)
	m.ReplaceLocal(fs)

	newFile := protocol.FileInfo{
		Name:     "foo",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index(string("42"), []protocol.FileInfo{newFile})

	if l := len(m.need); l != 1 {
		t.Errorf("Model missing Need for one file (%d != 1)", l)
	}
}

func TestRemoteAddNew(t *testing.T) {
	m := NewModel("foo")
	fs := Walk("testdata", m)
	m.ReplaceLocal(fs)

	newFile := protocol.FileInfo{
		Name:     "a new file",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index(string("42"), []protocol.FileInfo{newFile})

	if l1, l2 := len(m.need), 1; l1 != l2 {
		t.Errorf("Model len(m.need) incorrect (%d != %d)", l1, l2)
	}
}

func TestRemoteUpdateOld(t *testing.T) {
	m := NewModel("foo")
	fs := Walk("testdata", m)
	m.ReplaceLocal(fs)

	oldTimeStamp := int64(1234)
	newFile := protocol.FileInfo{
		Name:     "foo",
		Modified: oldTimeStamp,
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index(string("42"), []protocol.FileInfo{newFile})

	if l1, l2 := len(m.need), 0; l1 != l2 {
		t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
	}
}

func TestDelete(t *testing.T) {
	m := NewModel("foo")
	fs := Walk("testdata", m)
	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs); l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	ot := time.Now().Unix()
	newFile := File{
		Name:     "a new file",
		Modified: ot,
		Blocks:   []Block{{0, 100, []byte("some hash bytes")}},
	}
	m.UpdateLocal(newFile)

	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	// The deleted file is kept in the local and global tables and marked as deleted.

	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	if m.local["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in local table")
	}
	if len(m.local["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in local")
	}
	if ft := m.local["a new file"].Modified; ft != ot+1 {
		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
	}

	if m.global["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in global table")
	}
	if len(m.global["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in global")
	}
	if ft := m.local["a new file"].Modified; ft != ot+1 {
		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
	}

	// Another update should change nothing

	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}

	if m.local["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in local table")
	}
	if len(m.local["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in local")
	}
	if ft := m.local["a new file"].Modified; ft != ot+1 {
		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
	}

	if m.global["a new file"].Flags&(1<<12) == 0 {
		t.Error("Unexpected deleted flag = 0 in global table")
	}
	if len(m.global["a new file"].Blocks) != 0 {
		t.Error("Unexpected non-zero blocks for deleted file in global")
	}
	if ft := m.local["a new file"].Modified; ft != ot+1 {
		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
	}
}

func TestForgetNode(t *testing.T) {
	m := NewModel("foo")
	fs := Walk("testdata", m)
	m.ReplaceLocal(fs)

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs); l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.need), 0; l1 != l2 {
		t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
	}

	newFile := protocol.FileInfo{
		Name:     "new file",
		Modified: time.Now().Unix(),
		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
	}
	m.Index(string("42"), []protocol.FileInfo{newFile})

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.need), 1; l1 != l2 {
		t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
	}

	m.Close(string("42"))

	if l1, l2 := len(m.local), len(fs); l1 != l2 {
		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.global), len(fs); l1 != l2 {
		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
	}
	if l1, l2 := len(m.need), 0; l1 != l2 {
		t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
	}
}
||||
@@ -1,26 +1,29 @@
Block Exchange Protocol v1.0
============================
Block Exchange Protocol v1
==========================

Introduction and Definitions
----------------------------

The BEP is used between two or more _nodes_ thus forming a _cluster_.
Each node has a _repository_ of files described by the _local model_,
containing modification times and block hashes. The local model is sent
to the other nodes in the cluster. The union of all files in the local
models, with files selected for most recent modification time, forms the
_global model_. Each node strives to get its repository in sync with
the global model by requesting missing blocks from the other nodes.
BEP is used between two or more _nodes_ thus forming a _cluster_. Each
node has one or more _repositories_ of files described by the _local
model_, containing metadata and block hashes. The local model is sent to
the other nodes in the cluster. The union of all files in the local
models, with files selected for highest change version, forms the
_global model_. Each node strives to get its repositories in sync with
the global model by requesting missing or outdated blocks from the other
nodes in the cluster.

File data is described and transferred in units of _blocks_, each being
128 KiB (131072 bytes) in size.

Transport and Authentication
----------------------------

The BEP itself does not provide retransmissions, compression, encryption
nor authentication. It is expected that this is performed at lower
layers of the networking stack. A typical deployment stack should be
similar to the following:
BEP itself does not provide retransmissions, compression, encryption nor
authentication. It is expected that this is performed at lower layers of
the networking stack. The typical deployment stack is the following:

    |-----------------------------|
    +-----------------------------|
    |  Block Exchange Protocol    |
    |-----------------------------|
    |   Compression (RFC 1951)    |
@@ -48,69 +51,132 @@ message boundary.
Messages
--------

Every message starts with one 32 bit word indicating the message version
and type. For BEP v1.0 the Version field is set to zero. Future versions
with incompatible message formats will increment the Version field. The
reserved bits must be set to zero.
Every message starts with one 32 bit word indicating the message
version, type and ID.

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    | Ver=0 |      Message ID       |      Type     |    Reserved   |
    |  Ver  |      Type     |      Message ID       |    Reply To   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

All data following the message header is in XDR (RFC 1014) encoding.
The actual data types in use by BEP, in XDR naming convention, are:
For BEP v1 the Version field is set to zero. Future versions with
incompatible message formats will increment the Version field.

 - unsigned int   -- unsigned 32 bit integer
 - hyper          -- signed 64 bit integer
 - unsigned hyper -- unsigned 64 bit integer
 - opaque<>       -- variable length opaque data
 - string<>       -- variable length string

The Type field indicates the type of data following the message header
and is one of the integers defined below.

The encoding of opaque<> and string<> are identical, the distinction is
solely in interpretation. Opaque data should not be interpreted as such,
but can be compared bytewise to other opaque data. All strings use the
UTF-8 encoding.
The Message ID is set to a unique value for each transmitted message. In
request messages the Reply To is set to zero. In response messages it is
set to the message ID of the corresponding request.

All data following the message header is in XDR (RFC 1014) encoding. All
fields smaller than 32 bits and all variable length data is padded to a
multiple of 32 bits. The actual data types in use by BEP, in XDR naming
convention, are:

 - (unsigned) int   -- (unsigned) 32 bit integer
 - (unsigned) hyper -- (unsigned) 64 bit integer
 - opaque<>         -- variable length opaque data
 - string<>         -- variable length string

The transmitted length of string and opaque data is the length of actual
data, excluding any added padding. The encoding of opaque<> and string<>
are identical, the distinction being solely in interpretation. Opaque
data should not be interpreted but can be compared bytewise to other
opaque data. All strings use the UTF-8 encoding.
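
To make the length-plus-padding rule concrete, here is a minimal,
illustrative Go sketch of the string/opaque wire format described above.
The helper name is ours, not part of the codebase:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // writeXDROpaque: a 32-bit big-endian length (excluding padding),
    // the raw bytes, then zero bytes up to a multiple of four.
    func writeXDROpaque(buf *bytes.Buffer, data []byte) {
        binary.Write(buf, binary.BigEndian, uint32(len(data)))
        buf.Write(data)
        if pad := (4 - len(data)%4) % 4; pad > 0 {
            buf.Write(make([]byte, pad))
        }
    }

    func main() {
        var buf bytes.Buffer
        writeXDROpaque(&buf, []byte("hello")) // 4 length bytes + 5 data + 3 pad
        fmt.Printf("% x\n", buf.Bytes())      // 00 00 00 05 68 65 6c 6c 6f 00 00 00
    }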

### Index (Type = 1)

The Index message defines the contents of the sender's repository. An Index
message is sent by each peer immediately upon connection and whenever the
local repository contents changes. However, if a peer has no data to
advertise (the repository is empty, or it is set to only import data) it
is allowed but not required to send an empty Index message (a file list of
zero length). If the repository contents change from non-empty to empty,
an empty Index message must be sent. There is no response to the Index
message.
The Index message defines the contents of the sender's repository. An
Index message is sent by each peer immediately upon connection. A peer
with no data to advertise (the repository is empty, or it is set to only
import data) is allowed but not required to send an empty Index message
(a file list of zero length). If the repository contents change from
non-empty to empty, an empty Index message must be sent. There is no
response to the Index message.

    struct IndexMessage {
        FileInfo Files<>;
    }

#### Graphical Representation

    struct FileInfo {
        string Name<>;
        unsigned int Flags;
        hyper Modified;
        BlockInfo Blocks<>;
    }

    struct BlockInfo {
        unsigned int Length;
        opaque Hash<>
    }

The file name is the part relative to the repository root. The
modification time is expressed as the number of seconds since the Unix
Epoch. The hash algorithm is implied by the hash length. Currently, the
hash must be 32 bytes long and computed by SHA256.

The flags field is made up of the following single bit flags:
IndexMessage Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |              Reserved               |D|  Unix Perm. & Mode    |
    |                     Length of Repository                      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Repository (variable length)                   \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Number of Files                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Zero or more FileInfo Structures               \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

FileInfo Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Name                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     Name (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                             Flags                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                       Modified (64 bits)                      +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                            Version                            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Number of Blocks                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \               Zero or more BlockInfo Structures               \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

BlockInfo Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                              Size                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Hash                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     Hash (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The Repository field identifies the repository that the index message
pertains to. For single repository implementations an empty repository
ID is acceptable, or the word "default". The Name is the file name path
relative to the repository root. The combination of Repository and Name
uniquely identifies each file in a cluster.

The Version field is a counter that is initially zero for each file. It
is incremented each time a change is detected. The combination of
Repository, Name and Version uniquely identifies the contents of a file
at a certain point in time.

The Flags field is made up of the following single bit flags:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |             Reserved              |I|D|  Unix Perm. & Mode    |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

 - The lower 12 bits hold the common Unix permission and mode bits.
@@ -118,63 +184,202 @@ The flags field is made up of the following single bit flags:
 - Bit 19 ("D") is set when the file has been deleted. The block list
   shall contain zero blocks and the modification time indicates the
   time of deletion or, if deletion time is not reliably determinable,
   one second past the last known modification time.
   the last known modification time and a higher version number.

 - Bit 0 through 18 are reserved for future use and shall be set to
 - Bit 18 ("I") is set when the file is invalid and unavailable for
   synchronization. A peer may set this bit to indicate that it can
   temporarily not serve data for the file.

 - Bit 0 through 17 are reserved for future use and shall be set to
   zero.
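
In the MSB-first numbering used by the diagram, "bit 19" is the 1<<12
bit of the 32-bit Flags word and "bit 18" is 1<<13. A small illustrative
Go sketch, matching the FlagDeleted and FlagInvalid constants this
change introduces in protocol.go:

    // Diagram bit 19 ("D") is 1<<12, diagram bit 18 ("I") is 1<<13,
    // as in the constants added to protocol.go in this change.
    const (
        FlagDeleted uint32 = 1 << 12
        FlagInvalid uint32 = 1 << 13
    )

    func isDeleted(flags uint32) bool  { return flags&FlagDeleted != 0 }
    func isInvalid(flags uint32) bool  { return flags&FlagInvalid != 0 }
    func unixMode(flags uint32) uint32 { return flags & 0xfff } // lower 12 bits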

The hash algorithm is implied by the Hash length. Currently, the hash
must be 32 bytes long and computed by SHA256.

The Modified time is expressed as the number of seconds since the Unix
Epoch. In the rare occasion that a file is simultaneously and
independently modified by two nodes in the same cluster and thus ends up
on the same Version number after modification, the Modified field is
used as a tie breaker.

The Size field is the size of the file, in bytes.

The Blocks list contains the size and hash for each block in the file.
Each block represents a 128 KiB slice of the file, except for the last
block which may represent a smaller amount of data.
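
As an illustration of how such a block list can be computed, here is a
short, self-contained Go sketch; the helper and the input file name are
ours, not from the codebase:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "os"
    )

    const BlockSize = 128 * 1024 // 128 KiB, as defined by the protocol

    // hashBlocks returns the per-block sizes and SHA-256 hashes that
    // would populate a Blocks list. Every block is 128 KiB except
    // possibly the last one.
    func hashBlocks(r io.Reader) ([]uint32, [][]byte, error) {
        var sizes []uint32
        var hashes [][]byte
        buf := make([]byte, BlockSize)
        for {
            n, err := io.ReadFull(r, buf)
            if n > 0 {
                h := sha256.Sum256(buf[:n])
                sizes = append(sizes, uint32(n))
                hashes = append(hashes, h[:])
            }
            if err == io.EOF || err == io.ErrUnexpectedEOF {
                return sizes, hashes, nil
            }
            if err != nil {
                return nil, nil, err
            }
        }
    }

    func main() {
        f, err := os.Open("somefile") // hypothetical input file
        if err != nil {
            panic(err)
        }
        defer f.Close()
        sizes, _, _ := hashBlocks(f)
        fmt.Println(len(sizes), "blocks")
    }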

#### XDR

    struct IndexMessage {
        string Repository<>;
        FileInfo Files<>;
    }

    struct FileInfo {
        string Name<>;
        unsigned int Flags;
        hyper Modified;
        unsigned int Version;
        BlockInfo Blocks<>;
    }

    struct BlockInfo {
        unsigned int Size;
        opaque Hash<>;
    }
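
For orientation, an illustrative use of these structures with the
generated marshaling code that this change adds in
protocol/message_types.go and protocol/message_xdr.go (field values are
made up):

    package protocol

    // exampleIndex builds and encodes a one-file index.
    func exampleIndex() []byte {
        someHash := make([]byte, 32) // placeholder for a real SHA-256 digest
        msg := IndexMessage{
            Repository: "default",
            Files: []FileInfo{{
                Name:     "some/file",
                Flags:    0644,
                Modified: 1234567890,
                Version:  1,
                Blocks:   []BlockInfo{{Size: 131072, Hash: someHash}},
            }},
        }
        return msg.MarshalXDR() // XDR bytes, ready to follow a message header
    }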

### Request (Type = 2)

The Request message expresses the desire to receive a data block
corresponding to a part of a certain file in the peer's repository.

The requested block must correspond exactly to one block seen in the
peer's Index message. The hash field must be set to the expected value by
the sender. The receiver may validate that this is actually the case
before transmitting data. Each Request message must be met with a Response
#### Graphical Representation

RequestMessage Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                     Length of Repository                      |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Repository (variable length)                   \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Name                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     Name (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                                                               |
    +                        Offset (64 bits)                       +
    |                                                               |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                              Size                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The Repository and Name fields are as documented for the Index message.
The Offset and Size fields specify the region of the file to be
transferred. This should equate to exactly one block as seen in an Index
message.

#### XDR

    struct RequestMessage {
        string Repository<>;
        string Name<>;
        unsigned hyper Offset;
        unsigned int Length;
        opaque Hash<>;
        unsigned int Size;
    }

The hash algorithm is implied by the hash length. Currently, the hash
must be 32 bytes long and computed by SHA256.

The Message ID in the header must be set to a unique value to be able to
correlate the request with the response message.
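
A sketch of issuing such a request through the Connection API this
change introduces in protocol.go (the import path is inferred from the
other imports in this diff; the connection value is assumed to come from
NewConnection):

    package main

    import (
        "log"

        "github.com/calmh/syncthing/protocol"
    )

    // fetchFirstBlock requests the first 128 KiB block of a file.
    func fetchFirstBlock(c *protocol.Connection) []byte {
        data, err := c.Request("default", "some/file", 0, 131072)
        if err != nil {
            log.Println("request failed:", err) // connection closed or peer error
            return nil
        }
        return data // up to 128 KiB; empty if the block was unavailable
    }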

### Response (Type = 3)

The Response message is sent in response to a Request message. In case the
requested data was not available (an outdated block was requested, or
the file has been deleted), the Data field is empty.
The Response message is sent in response to a Request message.

#### Graphical Representation

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Data                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     Data (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### Fields

The Data field contains either a full 128 KiB block, a shorter block in
the case of the last block in a file, or is empty (zero length) if the
requested block is not available.

#### XDR

    struct ResponseMessage {
        opaque Data<>
    }

The Message ID in the header is used to correlate requests and
responses.

### Ping (Type = 4)

The Ping message is used to determine that a connection is alive, and to
keep connections alive through state tracking network elements such as
firewalls and NAT gateways. The Ping message has no contents.

    struct PingMessage {
    }

### Pong (Type = 5)

The Pong message is sent in response to a Ping. The Pong message has no
contents, but copies the Message ID from the Ping.

    struct PongMessage {
    }

### Index Update (Type = 6)

This message has exactly the same structure as the Index message.
However instead of replacing the contents of the repository in the
model, the Index Update merely amends it with new or updated file
information. Any files not mentioned in an Index Update are left
unchanged.

### Options (Type = 7)

This informational message provides information about the client
configuration, version, etc. It is sent at connection initiation and,
optionally, when any of the sent parameters have changed. The message is
in the form of a list of (key, value) pairs, both of string type.

Key IDs apart from the well known ones are implementation specific. An
implementation is expected to ignore unknown keys. An implementation may
impose limits on key and value size.

Well known keys:

 - "clientId" -- The name of the implementation. Example: "syncthing".

 - "clientVersion" -- The version of the client. Example: "v1.0.33-47".
   Following the SemVer 2.0 specification for version strings is
   encouraged but not enforced.
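
An illustrative Options payload built with the types from
protocol/message_types.go; the version and cluster hash values are made
up, and "clusterHash" is the option checked for mismatches in
protocol.go later in this diff:

    package protocol

    func exampleOptions() OptionsMessage {
        return OptionsMessage{
            Options: []Option{
                {Key: "clientId", Value: "syncthing"},
                {Key: "clientVersion", Value: "v0.6.0"}, // hypothetical version
                {Key: "clusterHash", Value: "abc123"},   // hypothetical hash
            },
        }
    }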

#### Graphical Representation

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Number of Options                       |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                Zero or more KeyValue Structures               \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

KeyValue Structure:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                         Length of Key                         |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                     Key (variable length)                     \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                        Length of Value                        |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    /                                                               /
    \                    Value (variable length)                    \
    /                                                               /
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

#### XDR

    struct OptionsMessage {
        KeyValue Options<>;
    }

    struct KeyValue {
        string Key<>;
        string Value<>;
    }

Example Exchange
@@ -190,7 +395,7 @@ Example Exchange
     7. <-Response
     8. <-Response
     9. <-Response
    10. Index->
    10. Index Update->
        ...
    11. Ping->
    12. <-Pong
@@ -201,8 +406,7 @@ of the data in the cluster. In this example, peer A has four missing or
outdated blocks. At 2 through 5 peer A sends requests for these blocks.
The requests are received by peer B, who retrieves the data from the
repository and transmits Response records (6 through 9). Node A updates
their repository contents and transmits an updated Index message (10).
their repository contents and transmits an Index Update message (10).
Both peers enter idle state after 10. At some later time 11, peer A
determines that it has not seen data from B for some time and sends a
Ping request. A response is sent at 12.

51
protocol/common_test.go
Normal file
@@ -0,0 +1,51 @@
package protocol

import "io"

type TestModel struct {
    data   []byte
    repo   string
    name   string
    offset int64
    size   int
    closed bool
}

func (t *TestModel) Index(nodeID string, files []FileInfo) {
}

func (t *TestModel) IndexUpdate(nodeID string, files []FileInfo) {
}

func (t *TestModel) Request(nodeID, repo, name string, offset int64, size int) ([]byte, error) {
    t.repo = repo
    t.name = name
    t.offset = offset
    t.size = size
    return t.data, nil
}

func (t *TestModel) Close(nodeID string, err error) {
    t.closed = true
}

type ErrPipe struct {
    io.PipeWriter
    written int
    max     int
    err     error
    closed  bool
}

func (e *ErrPipe) Write(data []byte) (int, error) {
    if e.closed {
        return 0, e.err
    }
    if e.written+len(data) > e.max {
        n, _ := e.PipeWriter.Write(data[:e.max-e.written])
        e.PipeWriter.CloseWithError(e.err)
        e.closed = true
        return n, e.err
    }
    return e.PipeWriter.Write(data)
}
34
protocol/header.go
Normal file
@@ -0,0 +1,34 @@
package protocol

import "github.com/calmh/syncthing/xdr"

type header struct {
    version int
    msgID   int
    msgType int
}

func (h header) encodeXDR(xw *xdr.Writer) (int, error) {
    u := encodeHeader(h)
    return xw.WriteUint32(u)
}

func (h *header) decodeXDR(xr *xdr.Reader) error {
    u := xr.ReadUint32()
    *h = decodeHeader(u)
    return xr.Error()
}

func encodeHeader(h header) uint32 {
    return uint32(h.version&0xf)<<28 +
        uint32(h.msgID&0xfff)<<16 +
        uint32(h.msgType&0xff)<<8
}

func decodeHeader(u uint32) header {
    return header{
        version: int(u>>28) & 0xf,
        msgID:   int(u>>16) & 0xfff,
        msgType: int(u>>8) & 0xff,
    }
}
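For readers following the new header encoding: a quick, illustrative
round-trip check of the packing above (the test name is ours):

    package protocol

    import "testing"

    // The 4-bit version, 12-bit message ID and 8-bit type packed by
    // encodeHeader must come back out of decodeHeader unchanged.
    func TestHeaderRoundTripExample(t *testing.T) {
        h := header{version: 0, msgID: 42, msgType: 2} // type 2 = Request
        u := encodeHeader(h)                           // 0x002a0200
        if got := decodeHeader(u); got != h {
            t.Errorf("round trip failed: %+v != %+v", got, h)
        }
    }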
@@ -1,119 +0,0 @@
package protocol

import (
    "io"

    "github.com/calmh/syncthing/buffers"
)

func pad(l int) int {
    d := l % 4
    if d == 0 {
        return 0
    }
    return 4 - d
}

var padBytes = []byte{0, 0, 0}

type marshalWriter struct {
    w   io.Writer
    tot int
    err error
}

func (w *marshalWriter) writeString(s string) {
    w.writeBytes([]byte(s))
}

func (w *marshalWriter) writeBytes(bs []byte) {
    if w.err != nil {
        return
    }
    w.writeUint32(uint32(len(bs)))
    if w.err != nil {
        return
    }
    _, w.err = w.w.Write(bs)
    if p := pad(len(bs)); p > 0 {
        w.w.Write(padBytes[:p])
    }
    w.tot += len(bs) + pad(len(bs))
}

func (w *marshalWriter) writeUint32(v uint32) {
    if w.err != nil {
        return
    }
    var b [4]byte
    b[0] = byte(v >> 24)
    b[1] = byte(v >> 16)
    b[2] = byte(v >> 8)
    b[3] = byte(v)
    _, w.err = w.w.Write(b[:])
    w.tot += 4
}

func (w *marshalWriter) writeUint64(v uint64) {
    if w.err != nil {
        return
    }
    var b [8]byte
    b[0] = byte(v >> 56)
    b[1] = byte(v >> 48)
    b[2] = byte(v >> 40)
    b[3] = byte(v >> 32)
    b[4] = byte(v >> 24)
    b[5] = byte(v >> 16)
    b[6] = byte(v >> 8)
    b[7] = byte(v)
    _, w.err = w.w.Write(b[:])
    w.tot += 8
}

type marshalReader struct {
    r   io.Reader
    tot int
    err error
}

func (r *marshalReader) readString() string {
    bs := r.readBytes()
    defer buffers.Put(bs)
    return string(bs)
}

func (r *marshalReader) readBytes() []byte {
    if r.err != nil {
        return nil
    }
    l := int(r.readUint32())
    if r.err != nil {
        return nil
    }
    b := buffers.Get(l + pad(l))
    _, r.err = io.ReadFull(r.r, b)
    r.tot += int(l + pad(l))
    return b[:l]
}

func (r *marshalReader) readUint32() uint32 {
    if r.err != nil {
        return 0
    }
    var b [4]byte
    _, r.err = io.ReadFull(r.r, b[:])
    r.tot += 4
    return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}

func (r *marshalReader) readUint64() uint64 {
    if r.err != nil {
        return 0
    }
    var b [8]byte
    _, r.err = io.ReadFull(r.r, b[:])
    r.tot += 8
    return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
        uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}
35
protocol/message_types.go
Normal file
@@ -0,0 +1,35 @@
package protocol

type IndexMessage struct {
    Repository string     // max:64
    Files      []FileInfo // max:100000
}

type FileInfo struct {
    Name     string // max:1024
    Flags    uint32
    Modified int64
    Version  uint32
    Blocks   []BlockInfo // max:100000
}

type BlockInfo struct {
    Size uint32
    Hash []byte // max:64
}

type RequestMessage struct {
    Repository string // max:64
    Name       string // max:1024
    Offset     uint64
    Size       uint32
}

type OptionsMessage struct {
    Options []Option // max:64
}

type Option struct {
    Key   string // max:64
    Value string // max:1024
}
286
protocol/message_xdr.go
Normal file
@@ -0,0 +1,286 @@
package protocol

import (
    "bytes"
    "io"

    "github.com/calmh/syncthing/xdr"
)

func (o IndexMessage) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.encodeXDR(xw)
}

func (o IndexMessage) MarshalXDR() []byte {
    var buf bytes.Buffer
    var xw = xdr.NewWriter(&buf)
    o.encodeXDR(xw)
    return buf.Bytes()
}

func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
    if len(o.Repository) > 64 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteString(o.Repository)
    if len(o.Files) > 100000 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteUint32(uint32(len(o.Files)))
    for i := range o.Files {
        o.Files[i].encodeXDR(xw)
    }
    return xw.Tot(), xw.Error()
}

func (o *IndexMessage) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.decodeXDR(xr)
}

func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
    var buf = bytes.NewBuffer(bs)
    var xr = xdr.NewReader(buf)
    return o.decodeXDR(xr)
}

func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
    o.Repository = xr.ReadStringMax(64)
    _FilesSize := int(xr.ReadUint32())
    if _FilesSize > 100000 {
        return xdr.ErrElementSizeExceeded
    }
    o.Files = make([]FileInfo, _FilesSize)
    for i := range o.Files {
        (&o.Files[i]).decodeXDR(xr)
    }
    return xr.Error()
}

func (o FileInfo) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.encodeXDR(xw)
}

func (o FileInfo) MarshalXDR() []byte {
    var buf bytes.Buffer
    var xw = xdr.NewWriter(&buf)
    o.encodeXDR(xw)
    return buf.Bytes()
}

func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) {
    if len(o.Name) > 1024 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteString(o.Name)
    xw.WriteUint32(o.Flags)
    xw.WriteUint64(uint64(o.Modified))
    xw.WriteUint32(o.Version)
    if len(o.Blocks) > 100000 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteUint32(uint32(len(o.Blocks)))
    for i := range o.Blocks {
        o.Blocks[i].encodeXDR(xw)
    }
    return xw.Tot(), xw.Error()
}

func (o *FileInfo) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.decodeXDR(xr)
}

func (o *FileInfo) UnmarshalXDR(bs []byte) error {
    var buf = bytes.NewBuffer(bs)
    var xr = xdr.NewReader(buf)
    return o.decodeXDR(xr)
}

func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
    o.Name = xr.ReadStringMax(1024)
    o.Flags = xr.ReadUint32()
    o.Modified = int64(xr.ReadUint64())
    o.Version = xr.ReadUint32()
    _BlocksSize := int(xr.ReadUint32())
    if _BlocksSize > 100000 {
        return xdr.ErrElementSizeExceeded
    }
    o.Blocks = make([]BlockInfo, _BlocksSize)
    for i := range o.Blocks {
        (&o.Blocks[i]).decodeXDR(xr)
    }
    return xr.Error()
}

func (o BlockInfo) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.encodeXDR(xw)
}

func (o BlockInfo) MarshalXDR() []byte {
    var buf bytes.Buffer
    var xw = xdr.NewWriter(&buf)
    o.encodeXDR(xw)
    return buf.Bytes()
}

func (o BlockInfo) encodeXDR(xw *xdr.Writer) (int, error) {
    xw.WriteUint32(o.Size)
    if len(o.Hash) > 64 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteBytes(o.Hash)
    return xw.Tot(), xw.Error()
}

func (o *BlockInfo) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.decodeXDR(xr)
}

func (o *BlockInfo) UnmarshalXDR(bs []byte) error {
    var buf = bytes.NewBuffer(bs)
    var xr = xdr.NewReader(buf)
    return o.decodeXDR(xr)
}

func (o *BlockInfo) decodeXDR(xr *xdr.Reader) error {
    o.Size = xr.ReadUint32()
    o.Hash = xr.ReadBytesMax(64)
    return xr.Error()
}

func (o RequestMessage) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.encodeXDR(xw)
}

func (o RequestMessage) MarshalXDR() []byte {
    var buf bytes.Buffer
    var xw = xdr.NewWriter(&buf)
    o.encodeXDR(xw)
    return buf.Bytes()
}

func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
    if len(o.Repository) > 64 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteString(o.Repository)
    if len(o.Name) > 1024 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteString(o.Name)
    xw.WriteUint64(o.Offset)
    xw.WriteUint32(o.Size)
    return xw.Tot(), xw.Error()
}

func (o *RequestMessage) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.decodeXDR(xr)
}

func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
    var buf = bytes.NewBuffer(bs)
    var xr = xdr.NewReader(buf)
    return o.decodeXDR(xr)
}

func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
    o.Repository = xr.ReadStringMax(64)
    o.Name = xr.ReadStringMax(1024)
    o.Offset = xr.ReadUint64()
    o.Size = xr.ReadUint32()
    return xr.Error()
}

func (o OptionsMessage) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.encodeXDR(xw)
}

func (o OptionsMessage) MarshalXDR() []byte {
    var buf bytes.Buffer
    var xw = xdr.NewWriter(&buf)
    o.encodeXDR(xw)
    return buf.Bytes()
}

func (o OptionsMessage) encodeXDR(xw *xdr.Writer) (int, error) {
    if len(o.Options) > 64 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteUint32(uint32(len(o.Options)))
    for i := range o.Options {
        o.Options[i].encodeXDR(xw)
    }
    return xw.Tot(), xw.Error()
}

func (o *OptionsMessage) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.decodeXDR(xr)
}

func (o *OptionsMessage) UnmarshalXDR(bs []byte) error {
    var buf = bytes.NewBuffer(bs)
    var xr = xdr.NewReader(buf)
    return o.decodeXDR(xr)
}

func (o *OptionsMessage) decodeXDR(xr *xdr.Reader) error {
    _OptionsSize := int(xr.ReadUint32())
    if _OptionsSize > 64 {
        return xdr.ErrElementSizeExceeded
    }
    o.Options = make([]Option, _OptionsSize)
    for i := range o.Options {
        (&o.Options[i]).decodeXDR(xr)
    }
    return xr.Error()
}

func (o Option) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.encodeXDR(xw)
}

func (o Option) MarshalXDR() []byte {
    var buf bytes.Buffer
    var xw = xdr.NewWriter(&buf)
    o.encodeXDR(xw)
    return buf.Bytes()
}

func (o Option) encodeXDR(xw *xdr.Writer) (int, error) {
    if len(o.Key) > 64 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteString(o.Key)
    if len(o.Value) > 1024 {
        return xw.Tot(), xdr.ErrElementSizeExceeded
    }
    xw.WriteString(o.Value)
    return xw.Tot(), xw.Error()
}

func (o *Option) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.decodeXDR(xr)
}

func (o *Option) UnmarshalXDR(bs []byte) error {
    var buf = bytes.NewBuffer(bs)
    var xr = xdr.NewReader(buf)
    return o.decodeXDR(xr)
}

func (o *Option) decodeXDR(xr *xdr.Reader) error {
    o.Key = xr.ReadStringMax(64)
    o.Value = xr.ReadStringMax(1024)
    return xr.Error()
}
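A short, illustrative round trip through the generated marshaling code
above (field values are made up; the function name is ours):

    package protocol

    // exampleRequestRoundTrip encodes a RequestMessage with MarshalXDR
    // and decodes it again with UnmarshalXDR.
    func exampleRequestRoundTrip() (RequestMessage, error) {
        req := RequestMessage{Repository: "default", Name: "some/file", Offset: 0, Size: 131072}
        var out RequestMessage
        err := out.UnmarshalXDR(req.MarshalXDR())
        return out, err // out now equals req
    }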
@@ -1,106 +0,0 @@
package protocol

import "io"

type request struct {
    name   string
    offset uint64
    size   uint32
    hash   []byte
}

type header struct {
    version int
    msgID   int
    msgType int
}

func encodeHeader(h header) uint32 {
    return uint32(h.version&0xf)<<28 +
        uint32(h.msgID&0xfff)<<16 +
        uint32(h.msgType&0xff)<<8
}

func decodeHeader(u uint32) header {
    return header{
        version: int(u>>28) & 0xf,
        msgID:   int(u>>16) & 0xfff,
        msgType: int(u>>8) & 0xff,
    }
}

func (w *marshalWriter) writeHeader(h header) {
    w.writeUint32(encodeHeader(h))
}

func (w *marshalWriter) writeIndex(idx []FileInfo) {
    w.writeUint32(uint32(len(idx)))
    for _, f := range idx {
        w.writeString(f.Name)
        w.writeUint32(f.Flags)
        w.writeUint64(uint64(f.Modified))
        w.writeUint32(uint32(len(f.Blocks)))
        for _, b := range f.Blocks {
            w.writeUint32(b.Length)
            w.writeBytes(b.Hash)
        }
    }
}

func WriteIndex(w io.Writer, idx []FileInfo) (int, error) {
    mw := marshalWriter{w, 0, nil}
    mw.writeIndex(idx)
    return mw.tot, mw.err
}

func (w *marshalWriter) writeRequest(r request) {
    w.writeString(r.name)
    w.writeUint64(r.offset)
    w.writeUint32(r.size)
    w.writeBytes(r.hash)
}

func (w *marshalWriter) writeResponse(data []byte) {
    w.writeBytes(data)
}

func (r *marshalReader) readHeader() header {
    return decodeHeader(r.readUint32())
}

func (r *marshalReader) readIndex() []FileInfo {
    nfiles := r.readUint32()
    files := make([]FileInfo, nfiles)
    for i := range files {
        files[i].Name = r.readString()
        files[i].Flags = r.readUint32()
        files[i].Modified = int64(r.readUint64())
        nblocks := r.readUint32()
        blocks := make([]BlockInfo, nblocks)
        for j := range blocks {
            blocks[j].Length = r.readUint32()
            blocks[j].Hash = r.readBytes()
        }
        files[i].Blocks = blocks
    }
    return files
}

func ReadIndex(r io.Reader) ([]FileInfo, error) {
    mr := marshalReader{r, 0, nil}
    idx := mr.readIndex()
    return idx, mr.err
}

func (r *marshalReader) readRequest() request {
    var req request
    req.name = r.readString()
    req.offset = r.readUint64()
    req.size = r.readUint32()
    req.hash = r.readBytes()
    return req
}

func (r *marshalReader) readResponse() []byte {
    return r.readBytes()
}
@@ -1,115 +0,0 @@
package protocol

import (
    "bytes"
    "io/ioutil"
    "reflect"
    "testing"
    "testing/quick"
)

func TestIndex(t *testing.T) {
    idx := []FileInfo{
        {
            "Foo",
            0755,
            1234567890,
            []BlockInfo{
                {12345678, []byte("hash hash hash")},
                {23456781, []byte("ash hash hashh")},
                {34567812, []byte("sh hash hashha")},
            },
        }, {
            "Quux/Quux",
            0644,
            2345678901,
            []BlockInfo{
                {45678123, []byte("4321 hash hash hash")},
                {56781234, []byte("3214 ash hash hashh")},
                {67812345, []byte("2143 sh hash hashha")},
            },
        },
    }

    var buf = new(bytes.Buffer)
    var wr = marshalWriter{buf, 0, nil}
    wr.writeIndex(idx)

    var rd = marshalReader{buf, 0, nil}
    var idx2 = rd.readIndex()

    if !reflect.DeepEqual(idx, idx2) {
        t.Errorf("Index marshal error:\n%#v\n%#v\n", idx, idx2)
    }
}

func TestRequest(t *testing.T) {
    f := func(name string, offset uint64, size uint32, hash []byte) bool {
        var buf = new(bytes.Buffer)
        var req = request{name, offset, size, hash}
        var wr = marshalWriter{buf, 0, nil}
        wr.writeRequest(req)
        var rd = marshalReader{buf, 0, nil}
        var req2 = rd.readRequest()
        return req.name == req2.name &&
            req.offset == req2.offset &&
            req.size == req2.size &&
            bytes.Compare(req.hash, req2.hash) == 0
    }
    if err := quick.Check(f, nil); err != nil {
        t.Error(err)
    }
}

func TestResponse(t *testing.T) {
    f := func(data []byte) bool {
        var buf = new(bytes.Buffer)
        var wr = marshalWriter{buf, 0, nil}
        wr.writeResponse(data)
        var rd = marshalReader{buf, 0, nil}
        var read = rd.readResponse()
        return bytes.Compare(read, data) == 0
    }
    if err := quick.Check(f, nil); err != nil {
        t.Error(err)
    }
}

func BenchmarkWriteIndex(b *testing.B) {
    idx := []FileInfo{
        {
            "Foo",
            0777,
            1234567890,
            []BlockInfo{
                {12345678, []byte("hash hash hash")},
                {23456781, []byte("ash hash hashh")},
                {34567812, []byte("sh hash hashha")},
            },
        }, {
            "Quux/Quux",
            0644,
            2345678901,
            []BlockInfo{
                {45678123, []byte("4321 hash hash hash")},
                {56781234, []byte("3214 ash hash hashh")},
                {67812345, []byte("2143 sh hash hashha")},
            },
        },
    }

    var wr = marshalWriter{ioutil.Discard, 0, nil}

    for i := 0; i < b.N; i++ {
        wr.writeIndex(idx)
    }
}

func BenchmarkWriteRequest(b *testing.B) {
    var req = request{"blah blah", 1231323, 13123123, []byte("hash hash hash")}
    var wr = marshalWriter{ioutil.Discard, 0, nil}

    for i := 0; i < b.N; i++ {
        wr.writeRequest(req)
    }
}
@@ -3,59 +3,83 @@ package protocol
import (
    "compress/flate"
    "errors"
    "fmt"
    "io"
    "log"
    "sync"
    "time"

    "github.com/calmh/syncthing/buffers"
    "github.com/calmh/syncthing/xdr"
)

const BlockSize = 128 * 1024

const (
    messageTypeIndex       = 1
    messageTypeRequest     = 2
    messageTypeResponse    = 3
    messageTypePing        = 4
    messageTypePong        = 5
    messageTypeIndexUpdate = 6
    messageTypeOptions     = 7
)

const (
    messageTypeReserved = iota
    messageTypeIndex
    messageTypeRequest
    messageTypeResponse
    messageTypePing
    messageTypePong
    FlagDeleted = 1 << 12
    FlagInvalid = 1 << 13
)

var ErrClosed = errors.New("Connection closed")

type FileInfo struct {
    Name     string
    Flags    uint32
    Modified int64
    Blocks   []BlockInfo
}

type BlockInfo struct {
    Length uint32
    Hash   []byte
}
var (
    ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
    ErrClosed      = errors.New("connection closed")
)

type Model interface {
    // An index was received from the peer node
    Index(nodeID string, files []FileInfo)
    // An index update was received from the peer node
    IndexUpdate(nodeID string, files []FileInfo)
    // A request was made by the peer node
    Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error)
    Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error)
    // The peer node closed the connection
    Close(nodeID string)
    Close(nodeID string, err error)
}

type Connection struct {
    receiver   Model
    reader     io.Reader
    mreader    *marshalReader
    writer     io.Writer
    mwriter    *marshalWriter
    wLock      sync.RWMutex
    closed     bool
    closedLock sync.RWMutex
    awaiting   map[int]chan interface{}
    nextId     int
    ID         string
    sync.RWMutex

    id          string
    receiver    Model
    reader      io.Reader
    xr          *xdr.Reader
    writer      io.Writer
    xw          *xdr.Writer
    closed      bool
    awaiting    map[int]chan asyncResult
    nextID      int
    indexSent   map[string]map[string][2]int64
    peerOptions map[string]string
    myOptions   map[string]string
    optionsLock sync.Mutex

    hasSentIndex  bool
    hasRecvdIndex bool

    statisticsLock sync.Mutex
}

func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) *Connection {
type asyncResult struct {
    val []byte
    err error
}

const (
    pingTimeout  = 2 * time.Minute
    pingIdleTime = 5 * time.Minute
)

func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model, options map[string]string) *Connection {
    flrd := flate.NewReader(reader)
    flwr, err := flate.NewWriter(writer, flate.BestSpeed)
    if err != nil {
@@ -63,177 +87,358 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
    }

    c := Connection{
        receiver: receiver,
        reader:   flrd,
        mreader:  &marshalReader{flrd, 0, nil},
        writer:   flwr,
        mwriter:  &marshalWriter{flwr, 0, nil},
        awaiting: make(map[int]chan interface{}),
        ID:       nodeID,
        id:        nodeID,
        receiver:  receiver,
        reader:    flrd,
        xr:        xdr.NewReader(flrd),
        writer:    flwr,
        xw:        xdr.NewWriter(flwr),
        awaiting:  make(map[int]chan asyncResult),
        indexSent: make(map[string]map[string][2]int64),
    }

    go c.readerLoop()
    go c.pingerLoop()

    if options != nil {
        c.myOptions = options
        go func() {
            c.Lock()
            header{0, c.nextID, messageTypeOptions}.encodeXDR(c.xw)
            var om OptionsMessage
            for k, v := range options {
                om.Options = append(om.Options, Option{k, v})
            }
            om.encodeXDR(c.xw)
            err := c.xw.Error()
            if err == nil {
                err = c.flush()
            }
            if err != nil {
                log.Println("Warning: Write error during initial handshake:", err)
            }
            c.nextID++
            c.Unlock()
        }()
    }

    return &c
}

// Index writes the list of file information to the connected peer node
func (c *Connection) Index(idx []FileInfo) {
    c.wLock.Lock()
    defer c.wLock.Unlock()
func (c *Connection) ID() string {
    return c.id
}

    c.mwriter.writeHeader(header{0, c.nextId, messageTypeIndex})
    c.nextId = (c.nextId + 1) & 0xfff
    c.mwriter.writeIndex(idx)
    c.flush()
// Index writes the list of file information to the connected peer node
func (c *Connection) Index(repo string, idx []FileInfo) {
    c.Lock()
    var msgType int
    if c.indexSent[repo] == nil {
        // This is the first time we send an index.
        msgType = messageTypeIndex

        c.indexSent[repo] = make(map[string][2]int64)
        for _, f := range idx {
            c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
        }
    } else {
        // We have sent one full index. Only send updates now.
        msgType = messageTypeIndexUpdate
        var diff []FileInfo
        for _, f := range idx {
            if vs, ok := c.indexSent[repo][f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
                diff = append(diff, f)
                c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
            }
        }
        idx = diff
    }

    header{0, c.nextID, msgType}.encodeXDR(c.xw)
    _, err := IndexMessage{repo, idx}.encodeXDR(c.xw)
    if err == nil {
        err = c.flush()
    }
    c.nextID = (c.nextID + 1) & 0xfff
    c.hasSentIndex = true
    c.Unlock()

    if err != nil {
        c.close(err)
        return
    }
}

// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *Connection) Request(name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
    c.wLock.Lock()
    rc := make(chan interface{})
    c.awaiting[c.nextId] = rc
    c.mwriter.writeHeader(header{0, c.nextId, messageTypeRequest})
    c.mwriter.writeRequest(request{name, offset, size, hash})
    c.flush()
    c.nextId = (c.nextId + 1) & 0xfff
    c.wLock.Unlock()

    // Reading something that might be nil from a possibly closed channel...
    // r0<~

    var data []byte
    i, ok := <-rc
    if ok {
        if d, ok := i.([]byte); ok {
            data = d
        }
func (c *Connection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
    c.Lock()
    if c.closed {
        c.Unlock()
        return nil, ErrClosed
    }

    var err error
    i, ok = <-rc
    if ok {
        if e, ok := i.(error); ok {
            err = e
        }
    rc := make(chan asyncResult)
    c.awaiting[c.nextID] = rc
    header{0, c.nextID, messageTypeRequest}.encodeXDR(c.xw)
    _, err := RequestMessage{repo, name, uint64(offset), uint32(size)}.encodeXDR(c.xw)
    if err == nil {
        err = c.flush()
    }
    return data, err
    if err != nil {
        c.Unlock()
        c.close(err)
        return nil, err
    }
    c.nextID = (c.nextID + 1) & 0xfff
    c.Unlock()

    res, ok := <-rc
    if !ok {
        return nil, ErrClosed
    }
    return res.val, res.err
}

func (c *Connection) Ping() bool {
    c.wLock.Lock()
    rc := make(chan interface{})
    c.awaiting[c.nextId] = rc
    c.mwriter.writeHeader(header{0, c.nextId, messageTypePing})
    c.flush()
    c.nextId = (c.nextId + 1) & 0xfff
    c.wLock.Unlock()
func (c *Connection) ping() bool {
    c.Lock()
    if c.closed {
        c.Unlock()
        return false
    }
    rc := make(chan asyncResult, 1)
    c.awaiting[c.nextID] = rc
    header{0, c.nextID, messageTypePing}.encodeXDR(c.xw)
    err := c.flush()
    if err != nil {
        c.Unlock()
        c.close(err)
        return false
    } else if c.xw.Error() != nil {
        c.Unlock()
        c.close(c.xw.Error())
        return false
    }
    c.nextID = (c.nextID + 1) & 0xfff
    c.Unlock()

    _, ok := <-rc
    return ok
}

func (c *Connection) Stop() {
    res, ok := <-rc
    return ok && res.err == nil
}

type flusher interface {
    Flush() error
}

func (c *Connection) flush() {
func (c *Connection) flush() error {
    if f, ok := c.writer.(flusher); ok {
        f.Flush()
        return f.Flush()
    }
    return nil
}

func (c *Connection) close() {
    c.closedLock.Lock()
func (c *Connection) close(err error) {
    c.Lock()
    if c.closed {
        c.Unlock()
        return
    }
    c.closed = true
    c.closedLock.Unlock()
    c.wLock.Lock()
    for _, ch := range c.awaiting {
        close(ch)
    }
    c.awaiting = nil
    c.wLock.Unlock()
    c.receiver.Close(c.ID)
    c.Unlock()

    c.receiver.Close(c.id, err)
}

func (c *Connection) isClosed() bool {
    c.closedLock.RLock()
    defer c.closedLock.RUnlock()
    c.RLock()
    defer c.RUnlock()
    return c.closed
}

func (c *Connection) readerLoop() {
    for !c.isClosed() {
        hdr := c.mreader.readHeader()
        if c.mreader.err != nil {
            c.close()
            break
loop:
    for {
        var hdr header
        hdr.decodeXDR(c.xr)
        if c.xr.Error() != nil {
            c.close(c.xr.Error())
            break loop
        }
        if hdr.version != 0 {
            c.close(fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version))
            break loop
        }

        switch hdr.msgType {
        case messageTypeIndex:
            files := c.mreader.readIndex()
            if c.mreader.err != nil {
                c.close()
            var im IndexMessage
            im.decodeXDR(c.xr)
            if c.xr.Error() != nil {
                c.close(c.xr.Error())
                break loop
            } else {
                c.receiver.Index(c.ID, files)
                c.receiver.Index(c.id, im.Files)
            }
            c.Lock()
            c.hasRecvdIndex = true
            c.Unlock()

        case messageTypeIndexUpdate:
            var im IndexMessage
            im.decodeXDR(c.xr)
            if c.xr.Error() != nil {
                c.close(c.xr.Error())
                break loop
            } else {
                c.receiver.IndexUpdate(c.id, im.Files)
            }

        case messageTypeRequest:
            c.processRequest(hdr.msgID)
            var req RequestMessage
            req.decodeXDR(c.xr)
            if c.xr.Error() != nil {
                c.close(c.xr.Error())
                break loop
            }
            go c.processRequest(hdr.msgID, req)

        case messageTypeResponse:
            data := c.mreader.readResponse()
            data := c.xr.ReadBytesMax(256 * 1024) // Sufficiently larger than max expected block size

            if c.mreader.err != nil {
                c.close()
            } else {
                c.wLock.RLock()
                rc, ok := c.awaiting[hdr.msgID]
                c.wLock.RUnlock()
            if c.xr.Error() != nil {
                c.close(c.xr.Error())
                break loop
            }

                if ok {
                    rc <- data
                    rc <- c.mreader.err
                    delete(c.awaiting, hdr.msgID)
                    close(rc)
                }
            c.Lock()
            rc, ok := c.awaiting[hdr.msgID]
            delete(c.awaiting, hdr.msgID)
            c.Unlock()

            if ok {
                rc <- asyncResult{data, c.xr.Error()}
                close(rc)
            }

        case messageTypePing:
            c.wLock.Lock()
            c.mwriter.writeUint32(encodeHeader(header{0, hdr.msgID, messageTypePong}))
            c.flush()
            c.wLock.Unlock()
            c.Lock()
            header{0, hdr.msgID, messageTypePong}.encodeXDR(c.xw)
            err := c.flush()
            c.Unlock()
            if err != nil {
                c.close(err)
                break loop
            } else if c.xw.Error() != nil {
                c.close(c.xw.Error())
                break loop
            }

        case messageTypePong:
            c.wLock.Lock()
            if rc, ok := c.awaiting[hdr.msgID]; ok {
                rc <- true
            c.RLock()
            rc, ok := c.awaiting[hdr.msgID]
            c.RUnlock()

            if ok {
                rc <- asyncResult{}
                close(rc)

                c.Lock()
                delete(c.awaiting, hdr.msgID)
                c.Unlock()
            }
            c.wLock.Unlock()

        case messageTypeOptions:
            var om OptionsMessage
            om.decodeXDR(c.xr)
            if c.xr.Error() != nil {
                c.close(c.xr.Error())
                break loop
            }

            c.optionsLock.Lock()
            c.peerOptions = make(map[string]string, len(om.Options))
            for _, opt := range om.Options {
                c.peerOptions[opt.Key] = opt.Value
            }
            c.optionsLock.Unlock()

            if mh, rh := c.myOptions["clusterHash"], c.peerOptions["clusterHash"]; len(mh) > 0 && len(rh) > 0 && mh != rh {
                c.close(ErrClusterHash)
                break loop
            }

        default:
            c.close(fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType))
            break loop
        }
    }
}

func (c *Connection) processRequest(msgID int) {
    req := c.mreader.readRequest()
    if c.mreader.err != nil {
        c.close()
    } else {
        go func() {
            data, _ := c.receiver.Request(c.ID, req.name, req.offset, req.size, req.hash)
            c.wLock.Lock()
            c.mwriter.writeUint32(encodeHeader(header{0, msgID, messageTypeResponse}))
            c.mwriter.writeResponse(data)
            buffers.Put(data)
            c.flush()
            c.wLock.Unlock()
        }()
func (c *Connection) processRequest(msgID int, req RequestMessage) {
    data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))

    c.Lock()
    header{0, msgID, messageTypeResponse}.encodeXDR(c.xw)
    _, err := c.xw.WriteBytes(data)
    if err == nil {
        err = c.flush()
    }
    c.Unlock()

    buffers.Put(data)
    if err != nil {
        c.close(err)
    }
}

func (c *Connection) pingerLoop() {
    var rc = make(chan bool, 1)
    for {
        time.Sleep(pingIdleTime / 2)

        c.RLock()
        ready := c.hasRecvdIndex && c.hasSentIndex
        c.RUnlock()

        if ready {
            go func() {
                rc <- c.ping()
            }()
            select {
            case ok := <-rc:
                if !ok {
                    c.close(fmt.Errorf("ping failure"))
                }
            case <-time.After(pingTimeout):
                c.close(fmt.Errorf("ping timeout"))
            }
        }
    }
}

type Statistics struct {
    At            time.Time
    InBytesTotal  int
    OutBytesTotal int
}

func (c *Connection) Statistics() Statistics {
    c.statisticsLock.Lock()
    defer c.statisticsLock.Unlock()

    stats := Statistics{
        At:            time.Now(),
        InBytesTotal:  int(c.xr.Tot()),
        OutBytesTotal: int(c.xw.Tot()),
    }

    return stats
}

func (c *Connection) Option(key string) string {
    c.optionsLock.Lock()
    defer c.optionsLock.Unlock()
    return c.peerOptions[key]
}
@@ -1,8 +1,11 @@
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestHeaderFunctions(t *testing.T) {
|
||||
@@ -19,19 +22,176 @@ func TestHeaderFunctions(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPad(t *testing.T) {
|
||||
tests := [][]int{
|
||||
{0, 0},
|
||||
{1, 3},
|
||||
{2, 2},
|
||||
{3, 1},
|
||||
{4, 0},
|
||||
{32, 0},
|
||||
{33, 3},
|
||||
func TestPing(t *testing.T) {
|
||||
ar, aw := io.Pipe()
|
||||
br, bw := io.Pipe()
|
||||
|
||||
c0 := NewConnection("c0", ar, bw, nil, nil)
|
||||
c1 := NewConnection("c1", br, aw, nil, nil)
|
||||
|
||||
if ok := c0.ping(); !ok {
|
||||
t.Error("c0 ping failed")
|
||||
}
|
||||
for _, tc := range tests {
|
||||
if p := pad(tc[0]); p != tc[1] {
|
||||
t.Errorf("Incorrect padding for %d bytes, %d != %d", tc[0], p, tc[1])
|
||||
if ok := c1.ping(); !ok {
|
||||
t.Error("c1 ping failed")
|
||||
}
|
||||
}

func TestPingErr(t *testing.T) {
	e := errors.New("something broke")

	for i := 0; i < 12; i++ {
		for j := 0; j < 12; j++ {
			m0 := &TestModel{}
			m1 := &TestModel{}

			ar, aw := io.Pipe()
			br, bw := io.Pipe()
			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}

			c0 := NewConnection("c0", ar, ebw, m0, nil)
			NewConnection("c1", br, eaw, m1, nil)

			res := c0.ping()
			if (i < 4 || j < 4) && res {
				t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
			} else if (i >= 8 && j >= 8) && !res {
				t.Errorf("Unexpected ping fail; i=%d, j=%d", i, j)
			}
		}
	}
}

func TestRequestResponseErr(t *testing.T) {
	e := errors.New("something broke")

	var pass bool
	for i := 0; i < 48; i++ {
		for j := 0; j < 38; j++ {
			m0 := &TestModel{data: []byte("response data")}
			m1 := &TestModel{}

			ar, aw := io.Pipe()
			br, bw := io.Pipe()
			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}

			NewConnection("c0", ar, ebw, m0, nil)
			c1 := NewConnection("c1", br, eaw, m1, nil)

			d, err := c1.Request("default", "tn", 1234, 5678)
			if err == e || err == ErrClosed {
				t.Logf("Error at %d+%d bytes", i, j)
				if !m1.closed {
					t.Error("c1 not closed")
				}
				time.Sleep(1 * time.Millisecond)
				if !m0.closed {
					t.Error("c0 not closed")
				}
				continue
			}
			if err != nil {
				t.Error(err)
			}
			if string(d) != "response data" {
				t.Errorf("Incorrect response data %q", string(d))
			}
			if m0.repo != "default" {
				t.Errorf("Incorrect repo %q", m0.repo)
			}
			if m0.name != "tn" {
				t.Errorf("Incorrect name %q", m0.name)
			}
			if m0.offset != 1234 {
				t.Errorf("Incorrect offset %d", m0.offset)
			}
			if m0.size != 5678 {
				t.Errorf("Incorrect size %d", m0.size)
			}
			t.Logf("Pass at %d+%d bytes", i, j)
			pass = true
		}
	}
	if !pass {
		t.Error("Never passed")
	}
}

func TestVersionErr(t *testing.T) {
	m0 := &TestModel{}
	m1 := &TestModel{}

	ar, aw := io.Pipe()
	br, bw := io.Pipe()

	c0 := NewConnection("c0", ar, bw, m0, nil)
	NewConnection("c1", br, aw, m1, nil)

	c0.xw.WriteUint32(encodeHeader(header{
		version: 2,
		msgID:   0,
		msgType: 0,
	}))
	c0.flush()

	if !m1.closed {
		t.Error("Connection should close due to unknown version")
	}
}

func TestTypeErr(t *testing.T) {
	m0 := &TestModel{}
	m1 := &TestModel{}

	ar, aw := io.Pipe()
	br, bw := io.Pipe()

	c0 := NewConnection("c0", ar, bw, m0, nil)
	NewConnection("c1", br, aw, m1, nil)

	c0.xw.WriteUint32(encodeHeader(header{
		version: 0,
		msgID:   0,
		msgType: 42,
	}))
	c0.flush()

	if !m1.closed {
		t.Error("Connection should close due to unknown message type")
	}
}

func TestClose(t *testing.T) {
	m0 := &TestModel{}
	m1 := &TestModel{}

	ar, aw := io.Pipe()
	br, bw := io.Pipe()

	c0 := NewConnection("c0", ar, bw, m0, nil)
	NewConnection("c1", br, aw, m1, nil)

	c0.close(nil)

	ok := c0.isClosed()
	if !ok {
		t.Fatal("Connection should be closed")
	}

	// None of these should panic, some should return an error

	ok = c0.ping()
	if ok {
		t.Error("Ping should not return true")
	}

	c0.Index("default", nil)
	c0.Index("default", nil)

	_, err := c0.Request("default", "foo", 0, 0)
	if err == nil {
		t.Error("Request should return an error")
	}
}
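
The error-injection tests above lean on an ErrPipe helper whose definition lives outside this excerpt. A minimal sketch of how such a writer could behave, assuming it forwards to the wrapped PipeWriter until max bytes have been written and then fails with err (names and details here are guesses, not the package's actual code):

	type errPipeSketch struct {
		pw      *io.PipeWriter
		written int
		max     int
		err     error
	}

	func (e *errPipeSketch) Write(bs []byte) (int, error) {
		if e.written+len(bs) > e.max {
			e.pw.CloseWithError(e.err) // the reading side sees err as well
			return 0, e.err
		}
		n, err := e.pw.Write(bs)
		e.written += n
		return n, err
	}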

@@ -1,11 +0,0 @@
[repository]
dir = /Users/jb/Synced

# The nodes section lists the nodes that make up the cluster. The format is
# <certificate id> = <space separated list of addresses>
# The special address "dynamic" means that outbound connections will not be
# attempted, but inbound connections are accepted.

[nodes]
ITZXTZ7A32DWV3NLNR5W4M3CHVBW56NA = 172.16.32.1:22000 192.23.34.56:22000
CUGAE43Y5N64CRJU26YFH6MTWPSBLSUL = dynamic

util.go
@@ -1,7 +0,0 @@
package main

import "time"

func timing(name string, t0 time.Time) {
	debugf("%s: %.02f ms", name, time.Since(t0).Seconds()*1000)
}
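
Taking t0 as a parameter makes the helper pair naturally with defer, since defer evaluates its arguments at the point of the statement; a typical call site (scanRepository is illustrative, not a function from this codebase):

	func scanRepository() {
		defer timing("scanRepository", time.Now()) // logs elapsed ms on return
		// ... expensive work ...
	}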

walk.go
@@ -1,117 +0,0 @@
package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
)

const BlockSize = 128 * 1024

type File struct {
	Name     string
	Flags    uint32
	Modified int64
	Blocks   BlockList
}

func (f File) Dump() {
	fmt.Printf("%s\n", f.Name)
	for _, b := range f.Blocks {
		fmt.Printf(" %dB @ %d: %x\n", b.Length, b.Offset, b.Hash)
	}
	fmt.Println()
}

func isTempName(name string) bool {
	return strings.HasPrefix(path.Base(name), ".syncthing.")
}

func tempName(name string, modified int64) string {
	tdir := path.Dir(name)
	tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
	return path.Join(tdir, tname)
}
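
// Editorial worked example, not original code: the two helpers above are
// designed so a temp file can be recognized by name alone.
//
//	tempName("music/song.mp3", 1394479750)
//	// -> "music/.syncthing.song.mp3.1394479750"
//	isTempName("music/.syncthing.song.mp3.1394479750")
//	// -> true
//
// CleanTempFiles below relies on exactly this property to sweep up the
// leftovers of interrupted syncs.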

func genWalker(base string, res *[]File, model *Model) filepath.WalkFunc {
	return func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if isTempName(p) {
			return nil
		}

		if info.Mode()&os.ModeType == 0 {
			rn, err := filepath.Rel(base, p)
			if err != nil {
				return err
			}

			fi, err := os.Stat(p)
			if err != nil {
				return err
			}
			modified := fi.ModTime().Unix()

			hf, ok := model.LocalFile(rn)
			if ok && hf.Modified == modified {
				// No change
				*res = append(*res, hf)
			} else {
				if traceFile {
					debugf("FILE: Hash %q", p)
				}
				fd, err := os.Open(p)
				if err != nil {
					return err
				}
				defer fd.Close()

				blocks, err := Blocks(fd, BlockSize)
				if err != nil {
					return err
				}
				f := File{
					Name:     rn,
					Flags:    uint32(info.Mode()),
					Modified: modified,
					Blocks:   blocks,
				}
				*res = append(*res, f)
			}
		}

		return nil
	}
}

func Walk(dir string, model *Model) []File {
	var files []File
	fn := genWalker(dir, &files, model)
	err := filepath.Walk(dir, fn)
	if err != nil {
		warnln(err)
	}
	return files
}

func cleanTempFile(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.Mode()&os.ModeType == 0 && isTempName(path) {
		if traceFile {
			debugf("FILE: Remove %q", path)
		}
		os.Remove(path)
	}
	return nil
}

func CleanTempFiles(dir string) {
	filepath.Walk(dir, cleanTempFile)
}

walk_test.go
@@ -1,42 +0,0 @@
package main

import (
	"fmt"
	"testing"
	"time"
)

var testdata = []struct {
	name string
	size int
	hash string
}{
	{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
	{"baz/quux", 9, "c154d94e94ba7298a6adb0523afe34d1b6a581d6b893a763d45ddc5e209dcb83"},
	{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
}

func TestWalk(t *testing.T) {
	m := new(Model)
	files := Walk("testdata", m)

	if l1, l2 := len(files), len(testdata); l1 != l2 {
		t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)
	}

	for i := range testdata {
		if n1, n2 := testdata[i].name, files[i].Name; n1 != n2 {
			t.Errorf("Incorrect file name %q != %q for case #%d", n1, n2, i)
		}

		if h1, h2 := fmt.Sprintf("%x", files[i].Blocks[0].Hash), testdata[i].hash; h1 != h2 {
			t.Errorf("Incorrect hash %q != %q for case #%d", h1, h2, i)
		}

		t0 := time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
		t1 := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
		if mt := files[i].Modified; mt < t0 || mt > t1 {
			t.Errorf("Unrealistic modtime %d for test %d", mt, i)
		}
	}
}

xdr/cmd/coder/main.go
@@ -0,0 +1,393 @@
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"os"
	"regexp"
	"strconv"
	"strings"
	"text/template"
)

var output string

type field struct {
	Name      string
	IsBasic   bool
	IsSlice   bool
	IsMap     bool
	FieldType string
	KeyType   string
	Encoder   string
	Convert   string
	Max       int
}

var headerTpl = template.Must(template.New("header").Parse(`package {{.Package}}

import (
	"bytes"
	"io"

	"github.com/calmh/syncthing/xdr"
)
`))

var encodeTpl = template.Must(template.New("encoder").Parse(`
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}//+n

func (o {{.TypeName}}) MarshalXDR() []byte {
	var buf bytes.Buffer
	var xw = xdr.NewWriter(&buf)
	o.encodeXDR(xw)
	return buf.Bytes()
}//+n

func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
	{{range $field := .Fields}}
	{{if not $field.IsSlice}}
	{{if ne $field.Convert ""}}
	xw.Write{{$field.Encoder}}({{$field.Convert}}(o.{{$field.Name}}))
	{{else if $field.IsBasic}}
	{{if ge $field.Max 1}}
	if len(o.{{$field.Name}}) > {{$field.Max}} {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	{{end}}
	xw.Write{{$field.Encoder}}(o.{{$field.Name}})
	{{else}}
	o.{{$field.Name}}.encodeXDR(xw)
	{{end}}
	{{else}}
	{{if ge $field.Max 1}}
	if len(o.{{$field.Name}}) > {{$field.Max}} {
		return xw.Tot(), xdr.ErrElementSizeExceeded
	}
	{{end}}
	xw.WriteUint32(uint32(len(o.{{$field.Name}})))
	for i := range o.{{$field.Name}} {
		{{if ne $field.Convert ""}}
		xw.Write{{$field.Encoder}}({{$field.Convert}}(o.{{$field.Name}}[i]))
		{{else if $field.IsBasic}}
		xw.Write{{$field.Encoder}}(o.{{$field.Name}}[i])
		{{else}}
		o.{{$field.Name}}[i].encodeXDR(xw)
		{{end}}
	}
	{{end}}
	{{end}}
	return xw.Tot(), xw.Error()
}//+n

func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}//+n

func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
	var buf = bytes.NewBuffer(bs)
	var xr = xdr.NewReader(buf)
	return o.decodeXDR(xr)
}//+n

func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
	{{range $field := .Fields}}
	{{if not $field.IsSlice}}
	{{if ne $field.Convert ""}}
	o.{{$field.Name}} = {{$field.FieldType}}(xr.Read{{$field.Encoder}}())
	{{else if $field.IsBasic}}
	{{if ge $field.Max 1}}
	o.{{$field.Name}} = xr.Read{{$field.Encoder}}Max({{$field.Max}})
	{{else}}
	o.{{$field.Name}} = xr.Read{{$field.Encoder}}()
	{{end}}
	{{else}}
	(&o.{{$field.Name}}).decodeXDR(xr)
	{{end}}
	{{else}}
	_{{$field.Name}}Size := int(xr.ReadUint32())
	{{if ge $field.Max 1}}
	if _{{$field.Name}}Size > {{$field.Max}} {
		return xdr.ErrElementSizeExceeded
	}
	{{end}}
	o.{{$field.Name}} = make([]{{$field.FieldType}}, _{{$field.Name}}Size)
	for i := range o.{{$field.Name}} {
		{{if ne $field.Convert ""}}
		o.{{$field.Name}}[i] = {{$field.FieldType}}(xr.Read{{$field.Encoder}}())
		{{else if $field.IsBasic}}
		o.{{$field.Name}}[i] = xr.Read{{$field.Encoder}}()
		{{else}}
		(&o.{{$field.Name}}[i]).decodeXDR(xr)
		{{end}}
	}
	{{end}}
	{{end}}
	return xr.Error()
}`))

var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)

type typeSet struct {
	Type    string
	Encoder string
}

var xdrEncoders = map[string]typeSet{
	"int16":  typeSet{"uint16", "Uint16"},
	"uint16": typeSet{"", "Uint16"},
	"int32":  typeSet{"uint32", "Uint32"},
	"uint32": typeSet{"", "Uint32"},
	"int64":  typeSet{"uint64", "Uint64"},
	"uint64": typeSet{"", "Uint64"},
	"int":    typeSet{"uint64", "Uint64"},
	"string": typeSet{"", "String"},
	"[]byte": typeSet{"", "Bytes"},
	"bool":   typeSet{"", "Bool"},
}

func handleStruct(name string, t *ast.StructType) {
	var fs []field
	for _, sf := range t.Fields.List {
		if len(sf.Names) == 0 {
			// We don't handle anonymous fields
			continue
		}

		fn := sf.Names[0].Name
		var max = 0
		if sf.Comment != nil {
			c := sf.Comment.List[0].Text
			if m := maxRe.FindStringSubmatch(c); m != nil {
				max, _ = strconv.Atoi(m[1])
			}
		}

		var f field
		switch ft := sf.Type.(type) {
		case *ast.Ident:
			tn := ft.Name
			if enc, ok := xdrEncoders[tn]; ok {
				f = field{
					Name:      fn,
					IsBasic:   true,
					FieldType: tn,
					Encoder:   enc.Encoder,
					Convert:   enc.Type,
					Max:       max,
				}
			} else {
				f = field{
					Name:      fn,
					IsBasic:   false,
					FieldType: tn,
					Max:       max,
				}
			}

		case *ast.ArrayType:
			if ft.Len != nil {
				// We don't handle arrays
				continue
			}

			tn := ft.Elt.(*ast.Ident).Name
			if enc, ok := xdrEncoders["[]"+tn]; ok {
				f = field{
					Name:      fn,
					IsBasic:   true,
					FieldType: tn,
					Encoder:   enc.Encoder,
					Convert:   enc.Type,
					Max:       max,
				}
			} else if enc, ok := xdrEncoders[tn]; ok {
				f = field{
					Name:      fn,
					IsBasic:   true,
					IsSlice:   true,
					FieldType: tn,
					Encoder:   enc.Encoder,
					Convert:   enc.Type,
					Max:       max,
				}
			} else {
				f = field{
					Name:      fn,
					IsBasic:   false,
					IsSlice:   true,
					FieldType: tn,
					Max:       max,
				}
			}
		}

		fs = append(fs, f)
	}

	switch output {
	case "code":
		generateCode(name, fs)
	case "diagram":
		generateDiagram(name, fs)
	case "xdr":
		generateXdr(name, fs)
	}
}
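
// Editorial aside, not original code: handleStruct picks the size limit out
// of a trailing field comment via maxRe, so a hypothetical input such as
//
//	type Announce struct {
//		Magic uint32
//		ID    string // max:64
//	}
//
// is decoded with xr.ReadStringMax(64) and refuses to encode once
// len(o.ID) > 64, surfacing xdr.ErrElementSizeExceeded in both directions.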

func generateCode(name string, fs []field) {
	var buf bytes.Buffer
	err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
	if err != nil {
		panic(err)
	}

	bs := regexp.MustCompile(`(\s*\n)+`).ReplaceAll(buf.Bytes(), []byte("\n"))
	bs = bytes.Replace(bs, []byte("//+n"), []byte("\n"), -1)

	bs, err = format.Source(bs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bs))
}

func generateDiagram(sn string, fs []field) {
	fmt.Println(sn + " Structure:")
	fmt.Println()
	fmt.Println(" 0                   1                   2                   3")
	fmt.Println(" 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1")
	line := "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+"
	fmt.Println(line)

	for _, f := range fs {
		tn := f.FieldType
		sl := f.IsSlice

		if sl {
			fmt.Printf("| %s |\n", center("Number of "+f.Name, 61))
			fmt.Println(line)
		}
		switch tn {
		case "uint16":
			fmt.Printf("| %s | %s |\n", center(f.Name, 29), center("0x0000", 29))
			fmt.Println(line)
		case "uint32":
			fmt.Printf("| %s |\n", center(f.Name, 61))
			fmt.Println(line)
		case "int64", "uint64":
			fmt.Printf("| %-61s |\n", "")
			fmt.Printf("+ %s +\n", center(f.Name+" (64 bits)", 61))
			fmt.Printf("| %-61s |\n", "")
			fmt.Println(line)
		case "string", "byte": // XXX We assume slice of byte!
			fmt.Printf("| %s |\n", center("Length of "+f.Name, 61))
			fmt.Println(line)
			fmt.Printf("/ %61s /\n", "")
			fmt.Printf("\\ %s \\\n", center(f.Name+" (variable length)", 61))
			fmt.Printf("/ %61s /\n", "")
			fmt.Println(line)
		default:
			if sl {
				tn = "Zero or more " + tn + " Structures"
				fmt.Printf("/ %s /\n", center("", 61))
				fmt.Printf("\\ %s \\\n", center(tn, 61))
				fmt.Printf("/ %s /\n", center("", 61))
			} else {
				fmt.Printf("| %s |\n", center(tn, 61))
			}
			fmt.Println(line)
		}
	}
	fmt.Println()
	fmt.Println()
}

func generateXdr(sn string, fs []field) {
	fmt.Printf("struct %s {\n", sn)

	for _, f := range fs {
		tn := f.FieldType
		fn := f.Name
		suf := ""
		if f.IsSlice {
			suf = "<>"
		}

		switch tn {
		case "uint16":
			fmt.Printf("\tunsigned short %s%s;\n", fn, suf)
		case "uint32":
			fmt.Printf("\tunsigned int %s%s;\n", fn, suf)
		case "int64":
			fmt.Printf("\thyper %s%s;\n", fn, suf)
		case "uint64":
			fmt.Printf("\tunsigned hyper %s%s;\n", fn, suf)
		case "string":
			fmt.Printf("\tstring %s<>;\n", fn)
		case "byte":
			fmt.Printf("\topaque %s<>;\n", fn)
		default:
			fmt.Printf("\t%s %s%s;\n", tn, fn, suf)
		}
	}
	fmt.Println("}")
	fmt.Println()
}

func center(s string, w int) string {
	w -= len(s)
	l := w / 2
	r := l
	if l+r < w {
		r++
	}
	return strings.Repeat(" ", l) + s + strings.Repeat(" ", r)
}

func inspector(fset *token.FileSet) func(ast.Node) bool {
	return func(n ast.Node) bool {
		switch n := n.(type) {
		case *ast.TypeSpec:
			switch t := n.Type.(type) {
			case *ast.StructType:
				name := n.Name.Name
				handleStruct(name, t)
			}
			return false
		default:
			return true
		}
	}
}

func main() {
	flag.StringVar(&output, "output", "code", "code,xdr,diagram")
	flag.Parse()
	fname := flag.Arg(0)

	// Create the AST by parsing src.
	fset := token.NewFileSet() // positions are relative to fset
	f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	//ast.Print(fset, f)

	if output == "code" {
		headerTpl.Execute(os.Stdout, map[string]string{"Package": f.Name.Name})
	}

	i := inspector(fset)
	ast.Inspect(f, i)
}
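
To make the generator concrete, given a hypothetical input file msg.go containing

	type Test struct {
		ID   uint32
		Name string // max:64
		Data []byte
	}

invoking it as "go run xdr/cmd/coder/main.go -output xdr msg.go" would, following generateXdr above, print

	struct Test {
		unsigned int ID;
		string Name<>;
		opaque Data<>;
	}

while the default -output code mode emits the EncodeXDR/MarshalXDR/DecodeXDR methods from encodeTpl, and -output diagram draws the RFC-style box layout of the wire format.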

xdr/reader.go
@@ -0,0 +1,99 @@
package xdr

import (
	"errors"
	"io"
)

var ErrElementSizeExceeded = errors.New("element size exceeded")

type Reader struct {
	r   io.Reader
	tot int
	err error
	b   [8]byte
}

func NewReader(r io.Reader) *Reader {
	return &Reader{
		r: r,
	}
}

func (r *Reader) ReadString() string {
	return string(r.ReadBytes())
}

func (r *Reader) ReadStringMax(max int) string {
	return string(r.ReadBytesMax(max))
}

func (r *Reader) ReadBytes() []byte {
	return r.ReadBytesInto(nil)
}

func (r *Reader) ReadBytesMax(max int) []byte {
	return r.ReadBytesMaxInto(max, nil)
}

func (r *Reader) ReadBytesInto(dst []byte) []byte {
	return r.ReadBytesMaxInto(0, dst)
}

func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
	if r.err != nil {
		return nil
	}
	l := int(r.ReadUint32())
	if r.err != nil {
		return nil
	}
	if max > 0 && l > max {
		r.err = ErrElementSizeExceeded
		return nil
	}
	if l+pad(l) > len(dst) {
		dst = make([]byte, l+pad(l))
	} else {
		dst = dst[:l+pad(l)]
	}
	_, r.err = io.ReadFull(r.r, dst)
	r.tot += l + pad(l)
	return dst[:l]
}

func (r *Reader) ReadUint16() uint16 {
	if r.err != nil {
		return 0
	}
	_, r.err = io.ReadFull(r.r, r.b[:4])
	r.tot += 4
	return uint16(r.b[1]) | uint16(r.b[0])<<8
}

func (r *Reader) ReadUint32() uint32 {
	if r.err != nil {
		return 0
	}
	_, r.err = io.ReadFull(r.r, r.b[:4])
	r.tot += 4
	return uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24
}

func (r *Reader) ReadUint64() uint64 {
	if r.err != nil {
		return 0
	}
	_, r.err = io.ReadFull(r.r, r.b[:8])
	r.tot += 8
	return uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
		uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56
}

func (r *Reader) Tot() int {
	return r.tot
}

func (r *Reader) Error() error {
	return r.err
}
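
One property worth calling out: the Reader is error-sticky. After the first failed read, every later call returns a zero value, so callers can chain several reads and consult Error() once at the end. An illustrative sketch (readHandshake and src are hypothetical, not part of the package):

	func readHandshake(src io.Reader) (name string, size uint64, err error) {
		r := NewReader(src)
		name = r.ReadStringMax(64) // bounded, guards against hostile lengths
		size = r.ReadUint64()
		return name, size, r.Error() // first error hit by either read, if any
	}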

xdr/writer.go
@@ -0,0 +1,110 @@
package xdr

import "io"

func pad(l int) int {
	d := l % 4
	if d == 0 {
		return 0
	}
	return 4 - d
}

var padBytes = []byte{0, 0, 0}

type Writer struct {
	w   io.Writer
	tot int
	err error
	b   [8]byte
}

func NewWriter(w io.Writer) *Writer {
	return &Writer{
		w: w,
	}
}

func (w *Writer) WriteString(s string) (int, error) {
	return w.WriteBytes([]byte(s))
}

func (w *Writer) WriteBytes(bs []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	w.WriteUint32(uint32(len(bs)))
	if w.err != nil {
		return 0, w.err
	}

	var l, n int
	n, w.err = w.w.Write(bs)
	l += n

	if p := pad(len(bs)); w.err == nil && p > 0 {
		n, w.err = w.w.Write(padBytes[:p])
		l += n
	}

	w.tot += l
	return l, w.err
}
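
// Editorial worked example, not original code: WriteBytes length-prefixes the
// payload and pads it to a 32-bit boundary, so a 5-byte payload costs
// 4 (length) + 5 (data) + pad(5) == 3 (zeros) = 12 bytes on the wire.
//
//	var buf bytes.Buffer
//	w := NewWriter(&buf)
//	w.WriteBytes([]byte("hello"))
//	fmt.Println(buf.Len()) // prints 12 (assumes bytes and fmt are imported)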

func (w *Writer) WriteUint16(v uint16) (int, error) {
	if w.err != nil {
		return 0, w.err
	}
	w.b[0] = byte(v >> 8)
	w.b[1] = byte(v)
	w.b[2] = 0
	w.b[3] = 0

	var l int
	l, w.err = w.w.Write(w.b[:4])
	w.tot += l
	return l, w.err
}

func (w *Writer) WriteUint32(v uint32) (int, error) {
	if w.err != nil {
		return 0, w.err
	}
	w.b[0] = byte(v >> 24)
	w.b[1] = byte(v >> 16)
	w.b[2] = byte(v >> 8)
	w.b[3] = byte(v)

	var l int
	l, w.err = w.w.Write(w.b[:4])
	w.tot += l
	return l, w.err
}

func (w *Writer) WriteUint64(v uint64) (int, error) {
	if w.err != nil {
		return 0, w.err
	}
	w.b[0] = byte(v >> 56)
	w.b[1] = byte(v >> 48)
	w.b[2] = byte(v >> 40)
	w.b[3] = byte(v >> 32)
	w.b[4] = byte(v >> 24)
	w.b[5] = byte(v >> 16)
	w.b[6] = byte(v >> 8)
	w.b[7] = byte(v)

	var l int
	l, w.err = w.w.Write(w.b[:8])
	w.tot += l
	return l, w.err
}

func (w *Writer) Tot() int {
	return w.tot
}

func (w *Writer) Error() error {
	return w.err
}

xdr/xdr_test.go
@@ -0,0 +1,57 @@
package xdr

import (
	"bytes"
	"testing"
	"testing/quick"
)

func TestPad(t *testing.T) {
	tests := [][]int{
		{0, 0},
		{1, 3},
		{2, 2},
		{3, 1},
		{4, 0},
		{32, 0},
		{33, 3},
	}
	for _, tc := range tests {
		if p := pad(tc[0]); p != tc[1] {
			t.Errorf("Incorrect padding for %d bytes, %d != %d", tc[0], p, tc[1])
		}
	}
}

func TestBytesNil(t *testing.T) {
	fn := func(bs []byte) bool {
		var b = new(bytes.Buffer)
		var w = NewWriter(b)
		var r = NewReader(b)
		w.WriteBytes(bs)
		w.WriteBytes(bs)
		r.ReadBytes()
		res := r.ReadBytes()
		return bytes.Compare(bs, res) == 0
	}
	if err := quick.Check(fn, nil); err != nil {
		t.Error(err)
	}
}

func TestBytesGiven(t *testing.T) {
	fn := func(bs []byte) bool {
		var b = new(bytes.Buffer)
		var w = NewWriter(b)
		var r = NewReader(b)
		w.WriteBytes(bs)
		w.WriteBytes(bs)
		res := make([]byte, 12)
		res = r.ReadBytesInto(res)
		res = r.ReadBytesInto(res)
		return bytes.Compare(bs, res) == 0
	}
	if err := quick.Check(fn, nil); err != nil {
		t.Error(err)
	}
}